'''
Created on June 6, 2018
Filer Guidelines: esma32-60-254_esef_reporting_manual.pdf
Taxonomy Architecture:
Taxonomy package expected to be installed:
@author: Mark V Systems Limited
(c) Copyright 2018 Mark V Systems Limited, All rights reserved.
'''
import os, re
from collections import defaultdict
from lxml.etree import _ElementTree, _Comment, _ProcessingInstruction
from arelle import LeiUtil, ModelDocument, XbrlConst, XmlUtil
from arelle.UrlUtil import scheme
from arelle.ModelDtsObject import ModelResource
from arelle.ModelInstanceObject import ModelFact, ModelInlineFact, ModelInlineFootnote
from arelle.ModelObject import ModelObject
from arelle.ModelValue import qname
from arelle.PythonUtil import strTruncate
from arelle.ValidateXbrlCalcs import inferredDecimals, rangeValue
from arelle.XbrlConst import ixbrlAll, xhtml, link, parentChild, summationItem, dimensionDomain, domainMember
from arelle.XmlValidate import VALID
from .Const import allowedImgMimeTypes, browserMaxBase64ImageLength, mandatory, untransformableTypes
from .Dimensions import checkFilingDimensions
from .DTS import checkFilingDTS
datetimePattern = re.compile(r"\s*([0-9]{4})-([0-9]{2})-([0-9]{2})([T ]([0-9]{2}):([0-9]{2}):([0-9]{2}))?\s*")
styleIxHiddenPattern = re.compile(r"(.*[^\w]|^)-esef-ix-hidden\s*:\s*([\w.-]+).*")
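# Illustrative example (hypothetical markup, not from a real filing) of the style
# attribute this pattern matches; group(2) captures the @id of the hidden fact:
#   <span style="-esef-ix-hidden: fact1">1 234</span>   ->  group(2) == "fact1"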
def etreeIterWithDepth(node, depth=0):
yield (node, depth)
for child in node.iterchildren():
yield from etreeIterWithDepth(child, depth+1)
def dislosureSystemTypes(disclosureSystem, *args, **kwargs):
# return ((disclosure system name, variable name), ...)
return (("ESMA", "ESMAplugin"),)
def disclosureSystemConfigURL(disclosureSystem, *args, **kwargs):
return os.path.join(os.path.dirname(__file__), "config.xml")
def validateXbrlStart(val, parameters=None, *args, **kwargs):
val.validateESMAplugin = val.validateDisclosureSystem and getattr(val.disclosureSystem, "ESMAplugin", False)
if not (val.validateESMAplugin):
return
def validateXbrlFinally(val, *args, **kwargs):
if not (val.validateESMAplugin):
return
_xhtmlNs = "{{{}}}".format(xhtml)
_xhtmlNsLen = len(_xhtmlNs)
modelXbrl = val.modelXbrl
modelDocument = modelXbrl.modelDocument
_statusMsg = _("validating {0} filing rules").format(val.disclosureSystem.name)
modelXbrl.profileActivity()
modelXbrl.modelManager.showStatus(_statusMsg)
reportXmlLang = None
firstRootmostXmlLangDepth = 9999999
if modelDocument.type == ModelDocument.Type.INSTANCE:
modelXbrl.error("esma:instanceShallBeInlineXBRL",
_("RTS on ESEF requires inline XBRL instances."),
modelObject=modelXbrl)
checkFilingDimensions(val) # sets up val.primaryItems and val.domainMembers
val.hasExtensionSchema = val.hasExtensionPre = val.hasExtensionCal = val.hasExtensionDef = val.hasExtensionLbl = False
checkFilingDTS(val, modelXbrl.modelDocument, [])
modelXbrl.profileActivity("... filer DTS checks", minTimeToShow=1.0)
if not (val.hasExtensionSchema and val.hasExtensionPre and val.hasExtensionCal and val.hasExtensionDef and val.hasExtensionLbl):
missingFiles = []
if not val.hasExtensionSchema: missingFiles.append("schema file")
if not val.hasExtensionPre: missingFiles.append("presentation linkbase")
if not val.hasExtensionCal: missingFiles.append("calculation linkbase")
if not val.hasExtensionDef: missingFiles.append("definition linkbase")
if not val.hasExtensionLbl: missingFiles.append("label linkbase")
modelXbrl.warning("esma:3.1.1.extensionTaxonomyWrongFilesStructure",
_("Extension taxonomies MUST consist of at least a schema file and presentation, calculation, definition and label linkbases"
": missing %(missingFiles)s"),
modelObject=modelXbrl, missingFiles=", ".join(missingFiles))
if modelDocument.type in (ModelDocument.Type.INLINEXBRL, ModelDocument.Type.INLINEXBRLDOCUMENTSET, ModelDocument.Type.INSTANCE):
footnotesRelationshipSet = modelXbrl.relationshipSet("XBRL-footnotes")
orphanedFootnotes = set()
noLangFootnotes = set()
footnoteRoleErrors = set()
transformRegistryErrors = set()
def checkFootnote(elt, text):
if text: # a non-empty footnote must be linked to at least one fact
if not any(isinstance(rel.fromModelObject, ModelFact)
for rel in footnotesRelationshipSet.toModelObject(elt)):
orphanedFootnotes.add(elt)
if not elt.xmlLang:
noLangFootnotes.add(elt)
if elt.role != XbrlConst.footnote or not all(
rel.arcrole == XbrlConst.factFootnote and rel.linkrole == XbrlConst.defaultLinkRole
for rel in footnotesRelationshipSet.toModelObject(elt)):
footnoteRoleErrors.add(elt)
# check file name of each inline document (which might be below a top-level IXDS)
for doc in modelXbrl.urlDocs.values():
if doc.type == ModelDocument.Type.INLINEXBRL:
_baseName, _baseExt = os.path.splitext(doc.basename)
if _baseExt not in (".xhtml",):
modelXbrl.warning("esma:TBD.fileNameExtension",
_("FileName should have the extension .xhtml: %(fileName)s"),
modelObject=doc, fileName=doc.basename)
if modelDocument.type in (ModelDocument.Type.INLINEXBRL, ModelDocument.Type.INLINEXBRLDOCUMENTSET):
ixNStag = modelXbrl.modelDocument.ixNStag
ixTags = set(ixNStag + ln for ln in ("nonNumeric", "nonFraction", "references", "relationship"))
ixTextTags = set(ixNStag + ln for ln in ("nonFraction", "continuation", "footnote"))
ixExcludeTag = ixNStag + "exclude"
ixTupleTag = ixNStag + "tuple"
ixFractionTag = ixNStag + "fraction"
hiddenEltIds = {}
presentedHiddenEltIds = defaultdict(list)
eligibleForTransformHiddenFacts = []
requiredToDisplayFacts = []
requiredToDisplayFactIds = {}
firstIxdsDoc = True
for ixdsHtmlRootElt in modelXbrl.ixdsHtmlElements: # ix root elements for all ix docs in IXDS
for elt, depth in etreeIterWithDepth(ixdsHtmlRootElt):
eltTag = elt.tag
if isinstance(elt, ModelObject) and elt.namespaceURI == xhtml:
eltTag = elt.localName
if firstIxdsDoc and (not reportXmlLang or depth < firstRootmostXmlLangDepth):
xmlLang = elt.get("{http://www.w3.org/XML/1998/namespace}lang")
if xmlLang:
reportXmlLang = xmlLang
firstRootmostXmlLangDepth = depth
elif isinstance(elt, (_ElementTree, _Comment, _ProcessingInstruction)):
continue # comment or other non-parsed element
else:
eltTag = elt.tag
if eltTag.startswith(_xhtmlNs):
eltTag = eltTag[_xhtmlNsLen:]
if ((eltTag in ("object", "script")) or
(eltTag == "a" and "javascript:" in elt.get("href","")) or
(eltTag == "img" and "javascript:" in elt.get("src",""))):
modelXbrl.error("esma.2.5.1.executableCodePresent",
_("Inline XBRL documents MUST NOT contain executable code: %(element)s"),
modelObject=elt, element=eltTag)
elif eltTag == "img":
src = elt.get("src","").strip()
hasParentIxTextTag = False # check if image is in an ix text-bearing element
_ancestorElt = elt
while (_ancestorElt is not None):
if _ancestorElt.tag == ixExcludeTag: # excluded from any parent text-bearing ix element
break
if _ancestorElt.tag in ixTextTags:
hasParentIxTextTag = True
break
_ancestorElt = _ancestorElt.getparent()
if scheme(src) in ("http", "https", "ftp"):
modelXbrl.error("esma.3.5.1.inlinXbrlContainsExternalReferences",
_("Inline XBRL instance documents MUST NOT contain any reference pointing to resources outside the reporting package: %(element)s"),
modelObject=elt, element=eltTag)
if not src.startswith("data:image"):
if hasParentIxTextTag:
modelXbrl.error("esma.2.5.1.imageInIXbrlElementNotEmbedded",
_("Images appearing within an inline XBRL element MUST be embedded regardless of their size."),
modelObject=elt)
else:
# presume it to be an image file, check image contents
try:
base = elt.modelDocument.baseForElement(elt)
normalizedUri = elt.modelXbrl.modelManager.cntlr.webCache.normalizeUrl(src, base)
if not elt.modelXbrl.fileSource.isInArchive(normalizedUri):
normalizedUri = elt.modelXbrl.modelManager.cntlr.webCache.getfilename(normalizedUri)
imglen = 0
with elt.modelXbrl.fileSource.file(normalizedUri,binary=True)[0] as fh:
imglen += len(fh.read())
if imglen < browserMaxBase64ImageLength:
modelXbrl.error("esma.2.5.1.embeddedImageNotUsingBase64Encoding",
_("Images MUST be included in the XHTML document as a base64 encoded string unless their size exceeds support of browsers."),
modelObject=elt)
except IOError as err:
modelXbrl.error("esma.2.5.1.imageFileCannotBeLoaded",
_("Image file which isn't openable '%(src)s', error: %(error)s"),
modelObject=elt, src=src, error=err)
elif not any(src.startswith(m) for m in allowedImgMimeTypes):
modelXbrl.error("esma.2.5.1.embeddedImageNotUsingBase64Encoding",
_("Images MUST be included in the XHTML document as a base64 encoded string, encoding disallowed: %(src)s."),
modelObject=elt, src=src[:128])
elif eltTag == "a":
href = elt.get("href","").strip()
if scheme(href) in ("http", "https", "ftp"):
modelXbrl.error("esma.3.5.1.inlinXbrlContainsExternalReferences",
_("Inline XBRL instance documents MUST NOT contain any reference pointing to resources outside the reporting package: %(element)s"),
modelObject=elt, element=eltTag)
elif eltTag == "base" or elt.tag == "{http://www.w3.org/XML/1998/namespace}base":
modelXbrl.error("esma.2.4.2.htmlOrXmlBaseUsed",
_("The HTML <base> elements and xml:base attributes MUST NOT be used in the Inline XBRL document."),
modelObject=elt, element=eltTag)
if eltTag in ixTags and elt.get("target"):
modelXbrl.error("esma.2.5.3.targetAttributeUsed",
_("Target attribute MUST not be used: element %(localName)s, target attribute %(target)s."),
modelObject=elt, localName=elt.elementQname, target=elt.get("target"))
if eltTag == ixTupleTag:
modelXbrl.error("esma.2.4.1.tupleElementUsed",
_("The ix:tuple element MUST not be used in the Inline XBRL document."),
modelObject=elt)
if eltTag == ixFractionTag:
modelXbrl.error("esma.2.4.1.fractionElementUsed",
_("The ix:fraction element MUST not be used in the Inline XBRL document."),
modelObject=elt)
if elt.get("{http://www.w3.org/XML/1998/namespace}base") is not None:
modelXbrl.error("esma.2.4.1.xmlBaseUsed",
_("xml:base attributes MUST NOT be used in the Inline XBRL document: element %(localName)s, base attribute %(base)s."),
modelObject=elt, localName=elt.elementQname, base=elt.get("{http://www.w3.org/XML/1998/namespace}base"))
if isinstance(elt, ModelInlineFootnote):
checkFootnote(elt, elt.value)
elif isinstance(elt, ModelResource) and elt.qname == XbrlConst.qnLinkFootnote:
checkFootnote(elt, elt.value)
elif isinstance(elt, ModelInlineFact):
if elt.format is not None and elt.format.namespaceURI != 'http://www.xbrl.org/inlineXBRL/transformation/2015-02-26':
transformRegistryErrors.add(elt)
for ixHiddenElt in ixdsHtmlRootElt.iterdescendants(tag=ixNStag + "hidden"):
for tag in (ixNStag + "nonNumeric", ixNStag+"nonFraction"):
for ixElt in ixHiddenElt.iterdescendants(tag=tag):
if (getattr(ixElt, "xValid", 0) >= VALID # may not be validated
): # add future "and" conditions on elements which can be in hidden
if (ixElt.concept.baseXsdType not in untransformableTypes and
not ixElt.isNil):
eligibleForTransformHiddenFacts.append(ixElt)
elif ixElt.id is None:
requiredToDisplayFacts.append(ixElt)
if ixElt.id:
hiddenEltIds[ixElt.id] = ixElt
firstIxdsDoc = False
if eligibleForTransformHiddenFacts:
modelXbrl.warning("esma.2.4.1.transformableElementIncludedInHiddenSection",
_("The ix:hidden section of Inline XBRL document MUST not include elements eligible for transformation. "
"%(countEligible)s fact(s) were eligible for transformation: %(elements)s"),
modelObject=eligibleForTransformHiddenFacts,
countEligible=len(eligibleForTransformHiddenFacts),
elements=", ".join(sorted(set(str(f.qname) for f in eligibleForTransformHiddenFacts))))
for ixdsHtmlRootElt in modelXbrl.ixdsHtmlElements:
for ixElt in ixdsHtmlRootElt.getroottree().iterfind("//{http://www.w3.org/1999/xhtml}*[@style]"):
hiddenFactRefMatch = styleIxHiddenPattern.match(ixElt.get("style",""))
if hiddenFactRefMatch:
hiddenFactRef = hiddenFactRefMatch.group(2)
if hiddenFactRef not in hiddenEltIds:
modelXbrl.error("esma.2.4.1.esefIxHiddenStyleNotLinkingFactInHiddenSection",
_("\"-esef-ix-hidden\" style identifies @id, %(id)s of a fact that is not in ix:hidden section."),
modelObject=ixElt, id=hiddenFactRef)
else:
presentedHiddenEltIds[hiddenFactRef].append(ixElt)
for hiddenEltId, ixElt in hiddenEltIds.items():
if (hiddenEltId not in presentedHiddenEltIds and
getattr(ixElt, "xValid", 0) >= VALID and # may not be validated
(ixElt.concept.baseXsdType in untransformableTypes or ixElt.isNil)):
requiredToDisplayFacts.append(ixElt)
if requiredToDisplayFacts:
modelXbrl.warning("esma.2.4.1.factInHiddenSectionNotInReport",
_("The ix:hidden section contains %(countUnreferenced)s fact(s) whose @id is not applied on any \"-esef-ix- hidden\" style: %(elements)s"),
modelObject=requiredToDisplayFacts,
countUnreferenced=len(requiredToDisplayFacts),
elements=", ".join(sorted(set(str(f.qname) for f in requiredToDisplayFacts))))
del eligibleForTransformHiddenFacts, hiddenEltIds, presentedHiddenEltIds, requiredToDisplayFacts
elif modelDocument.type == ModelDocument.Type.INSTANCE:
for elt in modelDocument.xmlRootElement.iter():
if elt.qname == XbrlConst.qnLinkFootnote: # for now assume no private elements extend link:footnote
checkFootnote(elt, elt.stringValue)
contextsWithDisallowedOCEs = []
contextsWithDisallowedOCEcontent = []
contextsWithPeriodTime = []
contextsWithPeriodTimeZone = []
contextIdentifiers = defaultdict(list)
nonStandardTypedDimensions = defaultdict(set)
for context in modelXbrl.contexts.values():
if XmlUtil.hasChild(context, XbrlConst.xbrli, "segment"):
contextsWithDisallowedOCEs.append(context)
for segScenElt in context.iterdescendants("{http://www.xbrl.org/2003/instance}scenario"):
if isinstance(segScenElt,ModelObject):
if any(True for child in segScenElt.iterchildren()
if isinstance(child,ModelObject) and
child.tag not in ("{http://xbrl.org/2006/xbrldi}explicitMember",
"{http://xbrl.org/2006/xbrldi}typedMember")):
contextsWithDisallowedOCEcontent.append(context)
# check periods here
contextIdentifiers[context.entityIdentifier].append(context)
if contextsWithDisallowedOCEs:
modelXbrl.error("esma.2.1.3.segmentUsed",
_("xbrli:segment container MUST NOT be used in contexts: %(contextIds)s"),
modelObject=contextsWithDisallowedOCEs, contextIds=", ".join(c.id for c in contextsWithDisallowedOCEs))
if contextsWithDisallowedOCEcontent:
modelXbrl.error("esma.2.1.3.scenarioContainsNonDimensionalContent",
_("xbrli:scenario in contexts MUST NOT contain any other content than defined in XBRL Dimensions specification: %(contextIds)s"),
modelObject=contextsWithDisallowedOCEcontent, contextIds=", ".join(c.id for c in contextsWithDisallowedOCEcontent))
if len(contextIdentifiers) > 1:
modelXbrl.error("esma.2.1.4.multipleIdentifiers",
_("All entity identifiers in contexts MUST have identical content: %(contextIdentifiers)s"),
modelObject=modelXbrl, contextIdentifiers=", ".join(i[1] for i in contextIdentifiers))
for (contextScheme, contextIdentifier), contextElts in contextIdentifiers.items():
if contextScheme != "http://standards.iso.org/iso/17442":
modelXbrl.warning("esma.2.1.1.nonLEIContextScheme",
_("The scheme attribute of the xbrli:identifier element should have \"http://standards.iso.org/iso/17442\" as its content: %(scheme)s"),
modelObject=contextElts, scheme=contextScheme)
else:
leiValidity = LeiUtil.checkLei(contextIdentifier)
if leiValidity == LeiUtil.LEI_INVALID_LEXICAL:
modelXbrl.warning("esma.2.1.1.invalidIdentifierFormat",
_("The LEI context idenntifier has an invalid format: %(identifier)s"),
modelObject=contextElts, identifier=contextIdentifier)
elif leiValidity == LeiUtil.LEI_INVALID_CHECKSUM:
modelXbrl.warning("esma.2.1.1.invalidIdentifier",
_("The LEI context idenntifier has checksum error: %(identifier)s"),
modelObject=contextElts, identifier=contextIdentifier)
if contextsWithPeriodTime:
modelXbrl.warning("esma.2.1.2.periodWithTimeContent",
_("Context period startDate, endDate and instant elements should be in whole days without time: %(contextIds)s"),
modelObject=contextsWithPeriodTime, contextIds=", ".join(c.id for c in contextsWithPeriodTime))
if contextsWithPeriodTimeZone:
modelXbrl.warning("esma.2.1.2.periodWithTimeZone",
_("Context period startDate, endDate and instant elements should be in whole days without a timezone: %(contextIds)s"),
modelObject=contextsWithPeriodTimeZone, contextIds=", ".join(c.id for c in contextsWithPeriodTimeZone))
# identify unique contexts and units
mapContext = {}
mapUnit = {}
uniqueContextHashes = {}
for context in modelXbrl.contexts.values():
h = context.contextDimAwareHash
if h in uniqueContextHashes:
if context.isEqualTo(uniqueContextHashes[h]):
mapContext[context] = uniqueContextHashes[h]
else:
uniqueContextHashes[h] = context
del uniqueContextHashes
uniqueUnitHashes = {}
for unit in modelXbrl.units.values():
h = unit.hash
if h in uniqueUnitHashes:
if unit.isEqualTo(uniqueUnitHashes[h]):
mapUnit[unit] = uniqueUnitHashes[h]
else:
uniqueUnitHashes[h] = unit
del uniqueUnitHashes
reportedMandatory = set()
precisionFacts = set()
numFactsByConceptContextUnit = defaultdict(list)
textFactsByConceptContext = defaultdict(list)
footnotesRelationshipSet = modelXbrl.relationshipSet(XbrlConst.factFootnote, XbrlConst.defaultLinkRole)
noLangFacts = []
textFactsMissingReportLang = []
conceptsUsed = set()
for qn, facts in modelXbrl.factsByQname.items():
if qn in mandatory:
reportedMandatory.add(qn)
for f in facts:
if f.precision is not None:
precisionFacts.add(f)
if f.isNumeric:
numFactsByConceptContextUnit[(f.qname, mapContext.get(f.context,f.context), mapUnit.get(f.unit, f.unit))].append(f)
elif f.concept is not None and f.concept.type is not None:
if f.concept.type.isOimTextFactType:
if not f.xmlLang:
noLangFacts.append(f)
elif f.context is not None:
textFactsByConceptContext[(f.qname, mapContext.get(f.context,f.context))].append(f)
conceptsUsed.add(f.concept)
if f.context is not None:
for dim in f.context.qnameDims.values():
conceptsUsed.add(dim.dimension)
if dim.isExplicit:
conceptsUsed.add(dim.member)
elif dim.isTyped:
conceptsUsed.add(dim.typedMember)
if noLangFacts:
modelXbrl.error("esma.2.5.2.undefinedLanguageForTextFact",
_("Each tagged text fact MUST have the 'xml:lang' attribute assigned or inherited."),
modelObject=noLangFacts)
# missing report lang text facts
for fList in textFactsByConceptContext.values():
if not any(f.xmlLang == reportXmlLang for f in fList):
modelXbrl.error("esma.2.5.2.taggedTextFactOnlyInLanguagesOtherThanLanguageOfAReport",
_("Each tagged text fact MUST have the 'xml:lang' provided in at least the language of the report: %(element)s"),
modelObject=fList, element=fList[0].qname)
# 2.2.4 test
for fList in numFactsByConceptContextUnit.values():
if len(fList) > 1:
f0 = fList[0]
if any(f.isNil for f in fList):
_inConsistent = not all(f.isNil for f in fList)
elif all(inferredDecimals(f) == inferredDecimals(f0) for f in fList[1:]): # same decimals
v0 = rangeValue(f0.value)
_inConsistent = not all(rangeValue(f.value) == v0 for f in fList[1:])
else: # not all have same decimals
aMax, bMin = rangeValue(f0.value, inferredDecimals(f0))
for f in fList[1:]:
a, b = rangeValue(f.value, inferredDecimals(f))
if a > aMax: aMax = a
if b < bMin: bMin = b
_inConsistent = (bMin < aMax)
if _inConsistent:
modelXbrl.error(("esma:2.2.4.inconsistentDuplicateNumericFactInInlineXbrlDocument"),
"Inconsistent duplicate numeric facts MUST NOT appear in the content of an inline XBRL document. %(fact)s that was used more than once in contexts equivalent to %(contextID)s: values %(values)s. ",
modelObject=fList, fact=f0.qname, contextID=f0.contextID, values=", ".join(strTruncate(f.value, 128) for f in fList))
if precisionFacts:
modelXbrl.warning("esma:2.2.1.precisionAttributeUsed",
_("The accuracy of numeric facts SHOULD be defined with the 'decimals' attribute rather than the 'precision' attribute: %(elements)s."),
modelObject=precisionFacts, elements=", ".join(sorted(str(f.qname) for f in precisionFacts)))
missingElements = (mandatory - reportedMandatory)
if missingElements:
modelXbrl.error("esma:???.missingRequiredElements",
_("Required elements missing from document: %(elements)s."),
modelObject=modelXbrl, elements=", ".join(sorted(str(qn) for qn in missingElements)))
if transformRegistryErrors:
modelXbrl.warning("esma:2.2.3.transformRegistry",
_("ESMA recommends applying the latest available version of the Transformation Rules Registry marked with 'Recommendation' status for these elements: %(elements)s."),
modelObject=transformRegistryErrors,
elements=", ".join(sorted(str(fact.qname) for fact in transformRegistryErrors)))
if orphanedFootnotes:
modelXbrl.error("esma.2.3.1.unusedFootnote",
_("Non-empty footnotes must be connected to fact(s)."),
modelObject=orphanedFootnotes)
if noLangFootnotes:
modelXbrl.error("esma.2.3.2.undefinedLanguageForFootnote",
_("Each footnote MUST have the 'xml:lang' attribute whose value corresponds to the language of the text in the content of the respective footnote."),
modelObject=noLangFootnotes)
if footnoteRoleErrors:
modelXbrl.error("esma.2.3.2.nonStandardRoleForFootnote",
_("The xlink:role attribute of a link:footnote and link:footnoteLink element as well as xlink:arcrole attribute of a link:footnoteArc MUST be defined in the XBRL Specification 2.1."),
modelObject=footnoteRoleErrors)
nonStdFootnoteElts = list()
for modelLink in modelXbrl.baseSets[("XBRL-footnotes",None,None,None)]:
for elt in modelLink.iterchildren():
if isinstance(elt, (_ElementTree, _Comment, _ProcessingInstruction)):
continue # comment or other non-parsed element
if elt.namespaceURI != link or elt.localName not in ("loc", "footnote", "footnoteArc"):
nonStdFootnoteElts.append(elt)
if nonStdFootnoteElts:
modelXbrl.error("esma.2.3.2.nonStandardElementInFootnote",
_("A link:footnoteLink element MUST have no children other than link:loc, link:footnote, and link:footnoteArc."),
modelObject=nonStdFootnoteElts)
for qn in modelXbrl.qnameDimensionDefaults.values():
conceptsUsed.add(modelXbrl.qnameConcepts.get(qn))
# unused elements in linkbases
for arcroles, err in (((parentChild,), "elementsNotUsedForTaggingAppliedInPresentationLinkbase"),
((summationItem,), "elementsNotUsedForTaggingAppliedInCalculationLinkbase"),
((dimensionDomain,domainMember), "elementsNotUsedForTaggingAppliedInDefinitionLinkbase")):
lbElts = set()
for arcrole in arcroles:
for rel in modelXbrl.relationshipSet(arcrole).modelRelationships:
fr = rel.fromModelObject
to = rel.toModelObject
if arcrole in (parentChild, summationItem):
if fr is not None and not fr.isAbstract:
lbElts.add(fr)
if to is not None and not to.isAbstract:
lbElts.add(to)
elif arcrole == dimensionDomain:
if fr is not None: # dimension, always abstract
lbElts.add(fr)
if to is not None and rel.isUsable:
lbElts.add(to)
elif arcrole == domainMember:
if to is not None and rel.isUsable:
lbElts.add(to)
unreportedLbElts = lbElts - conceptsUsed
if unreportedLbElts:
modelXbrl.error("esma.3.4.6." + err,
_("All usable concepts in extension taxonomy relationships MUST be applied by tagged facts: %(elements)s."),
modelObject=unreportedLbElts, elements=", ".join(sorted((str(c.qname) for c in unreportedLbElts))))
modelXbrl.profileActivity(_statusMsg, minTimeToShow=0.0)
modelXbrl.modelManager.showStatus(None)
__pluginInfo__ = {
# Do not use _( ) in pluginInfo itself (it is applied later, after loading
'name': 'Validate ESMA',
'version': '1.2019.07',
'description': '''ESEF Reporting Manual Validations.''',
'license': 'Apache-2',
'author': 'Mark V Systems',
'copyright': '(c) Copyright 2018-19 Mark V Systems Limited, All rights reserved.',
# classes of mount points (required)
'DisclosureSystem.Types': dislosureSystemTypes,
'DisclosureSystem.ConfigURL': disclosureSystemConfigURL,
'Validate.XBRL.Start': validateXbrlStart,
'Validate.XBRL.Finally': validateXbrlFinally,
}
|
#!/usr/bin/env python
import json
import os
import six
import sys
from subprocess import Popen, PIPE
from ansible.module_utils.basic import *
DOCUMENTATION = '''
---
module: discovery_diff
short_description: Provide difference in hardware configuration
author: "Swapnil Kulkarni, @coolsvap"
'''
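# Hypothetical playbook usage (illustrative only; the parameter values are assumptions
# based on the argument_spec below, not taken from this repository):
EXAMPLES = '''
- name: Report differences in introspected hardware between nodes
  discovery_diff:
    os_tenant_name: admin
    os_username: admin
    os_password: "{{ undercloud_admin_password }}"
  register: hw_diff
'''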
def get_node_hardware_data(hw_id, upenv):
'''Read the inspector data about the given node from Swift'''
p = Popen(('swift', 'download', '--output', '-', 'ironic-inspector', hw_id),
env=upenv, stdout=PIPE, stderr=PIPE)
if p.wait() == 0:
return json.loads(p.stdout.read())
def all_equal(coll):
if len(coll) <= 1:
return True
first = coll[0]
for item in coll[1:]:
if item != first:
return False
return True
def process_nested_dict(d, prefix=None):
'''
Turn a nested dictionary into a flat one.
Example:
inspector_data = {
'memory_mb': 6000,
'system': {
'os': {
'version': 'CentOS Linux release 7.2.1511 (Core)',
}
},
'network': {
'eth0': {
'businfo': 'pci@0000:00:03.0'
}
}
}
>>> process_nested_dict(inspector_data)
{
'memory_mb': 6000,
'system/os/version': 'CentOS Linux release 7.2.1511 (Core)',
'network/eth0/businfo': 'pci@0000:00:03.0',
}
'''
result = {}
for k, v in six.iteritems(d):
if prefix:
new_key = prefix + '/' + k
else:
new_key = k
if isinstance(v, dict):
    for nested_key, nested_value in six.iteritems(process_nested_dict(v, new_key)):
        result[nested_key] = nested_value
else:
result[new_key] = v
return result
def process_nested_list(l):
'''
Turn a list of lists into a single key/value dict.
Example:
inspector_data = [
['memory_mb', 6000],
['system', 'os', 'version', 'CentOS Linux release 7.2.1511 (Core)'],
['network', 'eth0', 'businfo', 'pci@0000:00:03.0'],
]
>>> process_nested_list(inspector_data)
{
'memory_mb': 6000,
'system/os/version': 'CentOS Linux release 7.2.1511 (Core)',
'network/eth0/businfo': 'pci@0000:00:03.0',
}
'''
result = {}
for item in l:
key = '/'.join(item[:-1])
value = item[-1]
result[key] = value
return result
def process_inspector_data(hw_item):
'''
Convert the raw ironic inspector data into something easier to work with.
The inspector posts either a list of lists or a nested dictionary. We turn
it to a flat dictionary with nested keys separated by a slash.
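Example (the flattened forms follow the helper examples above):
>>> process_inspector_data({'memory_mb': 6000})
{'memory_mb': 6000}
>>> process_inspector_data([['memory_mb', 6000]])
{'memory_mb': 6000}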
'''
if isinstance(hw_item, dict):
return process_nested_dict(hw_item)
elif isinstance(hw_item, list):
return process_nested_list(hw_item)
else:
msg = "The hardware item '{}' must be either a dictionary or a list"
raise Exception(msg.format(repr(hw_item)))
def main():
module = AnsibleModule(
argument_spec={
'os_tenant_name': dict(required=True, type='str'),
'os_username': dict(required=True, type='str'),
'os_password': dict(required=True, type='str'),
}
)
env = os.environ.copy()
# NOTE(shadower): Undercloud OS_AUTH_URL should already be in Ansible's env
env['OS_TENANT_NAME'] = module.params.get('os_tenant_name')
env['OS_USERNAME'] = module.params.get('os_username')
env['OS_PASSWORD'] = module.params.get('os_password')
# TODO(shadower): use python-swiftclient here
p = Popen(('swift', 'list', 'ironic-inspector'), env=env, stdout=PIPE, stderr=PIPE)
if p.wait() != 0:
msg = "Error running `swift list ironic-inspector`: {}".format(
p.stderr.read())
module.fail_json(msg=msg)
hardware_ids = [i.strip() for i in p.stdout.read().splitlines() if i.strip()]
inspector_data = [get_node_hardware_data(i, env) for i in hardware_ids]
processed_data = [process_inspector_data(hw) for hw in inspector_data]
all_keys = set()
for hw in processed_data:
all_keys.update(hw.keys())
# TODO(shadower): checks for values that must be different (e.g. mac addresses)
diffs = []
for key in all_keys:
values = [hw.get(key) for hw in processed_data]
if not all_equal(values):
msg = "The key '{}' has differing values: {}"
diffs.append(msg.format(key, repr(values)))
if diffs:
msg = 'Found some differences between the introspected hardware.'
else:
msg = 'No differences found.'
result = {
'changed': True,
'msg': msg,
'warnings': diffs,
}
module.exit_json(**result)
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
"""
.. module:: __init__
"""
from os import path
from django.conf import settings
from django.conf.urls import include
from django.conf.urls import url
from django.contrib import admin
from django.views.static import serve
PROJECT_ROOT = path.dirname(path.dirname(__file__))
urlpatterns = [
url(r'', include('apps.volontulo.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^static/(?P<path>.*)$', serve, {
'document_root': path.join(PROJECT_ROOT, 'static'),
}),
url(r'^media/(?P<path>.*)$', serve, {
'document_root': settings.MEDIA_ROOT,
}),
]
|
"""
Module containing a class for converting a PySB model to a set of ordinary
differential equations for integration or analysis in Mathematica.
For information on how to use the model exporters, see the documentation
for :py:mod:`pysb.export`.
Output for the Robertson example model
======================================
The Mathematica code produced will follow the form as given below for
``pysb.examples.robertson``::
(*
A simple three-species chemical kinetics system known as "Robertson's
example", as presented in:
H. H. Robertson, The solution of a set of reaction rate equations, in Numerical
Analysis: An Introduction, J. Walsh, ed., Academic Press, 1966, pp. 178-182.
Mathematica model definition file for model robertson.
Generated by pysb.export.mathematica.MathematicaExporter.
Run with (for example):
tmax = 10
soln = NDSolve[Join[odes, initconds], slist, {t, 0, tmax}]
Plot[s0[t] /. soln, {t, 0, tmax}, PlotRange -> All]
*)
(* Parameters *)
k1 = 0.040000000000000001;
k2 = 30000000;
k3 = 10000;
A0 = 1;
B0 = 0;
C0 = 0;
(* List of Species *)
(* s0[t] = A() *)
(* s1[t] = B() *)
(* s2[t] = C() *)
(* ODEs *)
odes = {
s0'[t] == -k1*s0[t] + k3*s1[t]*s2[t],
s1'[t] == k1*s0[t] - k2*s1[t]^2 - k3*s1[t]*s2[t],
s2'[t] == k2*s1[t]^2
}
(* Initial Conditions *)
initconds = {
s0[0] == A0,
s1[0] == B0,
s2[0] == C0
}
(* List of Variables (e.g., as an argument to NDSolve) *)
solvelist = {
s0[t],
s1[t],
s2[t]
}
(* Run the simulation -- example *)
tmax = 100
soln = NDSolve[Join[odes, initconds], solvelist, {t, 0, tmax}]
(* Observables *)
Atotal = (s0[t] * 1) /. soln
Btotal = (s1[t] * 1) /. soln
Ctotal = (s2[t] * 1) /. soln
The output consists of a block of commands that define the ODEs, parameters,
species and other variables for the model, along with a set of descriptive
comments. The sections are as follows:
* The header comments identify the model and show an example of how to
integrate the ODEs in Mathematica.
* The parameters block defines the numerical values of the named parameters.
* The list of species gives the mapping between the indexed species (``s0``,
``s1``, ``s2``) and their representation in PySB (``A()``, ``B()``, ``C()``).
* The ODEs block defines the set of ordinary differential equations and assigns
the set of equations to the variable ``odes``.
* The initial conditions block defines the initial values for each species and
assigns the set of conditions to the variable ``initconds``.
* The "list of variables" block enumerates all of the species in the model
(``s0[t]``, ``s1[t]``, ``s2[t]``) and assigns them to the variable
``solvelist``; this list can be passed to the Mathematica command ``NDSolve``
to indicate the variables to be solved for.
* This is followed by an example of how to call ``NDSolve`` to integrate the
equations.
* Finally, the observables block enumerates the observables in the model,
expressing each one as a linear combination of the appropriate species in
the model. The interpolating functions returned by ``NDSolve`` are substituted
in from the solution variable ``soln``, allowing the observables to be
plotted.
Note that Mathematica does not permit underscores in variable names, so
any underscores used in PySB variables will be removed (e.g., ``A_total`` will
be converted to ``Atotal``).
"""
import pysb
import pysb.bng
import sympy
import re
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
from pysb.export import Exporter, ExpressionsNotSupported, \
CompartmentsNotSupported
class MathematicaExporter(Exporter):
"""A class for returning the ODEs for a given PySB model for use in
Mathematica.
Inherits from :py:class:`pysb.export.Exporter`, which implements
basic functionality for all exporters.
"""
def export(self):
"""Generate the corresponding Mathematica ODEs for the PySB model
associated with the exporter.
Returns
-------
string
String containing the Mathematica code for the model's ODEs.
"""
if self.model.expressions:
raise ExpressionsNotSupported()
if self.model.compartments:
raise CompartmentsNotSupported()
output = StringIO()
pysb.bng.generate_equations(self.model)
# Add docstring if there is one
if self.docstring:
output.write('(*\n' + self.docstring + '\n')
else:
output.write("(*\n")
# Header comment
output.write("Mathematica model definition file for ")
output.write("model " + self.model.name + ".\n")
output.write("Generated by " \
"pysb.export.mathematica.MathematicaExporter.\n")
output.write("\n")
output.write("Run with (for example):\n")
output.write("tmax = 10\n")
output.write("soln = NDSolve[Join[odes, initconds], slist, " \
"{t, 0, tmax}]\n")
output.write("Plot[s0[t] /. soln, {t, 0, tmax}, PlotRange -> All]\n")
output.write("*)\n\n")
# PARAMETERS
# Note that in Mathematica, underscores are not allowed in variable
# names, so we simply strip them out here
params_str = ''
for i, p in enumerate(self.model.parameters):
# Remove underscores
pname = p.name.replace('_', '')
# Convert parameter values to scientific notation
# If the parameter is 0, don't take the log!
if p.value == 0:
params_str += '%s = %g;\n' % (pname, p.value)
# Otherwise, take the log (base 10) and format accordingly
else:
val_str = '%.17g' % p.value
if 'e' in val_str:
(mantissa, exponent) = val_str.split('e')
params_str += '%s = %s * 10^%s;\n' % \
(pname, mantissa, exponent)
else:
params_str += '%s = %s;\n' % (pname, val_str)
## ODEs ###
odes_str = 'odes = {\n'
# Concatenate the equations
odes_str += ',\n'.join(['s%d == %s' %
(i, sympy.ccode(self.model.odes[i]))
for i in range(len(self.model.odes))])
# Replace, e.g., s0 with s[0]
odes_str = re.sub(r's(\d+)', lambda m: 's%s[t]' % (int(m.group(1))),
odes_str)
# Add the derivative symbol ' to the left hand sides
odes_str = re.sub(r's(\d+)\[t\] ==', r"s\1'[t] ==", odes_str)
# Correct the exponentiation syntax
odes_str = re.sub(r'pow\(([^,]+), ([^)]+)\)', r'\1^\2', odes_str)
odes_str += '\n}'
#c_code = odes_str
# Eliminate underscores from parameter names in equations
for i, p in enumerate(self.model.parameters):
odes_str = re.sub(r'\b(%s)\b' % p.name, p.name.replace('_', ''),
odes_str)
## INITIAL CONDITIONS
ic_values = ['0'] * len(self.model.odes)
for i, ic in enumerate(self.model.initials):
idx = self.model.get_species_index(ic.pattern)
ic_values[idx] = ic.value.name.replace('_', '')
init_conds_str = 'initconds = {\n'
init_conds_str += ',\n'.join(['s%s[0] == %s' % (i, val)
for i, val in enumerate(ic_values)])
init_conds_str += '\n}'
## SOLVE LIST
solvelist_str = 'solvelist = {\n'
solvelist_str += ',\n'.join(['s%s[t]' % (i)
for i in range(0, len(self.model.odes))])
solvelist_str += '\n}'
## OBSERVABLES
observables_str = ''
for obs in self.model.observables:
# Remove underscores
observables_str += obs.name.replace('_', '') + ' = '
#groups = self.model.observable_groups[obs_name]
observables_str += ' + '.join(['(s%s[t] * %d)' % (s, c)
for s, c in zip(obs.species, obs.coefficients)])
observables_str += ' /. soln\n'
# Add comments identifying the species
species_str = '\n'.join(['(* s%d[t] = %s *)' % (i, s) for i, s in
enumerate(self.model.species)])
output.write('(* Parameters *)\n')
output.write(params_str + "\n")
output.write('(* List of Species *)\n')
output.write(species_str + "\n\n")
output.write('(* ODEs *)\n')
output.write(odes_str + "\n\n")
output.write('(* Initial Conditions *)\n')
output.write(init_conds_str + "\n\n")
output.write('(* List of Variables (e.g., as an argument to NDSolve) ' \
'*)\n')
output.write(solvelist_str + '\n\n')
output.write('(* Run the simulation -- example *)\n')
output.write('tmax = 100\n')
output.write('soln = NDSolve[Join[odes, initconds], ' \
'solvelist, {t, 0, tmax}]\n\n')
output.write('(* Observables *)\n')
output.write(observables_str + '\n')
return output.getvalue()
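# Minimal usage sketch (assumes the Robertson example model referenced in the module
# docstring; exporters are normally obtained through pysb.export.export()):
#
#   from pysb.examples.robertson import model
#   print(MathematicaExporter(model).export())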
|
from django.db import models
from django.core.mail import send_mail
from django.contrib.auth.models import User
help_text="Please use the following format: <em>YYYY-MM-DD</em>"
class Address( models.Model ):
first_line = models.CharField( max_length=64 )
second_line = models.CharField( max_length=64, blank=True )
city = models.CharField( max_length=32, blank=True )
postcode = models.CharField( max_length=8, blank=True )
creator = models.ForeignKey( User )
def __unicode__( self ):
return self.first_line + ' ' + self.second_line + ' (' + self.postcode + ')'
class Car( models.Model ):
registration_plate = models.CharField( max_length=32, blank=True )
make = models.CharField( max_length=32 )
model = models.CharField( max_length=32 )
colour = models.CharField( max_length=32 )
desc = models.CharField( max_length=512, blank=True )
creator = models.ForeignKey( User )
def __unicode__( self ):
return self.make + ', ' + self.model + ' (' + self.registration_plate + ')'
class Driver( models.Model ):
first_name = models.CharField( max_length=32 )
last_name = models.CharField( max_length=32 )
phone_number = models.IntegerField()
car = models.ForeignKey( Car )
creator = models.ForeignKey( User )
def __unicode__( self ):
return self.first_name + ' ' + self.last_name
class Customer( models.Model ):
first_name = models.CharField( max_length=32, blank=True )
last_name = models.CharField( max_length=32, blank=True )
phone_number = models.IntegerField()
creator = models.ForeignKey( User )
def __unicode__( self ):
return self.first_name + ' ' + self.last_name
class Booking( models.Model ):
pickup_address = models.ForeignKey( Address, related_name='booking_pickup' )
drop_off_address = models.ForeignKey( Address, related_name='booking_drop_off' )
customer = models.ForeignKey( Customer )
order_received = models.DateTimeField( editable=False, auto_now_add=True )
order_modified_time = models.DateTimeField( editable=False, auto_now=True )
collection_time = models.DateTimeField( blank=True, help_text=help_text )
no_passengers = models.IntegerField( blank=True )
baggage_or_instructions = models.CharField( max_length=512, blank=True )
driver = models.ForeignKey( Driver )
confirmationSent = models.BooleanField( editable=False )
price = models.DecimalField( blank=True, decimal_places=2, max_digits=8 )
creator = models.ForeignKey( User ) # or should this be Profile?
def __unicode__( self ):
return self.pickup_address.first_line + ' to ' + self.drop_off_address.first_line
def save( self ):
    super( Booking, self ).save()
    msg_body = self.__unicode__()
    result = send_mail( 'Booking made', msg_body,
                        'digibookingswebfaction@noelevans.co.uk',
                        [ 'noelevans@gmail.com' ] )
    if result:
        self.confirmationSent = True
        super( Booking, self ).save()  # persist the confirmation flag
|
# You're going to hate me for this
# Import all required packages
import os
import discord
from discord.ext import commands
from dotenv import load_dotenv
# Load the vars as system environment variables
load_dotenv()
# Define the client
client = commands.Bot(command_prefix='&_')
# This tells d.py that this function is an event
@client.event
async def on_ready():
print('Hi!')
# tells d.py that this is a command
@client.command()
# define command name
async def userinfo(ctx):
# tells bot what to send
await ctx.send(f"Username: {ctx.author.name}#{ctx.author.discriminator}")
# turn off the geckos too loud
@client.command()
async def pissbaby(ctx):
await ctx.send('hey you little piss baby, you think youre so fucking cool? huh? you think youre so fucking tough? you talk a lotta big game for someone with such a small truck')
client.run(os.getenv('TOKEN'))
# if there's a token here and youre reading it tell me so i can fix that
|
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from action_logging import Logger
class Response:
def __init__(self, data = None, logger = None):
"""
:param data:
:param logger:
"""
if logger is None:
self.logger = Logger(log_flag = True, log_file = "user", log_path = "../logs/")
else:
self.logger = logger
self.data = data # data has 'Timestamp', 'User', 'Message', 'Date', 'Weekday' columns
self.grouped_data = None
self.pd_date_wise_response = None
def group_the_data(self):
"""
:return:
"""
self.logger.write_logger('In response.py (group_the_data): Grouping the data starts')
self.grouped_data = self.data.groupby(['User', 'Timestamp'])['Message'].count().reset_index().sort_values(
['Timestamp'])
self.grouped_data['Date'] = self.grouped_data['Timestamp'].dt.strftime('%d-%b-%Y')
self.logger.write_logger('In response.py (group_the_data): Grouping the data ends')
return self
def create_response_time(self):
"""
:return:
"""
self.logger.write_logger('In response.py (create_response_time): Creating Response time starts')
self.grouped_data['Response (Min)'] = self.grouped_data['Timestamp'].diff().astype('timedelta64[m]').fillna(0)
self.logger.write_logger('In response.py (create_response_time): Creating Response time ends')
return self
def date_wise_response(self):
"""
:return:
"""
self.logger.write_logger('In response.py (date_wise_response): Creating Date wise Response starts')
grp1 = self.grouped_data.groupby('Date')['Response (Min)'].mean().reset_index().sort_values('Response (Min)')
grp2 = self.grouped_data.groupby('Date')['Message'].count().reset_index()
grp_merged = grp1.merge(grp2, on = 'Date', how = 'left')
self.pd_date_wise_response = grp_merged
self.logger.write_logger('In response.py (date_wise_response): Creating Date wise Response ends')
return self
@staticmethod
def bayesian_adjusted_rating(v, r):
    """
    Weight-adjusted rating: for each pair (v, r) compute
    (v*r + sum(v*r)) / (v + sum(v)), pulling individual ratings toward
    the overall weighted value (a Bayesian-average style adjustment).
    """
    _tmp_df = pd.DataFrame({"V": v, "R": r})
    _tmp_df['VxR'] = _tmp_df['V'] * _tmp_df['R']
    _tmp_df['BayesianRating'] = (_tmp_df['VxR'] + sum(_tmp_df['VxR'])) / (_tmp_df['V'] + sum(_tmp_df['V']))
    return _tmp_df['BayesianRating'].tolist()
def score_sort_responses(self):
"""
:return:
"""
self.logger.write_logger('In response.py (score_sort_responses): Score and Sort the response starts')
scaler = MinMaxScaler()
self.pd_date_wise_response['Final Score'] = self.bayesian_adjusted_rating(
1 - scaler.fit_transform(self.pd_date_wise_response[['Response (Min)']])[:, 0],
self.pd_date_wise_response['Message'])
self.pd_date_wise_response = self.pd_date_wise_response.sort_values('Final Score', ascending = False)
self.logger.write_logger('In response.py (score_sort_responses): Score and Sort the response ends')
def get_the_longest_conversation_date(self):
"""
:return:
"""
self.logger.write_logger(
'In response.py (get_the_longest_conversation_date): Getting the longest conversation date starts')
self.group_the_data(). \
create_response_time(). \
date_wise_response(). \
score_sort_responses()
self.logger.write_logger(
'In response.py (get_the_longest_conversation_date): Getting the longest conversation date ends')
return self.pd_date_wise_response['Date'].to_list()[0]
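# Minimal usage sketch (assumes `chat_df` is a DataFrame carrying the 'Timestamp',
# 'User', 'Message', 'Date' and 'Weekday' columns noted in __init__):
#
#   response = Response(data=chat_df)
#   busiest_date = response.get_the_longest_conversation_date()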
|
# Made by Christian Oliveros on 09/10/2017 for MMKF15
# Imports Used
try:
from .instruction import Instruction, InterpretedInstruction
except SystemError as e:
from instruction import Instruction, InterpretedInstruction
try:
from .vector import Vector3, interpolatePoints
except SystemError as e:
from vector import Vector3, interpolatePoints
try:
from .constants import START_VELOCITY, START_POSITION, LSQR, VL, POS_X_1, POS_X_2, POS_X_3, POS_Y_1, POS_Y_2, POS_Y_3
except SystemError as e:
from constants import START_VELOCITY, START_POSITION, LSQR, VL, POS_X_1, POS_X_2, POS_X_3, POS_Y_1, POS_Y_2, POS_Y_3
"""Generates the cartesian trajectory from a file reader, a start velocity and a start position"""
def generateCartesianPathTrajectory(fileReader, start_vel=START_VELOCITY, start_pos=START_POSITION):
traj = []
current_vel = start_vel
current_pos = start_pos
pre_pos = None
started = False
for instruction in fileReader:
if instruction.V is not None:
current_vel = instruction.V
traj.clear()
# Amount of points in interpolation, used to know if we moved
count = 0
for pos in interpolatePoints(current_pos, instruction.pos):
# Let's not put the original starting position of the arm
traj.append(InterpretedInstruction(pos, (pos - current_pos).normalized() * current_vel))
pre_pos = current_pos
current_pos = pos
count += 1
if not started:
started = True
del traj[0]
# If we did not move from the origin
if len(traj) == 0:
started = False
continue
# We actually moved
if count > 1 :
#traj[len(traj) - 1].vel = Vector3()
pass
# We didn't move
else:
size = len(traj)
if size > 0:
del traj[size - 1]
current_pos = pre_pos
if size - 1 == 0:
continue
for ans in traj:
yield ans
"""Calculate one joint position"""
def _iKinePos(X, Y, Z):
return -(LSQR - (X)**2 - (Y)**2).sqrt() - Z + VL
"""Calculate one joint velocity"""
def _iKineVel(X, Y, Z, vel, q):
return - ((X * vel.x + Y * vel.y) / (Z - VL + q)) - vel.z
"""Receives a cartesian instruction and converts it to an joint one"""
def iKine(cartesian_instruction):
sub_x_1 = cartesian_instruction.pos.x - POS_X_1
sub_y_1 = cartesian_instruction.pos.y - POS_Y_1
sub_x_2 = cartesian_instruction.pos.x - POS_X_2
sub_y_2 = cartesian_instruction.pos.y - POS_Y_2
sub_x_3 = cartesian_instruction.pos.x - POS_X_3
sub_y_3 = cartesian_instruction.pos.y - POS_Y_3
q = Vector3()
q.x = _iKinePos(sub_x_1, sub_y_1, cartesian_instruction.pos.z)
q.y = _iKinePos(sub_x_2, sub_y_2, cartesian_instruction.pos.z)
q.z = _iKinePos(sub_x_3, sub_y_3, cartesian_instruction.pos.z)
q_vel = Vector3()
q_vel.x = _iKineVel(sub_x_1, sub_y_1, cartesian_instruction.pos.z, cartesian_instruction.vel, q.x)
q_vel.y = _iKineVel(sub_x_2, sub_y_2, cartesian_instruction.pos.z, cartesian_instruction.vel, q.y)
q_vel.z = _iKineVel(sub_x_3, sub_y_3, cartesian_instruction.pos.z, cartesian_instruction.vel, q.z)
return InterpretedInstruction(q, q_vel)
"""Generates the joint trajectory from a file reader, a start velocity and a start position"""
def generateJointPathTrajectory(fileReader, start_vel=START_VELOCITY, start_pos=START_POSITION):
for instruction in generateCartesianPathTrajectory(fileReader, start_vel, start_pos):
yield iKine(instruction)
# Set constant joint start position
def _setConstants():
try:
from .constants import __name__ as constants_module_name
except SystemError as e:
from constants import __name__ as constants_module_name
sub_x_1 = START_POSITION.x - POS_X_1
sub_y_1 = START_POSITION.y - POS_Y_1
sub_x_2 = START_POSITION.x - POS_X_2
sub_y_2 = START_POSITION.y - POS_Y_2
sub_x_3 = START_POSITION.x - POS_X_3
sub_y_3 = START_POSITION.y - POS_Y_3
q = Vector3()
q.x = _iKinePos(sub_x_1, sub_y_1, START_POSITION.z)
q.y = _iKinePos(sub_x_2, sub_y_2, START_POSITION.z)
q.z = _iKinePos(sub_x_3, sub_y_3, START_POSITION.z)
import sys
module = sys.modules[constants_module_name]
setattr(module, 'START_JOINT_POSITION', q)
_setConstants()
if __name__ == '__main__':
print("START_JOINT_POSITION Test")
try:
from .constants import START_JOINT_POSITION
except SystemError as e:
from constants import START_JOINT_POSITION
print("START_JOINT_POSITION=%s" % START_JOINT_POSITION)
import os
print("Test of Path Trajectory")
print("Generation Start")
import fileReader as reader
f = reader.FileReader(os.path.relpath(os.path.join("Test", "correct_test.txt"), start=os.curdir))
traj = [x for x in generateCartesianPathTrajectory(f)]
print("Generation Done")
print("len=%s" % len(traj))
print("pos0=%s" % traj[0])
print("pos%s=%s" % (len(traj) - 1,traj[len(traj) - 1]))
print("pos%s=%s" % (len(traj) - 2,traj[len(traj) - 2]))
print("pos%s=%s" % (len(traj) - 3,traj[len(traj) - 3]))
print("iKine Test")
print(iKine(InterpretedInstruction(Vector3(10, 20, 30), Vector3(10, 20, 30).normalized())))
print("Joint Trajectory")
f = reader.FileReader(os.path.relpath(os.path.join("Test", "correct_test.txt"), start=os.curdir))
print("Generation Start")
jtraj = [x for x in generateJointPathTrajectory(f)]
print("Generation Done")
print("len=%s" % len(jtraj))
print("pos0=%s" % jtraj[0])
print("pos%s=%s" % (len(jtraj) - 1,jtraj[len(jtraj) - 1]))
print("pos%s=%s" % (len(jtraj) - 2,jtraj[len(jtraj) - 2]))
print("pos%s=%s" % (len(jtraj) - 3,jtraj[len(jtraj) - 3]))
|
#
# Copyright (c) 2018-2020 by Kristoffer Paulsson <kristoffer.paulsson@talenten.se>.
#
# This software is available under the terms of the MIT license. Parts are licensed under
# different terms if stated. The legal terms are attached to the LICENSE file and are
# made available on:
#
# https://opensource.org/licenses/MIT
#
# SPDX-License-Identifier: MIT
#
# Contributors:
# Kristoffer Paulsson - initial implementation
#
"""Utilities for working with packaging."""
import os
import re
INDEX_REGEX = """(?:########## ([A-Z0-9_]*)(?<!_END) ##########)"""
CLIP_REGEX = """(?s)(?<=########## {0} ##########).*?(?=########## {0}_END ##########)"""
class ScriptIndexer:
"""Utility that scans after scriptlets in a directory of scripts and index them."""
def __init__(self):
self.__regex = INDEX_REGEX
self.__index = dict()
def walk(self, path: str) -> int:
"""Walk all files and directories at given path."""
hits = 0
for root, _, files in os.walk(path):
for file in files:
filepath = os.path.join(root, file)
with open(filepath) as script:
for hit in re.finditer(self.__regex, script.read()):
ingredient = hit.group(1)
if ingredient in self.__index.keys():
raise ValueError("Duplicate script! %s" % ingredient)
self.__index[ingredient] = filepath
hits += 1
return hits
@property
def index(self) -> dict:
"""Access to the index dictionary."""
return self.__index
class ScriptScissor:
"""Utility to clip and stitch scripts by recipe."""
def __init__(self, index: dict):
self.__clip_template = CLIP_REGEX
self.__index = index
def clip(self, ingredient: str) -> str:
"""Copy snippet from script."""
if ingredient not in self.__index.keys():
raise ValueError("The following snippet not in index! %s" % ingredient)
with open(self.__index[ingredient]) as script:
match = re.search(self.__clip_template.format(ingredient), script.read())
if match:
return match.group(0)
else:
raise ValueError("Snippet not found in script! %s" % ingredient)
def stitch(self, recipe: list) -> str:
"""Stitch a new script based on recipe."""
script = ""
for ingredient in recipe:
script += self.clip(ingredient)
return script
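# Minimal usage sketch (the path and ingredient names are illustrative assumptions):
#
#   indexer = ScriptIndexer()
#   indexer.walk("scripts/")
#   scissor = ScriptScissor(indexer.index)
#   combined = scissor.stitch(["SETUP", "INSTALL"])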
|
# -*- coding: utf-8 -*-
'''
Project: Product Aesthetic Design: A Machine Learning Augmentation
Authors: Alex Burnap, Yale University
Email: alex.burnap@yale.edu
License: MIT License
OSS Code Attribution (see Licensing Inheritance):
Portions of Code From or Modified from Open Source Projects:
https://github.com/tkarras/progressive_growing_of_gans
https://github.com/AaltoVision/pioneer
https://github.com/DmitryUlyanov/AGE
https://github.com/akanimax/attn_gan_pytorch/
'''
from __future__ import print_function
from contextlib import ExitStack
import os
import sys
from comet_ml import Experiment as CometExperiment
import numpy as np
import pandas as pd
import torch
import torch.nn.parallel
import torch.optim
import torch.utils.data
from numpy.random import RandomState
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.svm import LinearSVR
from models import baselines_conventional
from training.train_pretrained_model import train_pretrained_model, evaluate_pretrained_model
from utils import logging_utils
from data.vehicles import Vehicles
from data.chairs import Chairs
from training.train import train_combined_model
from training.session import CombinedTrainSession, PretrainedSession
#----------------------------------------------------------------------------------------------------------------------
# Globals
#----------------------------------------------------------------------------------------------------------------------
torch.backends.cudnn.benchmark = True
# if torch.cuda.is_available():
# torch.backends.cudnn.deterministic = True
# ----------------------------------------------------------------------------
class Experiment(object):
'''
Experiment object that holds experiment data and runs several classes of
baseline, pretrained deep learning, and custom model deep learning.
Allows introspection and saving state to disk for reproducibility.
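A minimal usage sketch (the configuration object `c` is assumed to be built by
the caller, e.g. loaded from a config module, and is not defined in this file):
    experiment = Experiment(c=config)
    experiment.run_experiment()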
'''
def __init__(
self,
c=None, # Configuration params
):
assert c is not None
self.c = c # Main configuration file. Required for experiment.
# Globals
np.random.seed(self.c.random_seed)
os.environ.update(self.c.env)
# ------- Main run code --------------------------------------------------
def run_experiment(self, skip_initialization=False):
'''
Main experiment run code.
'''
try:
print("\nBeginning experiment on: " + torch.cuda.get_device_name(0))
print("Using ", torch.cuda.device_count(), " GPUs\n")
print('PyTorch {}'.format(torch.__version__))
except RuntimeError:
raise RuntimeError('Out of memory on GPU.')
if not skip_initialization:
print("Initializing Experiment...")
print("Loading Data...")
self.init_experiment_data()
self.print_vehicle_data_details()
print("Initializing Models...")
self.init_experiment()
if self.c.save_output_log:
if self.c.experiment_type == 'baseline_conventional_ML_and_CV':
self.c.use_cometML = False
self.init_logging()
if self.c.experiment_type == 'combined_model_train':
train_combined_model(self.session.generator,
self.session.encoder,
self.session.predictor,
self.dataset,
session=self.session,
total_steps=self.c.total_kimg * 1000)
elif self.c.experiment_type == 'pretrain_only':
with self.session.comet_experiment.train() if self.session.c.use_cometML else ExitStack():
for epoch in range(0, self.c.epochs_pretrained):
if self.c.adjust_learning_rate_during_training:
self.adjust_learning_rate(self.session.optimizer, epoch)
train_pretrained_model(self.dataset,
self.session,
epoch)
evaluate_pretrained_model(self.dataset,
self.session,
epoch,
data_split='valid')
evaluate_pretrained_model(self.dataset,
self.session,
epoch,
data_split='test')
elif self.c.experiment_type == 'baseline_conventional_ML_and_CV':
print("Beginning training of baseline model")
self.model.fit(self.dataset.train_x_features, self.dataset.train_y)
print("Finished training of baseline model")
test_y_hat = self.model.predict(self.dataset.test_x_features)
mae = np.mean(np.abs(test_y_hat-self.dataset.test_y))
print("Test Accuracy: {}".format(mae))
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
return True
#-----------------------------------------------------------------------------------
# Experiment Initialization
#-----------------------------------------------------------------------------------
def init_experiment(self):
'''
Experimental set up of all experiment attributes depending on experiment type.
'''
# Begin Model Estimation and Evaluation
if self.c.experiment_type == 'combined_model_train':
self.session = CombinedTrainSession(c=self.c)
self.session.setup()
elif self.c.experiment_type == 'pretrain_only':
# Pretrained Deep Learning Model Session
self.session = PretrainedSession(c=self.c)
self.session.setup()
elif self.c.experiment_type == 'baseline_conventional_ML_and_CV':
# TODO 10/27/20: remove other baseline classifiers. Just stick with random_forest since it did best.
if self.c.conventional_baseline_model == 'svr':
self.model = LinearSVR(
    dual=True,
    loss='epsilon_insensitive',
    fit_intercept=True,
    C=1e-2,
    verbose=True,
    random_state=self.c.random_seed,
    max_iter=10000)
elif self.c.conventional_baseline_model == 'random_forest':
self.model = RandomForestRegressor(
criterion='mae',
n_estimators=self.c.num_random_forest_trees,
n_jobs=self.c.num_baseline_cpu_jobs,
random_state=self.c.random_seed,
verbose=3)
else:
raise ValueError("Baseline model not found")
else:
raise ValueError('Experiment type not found.')
#-----------------------------------------------------------------------------------
# Data Initialization
#-----------------------------------------------------------------------------------
def init_experiment_data(self):
'''
Sets up all data depending on experiment type.
'''
# Obtain Raw Rating Data
ratings_df_full = pd.read_csv(self.c.ratings_dataset_path, header=0)
ratings_df_full = ratings_df_full[["real_value", "design_id", "name"]]
ratings_df_full['design_id'] = ratings_df_full['design_id'].astype(int)
ratings_df_full.index = ratings_df_full.design_id
# Vehicle Detail Lists
if self.c.dataset == 'vehicles':
self.products_list = np.loadtxt(self.c.suvs_list_path, delimiter=',', dtype=str)
# Get Design ID to Image ID labels
design_labels = np.load(self.c.labels_dir)
design_ids_of_images = design_labels['arr_5']
self.design_ids_of_images = design_ids_of_images.flatten()
elif self.c.dataset == 'chairs':
ratings_df_full['name'] = ratings_df_full['name'].astype(int)
# Train/Valid/Test Split
if self.c.train_test_split_unique_models and self.c.dataset=="vehicles":
ratings_df_full["model"] = ratings_df_full["name"].apply(lambda x: " ".join(x.split(" ")[2:]))
unique_models = ratings_df_full["model"].unique()
# Stratified Splitting
if self.c.train_test_split_unique_models_and_average_ratings:
for ind, unique_model in enumerate(unique_models):
# Average rating for unique models
matched_designs = ratings_df_full[ratings_df_full["model"] == unique_model]
ratings_df_full.loc[matched_designs['design_id'], ['real_value']] = matched_designs.real_value.mean()
train_inds, valid_and_test_inds = train_test_split(np.arange(len(unique_models)),
test_size=self.c.train_valid_test_ratio,
random_state=self.c.random_seed)
else:
ratings_df_full["model"] = ratings_df_full["name"]
unique_models = ratings_df_full["model"].unique()
train_inds, valid_and_test_inds = train_test_split(np.arange(ratings_df_full.shape[0]),
test_size=self.c.train_valid_test_ratio,
random_state=self.c.random_seed)
half_length = int(valid_and_test_inds.shape[0] / 2)
valid_inds, test_inds = valid_and_test_inds[:half_length], valid_and_test_inds[half_length:]
self.ratings_df_train = ratings_df_full[ratings_df_full['model'].isin(unique_models[train_inds])]
self.ratings_df_valid = ratings_df_full[ratings_df_full['model'].isin(unique_models[valid_inds])]
self.ratings_df_test = ratings_df_full[ratings_df_full['model'].isin(unique_models[test_inds])]
self.train_x, self.train_y = self.ratings_df_train['design_id'].values, self.ratings_df_train['real_value'].values
self.valid_x, self.valid_y = self.ratings_df_valid['design_id'].values, self.ratings_df_valid['real_value'].values
self.test_x, self.test_y = self.ratings_df_test['design_id'].values, self.ratings_df_test['real_value'].values
if self.c.percentage_of_training_data != 1.0:
print("Artificially reducing training data to {}% of full training set.".format(int(self.c.percentage_of_training_data*100)))
new_num_products = int(self.c.percentage_of_training_data*self.train_x.shape[0])
training_data_mask = np.random.choice(np.arange(self.train_x.shape[0]), new_num_products, replace=False)
self.train_x = self.train_x[training_data_mask]
self.train_y = self.train_y[training_data_mask]
if self.c.create_duplicate_ratings_for_viewpoints:
self.train_x, self.train_y = np.repeat(
self.train_x, repeats=self.c.number_viewpoints_per_product), np.repeat(
self.train_y, repeats=self.c.number_viewpoints_per_product)
self.valid_x, self.valid_y = np.repeat(
self.valid_x, repeats=self.c.number_viewpoints_per_product), np.repeat(
self.valid_y, repeats=self.c.number_viewpoints_per_product)
self.test_x, self.test_y = np.repeat(
self.test_x, repeats=self.c.number_viewpoints_per_product), np.repeat(
self.test_y, repeats=self.c.number_viewpoints_per_product)
print("Using {} Train, {} Validation, {} Test Data".format(
self.train_x.shape[0], self.valid_x.shape[0], self.test_x.shape[0]))
print("Shuffling Data...")
self.shuffle_experiment_data(seed=self.c.random_seed)
# Setup data attributes particular to the experiment
if self.c.experiment_type == 'combined_model_train' or self.c.experiment_type == 'pretrain_only' or self.c.experiment_type == 'baseline_conventional_ML_and_CV':
if self.c.dataset == 'vehicles':
self.dataset = Vehicles(use_RAM=self.c.use_ram_for_image_load,
train_x=self.train_x,
train_y=self.train_y,
valid_x=self.valid_x,
valid_y=self.valid_y,
test_x=self.test_x,
test_y=self.test_y,
c=self.c)
elif self.c.dataset == 'chairs':
self.dataset = Chairs(use_RAM=self.c.use_ram_for_image_load,
train_x=self.train_x,
train_y=self.train_y,
valid_x=self.valid_x,
valid_y=self.valid_y,
test_x=self.test_x,
test_y=self.test_y,
c=self.c)
if self.c.experiment_type == 'baseline_conventional_ML_and_CV':
print("Loading Images")
key = self.dataset._base_key + '{}'.format(self.c.image_size)
train_image_ids = self.dataset.get_random_image_ids_given_design_ids(self.dataset.train_x)
train_images = np.array([self.dataset.dataset[key][i[0]][i[1]] / 127.5 - 1 for i in train_image_ids], dtype=np.float32)
test_image_ids = self.dataset.get_side_image_id_given_design_ids(self.dataset.test_x)
test_images = np.array([self.dataset.dataset[key][i[0]][i[1]] / 127.5 -1 for i in test_image_ids], dtype=np.float32)
print("Beginning Feature Extraction")
train_x_features = baselines_conventional.extract_features_array(train_images)
test_x_features = baselines_conventional.extract_features_array(test_images)
self.dataset.train_x_features = train_x_features
self.dataset.test_x_features = test_x_features
assert self.dataset.train_x_features.shape[1] == self.dataset.test_x_features.shape[1]
print("Using {} Train and {} Validation Data with feature size of {}".
format(self.dataset.train_x_features.shape[0], self.dataset.test_x_features.shape[0],
self.dataset.train_x_features.shape[1]))
else:
pass
# raise ValueError('Experiment type not found.')
#-----------------------------------------------------------------------------------
# Helper Functions (logging, shuffling, etc.)
#-----------------------------------------------------------------------------------
def init_logging(self):
if self.c.experiment_type == 'combined_model_train':
if not hasattr(self, 'session'):
raise ValueError('Could not find Training Session')
checkpoint_str = 'ContCheckpoint' if self.c.load_checkpoint else 'NewRun'
self.c.experiment_description = '{}_{}_{}_iter{}_seed{}_{}fracdata'.format(
self.c.experiment_type,
self.c.attribute,
checkpoint_str,
self.session.sample_i,
self.c.random_seed,
self.c.percentage_of_training_data)
elif self.c.experiment_type == 'pretrain_only':
self.c.experiment_description = '{}_{}_e{}_seed{}_{}fracdata'.format(
self.c.experiment_type,
self.c.attribute,
self.c.epochs_pretrained,
self.c.random_seed,
self.c.percentage_of_training_data)
elif self.c.experiment_type == 'baseline_conventional_ML_and_CV':
self.c.experiment_description = '{}_{}_{}_seed{}'.format(
self.c.experiment_type,
self.c.attribute,
self.c.conventional_baseline_model,
self.c.random_seed)
print("Creating result directory and logging for reproducibility")
self.c.result_subdir = logging_utils.create_result_subdir(self.c.result_dir, self.c.experiment_description)
self.c.summary_dir = self.c.result_subdir + "/summary"
self.c.save_dir = self.c.result_subdir
logging_utils.make_dirs(self.c)
logging_utils.save_config(self.c)
logging_utils.set_output_log_file(os.path.join(self.c.result_subdir, 'experiment_log.txt'))
logging_utils.init_output_logging()
f = open('{}/config.txt'.format(self.c.save_dir), 'w')
for key, val in self.c.items():
f.write("{}={}\n".format(key, val))
f.close()
# Setup Online Logging
# TODO: 10/27/20 setup comet ml logging for existing experiment - low priority
if self.c.use_cometML:
self.session.comet_experiment = CometExperiment(api_key=self.c.comet_api_key,
project_name=self.c.comet_project_name,
workspace=self.c.comet_workspace,
log_graph=True)
exp_name = '_'.join(self.c.save_dir.split('/')[-2:])
self.session.comet_experiment.set_name(exp_name)
self.session.comet_experiment.set_filename(exp_name)
self.session.comet_experiment.log_parameters(self.c)
def shuffle_data_helper(self, seed=0, *arrays):
""" Shuffles an arbirary number of data arrays by row in consistent manner"""
for array in arrays:
prng = RandomState(seed)
prng.shuffle(array)
def shuffle_experiment_data(self, seed=0):
self.shuffle_data_helper(seed, self.train_x, self.train_y,
self.valid_x, self.valid_y,
self.test_x, self.test_y)
def print_vehicle_data_details(self,
print_train_set=False,
print_valid_set=True,
print_test_set=True):
self.print_sanity_check_values()
if self.c.train_test_split_unique_models:
if print_train_set:
train_set_details = self.ratings_df_train['name'].values
print("\nTrain Set: {}\n".format(train_set_details))
if print_valid_set:
valid_set_details = self.ratings_df_valid['name'].values
print("\nValidation Set: {}\n".format(valid_set_details))
if print_test_set:
test_set_details = self.ratings_df_test['name'].values
print("\nTest Set: {}\n".format(test_set_details))
else:
if print_train_set:
train_set_details = np.array([
' '.join(elm)
for elm in self.products_list[self.train_x_design_iloc][:, :3]])
print("\nTrain Set: {}\n".format(train_set_details))
if print_valid_set:
valid_set_details = np.array([
' '.join(elm)
for elm in self.products_list[self.valid_x_design_iloc][:, :3]])
print("\nValidation Set: {}\n".format(valid_set_details))
if print_test_set:
test_set_details = np.array([
' '.join(elm)
for elm in self.products_list[self.test_x_design_iloc][:, :3]])
print("\nTest Set: {}\n".format(test_set_details))
def print_sanity_check_values(self, print_guess_three=False):
print("\nNaive Baselines:")
print('Guess training mean: {:.4f}'.format(self.dataset.training_mean))
print('Guess training median: {:.4f}'.format(self.dataset.training_median))
if print_guess_three:
print('Guess 3.0: {:.4f}\n'.format(self.dataset.training_mid))
def adjust_learning_rate(self, epoch):
"""Stepwise decrease in the learning rate by 10 every 10 epochs"""
new_lr = self.c.lr * (0.1 ** (epoch // 10))
for param_group_ind, _ in enumerate(self.optimizer.state_dict()['param_groups']):
self.optimizer.param_groups[param_group_ind]['lr'] = new_lr
if __name__ == "__main__":
#os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
#torch.backends.cudnn.benchmark = True
from config import c
np.random.seed(c.random_seed)
print('Beginning Experiment')
os.environ.update(c.env)
exp = Experiment(c)
exp.run_experiment()
print('Finished Experiment')
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2021 The SymbiFlow Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
from typing import List
import jinja2
from project_results import ProjectResults
def generate_index_html(template: jinja2.Template,
results: List[ProjectResults]):
print('Generating index page...')
projects_dict = {}
all_toolchains = set()
for project_results in results:
boards = {}
for board, toolchains in project_results.entries.items():
board_toolchains = []
for toolchain in toolchains.keys():
board_toolchains.append(toolchain)
all_toolchains.add(toolchain)
boards[board] = board_toolchains
projects_dict[project_results.project_name] = boards
projects_list = sorted(list(projects_dict.items()), key=lambda t: t[0])
toolchain_list = sorted(list(all_toolchains))
return template.render(projects=projects_list, toolchains=toolchain_list)
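# --- Usage sketch (not part of the original script) ---
# A minimal, hypothetical way to drive generate_index_html; the template
# directory, template name, and how ProjectResults are obtained are assumptions.
if __name__ == '__main__':
    env = jinja2.Environment(loader=jinja2.FileSystemLoader('templates'))
    index_template = env.get_template('index.html')
    project_results: List[ProjectResults] = []  # e.g. collected by the surrounding pipeline
    html = generate_index_html(index_template, project_results)
    with open('index.html', 'w') as fout:
        fout.write(html)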
|
#!/bin/python3
import argparse
import luxafor
import time
def main():
parser = argparse.ArgumentParser(description='Change Luxafor colour')
parser.add_argument('color', choices=['green', 'yellow', 'red', 'blue', 'white', 'off'], help='color to change to')
args = parser.parse_args()
l = luxafor.LuxaFor()
## Normal working of script, set predefined color
l.predefined_color(args.color)
## Example 1 Police Animation:
# l.animate_police(10)
## Example 2 Set Custom Color:
# l.set_color('#A7226E')
# time.sleep(1)
# l.set_color('#EC2049')
# time.sleep(1)
# l.set_color('#F26B38')
# time.sleep(1)
# l.set_color('#F7DB4F')
# time.sleep(1)
# l.set_color('#2F9599')
if __name__ == '__main__':
main()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright CNRS 2012
# Roman Yurchak (LULI)
# This software is governed by the CeCILL-B license under French law and
# abiding by the rules of distribution of free software.
import sys
import os, os.path
import hashlib
import warnings
import numpy as np
import tables
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import yt.mods
from yt.frontends.flash.data_structures import FLASHStaticOutput
import flash.output
import hedp
from hedp.math.abel import abel
import hedp.opacity.henke
from hedp.diags.xray import xray_filter_transmission, Kalpha_profile,\
ff_profile,ip_sensitivity
from time import time
from numexpr import evaluate
def fslice(filename, fields, resolution=800, cache="/dev/shm", bounds=None, method='nearest'):
"""
Load and cache fields from a FLASH output
Parameters:
----------
- filename [str]: path to the file
- fields [list]: a list of fields we want to load
- resolution [int]: requested resolution
- cache [bool or str]: directory in which to cache the output (False disables caching)
- bounds: optional bounds passed to the slicing routine
- method: interpolation to use in scipy.interpolate.griddata
"""
# doing some homogenization
filename = os.path.abspath(filename)
fields = sorted(fields)
cache_miss = True
if cache:
m = hashlib.md5()
hash_in = "".join([filename,
"".join(fields),
str(resolution)])
m.update(hash_in)
hash_key = m.hexdigest()
cache_file = os.path.join(cache, hash_key+'.hdf')
if os.path.exists(cache_file):
cache_miss = False
if not cache or (cache and cache_miss):
pf = yt.mods.load(filename)
#pf = yt.frontends.flash.data_structures.FLASHStaticOutput(filename)
#print filename
d = {}
def _get_a_field(field):
# fixing the stupid dot problem
ffield = field
if 'packmeshchkreadhdf5' not in pf:
ffield = '{:<4s}'.format(field)
R, Z, D = flash.output.slice(2, 0.0, ffield, pf, resolution=resolution, bounds=bounds, method=method)
return R, Z, D
D_unsorted = map(_get_a_field, fields)
for key, val in zip(fields, D_unsorted):
d[key] = val[2]
d['z'] = val[1]
d['r'] = val[0]
d['x'] = d['r']
d['y'] = d['z']
if cache:
if cache_miss:
f = tables.openFile(cache_file, 'w')
for key in d:
atom = tables.Atom.from_dtype(d[key].dtype)
ds = f.createCArray(f.root, key, atom, d[key].shape)
ds[:] = d[key]
f.root._v_attrs.t = pf.current_time
f.root._v_attrs.filename = filename
f.close()
d['t'] = pf.current_time
d['filename'] = filename
else:
f = tables.openFile(cache_file, 'r')
d = {}
for key in fields + ['r', 'z']:
d[key] = getattr(f.root, key)[:]
d['t'] = f.root._v_attrs.t
d['filename'] = f.root._v_attrs.filename
f.close()
else:
d['t'] = pf.current_time
d['filename'] = filename
return hedp.Storage(d)
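# Usage sketch (not part of the original module): the plotfile name and field
# names below are placeholders; any FLASH plotfile and valid field names work.
if __name__ == '__main__':
    d = fslice('lasslab_hdf5_plt_cnt_0020', ['dens', 'tele'], resolution=400)
    print(d['dens'].shape, d['t'])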
|
class ORSolver(object):
"""Solver Class of the JobShop Problem"""
def solve_with_disunctive_model(self, config_path, init_path, sol_path, max_time):
"""Minimal jobshop problem."""
import collections
import time
import xml.etree.ElementTree as ET
import numpy as np
import struct
from XML_Reader import XML_Reader
from XML_Writer import XML_Writer
# Import Python wrapper for or-tools CP-SAT solver.
from ortools.sat.python import cp_model
print('Loading data files')
problem = XML_Reader(config_path, init_path)
production_time_matrix = problem.get_production_time_matrix()
worker_movement_time_matrix = problem.get_worker_movement_matrix()
automatic_worksteps_times_matrix = problem.get_automatic_times_matrix()
number_manual_worksteps = problem.get_number_worksteps()
releasetime_workers = problem.get_releasetime_workers()
leftover_job_products = problem.get_leftover_jobs_products()
worker_skill = problem.get_worker_skill()
products_type = problem.get_products_type()
products_initial = problem.get_products_initial()
workers_initial_station = problem.get_worker_initial_stat()
tic = time.perf_counter()
model = cp_model.CpModel()
stations_count = len(worker_movement_time_matrix[0])
print(f' #Stations: {stations_count}')
all_stations = range(stations_count)
products_count = len(products_type)
print(f' #Products: {products_count}')
all_products = range(products_count)
num_product_types = 4 # len(production_time_matrix)
print(f' #Product types: {num_product_types}')
horizon = 1000000
print(f' Horizon: {horizon}')
workers_count = len(worker_skill)
print(f' #Workers: {workers_count}')
all_workers = range(workers_count)
# Named tuple to store information about created variables.
worker_task_type = collections.namedtuple(
'worker_task_type', 'exists start end duration station product step')
task_type = collections.namedtuple(
'task_type', 'start end exists_vars worker_durations')
# Named tuple to manipulate solution information.
assigned_task_type = collections.namedtuple(
'assigned_task_type', 'start exists product station workstep duration')
# For every (product, station, workstep) task we create start/end variables and,
# for each worker, an optional `exists` literal saying whether that worker performs
# the task. Later, for any two tasks that may be assigned to the same worker, an
# `is_before` literal orders them (including walking time) whenever both exist,
# and the resulting constraints are added to the corresponding worker lists.
print('\nBuilding model')
all_tasks = {} # indexed by (product, station, workstep)
all_worker_tasks = collections.defaultdict(list) # indexed by workers
task_starts = []
print(f' Task variables: {time.perf_counter() - tic:0.4f}s')
for product in all_products:
for station in range(products_initial[0][product], stations_count):
for workstep in range(
number_manual_worksteps[products_type[product]][station]):
# Initial state: product is performed until products_initial[1][product]
# and the first step to do is products_initial[0][product]
if station == products_initial[0][product] and (
workstep < products_initial[1][product] - 1):
continue
suffix = '_%i_%i_%i' % (product, station, workstep)
# start time of each job
start_var = model.NewIntVar(0, horizon, 'start' + suffix)
# end time of each job
end_var = model.NewIntVar(0, horizon, 'end' + suffix)
# dictionary of all tasks
current_task = task_type(
start=start_var,
end=end_var,
exists_vars=[],
worker_durations=[])
all_tasks[product, station, workstep] = current_task
task_starts.append(start_var)
for worker in all_workers:
# for each worker we create an optional interval for each job
suffix_worker = '_%i_%i_%i_%i' % (worker, product, station, workstep)
# variable saying whether the given job is produced by the worker
exists_var = model.NewBoolVar('exists' + suffix_worker)
# duration for the jobs
duration_worker = int(
round(
float(production_time_matrix[products_type[product]][station]
[workstep]) * float(100) / float(worker_skill[worker])))
# dictionary of optional intervals
all_worker_tasks[worker].append(
worker_task_type(
exists=exists_var,
start=start_var,
end=end_var,
duration=duration_worker,
station=station,
product=product,
step=workstep))
# Append worker info on the current task.
current_task.exists_vars.append(exists_var)
current_task.worker_durations.append(duration_worker)
tasks = range(len(all_worker_tasks[0]))
# end = start + duration
print(f' Task durations: {time.perf_counter() - tic:0.4f}s')
for product in all_products:
for station in range(products_initial[0][product], stations_count):
for workstep in range(
number_manual_worksteps[products_type[product]][station]):
if station == products_initial[0][product] and (
workstep < products_initial[1][product] - 1):
continue
task = all_tasks[product, station, workstep]
min_duration = min(task.worker_durations)
shifted = [d - min_duration for d in task.worker_durations]
model.Add(task.end == task.start + min_duration +
cp_model.LinearExpr.ScalProd(task.exists_vars, shifted))
# each interval is present for exactly one worker
print(f' One worker per task: {time.perf_counter() - tic:0.4f}s')
for product in all_products:
for station in range(products_initial[0][product], stations_count):
for workstep in range(
number_manual_worksteps[products_type[product]][station]):
if station == products_initial[0][product] and (
workstep < products_initial[1][product] - 1):
continue
model.Add(sum(all_tasks[product, station, workstep].exists_vars) == 1)
# if the release time for a worker is greater than 0, then he can't start
# working until then
print(f' Release time: {time.perf_counter() - tic:0.4f}s')
for worker in all_workers:
if releasetime_workers[worker] > 0:
for task in tasks:
# travel from the worker's initial station to this task's own station
model.Add(all_worker_tasks[worker][task].start >=
releasetime_workers[worker] + worker_movement_time_matrix[
workers_initial_station[worker]][
all_worker_tasks[worker][task].station]).OnlyEnforceIf(
all_worker_tasks[worker][task].exists)
# if a product has some leftover time at a station, the product cannot start
# being processed elsewhere before the releasetime and no other product can be
# produced at that station until the product is done
print(f' Leftover constraints: {time.perf_counter() - tic:0.4f}s')
for product in all_products:
for station in range(products_initial[0][product], stations_count):
if leftover_job_products[0][product] <= 0:
continue
for workstep in range(
number_manual_worksteps[products_type[product]][station]):
if station == products_initial[0][product] and (
workstep < products_initial[1][product] - 1):
continue
model.Add(
all_tasks[product, station,
workstep].start >= leftover_job_products[0][product])
# the station is blocked for all further products until the release time
for product2 in range(product + 1, products_count):
model.Add(all_tasks[product2, leftover_job_products[1][product] - 1,
0].start >= leftover_job_products[0][product])
# the next buffer must be clear when the product finishes (but only if
# the final workstep at the station is being completed before the
# release time)
if products_initial[1][product] == 1:
for product1 in range(0, product):
if products_initial[0][product1] == leftover_job_products[1][
product]:
continue
model.Add(all_tasks[product1, leftover_job_products[1][product],
0].start <= leftover_job_products[0][product])
# precedence constraints stations
print(f' Station precedence constraints: {time.perf_counter() - tic:0.4f}s')
for product in all_products:
for station1 in range(products_initial[0][product], stations_count - 1):
station2 = station1 + 1 # for station2 in range(station1+1,stations_count):
model.Add(
all_tasks[product, station1,
number_manual_worksteps[products_type[product]][station1] -
1].end <= all_tasks[product, station2, 0].start)
# precedence constraints products
print(f' Product precedence constraints {time.perf_counter() - tic:0.4f}s')
for product1 in range(0, products_count - 1): # all_products:
product2 = product1 + 1 # for product2 in range(product1+1, products_count):
for station in range(products_initial[0][product1], stations_count):
if (station == products_initial[0][product2] and
products_initial[1][product2] != 1):
continue
model.Add(
all_tasks[product1, station,
number_manual_worksteps[products_type[product1]][station] -
1].end <= all_tasks[product2, station, 0].start)
# buffer constraint
print(f' Buffer constraints: {time.perf_counter() - tic:0.4f}s')
for product1 in range(0, products_count - 1): # all_products:
product2 = product1 + 1 # for product2 in range(product1+1, products_count):
for station in range(
max([products_initial[0][product1] - 1, products_initial[0][product2]]),
stations_count - 1):
if (station + 1 == products_initial[0][product1] and
products_initial[1][product1] != 1):
continue
model.Add(all_tasks[product1, station + 1, 0].start <= all_tasks[
product2, station,
number_manual_worksteps[products_type[product2]][station] - 1].end)
# automatic station
print(f' Automatic stations: {time.perf_counter() - tic:0.4f}s')
for product in all_products:
for station in range(products_initial[0][product], stations_count):
if (station == products_initial[0][product] and
0 < products_initial[1][product] - 1):
continue
if number_manual_worksteps[products_type[product]][station] == 2:
model.Add(
all_tasks[product, station,
0].end == all_tasks[product, station, 1].start -
automatic_worksteps_times_matrix[products_type[product]][station])
# Disjunctions between tasks.
print(f' Time disjunctions: {time.perf_counter() - tic:0.4f}s')
constraint_counter = 0
for worker in all_workers:
for task1 in tasks:
for task2 in range(task1 + 1, len(all_worker_tasks[worker])):
suffix = '_%i_%i_%i' % (worker, task1, task2)
station1 = all_worker_tasks[worker][task1].station
station2 = all_worker_tasks[worker][task2].station
product1 = all_worker_tasks[worker][task1].product
product2 = all_worker_tasks[worker][task2].product
if ((station1 >= station2) and (product1 >= product2)) and not (
(station1 == station2 + 1) and
(product1 == product2)) and not ((station1 == station2) and
(product1 == product2 + 1)):
continue
if ((station1 <= station2) and (product1 <= product2)) and not (
(station1 == station2 - 1) and
(product1 == product2)) and not ((station1 == station2) and
(product1 == product2 - 1)):
continue
constraint_counter += 1
is_before_var = model.NewBoolVar('is_before' + suffix)
exists1 = all_worker_tasks[worker][task1].exists
exists2 = all_worker_tasks[worker][task2].exists
model.Add(
all_worker_tasks[worker][task2].start >=
all_worker_tasks[worker][task1].end +
worker_movement_time_matrix[station1][station2]).OnlyEnforceIf(
[is_before_var, exists1, exists2])
model.Add(all_worker_tasks[worker][task1].start >= (
all_worker_tasks[worker][task2].end +
worker_movement_time_matrix[station2][station1])).OnlyEnforceIf(
[is_before_var.Not(), exists1, exists2])
print(' #is_before constraints: ', constraint_counter)
# Initial positions of the workers means that no production step can start
# before the worker has walked to the station from its initial position.
print(f' Initial worker positions: {time.perf_counter() - tic:0.4f}s')
for worker in all_workers:
for task in tasks:
model.Add(all_worker_tasks[worker][task].start >=
worker_movement_time_matrix[workers_initial_station[worker]][
all_worker_tasks[worker][task].station]).OnlyEnforceIf(
all_worker_tasks[worker][task].exists)
# Makespan objective.
print(f' Objective: {time.perf_counter() - tic:0.4f}s')
obj_var = model.NewIntVar(0, horizon, 'makespan')
model.Add(obj_var == all_tasks[
products_count - 1, stations_count - 1,
number_manual_worksteps[products_type[products_count -
1]][stations_count - 1] - 1].end)
model.Minimize(obj_var)
# Solve model.
tada = time.perf_counter()
model_loading_time = tada - tic
print('\nSolve model')
solver = cp_model.CpSolver()
solver.parameters.num_search_workers = 16 # if multiple workers
solver.parameters.max_time_in_seconds = max_time - model_loading_time
solver.parameters.log_search_progress = True
status = solver.Solve(model)
toc = time.perf_counter()
if status == cp_model.INFEASIBLE:
print(f'Shown infeasible in {toc - tic:0.4f} seconds')
status_message = 'INFEASIBLE'
solution_objective = horizon
solution_time = toc - tic
solution_status = 'INFEASIBLE'
if status == cp_model.MODEL_INVALID:
print(f'Shown model invalid in {toc - tic:0.4f} seconds')
status_message = 'MODEL_INVALID'
solution_objective = horizon
solution_time = toc - tic
solution_status = 'MODEL_INVALID'
if status == cp_model.OPTIMAL:
print(f'Solved optimally in {toc - tic:0.4f} seconds')
status_message = 'OPTIMAL'
# Create one list of assigned tasks per machine.
if status == cp_model.FEASIBLE:
print(f'Solved suboptimally in {toc - tic:0.4f} seconds')
status_message = 'SUBOPTIMAL'
if status == cp_model.OPTIMAL or status == cp_model.FEASIBLE:
# Finally print the solution found.
solution_objective = solver.ObjectiveValue()
solution_time = toc - tic
solution_status = status_message
#print('Schedule Length: %i' % solution_objective)
assigned_stations = collections.defaultdict(list)
for product_id in all_products:
for station_id in all_stations:
if station_id < products_initial[0][product_id]:
continue
for workstep_id in range(
number_manual_worksteps[products_type[product_id]][station_id]):
if station_id == products_initial[0][product_id] and (
workstep_id < products_initial[1][product_id] - 1):
continue
task = all_tasks[product_id, station_id, workstep_id]
d = -1
for var, duration in zip(task.exists_vars, task.worker_durations):
if solver.BooleanValue(var):
d = duration
assigned_stations[product_id].append(
assigned_task_type(
start=solver.Value(task.start),
exists=True, # exists=solver.Value(all_tasks[product_id, station_id, workstep_id].exists),
product=product_id,
station=station_id,
workstep=workstep_id,
duration=d))
# Create one list of assigned tasks per worker
assigned_products_workers = collections.defaultdict(list)
for worker_id in all_workers:
for task in tasks:
if solver.BooleanValue(all_worker_tasks[worker_id][task].exists):
assigned_products_workers[worker_id].append(
assigned_task_type(
start=solver.Value(all_worker_tasks[worker_id][task].start),
exists=True,
product=all_worker_tasks[worker_id][task].product,
station=all_worker_tasks[worker_id][task].station,
workstep=all_worker_tasks[worker_id][task].step,
duration=solver.Value(
all_worker_tasks[worker_id][task].duration)))
# Sort by starting time.
for worker in all_workers:
assigned_products_workers[worker].sort()
for product in all_products:
assigned_stations[product].sort()
# Write the solution to the XML input file
xml_writer = XML_Writer(config_path, init_path)
xml_writer.write_solution(sol_path, assigned_products_workers, assigned_stations, solution_objective, "OR-Tools", solution_time, solution_status, 0)
elif status not in (cp_model.INFEASIBLE, cp_model.MODEL_INVALID):
# status is UNKNOWN; the INFEASIBLE/MODEL_INVALID branches above already set the result fields
solution_objective = horizon
solution_time = toc - tic
solution_status = 'UNSOLVED'
conf_f = config_path.split(".")[0].split("/")[4]
ini_f = init_path.split(".")[0].split("/")[4]
sol_f = sol_path.split(".")[0].split("/")[4]
new_row = f"{conf_f},{ini_f},{sol_f},{solution_status},{solution_time},{solution_objective},{model_loading_time}, {horizon}\n"
with open('Results.csv','a') as fd:
fd.write(new_row)
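# Usage sketch (not part of the original class): the XML paths are placeholders
# (the result logging above expects paths nested at least five components deep),
# and max_time is the overall wall-clock budget in seconds.
if __name__ == '__main__':
    solver = ORSolver()
    solver.solve_with_disunctive_model(
        'Data/Instances/Small/Case_01/config.xml',
        'Data/Instances/Small/Case_01/init.xml',
        'Data/Instances/Small/Case_01/solution.xml',
        max_time=600)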
|
# Generated by Django 2.2.14 on 2020-07-29 19:05
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Recipes',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Title', models.CharField(max_length=100)),
('Category', models.CharField(choices=[('North Indian', 'North Indian'), ('North-Eastern', 'North-Eastern'), ('Rajasthani', 'Rajasthani'), ('Gujarati', 'Gujarati'), ('South Indian', 'South Indian'), ('Continental', 'Continental'), ('Chinese', 'Chinese'), ('Italian', 'Italian')], max_length=15)),
('Ingredient', models.TextField()),
('Steps', models.TextField()),
('Image', models.ImageField(default='none.jpg', upload_to='')),
('Date', models.DateField(auto_now_add=True)),
],
options={
'verbose_name_plural': 'Recipes',
},
),
]
|
def main():
height = get_height()
draw(height, height)
def draw(height, h):
if height == 0:
return
draw(height - 1, h)
print(" " * (h - height), end='')
print("#" * height, end='')
print(" ", end='')
print("#" * height)
def get_height():
while True:
try:
height = int(input("Height: "))
if (height > 0 and height < 9):
break
except ValueError:
pass
return height
main()
|
T = int(input())
max_val = (2 ** 32) - 1
for _ in range(T):
print (max_val - int(input()))
|
import sys
import yaml
import pandas as pd
import numpy as np
from .regression import Regression
from .classification import Classification
from .model import Model
from .preprocessing import Preprocessing, file_split_X_y
from .images import Images
class UnicornML:
__problem: str
__algorithms: list
__metric: str
model: object
output_classes: int
input_shape: tuple
cv: int
images: bool
X_train: np.ndarray
X_test: np.ndarray
y_train: np.ndarray
y_test: np.ndarray
def __init__(self, input=None, options=None):
if input is None:
input = {}
if options is None:
options = {}
if not bool(input):
sys.exit("Undefined input data")
X, y = None, None
if "file" in input:
self.images = False
data = pd.read_csv(input["file"])
label_index = input["label_col"] if "label_col" in input else -1
X, y = file_split_X_y(data, label_index)
elif "X" in input and "y" in input:
self.images = False
X, y = input["X"], input["Y"]
elif "images" in input:
self.images = True
directory = input["images"]
input_shape = (options["height"], options["width"], options["depth"])
if "fine_tuning" in options:
fine_tuning = options["fine_tuning"]
else:
fine_tuning = False
self.model = Images(input_shape, directory, fine_tuning=fine_tuning)
else:
sys.exit("Invalid options for input")
if not self.images:
self.cv = 5
self.X_train, self.X_test, self.y_train, self.y_test, (self.__problem, self.output_classes) = Preprocessing(X, y, self.cv)
self.input_shape = self.X_train.shape
with open("options.yaml") as file:
config = yaml.full_load(file)
if "algorithms" in options:
if not isinstance(options["algorithms"], list):
sys.exit("The \"algorithms\" paramater needs to be a list")
for alg in options["algorithms"]:
if not isinstance(alg, str):
sys.exit("The algorithm need to be a string")
if alg not in config["Problem"][self.__problem]["algorithms"]:
sys.exit(
"Invalid algorithm %s for a %s problem. Algorithms available:[%s]" % (
alg,
self.__problem,
", ".join(config["Problem"][self.__problem]["algorithms"])
)
)
self.__algorithms = options["algorithms"]
else:
self.__algorithms = config["Problem"][self.__problem]["algorithms"]
if "metric" in options:
if not isinstance(options["metric"], str):
sys.exit("The \"metric\" paramater needs to be a string (choose only one metric, please)")
if options["metric"] not in config["Problem"][self.__problem]["metrics"]:
sys.exit(
"Invalid metric %s for a %s problem. Metrics available:[%s]" % (
options["metric"],
self.__problem,
", ".join(config["Problem"][self.__problem]["metrics"])
)
)
self.__metric = options["metric"]
else:
self.__metric = config["Problem"][self.__problem]["metrics"][0]
print("\nIt's a %s problem\nSelected algorithms: [%s]\nSelected metric: [%s]\n" % (
self.__problem,
",".join(self.__algorithms), self.__metric
))
def Rainbow(self):
if self.images:
self.model.train()
else:
for algorithm in self.__get_model_algorithms():
sqrt = True if "sqrt" in algorithm.keys() else False
self.model.param_tunning_method(
algorithm["estimator"],
algorithm["desc"],
algorithm["params"],
sqrt
)
if self.__metric == "mse" and self.get_best_model(False) < 0.01:
print("Stopping training early, because a good enough result was achieved")
break
elif self.__metric == "accuracy" and self.get_best_model(False) > 0.95:
print("Stopping training early, because a good enough result was achieved")
break
def __get_model_algorithms(self):
if self.__problem == "Classification":
classificator = Classification(
self.input_shape,
self.__algorithms,
self.__metric,
self.output_classes
)
self.model = Model(
self.X_train, self.X_test, self.y_train, self.y_test,
classificator.get_metric(), 1, self.cv
)
algorithms = classificator.get_algorithms()
else:
regressor = Regression(
self.input_shape,
self.__algorithms,
self.__metric
)
self.model = Model(
self.X_train, self.X_test, self.y_train, self.y_test,
regressor.get_metric(), regressor.get_metric_sign(), self.cv
)
algorithms = regressor.get_algorithms()
return algorithms
def get_best_model(self, verbose=True, name=False):
if self.model.metric_sign == -1:
bestModel = sorted(self.model.results, key=lambda x: x["score"], reverse=False)[0]
else:
bestModel = sorted(self.model.results, key=lambda x: x["score"], reverse=True)[0]
if name:
return bestModel["name"]
elif verbose:
print("Best model: {0}\t Score: {1}".format(bestModel["name"], bestModel["score"]))
return bestModel["model"]
else:
return bestModel["score"]
def predict(self, X):
if self.images:
return 1
else:
return self.get_best_model().predict(X)
def evaluate(self, y, yatt):
if self.images:
return self.model.evaluate()
else:
if self.get_best_model(False, True) == "Neural Networks":
yatt = np.argmax(yatt, axis=1)
return self.model.metric(y, yatt)
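# Usage sketch (not part of the original module): "train.csv" and the chosen
# metric are placeholders; valid algorithms/metrics come from options.yaml.
if __name__ == "__main__":
    uml = UnicornML(
        input={"file": "train.csv", "label_col": -1},
        options={"metric": "accuracy"},  # assumes the detected problem is Classification
    )
    uml.Rainbow()
    uml.get_best_model()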
|
__title__ = 'logbook-dptk'
__description__ = 'File summary pipeline for DPTK'
__url__ = 'https://github.com/sbdp/logbook-dptk'
__version__ = '0.1a0'
__author__ = 'SBDP'
__author_email__ = 'info@sbdp.invalid'
|
from django.db import models
from django.utils import timezone
class Comment(models.Model):
name = models.CharField(max_length=20)
comment = models.TextField()
date_added = models.DateTimeField(default=timezone.now)
def __str__(self):
return '<Name: {}, ID: {}>'.format(self.name,self.id)
|
from ctapipe.utils import get_dataset_path
from ctapipe.io.eventsource import EventSource
def test_construct():
try:
EventSource(config=None, tool=None)
except TypeError:
return
raise TypeError("EventSource should raise a TypeError when "
"instantiated due to its abstract methods")
class DummyReader(EventSource):
"""
Simple working EventSource
"""
def _generator(self):
return range(len(self.input_url))
@staticmethod
def is_compatible(file_path):
return False
def test_can_be_implemented():
dataset = get_dataset_path("gamma_test.simtel.gz")
test_reader = DummyReader(input_url=dataset)
assert test_reader is not None
def test_is_iterable():
dataset = get_dataset_path("gamma_test.simtel.gz")
test_reader = DummyReader(input_url=dataset)
for _ in test_reader:
pass
|
from django.shortcuts import render, get_object_or_404
from .forms import AddEventForm
from .models import Event
def index(request):
events = Event.objects.all()
return render(request, 'index.html', {'events': events})
def event_detail(request, pk):
event = get_object_or_404(Event, pk=pk)
return render(request, 'event_detail.html', {'event': event})
def event_add(request):
if request.method == 'POST':
form = AddEventForm(request.POST)
if form.is_valid():
form.save()  # assumes AddEventForm is a ModelForm for Event
else:
form = AddEventForm()
return render(request, 'event_add.html', {'form': form})
|
# @name: Katana-DorkScanner
# @repo: https://github.com/adnane-X-tebbaa/Katana-ds
# @author: Adnane tebbaa (AXT)
# Bit.ly-file Dev
"""
MIT License
Copyright (c) 2020 adnane tebbaa
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import requests
import os
import time
import sys
import strgen
from bs4 import BeautifulSoup , SoupStrainer
import re
from termcolor import colored, cprint
O = """
____ _ _ _
| __ )(_) |_ | |_ _
| _ \| | __| | | | | | Katana-ds V1.5
| |_) | | |_ _| | |_| | Bit.ly Mode
|____/|_|\__(_)_|\__, | Coded by Adnane-X-Tebbaa (AXT)
|___/
"""
print (O)
print(colored('Note: the script is still in dev', 'red', 'on_grey'))
Xbeta = "https://bit.ly/"
resp = requests.get(Xbeta)
print("[+] got :" , resp.status_code )
print(colored('[+] bit.ly is up', 'green', 'on_grey'))
def a () :
randomString1 = strgen.StringGenerator("[\w\c]{5}").render()
randomString2 = strgen.StringGenerator("[\w\c]{5}").render()
randomString3 = strgen.StringGenerator("[\w\c]{5}").render()
randomString4 = strgen.StringGenerator("[\w\c]{6}").render()
randomString5 = strgen.StringGenerator("[\w\c]{6}").render()
randomString6 = strgen.StringGenerator("[\w\c]{6}").render()
randomString7 = strgen.StringGenerator("[\w\c]{6}").render()
randomString8 = strgen.StringGenerator("[\w\c]{6}").render()
randomString9 = strgen.StringGenerator("[\w\c]{6}").render()
randomString10 = strgen.StringGenerator("[\w\c]{6}").render()
page = requests.get(Xbeta + randomString1)
print (Xbeta + randomString1 , page.status_code )
time.sleep(1)
page = requests.get(Xbeta + randomString2)
print (Xbeta + randomString2 , page.status_code )
time.sleep(1)
page = requests.get(Xbeta + randomString3)
print (Xbeta + randomString3 , page.status_code )
time.sleep(1)
page = requests.get(Xbeta + randomString4)
print (Xbeta + randomString4 , page.status_code )
time.sleep(1)
page = requests.get(Xbeta + randomString5)
print (Xbeta + randomString5 , page.status_code )
time.sleep(1)
page = requests.get(Xbeta + randomString6)
print (Xbeta + randomString6 , page.status_code )
time.sleep(1)
page = requests.get(Xbeta + randomString7)
print (Xbeta + randomString7 , page.status_code )
time.sleep(1)
page = requests.get(Xbeta + randomString8)
print (Xbeta + randomString8 , page.status_code )
time.sleep(1)
page = requests.get(Xbeta + randomString9)
print (Xbeta + randomString9 , page.status_code )
time.sleep(1)
page = requests.get(Xbeta + randomString10)
print (Xbeta + randomString10 , page.status_code )
try :
a ()
except requests.exceptions.RequestException as e:
print(colored('got an ERROR...Overriding...:', 'green', 'on_grey'))
print(colored('[>] Sleeping for 5s :', 'yellow', 'on_grey'), resp.status_code)
time.sleep(5)
a ()
|
from typing import NoReturn
from uuid import uuid4
from sqlalchemy import (
Boolean,
Column,
DateTime,
ForeignKey,
Integer,
String,
sql,
)
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import relationship
from server.models.security import pw_context
Base = declarative_base()
class Entity(Base):
"""
Represents a base entity.
This adds a non-nullable integer as primary key for models/entities
that inherit from `Entity`. An index is also created.
"""
__abstract__ = True
id = Column(Integer, nullable=False, primary_key=True, index=True)
class Product(Entity):
"""
Represents a product entity.
Contains the code to be redeemed.
"""
__tablename__ = "product"
uuid = Column(
UUID(as_uuid=True), nullable=False, unique=True, default=uuid4
)
code = Column(String, nullable=False, unique=True, index=True)
summary = Column(String, nullable=False)
taken = Column(Boolean, nullable=False, default=sql.false())
created_at = Column(
DateTime, nullable=False, server_default=sql.func.now()
)
updated_at = Column(
DateTime,
nullable=False,
server_default=sql.func.now(),
onupdate=sql.func.now(),
)
def __repr__(self): # pragma: no cover
return str(self.__dict__)
class Order(Entity):
"""
Represents an order entity.
Contains information from a code redemption request.
Consider the information of the moderator who requested the code, the user
who received it, general date/time information, and the product identifier.
"""
__tablename__ = "order"
uuid = Column(
UUID(as_uuid=True), nullable=False, unique=True, default=uuid4
)
mod_id = Column(String, nullable=False)
mod_display_name = Column(String, nullable=False)
owner_display_name = Column(String, nullable=False)
requested_at = Column(DateTime, nullable=False, default=sql.func.now())
product_id = Column(Integer, ForeignKey("product.id"), nullable=False)
product = relationship("Product")
def __repr__(self): # pragma: no cover
return str(self.__dict__)
class Application(Entity):
"""
Represents an application entity.
We use it to define what applications can consume our API.
"""
__tablename__ = "application"
username = Column(String, nullable=False, unique=True)
pass_hash = Column(String, nullable=False)
@hybrid_property
def password(self):
"""Retrieve the hashed password."""
return self.pass_hash
@password.setter # noqa
def password(self, plain_password: str) -> NoReturn:
"""
Hash the password.
Args:
- plain_password: the plain password to be hashed.
"""
self.pass_hash = pw_context.hash(plain_password)
def check_password(self, plain_password: str) -> bool:
"""
Compare the hashed password with the plain one.
Args:
- plain_password: the plain password to be compared with the hash.
"""
return pw_context.verify(plain_password, self.pass_hash)
def __repr__(self): # pragma: no cover
return str(self.__dict__)
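# Usage sketch (not part of the original module): shows the hashed-password
# hybrid property on Application; assumes `pw_context` is a configured passlib
# CryptContext (as imported above) and does not touch the database.
if __name__ == "__main__":
    app = Application(username="demo-app")
    app.password = "s3cr3t"              # stored hashed in pass_hash
    assert app.check_password("s3cr3t")
    assert not app.check_password("wrong")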
|
# -*- coding: utf-8 -*-
#
# Authors: Swolf <swolfforever@gmail.com>
# Date: 2021/3/4
# License: MIT License
"""
High-gamma dataset.
"""
import logging
import re
from typing import Union, Optional, Dict, List, Tuple
from pathlib import Path
import numpy as np
import h5py
import mne
from mne.io import Raw
from mne.channels import make_standard_montage
from .base import BaseDataset
from ..utils.download import mne_data_path
from ..utils.channels import upper_ch_names
GIN_URL = "https://web.gin.g-node.org/robintibor/high-gamma-dataset/raw/master/data"
class Schirrmeister2017(BaseDataset):
"""High-gamma dataset discribed in Schirrmeister et al. 2017
Our “High-Gamma Dataset” is a 128-electrode dataset (of which we later only use
44 sensors covering the motor cortex, see Section 2.7.1), obtained from 14
healthy subjects (6 female, 2 left-handed, age 27.2 ± 3.6 (mean ± std)) with
roughly 1000 (963.1 ± 150.9, mean ± std) four-second trials of executed
movements divided into 13 runs per subject. The four classes of movements were
movements of either the left hand, the right hand, both feet, and rest (no
movement, but same type of visual cue as for the other classes). The training
set consists of the approx. 880 trials of all runs except the last two runs,
the test set of the approx. 160 trials of the last 2 runs. This dataset was
acquired in an EEG lab optimized for non-invasive detection of high-frequency
movement-related EEG components (Ball et al., 2008; Darvas et al., 2010).
Depending on the direction of a gray arrow that was shown on black
background, the subjects had to repetitively clench their toes (downward arrow),
perform sequential finger-tapping of their left (leftward arrow) or right
(rightward arrow) hand, or relax (upward arrow). The movements were selected
to require little proximal muscular activity while still being complex enough
to keep subjects involved. Within the 4-s trials, the subjects performed the
repetitive movements at their own pace, which had to be maintained as long as
the arrow was showing. Per run, 80 arrows were displayed for 4 s each, with 3
to 4 s of continuous random inter-trial interval. The order of presentation
was pseudo-randomized, with all four arrows being shown every four trials.
Ideally 13 runs were performed to collect 260 trials of each movement and rest.
The stimuli were presented and the data recorded with BCI2000 (Schalk et al.,
2004). The experiment was approved by the ethical committee of the University
of Freiburg.
References
----------
.. [1] Schirrmeister, Robin Tibor, et al. "Deep learning with convolutional
neural networks for EEG decoding and visualization." Human brain mapping 38.11
(2017): 5391-5420.
"""
_EVENTS = {
"right_hand": (1, (0, 4)),
"left_hand": (2, (0, 4)),
"rest": (3, (0, 4)),
"feet": (4, (0, 4)),
}
_CHANNELS = [
'FP1', 'FP2', 'FPZ', 'F7', 'F3', 'FZ', 'F4', 'F8', 'FC5', 'FC1', 'FC2', 'FC6', 'T7', 'C3', 'CZ', 'C4', 'T8', 'CP5', 'CP1', 'CP2', 'CP6', 'P7', 'P3', 'PZ', 'P4', 'P8', 'POZ', 'O1', 'OZ', 'O2', 'AF7', 'AF3', 'AF4', 'AF8', 'F5', 'F1', 'F2', 'F6', 'FC3', 'FCZ', 'FC4', 'C5', 'C1', 'C2', 'C6', 'CP3', 'CPZ', 'CP4', 'P5', 'P1', 'P2', 'P6', 'PO5', 'PO3', 'PO4', 'PO6', 'FT7', 'FT8', 'TP7', 'TP8', 'PO7', 'PO8', 'FT9', 'FT10', 'TPP9H', 'TPP10H', 'PO9', 'PO10', 'P9', 'P10', 'AFF1', 'AFZ', 'AFF2', 'FFC5H', 'FFC3H', 'FFC4H', 'FFC6H', 'FCC5H', 'FCC3H', 'FCC4H', 'FCC6H', 'CCP5H', 'CCP3H', 'CCP4H', 'CCP6H', 'CPP5H', 'CPP3H', 'CPP4H', 'CPP6H', 'PPO1', 'PPO2', 'I1', 'IZ', 'I2', 'AFP3H', 'AFP4H', 'AFF5H', 'AFF6H', 'FFT7H', 'FFC1H', 'FFC2H', 'FFT8H', 'FTT9H', 'FTT7H', 'FCC1H', 'FCC2H', 'FTT8H', 'FTT10H', 'TTP7H', 'CCP1H', 'CCP2H', 'TTP8H', 'TPP7H', 'CPP1H', 'CPP2H', 'TPP8H', 'PPO9H', 'PPO5H', 'PPO6H', 'PPO10H', 'POO9H', 'POO3H', 'POO4H', 'POO10H', 'OI1H', 'OI2H'
]
def __init__(self):
super().__init__(
dataset_code='schirrmeister2017',
subjects=list(range(1, 15)),
events=self._EVENTS,
channels=self._CHANNELS,
srate=500,
paradigm='imagery'
)
def data_path(self,
subject: Union[str, int],
path: Optional[Union[str, Path]] = None,
force_update: bool = False,
update_path: Optional[bool] = None,
proxies: Optional[Dict[str, str]] = None,
verbose: Optional[Union[bool, str, int]] = None) -> List[List[Union[str, Path]]]:
if subject not in self.subjects:
raise(ValueError("Invalid subject id"))
dests = []
base_url = '{u:s}/{t:s}/{s:d}.mat'
dests = [
[
mne_data_path(base_url.format(u=GIN_URL, t=t, s=subject), self.dataset_code,
path=path, proxies=proxies, force_update=force_update, update_path=update_path) for t in ['train', 'test']
]
]
return dests
def _get_single_subject_data(self, subject: Union[str, int],
verbose: Optional[Union[bool, str, int]] = None) -> Dict[str, Dict[str, Raw]]:
dests = self.data_path(subject)
montage = make_standard_montage('standard_1005')
montage.ch_names = [ch_name.upper() for ch_name in montage.ch_names]
sess = dict()
for isess, run_dests in enumerate(dests):
runs = dict()
for irun, run_array in enumerate(run_dests):
raw = BBCIDataset(run_array).load()
raw = upper_ch_names(raw)
raw.set_montage(montage)
runs['run_{:d}'.format(irun)] = raw
sess['session_{:d}'.format(isess)] = runs
return sess
class BBCIDataset(object):
"""
Loader class for files created by saving BBCI files in matlab (make
sure to save with '-v7.3' in matlab, see
https://de.mathworks.com/help/matlab/import_export/mat-file-versions.html#buk6i87
)
Parameters
----------
filename: str
load_sensor_names: list of str, optional
Also speeds up loading if you only load some sensors.
None means load all sensors.
Copyright Robin Schirrmeister, 2017
Altered by Vinay Jayaram, 2018
"""
def __init__(self, filename, load_sensor_names=None):
self.__dict__.update(locals())
del self.self
def load(self):
cnt = self._load_continuous_signal()
cnt = self._add_markers(cnt)
return cnt
def _load_continuous_signal(self):
wanted_chan_inds, wanted_sensor_names = self._determine_sensors()
fs = self._determine_samplingrate()
with h5py.File(self.filename, 'r') as h5file:
samples = int(h5file['nfo']['T'][0, 0])
cnt_signal_shape = (samples, len(wanted_chan_inds))
continuous_signal = np.ones(cnt_signal_shape,
dtype=np.float32) * np.nan
for chan_ind_arr, chan_ind_set in enumerate(wanted_chan_inds):
# + 1 because matlab/this hdf5-naming logic
# has 1-based indexing
# i.e ch1,ch2,....
chan_set_name = 'ch' + str(chan_ind_set + 1)
# first 0 to unpack into vector, before it is 1xN matrix
chan_signal = h5file[chan_set_name][
:].squeeze() # already load into memory
continuous_signal[:, chan_ind_arr] = chan_signal
assert not np.any(
np.isnan(continuous_signal)), "No NaNs expected in signal"
# Assume we can't know the channel type here automatically
ch_types = ['eeg'] * len(wanted_chan_inds)
info = mne.create_info(ch_names=wanted_sensor_names, sfreq=fs,
ch_types=ch_types)
# Scale to volts from microvolts, (VJ 19.6.18)
continuous_signal = continuous_signal * 1e-6
cnt = mne.io.RawArray(continuous_signal.T, info)
return cnt
def _determine_sensors(self):
all_sensor_names = self.get_all_sensors(self.filename, pattern=None)
if self.load_sensor_names is None:
# if no sensor names given, take all EEG-chans
eeg_sensor_names = all_sensor_names
eeg_sensor_names = filter(lambda s: not s.startswith('BIP'),
eeg_sensor_names)
eeg_sensor_names = filter(lambda s: not s.startswith('E'),
eeg_sensor_names)
eeg_sensor_names = filter(lambda s: not s.startswith('Microphone'),
eeg_sensor_names)
eeg_sensor_names = filter(lambda s: not s.startswith('Breath'),
eeg_sensor_names)
eeg_sensor_names = filter(lambda s: not s.startswith('GSR'),
eeg_sensor_names)
eeg_sensor_names = list(eeg_sensor_names)
assert (len(eeg_sensor_names) in set(
[128, 64, 32, 16])), "check this code if you have different sensors..." # noqa
self.load_sensor_names = eeg_sensor_names
chan_inds = self._determine_chan_inds(all_sensor_names,
self.load_sensor_names)
return chan_inds, self.load_sensor_names
def _determine_samplingrate(self):
with h5py.File(self.filename, 'r') as h5file:
fs = h5file['nfo']['fs'][0, 0]
assert isinstance(fs, int) or fs.is_integer()
fs = int(fs)
return fs
@staticmethod
def _determine_chan_inds(all_sensor_names, sensor_names):
assert sensor_names is not None
chan_inds = [all_sensor_names.index(s) for s in sensor_names]
assert len(chan_inds) == len(sensor_names), ("All"
"sensors"
"should be there.")
# TODO: is it possible for this to fail? the list
# comp fails first right?
assert len(set(chan_inds)) == len(chan_inds), ("No"
"duplicated sensors"
"wanted.")
return chan_inds
@staticmethod
def get_all_sensors(filename, pattern=None):
"""
Get all sensors that exist in the given file.
Parameters
----------
filename: str
pattern: str, optional
Only return those sensor names that match the given pattern.
Returns
-------
sensor_names: list of str
Sensor names that match the pattern or all
sensor names in the file.
"""
with h5py.File(filename, 'r') as h5file:
clab_set = h5file['nfo']['clab'][:].squeeze()
all_sensor_names = [''.join(
chr(c.squeeze()) for c in h5file[obj_ref])
for obj_ref in clab_set]
if pattern is not None:
all_sensor_names = filter(
lambda sname: re.search(pattern, sname),
all_sensor_names)
return all_sensor_names
def _add_markers(self, cnt):
with h5py.File(self.filename, 'r') as h5file:
event_times_in_ms = h5file['mrk']['time'][:].squeeze()
event_classes = h5file['mrk']['event']['desc'][:].squeeze().astype(
np.int64)
# Check whether class names known and correct order
# class_name_set = h5file['nfo']['className'][:].squeeze()
# all_class_names = [''.join(chr(c) for c in h5file[obj_ref])
# for obj_ref in class_name_set]
event_times_in_samples = event_times_in_ms * cnt.info['sfreq'] / 1000.0
event_times_in_samples = np.uint32(np.round(event_times_in_samples))
# Check if there are markers at the same time
previous_i_sample = -1
for i_event, (i_sample, id_class) in enumerate(
zip(event_times_in_samples, event_classes)):
if i_sample == previous_i_sample:
info = "{:d}: ({:.0f} and {:.0f}).\n".format(i_sample,
event_classes[
i_event - 1],
event_classes[
i_event])
log.warning("Same sample has at least two markers.\n"
+ info +
"Marker codes will be summed.")
previous_i_sample = i_sample
# Now create stim chan
stim_chan = np.zeros_like(cnt.get_data()[0])
for i_sample, id_class in zip(event_times_in_samples, event_classes):
stim_chan[i_sample] += id_class
info = mne.create_info(ch_names=['STI 014'],
sfreq=cnt.info['sfreq'],
ch_types=['stim'])
stim_cnt = mne.io.RawArray(stim_chan[None], info, verbose='WARNING')
cnt = cnt.add_channels([stim_cnt])
event_arr = [event_times_in_samples,
[0] * len(event_times_in_samples),
event_classes]
cnt.info['events'] = np.array(event_arr).T
return cnt
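# Usage sketch (not part of the original module): loads subject 1 through the
# loader defined above; the first call downloads the .mat files via mne_data_path,
# which needs network access and disk space. The exact public accessor depends on
# the surrounding BaseDataset API.
if __name__ == "__main__":
    dataset = Schirrmeister2017()
    sessions = dataset._get_single_subject_data(1)
    raw = sessions["session_0"]["run_0"]
    print(raw.info)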
|
# vi: set shiftwidth=4 tabstop=4 expandtab:
import datetime
import itertools
RUN_LONG_TESTS = False
def get_public_keys_from_file(file_path="../../resources/year2020_day25_input.txt"):
with open(file_path) as f:
return [int(l) for l in f]
MODULO = 20201227
def transform(subject_number, loop_size):
return pow(subject_number, loop_size, MODULO)
def find_loop_size(subject_number, public_key):
# There must be a better way
v = 1
for i in itertools.count(start=1):
v = (v * subject_number) % MODULO
if v == public_key:
assert v == transform(subject_number, i)
return i
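# Optional alternative (not part of the original solution): baby-step giant-step
# discrete log, which runs in O(sqrt(MODULO)) steps instead of a linear scan.
# Requires Python 3.8+ for math.isqrt and pow() with a negative exponent.
import math

def find_loop_size_bsgs(subject_number, public_key):
    """Return an x with transform(subject_number, x) == public_key, or None."""
    m = math.isqrt(MODULO) + 1
    baby = {}
    e = 1
    for j in range(m):                        # baby steps: subject_number**j
        baby.setdefault(e, j)
        e = (e * subject_number) % MODULO
    factor = pow(subject_number, -m, MODULO)  # modular inverse of subject_number**m
    gamma = public_key % MODULO
    for i in range(m):                        # giant steps: public_key * factor**i
        if gamma in baby:
            return i * m + baby[gamma]
        gamma = (gamma * factor) % MODULO
    return None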
def run_tests():
pk1, pk2 = 5764801, 17807724
ls1, ls2 = 8, 11
key = 14897079
assert transform(7, ls1) == pk1
assert transform(7, ls2) == pk2
assert find_loop_size(7, pk1) == ls1
assert find_loop_size(7, pk2) == ls2
assert transform(pk1, ls2) == key
assert transform(pk2, ls1) == key
def get_solutions():
pk1, pk2 = get_public_keys_from_file()
if RUN_LONG_TESTS:
ls1 = find_loop_size(7, pk1)
ls2 = find_loop_size(7, pk2)
print(transform(pk1, ls2) == 7269858)
print(transform(pk2, ls1) == 7269858)
if __name__ == "__main__":
RUN_LONG_TESTS = True
begin = datetime.datetime.now()
run_tests()
get_solutions()
end = datetime.datetime.now()
print(end - begin)
|
import sqlite3
conn = sqlite3.connect('resultsdb.sqlite')
c = conn.cursor()
#c.execute("CREATE TABLE Results (address text, burglaries integer)")
c.execute("INSERT INTO Results VALUES ('Queen Vic', 2)")
conn.commit()
conn.close()
|
import numpy as np
arrs = []
for i in range(60):
arrs.append(np.load('latent_means/latent_means_%d.npy' % i))
full = np.concatenate(arrs)
np.save('latent_means/latent_means.npy', full)
|
#File: Ex019_Counter_Sunk_Holes.py
#To use this example file, you need to first follow the "Using CadQuery From Inside FreeCAD"
#instructions here: https://github.com/dcowden/cadquery#installing----using-cadquery-from-inside-freecad
#You run this example by typing the following in the FreeCAD python console, making sure to change
#the path to this example, and the name of the example appropriately.
#import sys
#sys.path.append('/home/user/Downloads/cadquery/examples/FreeCAD')
#import Ex019_Counter_Sunk_Holes
#If you need to reload the part after making a change, you can use the following lines within the FreeCAD console.
#reload(Ex019_Counter_Sunk_Holes)
#You'll need to delete the original shape that was created, and the new shape should be named sequentially
# (Shape001, etc).
#You can also tie these blocks of code to macros, buttons, and keybindings in FreeCAD for quicker access.
#You can get more information on this example at
# http://parametricparts.com/docs/examples.html#an-extruded-prismatic-solid
import cadquery
import Part
#Create a plate with 4 counter-sunk holes in it
result = cadquery.Workplane(cadquery.Plane.XY()).box(4, 2, 0.5).faces(">Z").workplane() \
.rect(3.5, 1.5, forConstruction=True)\
.vertices().cskHole(0.125, 0.25, 82.0, depth=None)
#Boiler plate code to render our solid in FreeCAD's GUI
Part.show(result.toFreecad())
|
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import os
import sys
import collections
import numpy as np
from random import randint
import pytest
from mmgroup.dev.hadamard.hadamard_t import bitparity
from mmgroup.dev.mm_op.mm_op import MM_Op, INT_BITS
from mmgroup.tests.test_hadamard.matrices import hadamard_matrix
from mmgroup.tests.test_hadamard.matrices import msym16
from mmgroup.tests.test_hadamard.matrices import mat_l_16
from mmgroup.tests.test_hadamard.cythonize import SIZES
from mmgroup.tests.test_hadamard.cythonize import PRIMES
from mmgroup.tests.test_hadamard.cythonize import build, kill
from mmgroup.tests.spaces.sparse_mm_space import SparseMmV
from mmgroup.tests.groups.mgroup_n import MGroupNWord
NP_DTYPE = np.uint32 if INT_BITS == 32 else np.uint64
V = SparseMmV
G = MGroupNWord
################################################################
# Build external module with code to be tested
################################################################
test_hadamard = None
def build_code():
"""Build Cython code, import is as module test_hadamard
This function builds the Cython module
mmgroup.tests.test_hadamard.temp.test_hadamard
an imports that module as test_hadamard.
Since that build process takes some time, we do it on
demand only.
"""
global test_hadamard
if test_hadamard is None:
build()
print("importing test_hadamard...")
from mmgroup.tests.test_hadamard.temp import test_hadamard as t
print("import done")
test_hadamard = t
################################################################
# Auxiliary functions
################################################################
LOG_BIN = {4:2, 16:4, 64:6}
def pwr2modp(k, p):
"""Return 2**k mod p for any integer k"""
if k < 0:
assert p & 1
return pow((p + 1) >> 1, -k, p)
return pow(2, k, p)
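# Quick illustrative self-checks (added examples, not part of the original tests):
# 2**-1 mod 7 is 4 because 2 * 4 = 8 = 1 (mod 7), and 2**3 mod 7 is 1.
assert pwr2modp(-1, 7) == 4
assert pwr2modp(3, 7) == 1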
################################################################
# Wrapper for functions in library test_hadamard.pyx
################################################################
def matrix_function(name, p, *data, **kwds):
def pack_input_vector(data, dest, offset):
for i in range(VECTOR_INTS):
value = 0
part = data[i * INT_FIELDS : (i+1) * INT_FIELDS]
for j, x in enumerate(part):
value |= (int(x) % p) << (j * FIELD_BITS)
dest[offset + i] = value
    try:
        verbose = int(kwds["verbose"])
    except (KeyError, ValueError, TypeError):
        verbose = 0
basics = MM_Op(p)
vlen = len(data[0])
if name[-1:].isdigit():
function_name = "mod%d_%s_test" % (p, name)
else:
function_name = "mod%d_%s%d_test" % (p, name, vlen)
f = test_hadamard.__dict__[function_name]
FIELD_BITS = basics.FIELD_BITS
INT_FIELDS = basics.INT_FIELDS
VECTOR_INTS = max(1, vlen // INT_FIELDS)
w_len = max(10, len(data)) * VECTOR_INTS
w = np.zeros(w_len, dtype = NP_DTYPE)
for i, d in enumerate(data):
pack_input_vector(d, w, i * VECTOR_INTS)
if verbose > 1:
print("Input to function %s, p = %d, len = %d:" % (
name, p, vlen))
print([hex(x) for x in w[:VECTOR_INTS]])
f(w)
if verbose > 1:
print("Output of function %s:" % name)
print([hex(x) for x in w[:VECTOR_INTS]])
result = np.zeros(vlen, dtype = np.int32)
for i in range(vlen):
index, sh = divmod(i, INT_FIELDS)
o = int(w[index]) >> (sh * FIELD_BITS)
result[i] = ((o & p) % p)
return result
################################################################
# Testing multiplication with Hadamard matrix
################################################################
def hadamard_function(p, vector, verbose = 0):
return matrix_function("hadamard", p, vector, verbose=verbose)
def one_test_hadamard_function(p, vector, verbose = 0):
if verbose:
print("\nTest multiplication v * H with Hadamard matrix H, v =")
print(vector)
vector = np.array(vector, dtype = np.int32) % p
obtained = hadamard_function(p, vector, verbose = verbose)
k = LOG_BIN[len(vector)]
h_matrix = hadamard_matrix(k) * pwr2modp(-k >> 1, p) % p
expected = (vector @ h_matrix) % p
ok = (obtained == expected).all()
if verbose or not ok:
if not ok:
print("Multiplication v * H with Hadmard matrix H, v =")
print(vector)
print("Result obtained:")
print(obtained)
if not ok:
print("Result expected:")
print(expected)
raise ValueError("Wrong result")
def hadamard_function_testcases(n_cases = 5):
test_cases = [
(3, [1,0,0,0]),
(3, [0,1,0,0]),
(3, [0,0,1,0]),
(3, [0,0,0,1]),
]
for t in test_cases:
yield t
for i in range(n_cases):
for p in PRIMES:
for l in SIZES:
v = [randint(0, p - 1) for l in range(1 << l)]
yield p, v
@pytest.mark.compiler
@pytest.mark.slow
@pytest.mark.hadamard
def test_hadamard_function(n_cases = 5, verbose = 0):
build_code()
print("Test C code for multiplication with Hadamard matrix")
for p, v in hadamard_function_testcases(n_cases):
one_test_hadamard_function(p, v, verbose)
print("Test passed")
################################################################
# Testing exchanging entries with odd parity
################################################################
def xch_parity_function(p, vector, verbose = 0):
return matrix_function("xch_parity", p, vector, verbose=verbose)
def ref_xch_parity(vector):
l = len(vector)
res = np.zeros(l, dtype = np.int32)
for i in range(l):
j = l - i - 1 if bitparity(i) else i
res[i] = vector[j]
return res
def one_test_xch_parity_function(p, vector, verbose = 0):
if verbose:
print("\nTest exchanging entries of v with odd parity, v =")
print(vector)
vector = np.array(vector, dtype = np.int32) % p
obtained = xch_parity_function(p, vector, verbose = verbose)
k = LOG_BIN[len(vector)]
expected = ref_xch_parity(vector) % p
ok = (obtained == expected).all()
if verbose or not ok:
if not ok:
s = "Exchanging entries of v (mod %d) with odd parity, v ="
print(s % p)
print(vector)
print("Result obtained:")
print(obtained)
if not ok:
print("Result expected:")
print(expected)
raise ValueError("Wrong result")
def xch_parity_function_testcases(n_cases = 5):
test_p = [3, 7, 127]
for p in test_p:
if not p in PRIMES:
continue
for i in range(64):
lst = [0] * 64
lst[i] = 1
yield p, lst
for i in range(n_cases):
for p in PRIMES:
v = [randint(0, p - 1) for l in range(64)]
yield p, v
@pytest.mark.compiler
@pytest.mark.slow
@pytest.mark.hadamard
def test_xch_parity_function(n_cases = 5, verbose = 0):
build_code()
print("Test C code for exchanging entries with odd parity")
for p, v in xch_parity_function_testcases(n_cases):
one_test_xch_parity_function(p, v, verbose)
print("Test passed")
################################################################
# Testing operation t done by 64 times 64 matrices
################################################################
def op_t64_function(p, vector, exp, verbose = 0):
exp = exp % 3
if exp == 0: return vector
x = [exp - 1]
return matrix_function("op_t", p, vector, x, verbose=verbose)
def ref_op_t64(p, vector, exp):
space = V(p)
v = space()
for i in range(64):
v += int(vector[i]) * space('T',0, i)
v *= G('t', exp)
res = np.zeros(64, dtype = np.int32)
for i in range(64):
res[i] = v['T', 0, i]
return res
def one_test_op_t64_function(p, vector, exp, verbose = 0):
if verbose:
print("\nTest op t64, exp = ", exp, ", v =\n")
print(vector)
vector = np.array(vector, dtype = np.int32) % p
obtained = op_t64_function(p, vector, exp, verbose = verbose)
k = LOG_BIN[len(vector)]
expected = ref_op_t64(p, vector, exp)
ok = (obtained == expected).all()
if verbose or not ok:
if not ok:
s = "\nTest op t64 mod %d, exp = %d, v ="
print(s % (p, exp))
print(vector)
print("Result obtained:")
print(obtained)
if not ok:
print("Result expected:")
print(expected)
raise ValueError("Wrong result")
def op_p64_function_testcases(n_cases = 5):
for p in PRIMES:
for i in range(64):
lst = [0] * 64
lst[i] = 1
yield p, lst, 1
yield p, lst, 2
for i in range(n_cases):
for p in PRIMES:
v = [randint(0, p - 1) for l in range(64)]
yield p, v, 1
yield p, v, 2
@pytest.mark.compiler
@pytest.mark.slow
@pytest.mark.hadamard
def test_op_p64_function(n_cases = 5, verbose = 0):
build_code()
print("Test operator t on tag 'T' done by 64 x 64 matrices")
for p, v, exp1 in op_p64_function_testcases(n_cases):
one_test_op_t64_function(p, v, exp1, verbose)
print("Test passed")
################################################################
# Testing operation t done by 3 times 3 matrices
################################################################
def v24_ints(p):
return MM_Op(p).V24_INTS
def int_fields(p):
return MM_Op(p).INT_FIELDS
def op_t3_function(p, vector, exp, verbose = 0):
if exp == 0: return vector
assert len(vector) == 3 * int_fields(p)
exp = exp % 3
x = [exp - 1]
return matrix_function("op_t3", p, vector, x, verbose=verbose)
def pr_a3(p, a):
ld = len(str(p))
fields = int_fields(p)
for i in range(0, 3*fields, fields):
for j in range(fields):
print("%*d" % (ld,a[i+j]), end = " ")
print("")
def ref_op_t3(p, vector, exp):
fields = int_fields(p)
assert len(vector) == 3 * fields
exp = exp % 3
space = V(p)
result = [None] * (3 * fields)
for i in range(fields):
v = space()
for tag, j in [("A", 0), ("B", fields), ("C", 2 * fields)]:
v += int(vector[i + j]) * space(tag, 1, 0)
v *= G('t', exp)
for tag, j in [("A", 0), ("B", fields), ("C", 2 * fields)]:
result[i + j] = v[tag, 1, 0]
return result
def one_test_op_t3_function(p, vector, exp, verbose = 0):
if verbose:
print("\nTest op t3, exp = ", exp, ", v =\n")
pr_a3(p, vector)
vector = np.array(vector, dtype = np.int32) % p
obtained = op_t3_function(p, vector, exp, verbose = verbose)
expected = ref_op_t3(p, vector, exp)
ok = (obtained == expected).all()
if verbose or not ok:
if not ok:
s = "\nTest op t3 mod %d, exp = %d, v ="
print(s % (p, exp))
pr_a3(p,vector)
print("Result obtained:")
pr_a3(p, obtained)
if not ok:
print("Result expected:")
pr_a3(p, expected)
raise ValueError("Wrong result")
def op_p3_function_testcases(n_cases = 5):
for p in PRIMES:
fields = int_fields(p)
for i in range(3 * fields):
lst = [0] * (3 * fields)
lst[i] = 1
yield p, lst, 1
yield p, lst, 2
for i in range(n_cases):
for p in PRIMES:
fields = int_fields(p)
v = [randint(0, p - 1) for l in range(3 * fields)]
yield p, v, 1
yield p, v, 2
@pytest.mark.compiler
@pytest.mark.slow
@pytest.mark.hadamard
def test_op_p3_function(n_cases = 5, verbose = 0):
build_code()
print("Test operator t on tags 'ABC' done by 3 x 3 matrices")
for p, v, exp1 in op_p3_function_testcases(n_cases):
one_test_op_t3_function(p, v, exp1, verbose)
print("Test passed")
################################################################
# test symmetric operation xi done by 64 times 64 matrices
################################################################
msym4a = np.array(
[[1,1,1,1], [1,1,-1,-1], [1,-1,1,-1], [1,-1,-1,1]]
)
def op_xi_sym64_function(p, vector, verbose = 0):
assert vector.shape == (16, 24)
vector %= p
v1 = np.zeros( (16, 32), dtype = np.int32)
v1[0:16, 0:24] = vector
v1 = v1.reshape(16*32)
v0 = np.zeros(8, dtype = np.int8)
v2 = matrix_function("op_xi64", p, v1, [2], verbose=verbose)
v2 = v2.reshape( (16, 32) )
assert np.count_nonzero(v2[: , 24:]) == 0
return v2[: , :24] % p
def ref_op_xi_sym64(p, vector):
v = np.copy(vector)
for i in range(16):
for j in range(0, 24, 4):
v[i, j:j+4] = v[i, j:j+4].dot(msym4a)
for i in range(24):
v[:,i] = mat_l_16.dot(v[:,i])
q = pow((p + 1) >> 1, 3, p)
return v * q % p
def one_test_op_xi_sym64_function(p, vector, verbose = 0):
if verbose:
print("\nTest op lsym64 mod %d, v =\n" % p)
print(vector)
vector = np.array(vector, dtype = np.int32) % p
obtained = op_xi_sym64_function(p, vector, verbose = verbose)
expected = ref_op_xi_sym64(p, vector)
ok = (obtained == expected).all()
if verbose or not ok:
if not ok:
print("\nTest op xi sym64 mod %d, v =\n" % p)
print(vector)
print("Result obtained:")
print(obtained)
if not ok:
print("Result expected:")
print(expected)
raise ValueError("Wrong result")
def op_xi_sym64_function_testcases(n_cases = 5):
for p in PRIMES:
for i in range(16):
for j in range(24):
v = np.zeros((16,24), dtype = np.int32)
v[i, j] = 1
yield p, v
for i in range(n_cases):
for p in PRIMES:
v = np.zeros((16,24), dtype = np.int32)
for i in range(16):
for j in range(24):
v[i,j] = randint(0, p-1)
yield p, v
@pytest.mark.compiler
@pytest.mark.slow
@pytest.mark.hadamard
def test_op_xi_sym64_function(n_cases = 5, verbose = 0):
build_code()
print("Test part of operator xi done by 64 x 64 matrices")
for p, v in op_xi_sym64_function_testcases(n_cases):
one_test_op_xi_sym64_function(p, v, verbose)
print("Test passed")
################################################################
# test operation xi done by 64 times 64 matrices
################################################################
def op_xi64_function(p, vector, exp, verbose = 0):
exp = exp % 3
if exp == 0: return vector
x = [exp - 1]
assert vector.shape == (16, 24)
vector %= p
v1 = np.zeros( (16, 32), dtype = np.int32)
v1[0:16, 0:24] = vector
v1 = v1.reshape(16 * 32)
v2 = matrix_function("op_xi64", p, v1, x, verbose=verbose)
v2 = v2.reshape( (16, 32) )
assert np.count_nonzero(v2[: , 24:]) == 0
return v2[: , :24] % p
def ref_op_xi64(p, vector, exp):
exp = exp % 3
if exp == 0: return vector
space = V(p)
v = space(0)
for i in range(16):
for j in range(24):
v += int(vector[i,j]) * space('Y',i + 0x400, j)
v *= G('l', exp)
#print(v)
v1 = np.zeros( (16, 24), dtype = np.int32)
for i in range(16):
for j in range(24):
v1[i, j] = v['Y', i + 0x400, j]
return v1
def one_test_op_xi64_function(p, vector, exp, verbose = 0):
if verbose:
print("\nTest op xi 64 mod %d, exp = %d, v =\n" % (p, exp))
print(vector)
vector = np.array(vector, dtype = np.int32) % p
obtained = op_xi64_function(p, vector, exp, verbose = verbose)
expected = ref_op_xi64(p, vector, exp)
ok = (obtained == expected).all()
if verbose or not ok:
if not ok:
print("\nTest op xi 64 mod %d, exp = %d, v =\n" % (p, exp))
print(vector)
print("Result obtained:")
print(obtained)
if not ok:
print("Result expected:")
print(expected)
raise ValueError("Wrong result")
def op_xi64_function_testcases(n_cases = 5):
if n_cases >= 100:
for p in PRIMES:
for i in range(16):
for j in range(24):
v = np.zeros((16,24), dtype = np.int32)
v[i, j] = 1
yield p, v, 1
yield p, v, 2
for i in range(n_cases):
for p in PRIMES:
v = np.zeros((16,24), dtype = np.int32)
for i in range(16):
for j in range(24):
v[i,j] = randint(0, p-1)
yield p, v, 1
yield p, v, 2
@pytest.mark.compiler
@pytest.mark.slow
@pytest.mark.hadamard
def test_op_xi64_function(n_cases = 5, verbose = 0):
build_code()
print("Test operator xi on tags 'YZ' done by 64 x 64 matrices")
for p, v, exp in op_xi64_function_testcases(n_cases):
one_test_op_xi64_function(p, v, exp, verbose)
print("Test passed")
################################################################
# test operation xi done by 16 times 16 matrices
################################################################
def op_xi16_function(p, vector, exp, verbose = 0):
exp = exp % 3
if exp == 0: return vector
x = [exp - 1]
assert vector.shape == (4, 24)
vector %= p
v1 = np.zeros( (4, 32), dtype = np.int32)
    v1[0:4, 0:24] = vector
v1 = v1.reshape(4 * 32)
v2 = matrix_function("op_xi16", p, v1, x, verbose=verbose)
v2 = v2.reshape( (4, 32) )
assert np.count_nonzero(v2[: , 24:]) == 0
return v2[: , :24] % p
def ref_op_xi16(p, vector, exp):
"""Generate reference vector for 16 1 14 operation on tag 'A'"""
assert (vector[:4,:4] == vector[:4,:4].T).all()
# The previous assertion is a rather subtile symmetry condition
# required for the python reference implementation to work.
# Function symmetrize_test_matrix(vector) forces that symmetry.
exp = exp % 3
if exp == 0: return vector
space = V(p)
v = space()
for i in range(4):
for j in range(24):
i0, j0 = max(i,j), min(i,j)
if j >= 4 or i >= j:
v += int(vector[i,j]) * space('A', i, j)
v *= G('l', exp)
#print(v)
v1 = np.zeros( (4, 24), dtype = np.int32)
for i in range(4):
for j in range(24):
i0, j0 = max(i,j), min(i,j)
v1[i, j] = v['A', i0, j0]
return v1
def one_test_op_xi16_function(p, vector, exp, verbose = 0):
if verbose:
print("\nTest op xi16 mod %d, exp = %d, v =\n" % (p, exp))
print(vector)
vector = np.array(vector, dtype = np.int32) % p
obtained = op_xi16_function(p, vector, exp, verbose = verbose)
expected = ref_op_xi16(p, vector, exp)
ok = (obtained == expected).all()
if verbose or not ok:
if not ok:
print("\nTest op xi 16 mod %d, exp = %d, v =\n" % (p, exp))
print(vector)
print("Result obtained:")
print(obtained)
if not ok:
print("Result expected:")
print(expected)
raise ValueError("Wrong result")
def symmetrize_test_matrix(vector, i = 0):
"""Force symmetry of 'vector' required for function ref_op_xi16()"""
i = i & ~ 3
for j in range(4):
for k in range(j):
vector[i + j, k] = vector[i + k, j]
def op_xi16_function_testcases(n_cases = 5):
if n_cases >= 0:
for p in [7] + PRIMES:
for i in range(4):
for j in range(24):
v = np.zeros((4,24), dtype = np.int32)
v[i, j] = 1
symmetrize_test_matrix(v)
yield p, v, 1
yield p, v, 2
for i in range(n_cases):
for p in PRIMES:
v = np.zeros((4,24), dtype = np.int32)
for i in range(4):
for j in range(24):
v[i,j] = randint(0, p-1)
symmetrize_test_matrix(v)
yield p, v, 1
yield p, v, 2
@pytest.mark.compiler
@pytest.mark.slow
@pytest.mark.hadamard
def test_op_xi16_function(n_cases = 5, verbose = 0):
build_code()
print("Test operator xi on tag 'A' done by 16 x 16 matrices")
for p, v, exp in op_xi16_function_testcases(n_cases):
one_test_op_xi16_function(p, v, exp, verbose)
print("Test passed")
################################################################
# Clean up
################################################################
################################################################
# Main test program
################################################################
N_CASES = 20
if __name__ == "__main__":
test_hadamard_function(N_CASES, verbose = 0)
test_xch_parity_function(N_CASES, verbose = 0)
test_op_p3_function(N_CASES, verbose = 0)
test_op_p64_function(N_CASES, verbose = 0)
test_op_xi16_function(N_CASES, verbose = 0)
test_op_xi_sym64_function(N_CASES, verbose = 0)
test_op_xi64_function(N_CASES, verbose = 0)
|
#
# PySNMP MIB module PPVPN-TC-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/PPVPN-TC-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 20:04:56 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ValueRangeConstraint, ConstraintsIntersection, SingleValueConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "SingleValueConstraint", "ConstraintsUnion")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
ModuleIdentity, Integer32, NotificationType, Bits, Counter64, ObjectIdentity, MibIdentifier, Counter32, MibScalar, MibTable, MibTableRow, MibTableColumn, Unsigned32, iso, Gauge32, IpAddress, experimental, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "ModuleIdentity", "Integer32", "NotificationType", "Bits", "Counter64", "ObjectIdentity", "MibIdentifier", "Counter32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Unsigned32", "iso", "Gauge32", "IpAddress", "experimental", "TimeTicks")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
ppvpnTcMIB = ModuleIdentity((1, 3, 6, 1, 3, 1111))
ppvpnTcMIB.setRevisions(('2001-02-28 12:00',))
if mibBuilder.loadTexts: ppvpnTcMIB.setLastUpdated('200102281200Z')
if mibBuilder.loadTexts: ppvpnTcMIB.setOrganization('Provider Provisioned Virtual Private Networks Working Group.')
class VPNId(TextualConvention, OctetString):
reference = "RFC 2685, Fox & Gleeson, 'Virtual Private Networks Identifier', September 1999."
status = 'current'
subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(0, 7)
mibBuilder.exportSymbols("PPVPN-TC-MIB", VPNId=VPNId, ppvpnTcMIB=ppvpnTcMIB, PYSNMP_MODULE_ID=ppvpnTcMIB)
|
#=========================================================================
# BehavioralRTLIR.py
#=========================================================================
"""Provide behavioral RTLIR AST node types.
This file is automatically generated by BehavioralRTLIRImplGen.py.
"""
class BaseBehavioralRTLIR:
"""Base class for all behavioral RTLIR AST nodes."""
def __eq__( s, other ):
return type(s) is type(other)
def __ne__( s, other ):
return not s.__eq__( other )
class CombUpblk( BaseBehavioralRTLIR ):
def __init__( s, name, body ):
s.name = name
s.body = body
def __eq__( s, other ):
if not isinstance(other, CombUpblk) or s.name != other.name:
return False
for x, y in zip( s.body, other.body ):
if x != y:
return False
return True
class SeqUpblk( BaseBehavioralRTLIR ):
def __init__( s, name, body ):
s.name = name
s.body = body
def __eq__( s, other ):
if not isinstance(other, SeqUpblk) or s.name != other.name:
return False
for x, y in zip( s.body, other.body ):
if x != y:
return False
return True
class Assign( BaseBehavioralRTLIR ):
def __init__( s, targets, value, blocking ):
s.targets = targets
s.value = value
s.blocking = blocking
def __eq__( s, other ):
if not isinstance(other, Assign) or s.value != other.value or s.blocking != other.blocking:
return False
for x, y in zip( s.targets, other.targets ):
if x != y:
return False
return True
class If( BaseBehavioralRTLIR ):
def __init__( s, cond, body, orelse ):
s.cond = cond
s.body = body
s.orelse = orelse
def __eq__( s, other ):
if not isinstance(other, If) or s.cond != other.cond:
return False
for x, y in zip( s.body, other.body ):
if x != y:
return False
for x, y in zip( s.orelse, other.orelse ):
if x != y:
return False
return True
class For( BaseBehavioralRTLIR ):
def __init__( s, var, start, end, step, body ):
s.var = var
s.start = start
s.end = end
s.step = step
s.body = body
def __eq__( s, other ):
if not isinstance(other, For) or s.var != other.var or s.start != other.start or s.end != other.end or s.step != other.step:
return False
for x, y in zip( s.body, other.body ):
if x != y:
return False
return True
class Number( BaseBehavioralRTLIR ):
def __init__( s, value ):
s.value = value
def __eq__( s, other ):
return isinstance(other, Number) and s.value == other.value
class Concat( BaseBehavioralRTLIR ):
def __init__( s, values ):
s.values = values
def __eq__( s, other ):
if not isinstance(other, Concat):
return False
for x, y in zip( s.values, other.values ):
if x != y:
return False
return True
class ZeroExt( BaseBehavioralRTLIR ):
def __init__( s, nbits, value ):
s.nbits = nbits
s.value = value
def __eq__( s, other ):
return isinstance(other, ZeroExt) and s.nbits == other.nbits and s.value == other.value
class SignExt( BaseBehavioralRTLIR ):
def __init__( s, nbits, value ):
s.nbits = nbits
s.value = value
def __eq__( s, other ):
return isinstance(other, SignExt) and s.nbits == other.nbits and s.value == other.value
class Reduce( BaseBehavioralRTLIR ):
def __init__( s, op, value ):
s.op = op
s.value = value
def __eq__( s, other ):
return isinstance(other, Reduce) and s.op == other.op and s.value == other.value
class SizeCast( BaseBehavioralRTLIR ):
def __init__( s, nbits, value ):
s.nbits = nbits
s.value = value
def __eq__( s, other ):
return isinstance(other, SizeCast) and s.nbits == other.nbits and s.value == other.value
class StructInst( BaseBehavioralRTLIR ):
def __init__( s, struct, values ):
s.struct = struct
s.values = values
def __eq__( s, other ):
if not isinstance(other, StructInst) or s.struct != other.struct:
return False
for x, y in zip( s.values, other.values ):
if x != y:
return False
return True
class IfExp( BaseBehavioralRTLIR ):
def __init__( s, cond, body, orelse ):
s.cond = cond
s.body = body
s.orelse = orelse
def __eq__( s, other ):
return isinstance(other, IfExp) and s.cond == other.cond and s.body == other.body and s.orelse == other.orelse
class UnaryOp( BaseBehavioralRTLIR ):
def __init__( s, op, operand ):
s.op = op
s.operand = operand
def __eq__( s, other ):
return isinstance(other, UnaryOp) and s.op == other.op and s.operand == other.operand
class BoolOp( BaseBehavioralRTLIR ):
def __init__( s, op, values ):
s.op = op
s.values = values
def __eq__( s, other ):
if not isinstance(other, BoolOp) or s.op != other.op:
return False
for x, y in zip( s.values, other.values ):
if x != y:
return False
return True
class BinOp( BaseBehavioralRTLIR ):
def __init__( s, left, op, right ):
s.left = left
s.op = op
s.right = right
def __eq__( s, other ):
return isinstance(other, BinOp) and s.left == other.left and s.op == other.op and s.right == other.right
class Compare( BaseBehavioralRTLIR ):
def __init__( s, left, op, right ):
s.left = left
s.op = op
s.right = right
def __eq__( s, other ):
return isinstance(other, Compare) and s.left == other.left and s.op == other.op and s.right == other.right
class Attribute( BaseBehavioralRTLIR ):
def __init__( s, value, attr ):
s.value = value
s.attr = attr
def __eq__( s, other ):
return isinstance(other, Attribute) and s.value == other.value and s.attr == other.attr
class Index( BaseBehavioralRTLIR ):
def __init__( s, value, idx ):
s.value = value
s.idx = idx
def __eq__( s, other ):
return isinstance(other, Index) and s.value == other.value and s.idx == other.idx
class Slice( BaseBehavioralRTLIR ):
def __init__( s, value, lower, upper, base = None, size = None ):
s.value = value
s.lower = lower
s.upper = upper
s.base = base
s.size = size
def __eq__( s, other ):
return isinstance(other, Slice) and s.value == other.value and s.lower == other.lower and s.upper == other.upper and s.base == other.base and s.size == other.size
class Base( BaseBehavioralRTLIR ):
def __init__( s, base ):
s.base = base
def __eq__( s, other ):
return isinstance(other, Base) and s.base == other.base
class LoopVar( BaseBehavioralRTLIR ):
def __init__( s, name ):
s.name = name
def __eq__( s, other ):
return isinstance(other, LoopVar) and s.name == other.name
class FreeVar( BaseBehavioralRTLIR ):
def __init__( s, name, obj ):
s.name = name
s.obj = obj
def __eq__( s, other ):
return isinstance(other, FreeVar) and s.name == other.name and s.obj == other.obj
class TmpVar( BaseBehavioralRTLIR ):
def __init__( s, name, upblk_name ):
s.name = name
s.upblk_name = upblk_name
def __eq__( s, other ):
return isinstance(other, TmpVar) and s.name == other.name and s.upblk_name == other.upblk_name
class LoopVarDecl( BaseBehavioralRTLIR ):
def __init__( s, name ):
s.name = name
def __eq__( s, other ):
return isinstance(other, LoopVarDecl) and s.name == other.name
class Invert( BaseBehavioralRTLIR ):
pass
class Not( BaseBehavioralRTLIR ):
pass
class UAdd( BaseBehavioralRTLIR ):
pass
class USub( BaseBehavioralRTLIR ):
pass
class And( BaseBehavioralRTLIR ):
pass
class Or( BaseBehavioralRTLIR ):
pass
class Add( BaseBehavioralRTLIR ):
pass
class Sub( BaseBehavioralRTLIR ):
pass
class Mult( BaseBehavioralRTLIR ):
pass
class Div( BaseBehavioralRTLIR ):
pass
class Mod( BaseBehavioralRTLIR ):
pass
class Pow( BaseBehavioralRTLIR ):
pass
class ShiftLeft( BaseBehavioralRTLIR ):
pass
class ShiftRightLogic( BaseBehavioralRTLIR ):
pass
class BitAnd( BaseBehavioralRTLIR ):
pass
class BitOr( BaseBehavioralRTLIR ):
pass
class BitXor( BaseBehavioralRTLIR ):
pass
class Eq( BaseBehavioralRTLIR ):
pass
class NotEq( BaseBehavioralRTLIR ):
pass
class Lt( BaseBehavioralRTLIR ):
pass
class LtE( BaseBehavioralRTLIR ):
pass
class Gt( BaseBehavioralRTLIR ):
pass
class GtE( BaseBehavioralRTLIR ):
pass
class BehavioralRTLIRNodeVisitor:
"""Class for behavioral RTLIR AST visitors."""
def visit( self, node, *args ):
method = 'visit_' + node.__class__.__name__
visitor = getattr( self, method, self.generic_visit )
return visitor( node, *args )
def generic_visit( self, node, *args ):
for field, value in vars(node).items():
if isinstance( value, list ):
for item in value:
if isinstance( item, BaseBehavioralRTLIR ):
self.visit( item, *args )
elif isinstance( value, BaseBehavioralRTLIR ):
self.visit( value, *args )
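# Illustrative sketch (not part of the generated file): a small visitor that
# counts Assign nodes, showing how visit() dispatches on the node class name
# and how generic_visit() recurses into child nodes and lists of child nodes.
class AssignCounter( BehavioralRTLIRNodeVisitor ):
  def __init__( s ):
    s.count = 0
  def visit_Assign( s, node, *args ):
    s.count += 1
    s.generic_visit( node, *args )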
|
from django.contrib import admin
from django.urls import path
from eventlog import views
from eventlog.models import LoginSession, Event
class SessionAdmin(admin.ModelAdmin):
readonly_fields = ('id', 'user', 'startedAtTime', 'endedAtTime', 'userAgent')
list_filter= ('user', 'startedAtTime')
list_display = ('id', 'user', 'startedAtTime', 'endedAtTime')
ordering = ('-startedAtTime',)
class EventAdmin(admin.ModelAdmin):
readonly_fields = ('id', 'eventTime', 'actor', 'group', 'membership', 'parent_event_id', 'type', 'action', 'session',
'book_version_id', 'resource_href', 'resource_progression', 'page', 'control', 'value')
list_display = ('eventTime', 'actor', 'group_anon_id', 'type', 'action', 'page', 'control', 'value',
'book_version_id')
list_filter = ('actor__permission', 'eventTime')
ordering = ('-eventTime',)
change_list_template = 'eventlog/event_changelist.html'
def group_anon_id(self,obj):
return obj.group.anon_id if obj.group else None
group_anon_id.short_description = 'Group'
def get_urls(self):
urls = super().get_urls()
my_urls = [
path('download_csv/', views.event_log_report)
]
return my_urls + urls
admin.site.register(LoginSession, SessionAdmin)
admin.site.register(Event, EventAdmin)
|
import os
if not SENTRY_DSN: # type: ignore # noqa: F821
SENTRY_DSN = os.getenv('SENTRY_DSN')
if SENTRY_DSN:
import logging
import sentry_sdk
# TODO(dmu) HIGH: Enable Celery integration once Celery is added
# from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.integrations.django import DjangoIntegration
from sentry_sdk.integrations.logging import LoggingIntegration
handlers = LOGGING['root']['handlers'] # type: ignore # noqa: F821
    if 'pre_sentry_handler' not in handlers:
        try:
            # Assumption: the pre-sentry handler should sit right after the console handler
            index = handlers.index('console')
        except ValueError:
            handlers.append('pre_sentry_handler')
        else:
            handlers.insert(index + 1, 'pre_sentry_handler')
console_handler_filters = LOGGING['handlers']['console']['filters'] # type: ignore # noqa: F821
if 'sentry' not in console_handler_filters:
console_handler_filters.append('sentry')
logging_integration = LoggingIntegration(
level=logging.DEBUG, # Breadcrumbs level
event_level=SENTRY_EVENT_LEVEL, # type: ignore # noqa: F821
)
sentry_sdk.init(
dsn=SENTRY_DSN,
debug=True,
send_default_pii=True,
# TODO(dmu) HIGH: Provide `release=...`,
request_bodies='medium',
integrations=(logging_integration, DjangoIntegration()),
)
|
#!_PYTHONLOC
#
# (C) COPYRIGHT 2006-2021 Al von Ruff, Ahasuerus and Dirk Stoecker
# ALL RIGHTS RESERVED
#
# The copyright notice above does not evidence any actual or
# intended publication of such source code.
#
# Version: $Revision$
# Date: $Date$
from isfdb import *
from common import *
from login import *
from SQLparsing import *
def PrintRecord(record, eccolor):
if not record:
return
if eccolor:
print '<tr align=left class="table1">'
else:
print '<tr align=left class="table2">'
# Watch out for votes for no-longer-existing titles
title_id = record[1]
title = SQLloadTitle(title_id)
if title:
title_link = ISFDBLink('title.cgi', title_id, title[TITLE_TITLE])
title_type = title[TITLE_TTYPE]
title_year = title[TITLE_YEAR]
else:
title_link = "<i>Title Deleted (id %d)</i>" % (title_id)
title_type = "-"
title_year = "-"
print '<td>%d</td>' % (record[3])
print '<td>%s</td>' % (title_link)
print '<td>%s</td>' % (title_type)
print '<td>%s</td>' % (title_year)
# Only display author(s) if there is a title
print '<td>'
if title:
authors = SQLTitleBriefAuthorRecords(title_id)
counter = 0
for author in authors:
if counter:
print ' <b>and</b> '
displayAuthorById(author[0], author[1])
counter += 1
else:
print "-"
print '</td>'
print '</tr>'
if __name__ == '__main__':
start = SESSION.Parameter(0, 'int', 0)
PrintHeader('My Votes')
PrintNavbar('myvotes', 0, 0, 'myvotes.cgi', 0)
(myID, username, usertoken) = GetUserData()
myID = int(myID)
if not myID:
print '<h3>You have to be logged in to view your votes</h3>'
PrintTrailer('votes', 0, 0)
sys.exit(0)
# Get the (next) set of votes. We join over the titles table to avoid picking
# up any votes for titles that have been deleted.
if start:
query = """select v.* from votes v, titles t
where v.user_id=%d
and t.title_id = v.title_id
order by v.rating desc, t.title_title
limit %d,50""" % (myID, start)
else:
query = """select v.* from votes v, titles t
where v.user_id=%d
and t.title_id = v.title_id
order by v.rating desc, t.title_title
limit 50""" % (myID)
db.query(query)
result = db.store_result()
if result.num_rows() == 0:
print '<h3>No votes present</h3>'
PrintTrailer('votes', 0, 0)
sys.exit(0)
print '<table class="vote_table">'
print '<tr class="table1">'
print '<th>Vote</th>'
print '<th>Title</th>'
print '<th>Type</th>'
print '<th>Year</th>'
print '<th>Author</th>'
print '</tr>'
record = result.fetch_row()
color = 0
while record:
PrintRecord(record[0], color)
color = color ^ 1
record = result.fetch_row()
print '</table>'
print '<p>'
print ISFDBLinkNoName('myvotes.cgi', start+50, 'MORE', True)
PrintTrailer('votes', 0, 0)
|
from application import *
from database.models import AccountData
from database.database import SessionLocal
import re
import uuid
from sqlalchemy import exists
from hashlib import sha256, sha512
import time
class Account:
def __init__(self, request):
self.db = SessionLocal()
self.request = request
@staticmethod
def exists_or_not_found(email, db):
"""
if account does not exists then,
return bad request 400
"""
self.db_account = self.db.query(AccountData.account_uuid).filter(AccountData.email == email).one_or_none()
if self.db_account is None:
return bad_request({"account_not_found_exception":"account not found"})
def register(self, request):
def validate_inputs():
exception, reason = validate_data()
response = ok()
if exception != 'success':
response = bad_request({exception:reason})
return response
def validate_data():
if len(self.password) < 9 or len(self.password) > 200:
return ("password_length_exception", "password can not be shorter than 9 characters or longer than 200")
elif self.password.isalnum():
return ("password_special_char_missing_exception", "password is missing a special character")
elif not any(char.isdigit() for char in self.password):
return ("password_num_char_missing_exception", "password is missing a numerical character")
elif not any(char.isupper() for char in self.password):
return ("password_upper_char_missing_exception", "password is missing an uppercase character")
elif not any(char.islower() for char in self.password):
return ("password_lower_char_missing_exception", "password is missing a lowercase character")
elif not self.display_name.isalnum():
return ("string_invalid_char_exception", "display_name has an invalid char")
            elif len(self.display_name) < 6:
                return ("string_underproduce_exception", "display_name has to be at least 6 characters")
            elif len(self.display_name) > 15:
                return ("string_exceeds_exception", "display_name has to be at most 15 characters")
            elif not re.search(r'\b[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}\b', self.email):
                return ("email_invalid_exception", "email entered is invalid")
self.db_account = self.db.query(exists().where(AccountData.email == self.email)).scalar()
if self.db_account:
return ("duplicate_email_exception", "email already registered")
self.db_account = self.db.query(exists().where(AccountData.display_name == self.display_name)).scalar()
if self.db_account:
return ("duplicate_display_name_exception", "display name already registered")
return ("success","success")
def generate_uuid() -> str:
"""
generates unique identifier
"""
return str(uuid.uuid4())
def generate_salt() -> str:
"""
to make the hashed password secure
in case of a databreach, makes it
close to imposible for a dictionary
attack to occur
"""
return str(uuid.uuid4())[:12]
def hashed_password(password, salt) -> str:
"""
hashes the user given password
with salt for maximum security
"""
return sha512((password+salt).encode("utf-8")).hexdigest()
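        # Illustrative flow of the helpers above (hypothetical password value):
        #   salt = generate_salt()
        #   stored = hashed_password("S3cretPass9!", salt)
        #   hashed_password("S3cretPass9!", salt) == stored  # True; same check is used at login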
self.input = request.value
self.email = self.input.email.lower()
self.display_name = self.input.display_name.lower()
self.password = self.input.password
response = validate_inputs()
if response.status != 200:
return response
self.salt = generate_salt()
self.uuid = generate_uuid()
self.hashed_password = hashed_password(self.password, self.salt)
self.time = int(time.time())
self.db_account = AccountData(
account_uuid = self.uuid,
email = self.email,
password = self.hashed_password,
display_name = self.display_name,
salt = self.salt,
account_type = 0,
time_stamp_created = self.time,
)
self.db.add(self.db_account)
self.db.commit()
self.db.refresh(self.db_account)
self.db_account = self.db_account.__dict__
del self.db_account["_sa_instance_state"]
for attribute in self.db_account:
self.request.session[attribute] = self.db_account[attribute]
return ok({"uuid": self.uuid})
def login(self, request):
def compare_password(password, salt) -> str:
"""
tests the user entered password combined with the salt
to see if the one on db's password and entered password are
the exact same
"""
return sha512((password + salt).encode("utf-8")).hexdigest()
self.input = request.value
self.email = self.input.email.lower()
self.password = self.input.password
self.db_account = self.db.query(AccountData).filter(AccountData.email == self.email).one_or_none()
if self.db_account:
self.db_account = self.db_account.__dict__
del self.db_account["_sa_instance_state"]
if self.db_account["password"] == compare_password(self.password, self.db_account["salt"]):
for attribute in self.db_account:
self.request.session[attribute] = self.db_account[attribute]
return ok({"uuid": self.db_account["account_uuid"]})
return bad_request({"wrong_password_exception":"wrong password"})
return bad_request({"email_not_found_exception":"email not found"})
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import urllib2
import urllib
import logging
# Assumption: Util is the project-local logging helper that provides get_logger()
from utils import Util
class Reputation(object):
def __init__(self,conf, logger=None):
self._fb_app_id = conf['app_id']
self._fb_app_secret = conf['app_secret']
self._logger = logging.getLogger('OA.FB') if logger else Util.get_logger('OA.FB',create_file=False)
def check(self, ips=None, urls=None, cat=False):
self._logger.info("Threat-Exchange reputation check starts...")
reputation_dict = {}
data = []
if ips is not None:
values = ips
qtype = 'IP_ADDRESS'
getopt = 'GET_IP_'
elif urls is not None:
values = urls
qtype = 'DOMAIN'
getopt = 'GET_NAME_'
else:
self._logger.info("Need either an ip or an url to check reputation.")
return reputation_dict
for val in values:
query_params = urllib.urlencode({
'type': qtype,
'text': val
})
indicator_request = {
'method': 'GET',
'name': "{0}{1}".format(getopt, val.replace('.', '_')),
'relative_url': "/v2.4/threat_indicators?{0}".format(query_params)
}
descriptor_request = {
'method': 'GET',
'relative_url': '/v2.4/{result=' + getopt + val.replace('.', '_') + ':$.data.*.id}/descriptors'
}
data.append(indicator_request)
data.append(descriptor_request)
reputation_dict.update(self._request_reputation(data, val))
data = []
if len(data) > 0:
reputation_dict.update(self._request_reputation(data))
return reputation_dict
def _request_reputation(self, data, name):
reputation_dict = {}
token = "{0}|{1}".format(self._fb_app_id, self._fb_app_secret)
request_body = {
'access_token': token,
'batch': data
}
request_body = urllib.urlencode(request_body)
url = "https://graph.facebook.com/"
content_type = {'Content-Type': 'application/json'}
request = urllib2.Request(url, request_body, content_type)
try:
str_response = urllib2.urlopen(request).read()
response = json.loads(str_response)
except urllib2.HTTPError as e:
self._logger.info("Error calling ThreatExchange in module fb: " + e.message)
reputation_dict[name] = self._get_reputation_label('UNKNOWN')
return reputation_dict
for row in response:
if row is None:
continue
if row['code'] != 200:
reputation_dict[name] = self._get_reputation_label('UNKNOWN')
return reputation_dict
if 'body' in row:
                try:
                    row_response = json.loads(row['body'])
                except ValueError as e:
                    self._logger.error("Error reading JSON body response in fb module: " + e.message)
                    reputation_dict[name] = self._get_reputation_label('UNKNOWN')
                    continue
if 'data' in row_response and row_response['data'] != []:
row_response_data = row_response['data']
name = row_response_data[0]['indicator']['indicator']
reputation_dict[name] = self._get_reputation_label(row_response_data[0]['status'])
else:
reputation_dict[name] = self._get_reputation_label('UNKNOWN')
else:
reputation_dict[name] = self._get_reputation_label('UNKNOWN')
return reputation_dict
def _get_reputation_label(self,reputation):
if reputation == 'UNKNOWN':
return "fb:UNKNOWN:-1"
elif reputation == 'NON_MALICIOUS':
return "fb:NON_MALICIOUS:0"
elif reputation == 'SUSPICIOUS':
return "fb:SUSPICIOUS:2"
elif reputation == 'MALICIOUS':
return "fb:MALICIOUS:3"
|
import json
def get_stored_username():
"""Get Stored Username"""
filename = 'numbers.json'
try:
with open(filename) as file_object:
username = json.load(file_object)
except FileNotFoundError:
return None
else:
return username
def get_new_username():
username = input("What is your name? ")
filename = 'numbers.json'
with open(filename, 'w') as file_object:
json.dump(username, file_object)
return username
def greet_user():
"""Greet the User by Name"""
username = get_stored_username()
if username:
ask = input("Are you " + username + "?")
if ask == 'yes':
print("Welcome back, " + username + "!")
        else:
            username = get_new_username()
            print("We'll remember you when you come back, " + username + "!")
    else:
        username = get_new_username()
        print("We'll remember you when you come back, " + username + "!")
greet_user()
|
from os import remove, sep
from os.path import isfile
from queue import Queue
from parse_audio import validate, youtube_download
class Track:
def __init__(self, chunk_size, filename=None, url=None):
if filename and isfile(filename):
self.trackname, self.filename, self.length = validate(filename)
elif url:
self.trackname, self.filename, self.length = youtube_download(url)
else:
self.filename = None
self.trackname = 'LIVE'
if self.filename:
self.file_reader = open(self.filename, 'rb')
self.chunk_size = chunk_size
self.chunk_queue = Queue()
self.read = False
self.num_chunks = 0
def read_file(self):
chunk = self.file_reader.read(self.chunk_size)
while chunk:
self.chunk_queue.put(chunk)
chunk = self.file_reader.read(self.chunk_size)
self.num_chunks += 1
        self.file_reader.close()
        self.delete_file()
self.read = True
def delete_file(self):
try:
remove(self.filename)
except FileNotFoundError: # file already removed
print(f'couldn\'t delete {self.trackname}|{self.filename}')
def add_chunk(self, chunk):
"""
Used only for speaker/microphone
"""
self.chunk_queue.put(chunk)
def get_filename(self):
return self.filename
def get_length(self):
return self.length
def get_num_chunks(self):
return self.num_chunks
def get_chunk_queue(self):
return self.chunk_queue
def set_chunk_queue(self, queue):
self.chunk_queue = queue
def get_read(self):
return self.read
def set_trackname(self, name):
self.trackname = name
def get_trackname(self):
if self.trackname:
return self.trackname
else:
return self.filename[self.filename.rindex(sep) + 1:self.filename.rindex(' temp')]
def __repr__(self):
return self.get_trackname()
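# Illustrative usage sketch (hypothetical file; note that read_file() deletes the
# source file after queueing its chunks):
# track = Track(chunk_size=4096, filename='song.mp3')
# track.read_file()
# print(track.get_trackname(), track.get_num_chunks())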
|
antigos = float(input('enter the current salary: '))
rj = (antigos*15)/100
ns = antigos + rj
print('The old salary is R${}'.format(antigos))
print('The new salary is R${}'.format(ns))
print('The raise received is R${}'.format(rj))
|
#!/usr/bin/env python
# encoding: utf-8
import json
from flask import Flask, request, jsonify
from flask_mongoengine import MongoEngine
app = Flask(__name__)
app.config['MONGODB_SETTINGS'] = {
'db': 'your_database',
'host': 'localhost',
'port': 27017
}
db = MongoEngine()
db.init_app(app)
class User(db.Document):
name = db.StringField()
email = db.StringField()
def to_json(self):
return {"name": self.name,
"email": self.email}
@app.route('/', methods=['GET'])
def query_records():
name = request.args.get('name')
user = User.objects(name=name).first()
if not user:
return jsonify({'error': 'data not found'})
else:
return jsonify(user.to_json())
@app.route('/', methods=['PUT'])
def create_record():
record = json.loads(request.data)
user = User(name=record['name'],
email=record['email'])
user.save()
return jsonify(user.to_json())
@app.route('/', methods=['POST'])
def update_record():
record = json.loads(request.data)
user = User.objects(name=record['name']).first()
if not user:
return jsonify({'error': 'data not found'})
else:
user.update(email=record['email'])
return jsonify(user.to_json())
@app.route('/', methods=['DELETE'])
def delete_record():
record = json.loads(request.data)
user = User.objects(name=record['name']).first()
if not user:
return jsonify({'error': 'data not found'})
else:
user.delete()
return jsonify(user.to_json())
if __name__ == "__main__":
app.run(debug=True)
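# Illustrative requests against the routes above (hypothetical values), e.g. with curl:
#   curl -X PUT    -d '{"name": "alice", "email": "alice@example.com"}' http://localhost:5000/
#   curl -X POST   -d '{"name": "alice", "email": "new@example.com"}'   http://localhost:5000/
#   curl           "http://localhost:5000/?name=alice"
#   curl -X DELETE -d '{"name": "alice"}'                               http://localhost:5000/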
|
"""
This module create HTML template for displaying videos
Example
$python template.py --startI 10 --end 40
create HTML file which presenets images with index between 10 and 40
"""
import os
from jinja2 import Environment, FileSystemLoader
from dataloader import MITDataLoader
ROOT_DIR = os.path.join("/", *os.path.abspath(__file__).split("/")[:-2])
class HtmlBuilder():
def __init__(self):
self.env = Environment(
loader=FileSystemLoader(
os.path.join(ROOT_DIR, "template"), encoding='utf8'))
self.tpl = self.env.get_template("template.html")
def __call__(self, data_iter, save=True, save_path="index.html"):
html = self.tpl.render({"data_iter": data_iter})
print(html)
if save:
with open(os.path.join(ROOT_DIR, save_path), "w") as f:
f.write(html)
if __name__ == "__main__":
builder = HtmlBuilder()
dataloader = MITDataLoader()
index_filtered = dataloader.filter_data(
lambda data: data.has_object_label("cat"))
builder(index_filtered)
|
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for translation_helper.py."""
import unittest
import os
import sys
import translation_helper
here = os.path.realpath(__file__)
testdata_path = os.path.normpath(os.path.join(here, '..', '..', 'testdata'))
class TcHelperTest(unittest.TestCase):
def test_get_translatable_grds(self):
grds = translation_helper.get_translatable_grds(
testdata_path, ['test.grd', 'not_translated.grd', 'internal.grd'],
os.path.join(testdata_path,
'translation_expectations_without_unlisted_file.pyl'))
self.assertEqual(1, len(grds))
# There should be no references to not_translated.grd (mentioning the
# filename here so that it doesn't appear unused).
grd = grds[0]
self.assertEqual(os.path.join(testdata_path, 'test.grd'), grd.path)
self.assertEqual(testdata_path, grd.dir)
self.assertEqual('test.grd', grd.name)
self.assertEqual([os.path.join(testdata_path, 'part.grdp')], grd.grdp_paths)
self.assertEqual([], grd.structure_paths)
self.assertEqual([os.path.join(testdata_path, 'test_en-GB.xtb')],
grd.xtb_paths)
self.assertEqual({'en-GB': os.path.join(testdata_path, 'test_en-GB.xtb')},
grd.lang_to_xtb_path)
self.assertTrue(grd.appears_translatable)
    self.assertEqual(['en-GB'], grd.expected_languages)
# The expectations list an untranslatable file (not_translated.grd), but the
# grd list doesn't contain it.
def test_missing_untranslatable(self):
TRANSLATION_EXPECTATIONS = os.path.join(
testdata_path, 'translation_expectations_without_unlisted_file.pyl')
with self.assertRaises(Exception) as context:
translation_helper.get_translatable_grds(
testdata_path, ['test.grd', 'internal.grd'], TRANSLATION_EXPECTATIONS)
self.assertEqual(
'%s needs to be updated. Please fix these issues:\n'
' - not_translated.grd is listed in the translation expectations, '
'but this grd file does not exist.' % TRANSLATION_EXPECTATIONS,
str(context.exception))
# The expectations list an internal file (internal.grd), but the grd list
# doesn't contain it.
def test_missing_internal(self):
TRANSLATION_EXPECTATIONS = os.path.join(
testdata_path, 'translation_expectations_without_unlisted_file.pyl')
with self.assertRaises(Exception) as context:
translation_helper.get_translatable_grds(
testdata_path, ['test.grd', 'not_translated.grd'],
TRANSLATION_EXPECTATIONS)
self.assertEqual(
'%s needs to be updated. Please fix these issues:\n'
' - internal.grd is listed in translation expectations as an internal '
'file to be ignored, but this grd file does not exist.' %
TRANSLATION_EXPECTATIONS, str(context.exception))
# The expectations list a translatable file (test.grd), but the grd list
# doesn't contain it.
def test_missing_translatable(self):
TRANSLATION_EXPECTATIONS = os.path.join(
testdata_path, 'translation_expectations_without_unlisted_file.pyl')
with self.assertRaises(Exception) as context:
translation_helper.get_translatable_grds(
testdata_path, ['not_translated.grd', 'internal.grd'],
TRANSLATION_EXPECTATIONS)
self.assertEqual(
'%s needs to be updated. Please fix these issues:\n'
' - test.grd is listed in the translation expectations, but this grd '
'file does not exist.' % TRANSLATION_EXPECTATIONS,
str(context.exception))
# The grd list contains a file (part.grdp) that's not listed in translation
# expectations.
def test_expectations_not_updated(self):
TRANSLATION_EXPECTATIONS = os.path.join(
testdata_path, 'translation_expectations_without_unlisted_file.pyl')
with self.assertRaises(Exception) as context:
translation_helper.get_translatable_grds(
testdata_path,
['test.grd', 'part.grdp', 'not_translated.grd', 'internal.grd'],
TRANSLATION_EXPECTATIONS)
self.assertEqual(
'%s needs to be updated. Please fix these issues:\n'
' - part.grdp appears to be translatable (because it contains <file> '
'or <message> elements), but is not listed in the translation '
'expectations.' % TRANSLATION_EXPECTATIONS, str(context.exception))
if __name__ == '__main__':
unittest.main()
|
class VCenterDeployVMFromLinkedCloneResourceModel(object):
def __init__(self):
self.vcenter_vm = ''
self.vcenter_vm_snapshot = ''
self.vm_cluster = ''
self.vm_storage = ''
self.ip_regex = ''
self.vm_resource_pool = ''
self.vm_location = ''
self.auto_power_on = True
self.auto_power_off = True
self.wait_for_ip = True
self.auto_delete = True
self.autoload = True
self.refresh_ip_timeout = 0
self.behavior_during_save = ''
|
import uuid
import numpy.testing as npt
from skyportal.tests import api
def test_candidate_list(view_only_token, public_candidate):
status, data = api("GET", "candidates", token=view_only_token)
assert status == 200
assert data["status"] == "success"
def test_token_user_retrieving_candidate(view_only_token, public_candidate):
status, data = api(
"GET", f"candidates/{public_candidate.id}", token=view_only_token
)
assert status == 200
assert data["status"] == "success"
print(data["data"])
assert all(
k in data["data"]
for k in ["ra", "dec", "redshift"]
)
def test_token_user_update_candidate(manage_sources_token, public_candidate):
status, data = api(
"PATCH",
f"candidates/{public_candidate.id}",
data={
"ra": 234.22,
"dec": -22.33,
"redshift": 3,
"transient": False,
"ra_dis": 2.3,
},
token=manage_sources_token,
)
assert status == 200
assert data["status"] == "success"
status, data = api(
"GET", f"candidates/{public_candidate.id}", token=manage_sources_token
)
assert status == 200
assert data["status"] == "success"
npt.assert_almost_equal(data["data"]["ra"], 234.22)
npt.assert_almost_equal(data["data"]["redshift"], 3.0)
def test_cannot_update_candidate_without_permission(view_only_token, public_candidate):
status, data = api(
"PATCH",
f"candidates/{public_candidate.id}",
data={
"ra": 234.22,
"dec": -22.33,
"redshift": 3,
"transient": False,
"ra_dis": 2.3,
},
token=view_only_token,
)
assert status == 400
assert data["status"] == "error"
def test_token_user_post_new_candidate(
upload_data_token, view_only_token, public_filter
):
candidate_id = str(uuid.uuid4())
status, data = api(
"POST",
"candidates",
data={
"id": candidate_id,
"ra": 234.22,
"dec": -22.33,
"redshift": 3,
"transient": False,
"ra_dis": 2.3,
"filter_ids": [public_filter.id],
},
token=upload_data_token,
)
assert status == 200
assert data["data"]["id"] == candidate_id
status, data = api("GET", f"candidates/{candidate_id}", token=view_only_token)
assert status == 200
assert data["data"]["id"] == candidate_id
npt.assert_almost_equal(data["data"]["ra"], 234.22)
def test_cannot_add_candidate_without_filter_id(upload_data_token):
candidate_id = str(uuid.uuid4())
status, data = api(
"POST",
"candidates",
data={
"id": candidate_id,
"ra": 234.22,
"dec": -22.33,
"redshift": 3,
"transient": False,
"ra_dis": 2.3,
},
token=upload_data_token,
)
assert status == 400
|
import appdaemon.plugins.hass.hassapi as hass
from ping3 import ping
class Backup(hass.Hass):
def initialize(self):
pass
# self.log('initializing backup script')
# self.notify('initializing backup script', title='appdaemon: Backup', name='pushbullet')
# self.run_every(self.check_primary, "now", 5 * 60)
def check_primary(self, kwargs):
pass
# try:
# primary_host = ping('192.168.2.13')
# self.log(f"primary host response time: {primary_host}")
# # check if the primary home assistant is up. If not, restart it
# if not primary_host:
# self.log('Home assistant is down')
# # don't wake us up in the middle of night, or the kids
# if not self.now_is_between("00:00:00", "09:00:00"):
# return
# # noinspection PyUnresolvedReferences
# # self.get_app('notify').notify('Home assistant is down', self.args['speaker'])
# self.notify('initializing backup script', title='appdaemon: Backup', name='pushbullet')
# # self.turn_off('switch.home_assistant')
# # self.turn_on('switch.home_assistant')
# except Exception as ex:
# print(ex)
def terminate(self):
self.log('Terminating Backup App')
|
#!/usr/bin/python3
safe_print_integer_err = \
__import__('100-safe_print_integer_err').safe_print_integer_err
value = 89
has_been_print = safe_print_integer_err(value)
if not has_been_print:
print("{} is not an integer".format(value))
value = -89
has_been_print = safe_print_integer_err(value)
if not has_been_print:
print("{} is not an integer".format(value))
value = "Holberton"
has_been_print = safe_print_integer_err(value)
if not has_been_print:
print("{} is not an integer".format(value))
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import time
import compas
from compas.geometry import dot_vectors
from compas.utilities import i_to_blue
from compas_rhino.helpers import mesh_from_surface
from compas_rhino.helpers import mesh_select_face
from compas_3gs.algorithms import cell_arearise_face
from compas_3gs.diagrams import Cell
from compas_3gs.operations import cell_relocate_face
from compas_3gs.rhino import MeshConduit
try:
import rhinoscriptsyntax as rs
except ImportError:
compas.raise_if_ironpython()
__author__ = 'Juney Lee'
__copyright__ = 'Copyright 2019, BLOCK Research Group - ETH Zurich'
__license__ = 'MIT License'
__email__ = 'juney.lee@arch.ethz.ch'
# ------------------------------------------------------------------------------
# 1. make cell from rhino polysurfaces
# ------------------------------------------------------------------------------
layer = 'cell'
guid = rs.GetObject("select a closed polysurface", filter=rs.filter.polysurface)
rs.HideObjects(guid)
cell = mesh_from_surface(Cell, guid)
cell.draw()
# ------------------------------------------------------------------------------
# 2. Target area
# ------------------------------------------------------------------------------
fkey = mesh_select_face(cell)
area = cell.face_area(fkey)
center = cell.face_centroid(fkey)
normal = cell.face_normal(fkey)
target_area = rs.GetReal("Enter target area", number=area)
# ------------------------------------------------------------------------------
# 3. Arearise cell face
# ------------------------------------------------------------------------------
# conduit
conduit = MeshConduit(cell)
def callback(cell, args):
current_area = cell.face_area(fkey)
color = i_to_blue(abs(current_area - target_area) / target_area)
conduit.face_colordict = {fkey: color}
time.sleep(0.05)
conduit.redraw()
with conduit.enabled():
cell_arearise_face(cell,
fkey,
target_area,
callback=callback)
# ------------------------------------------------------------------------------
# 4. Check result
# ------------------------------------------------------------------------------
new_area = cell.face_area(fkey)
new_normal = cell.face_normal(fkey)
if dot_vectors(normal, new_normal) < 0:
new_area *= -1
if abs(new_area - target_area) > 1:
print('===================================================================')
print('')
print('Arearisation attempted, but did not converge...')
print('It is likely that the target area is not valid / inexistent...')
print('')
print('===================================================================')
cell_relocate_face(cell, fkey, center, normal)
# ------------------------------------------------------------------------------
# 5. Draw result
# ------------------------------------------------------------------------------
cell.draw()
|
import os
"""
Logic to check if a dataset is valid
Parameters
----------
dataset_folder: str
folder of the dataset
labels_type: str
labels_type
Returns
-------
Boolean
true if the dataset is valid, false otherwise
"""
def validate_dataset(dataset_folder, labels_type):
valid = True
valid = True if os.path.isdir(dataset_folder+'/images') else False
if valid == True:
valid = True if os.path.isdir(dataset_folder+'/labels') else False
if valid == True:
valid = True if os.path.isdir(dataset_folder+'/labels/'+labels_type) else False
if valid ==True:
valid = True if os.path.isfile(dataset_folder+'/objectclasses.json') else False
if valid == True:
images = os.listdir(dataset_folder+'/images')
for image in images:
image = image.lower()
if not (image.endswith('.png') or image.endswith('.jpg') or image.endswith('.jpeg')):
valid = False
break
if valid == True:
labels = os.listdir(dataset_folder+'/labels/'+labels_type)
extension = None
if labels_type == 'json':
extension = '.json'
elif labels_type == 'pascal':
extension = '.xml'
for label in labels:
if not (label.lower().endswith(extension)):
valid = False
break
return valid
"""
Logic to check labels types for a dataset
Parameters
----------
dataset_folder: str
folder of the dataset
Returns
-------
list of str
supported label types
"""
def dataset_label_types(dataset_folder):
supported_types = {"json", "pascal"}
found_types = []
label_types = os.listdir(dataset_folder+'/labels')
print(label_types)
for label_type in label_types:
if label_type == "json" or label_type == "pascal":
found_types.append(label_type)
intersected_types = list(supported_types.intersection(set(found_types)))
return intersected_types
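# Usage sketch (added for illustration; 'my_dataset' is a hypothetical path, not part
# of the original module). It assumes the folder layout the checks above expect:
# images/, labels/<labels_type>/ and objectclasses.json at the dataset root.
if __name__ == '__main__':
    dataset_folder = 'my_dataset'
    for labels_type in dataset_label_types(dataset_folder):
        print(labels_type, validate_dataset(dataset_folder, labels_type))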
|
# -*- coding: utf-8 -*-
'''
Created on Mar 27, 2017
@author: hustcc
'''
from warpart import app
# from gevent.wsgi import WSGIServer
# from gevent import monkey
# monkey.patch_all() # patch
def runserver(port=10028, debug=False):
app.run('0.0.0.0', port, debug=debug, threaded=False)
# http_server = WSGIServer(('0.0.0.0', port), app)
# http_server.serve_forever()
if __name__ == '__main__':
runserver(debug=True)
|
import unittest
import firstlib
class TestFirstLib(unittest.TestCase):
def testFunction(self):
self.assertEqual(3, 3, "3 and 3 are indeed equal... aren't they?")
# assertTrue()
# assertRaises()
self.assertEqual(firstlib.analysis.test(), 1, "test not working...")
        # also available: @unittest.skip(reason) and @unittest.skipIf(condition, reason)
@unittest.expectedFailure
def testError(self):
self.assertTrue(0/0, "I can divide 0 times 0!!")
if __name__ == "__main__":
unittest.main()
|
from imdbmovie_scrap import *
from pprint import pprint
a = scrap_top_list()
def movis():
    release_years = sorted({i['years'] for i in a})
    movie_dict = {year: [] for year in release_years}
    for movie in a:
        movie_dict[movie['years']].append(movie)
    return movie_dict
c=movis()
pprint(c)
# f=open('task2.json','w')
# json.dump(c,f,indent=4)
# f.close()
|
# Generated by Django 2.0.13 on 2019-04-11 03:46
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('sis_provisioner', '0005_auto_20190408_1712'),
]
operations = [
migrations.DeleteModel(
name='EmployeeAppointment',
),
migrations.DeleteModel(
name='UwBridgeUser',
),
]
|
"""Implementation of the SSL Adapter for the TLS Pool.
When you use cheroot directly, you can specify an
`ssl_adapter` set to an instance of this class.
Using the WSGI standard example, you might adapt
it thusly:
from cheroot import wsgi
from cheroot.ssl.tlspooladapter import TLSPoolAdapter
def my_crazy_app(environ, start_response):
status = '200 OK'
response_headers = [('Content-type','text/plain')]
start_response(status, response_headers)
return [b'Hello world!\r\nThis is the TLS Pool variant of cheroot\r\n']
addr = '0.0.0.0', 8070
server = wsgi.Server(addr, my_crazy_app, server_name='tlspool.arpa2.lab')
server.ssl_adapter = TLSPoolAdapter ('tlspool.arpa2.lab')
server.start()
In comparison to the standard WSGI server, you have added
one `import` line and set the `server.ssl_adaptor` to an
instance of the plugin class defined in this module.
The idea of the TLS Pool is to isolate long-term credentials
from application code. This allows code to be more freely
developed, and an occasional security breach to never get to
the crown jewels of the site, thanks to the strict separation
of processes maintained by the operating system. It also
allows central management of credentials, which is a nuisance
up to a point of a certain scale, where it becomes divine.
The intention of the InternetWide Architecture and specifically
the IdentityHub is to automate so much of the flow surrounding
the TLS Pool that this point of divinity is at zero. More on
http://internetwide.org -- yes, insecure as it is just a blog,
so the burden of management is avoided until our tools make
it a breeze!
"""
from . import Adapter
from ..makefile import StreamReader, StreamWriter
import tlspool
try:
from _pyio import DEFAULT_BUFFER_SIZE
except ImportError:
try:
from io import DEFAULT_BUFFER_SIZE
except ImportError:
DEFAULT_BUFFER_SIZE = -1
class TLSPoolAdapter (Adapter):
"""The TLS Pool is a separate daemon implementing TLS in a
separate process, so as to keep long-term credentials
and the management of TLS away from application logic.
This is perfect for a dynamic, pluggable environment
that might integrate scripts from a variety of mildly
unknown sources. It is generally good to contain the
problems resulting from an application breach.
"""
    def __init__ (self, server_name, *mooh):
        """Initialise this object and ignore the customary
        things: cert, key, chain, ciphers are all handled
        by the TLS Pool, so we can be blissfully ignorant.
        The other plugins in this directory expect those
        four arguments, but that information is moot (or
        mooh) to this Adapter because management issues
        like that are centralised to the TLS Pool.
        You are however permitted to provide all four args,
        where the first (usually the certificate path) is
        interpreted as the server name.  The TLS Pool will
        look for a certificate to go with that.
        """
        self.server_name = server_name
def bind (self, sock):
"""Wrap and return the socket.
TODO: Wrapping is not done here, as in Builtin?!?
"""
return super (TLSPoolAdapter,self).bind (sock)
def wrap (self, extsock):
"""Wrap the given socket in TLS and return the result,
along with WSGI environment variables in a tuple.
"""
fl = ( tlspool.PIOF_STARTTLS_LOCALROLE_SERVER |
tlspool.PIOF_STARTTLS_REMOTEROLE_CLIENT |
tlspool.PIOF_STARTTLS_IGNORE_REMOTEID )
hdl = tlspool.Connection (extsock, service='http', flags=fl)
hdl.tlsdata.localid = self.server_name
intsock = hdl.starttls ()
env = {
'wsgi.url_scheme': 'https',
'HTTPS': 'on',
'LOCAL_USER': hdl.tlsdata.localid,
'REMOTE_USER': hdl.tlsdata.remoteid,
}
return intsock, env
def makefile(self, sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE):
"""Return socket file object."""
cls = StreamReader if 'r' in mode else StreamWriter
return cls(sock, mode, bufsize)
def get_environ (self, sock):
"""Return WSGI variables to be merged into each request.
"""
return {
'wsgi.url_scheme': 'https',
'HTTPS': 'on',
}
|
from funcx.executors.high_throughput.executor import HighThroughputExecutor
__all__ = ['HighThroughputExecutor']
|
from functools import wraps
def singleton(cls):
"""装饰器,被装饰的类为单例模式"""
instances = {}
@wraps(cls)
def getinstance(*args, **kw):
if cls not in instances:
instances[cls] = cls(*args, **kw)
return instances[cls]
return getinstance
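# Usage sketch (added for illustration; AppConfig is a hypothetical class, not part of
# the original module): every call to the decorated class returns the same instance.
if __name__ == '__main__':
    @singleton
    class AppConfig(object):
        def __init__(self, name):
            self.name = name
    a = AppConfig('first')
    b = AppConfig('second')  # constructor arguments are ignored, the cached instance is returned
    assert a is b and a.name == 'first'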
|
from __future__ import unicode_literals
from functools import reduce
from sys import stdout
from django.db.models import Model
from django.db.models.query import QuerySet
from django.utils import six
from .utils import (
get_rendition_key_set,
get_url_from_image_key,
validate_versatileimagefield_sizekey_list
)
def cli_progress_bar(start, end, bar_length=50):
"""
Prints out a Yum-style progress bar (via sys.stdout.write).
`start`: The 'current' value of the progress bar.
`end`: The '100%' value of the progress bar.
`bar_length`: The size of the overall progress bar.
Example output with start=20, end=100, bar_length=50:
    [##########----------------------------------------] 20/100 (20%)
Intended to be used in a loop. Example:
end = 100
for i in range(end):
cli_progress_bar(i, end)
Based on an implementation found here:
http://stackoverflow.com/a/13685020/1149774
"""
percent = float(start) / end
hashes = '#' * int(round(percent * bar_length))
spaces = '-' * (bar_length - len(hashes))
stdout.write(
"\r[{0}] {1}/{2} ({3}%)".format(
hashes + spaces,
start,
end,
int(round(percent * 100))
)
)
stdout.flush()
class VersatileImageFieldWarmer(object):
"""
A class for creating sets of images from a VersatileImageField
"""
def __init__(self, instance_or_queryset,
rendition_key_set, image_attr, verbose=False):
"""
Arguments:
`instance_or_queryset`: A django model instance or QuerySet
`rendition_key_set`: Either a string that corresponds to a key on
settings.VERSATILEIMAGEFIELD_RENDITION_KEY_SETS
or an iterable
of 2-tuples, both strings:
[0]: The 'name' of the image size.
[1]: A VersatileImageField 'size_key'.
Example: [
('large', 'url'),
('medium', 'crop__400x400'),
('small', 'thumbnail__100x100')
]
`image_attr`: A dot-notated path to a VersatileImageField on
`instance_or_queryset`
`verbose`: bool signifying whether a progress bar should be printed
to sys.stdout
"""
if isinstance(instance_or_queryset, Model):
queryset = instance_or_queryset.__class__._default_manager.filter(
pk=instance_or_queryset.pk
)
elif isinstance(instance_or_queryset, QuerySet):
queryset = instance_or_queryset
else:
raise ValueError(
"Only django model instances or QuerySets can be processed by "
"{}".format(self.__class__.__name__)
)
self.queryset = queryset
if isinstance(rendition_key_set, six.string_types):
rendition_key_set = get_rendition_key_set(rendition_key_set)
self.size_key_list = [
size_key
for key, size_key in validate_versatileimagefield_sizekey_list(
rendition_key_set
)
]
self.image_attr = image_attr
self.verbose = verbose
@staticmethod
def _prewarm_versatileimagefield(size_key, versatileimagefieldfile):
"""
Returns a 2-tuple:
0: bool signifying whether the image was successfully pre-warmed
1: The url of the successfully created image OR the path on storage of
the image that was not able to be successfully created.
Arguments:
        `size_key`: A VersatileImageField size key. Examples:
* 'crop__800x450'
* 'thumbnail__800x800'
`versatileimagefieldfile`: A VersatileImageFieldFile instance
"""
versatileimagefieldfile.create_on_demand = True
try:
url = get_url_from_image_key(versatileimagefieldfile, size_key)
        except Exception:
success = False
url_or_filepath = versatileimagefieldfile.name
else:
success = True
url_or_filepath = url
return (success, url_or_filepath)
def warm(self):
"""
Returns a 2-tuple:
[0]: Number of images successfully pre-warmed
[1]: A list of paths on the storage class associated with the
VersatileImageField field being processed by `self` of
files that could not be successfully seeded.
"""
num_images_pre_warmed = 0
failed_to_create_image_path_list = []
total = self.queryset.count() * len(self.size_key_list)
for a, instance in enumerate(self.queryset, start=1):
for b, size_key in enumerate(self.size_key_list, start=1):
success, url_or_filepath = self._prewarm_versatileimagefield(
size_key,
reduce(getattr, self.image_attr.split("."), instance)
)
if success is True:
num_images_pre_warmed += 1
if self.verbose:
cli_progress_bar(num_images_pre_warmed, total)
else:
failed_to_create_image_path_list.append(url_or_filepath)
if a * b == total:
stdout.write('\n')
stdout.flush()
return (num_images_pre_warmed, failed_to_create_image_path_list)
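# Usage sketch (added for illustration; the Person model and 'headshot' field are
# hypothetical, not part of this module): pre-warm every rendition named in the
# 'person_headshot' rendition key set for all Person instances.
#     from myapp.models import Person
#     warmer = VersatileImageFieldWarmer(
#         instance_or_queryset=Person.objects.all(),
#         rendition_key_set='person_headshot',
#         image_attr='headshot',
#         verbose=True
#     )
#     num_created, failed_to_create = warmer.warm()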
|
import uuid
import datetime
from django.db import models
from django.core.exceptions import ValidationError
from django.contrib.auth.models import User
from taggit.managers import TaggableManager
from preferences.models import Preferences
from epl.custommodels import IntegerRangeField, FloatRangeField
from util.file_validator import FileValidator
from timemap.constants import BRANCH_NAME_LEN, BRANCH_DESCRIPTION_LEN, STORY_TITLE_LEN, \
STORY_DESCRIPTION_LEN, STORY_TEXT_LEN, MAP_BASE_FOLDER_LEN, \
MAP_TITLE_LEN, MAP_AUTHOR_LEN, UPLOAD_EXTENSIONS, \
UPLOAD_MIME_TYPES, BASE_URL_LEN, KEY_LEN
from util.email import emailer, email_template
class Branch(models.Model):
class Meta:
verbose_name_plural = "Branches"
BRANCH = "B"
STREET_CAR = "S"
BOOK_MOBILE = "M"
BRANCH_TYPE_CHOICES = (
(BRANCH, 'branch'),
(STREET_CAR, 'street'),
(BOOK_MOBILE, 'mobile'),
)
name = models.CharField(db_index=True, max_length=BRANCH_NAME_LEN)
description = models.TextField(max_length=BRANCH_DESCRIPTION_LEN)
start_year = IntegerRangeField(db_index=True, min_value=1900, max_value=3000)
end_year = IntegerRangeField(db_index=True, min_value=1900, max_value=3000, blank=True, null=True)
floor_plan = models.FileField(upload_to="floor_plans")
latitude_help = "Latitude range : -90:90"
latitude = FloatRangeField(min_value=-90, max_value=90, help_text=latitude_help)
longitude_help = "Longitude range : -180:180"
longitude = FloatRangeField(min_value=-180, max_value=180, help_text=longitude_help)
btype = models.CharField(db_index=True,
max_length=1,
choices=BRANCH_TYPE_CHOICES,
default=BRANCH)
def clean(self):
if self.end_year and self.start_year > self.end_year:
raise ValidationError("End year must occur after start year")
def __unicode__(self):
return self.name
def media_upload_to(instance, filename):
ext = filename.split('.')[-1]
filename = "%s.%s" % (uuid.uuid4(), ext)
return instance.CONTENT_TYPE_DICT[instance.content_type]+ "/" + filename
class Story(models.Model):
TEXT = "T"
LINK = "L"
IMAGE = "I"
PDF = "P"
AUDIO = "A"
VIDEO = "V"
CONTENT_TYPE_CHOICES = (
(TEXT, 'text'),
(LINK, 'link'),
(IMAGE, 'image'),
(PDF, 'pdf'),
(AUDIO, 'audio'),
(VIDEO, 'video'),
)
CONTENT_TYPE_DICT = dict(CONTENT_TYPE_CHOICES)
class Meta:
verbose_name_plural = "Stories"
title = models.CharField(db_index=True, max_length=STORY_TITLE_LEN)
description = models.TextField(db_index=True, max_length=STORY_DESCRIPTION_LEN)
story_text = models.TextField(max_length=STORY_TEXT_LEN, blank=True)
content_type = models.CharField(db_index=True,
max_length=1,
choices=CONTENT_TYPE_CHOICES,
default=TEXT)
link_url = models.URLField(blank=True, error_messages={'invalid': "Please input a valid URL (for example: http://www.example.com)."})
media_file = models.FileField(upload_to=media_upload_to,
blank=True,
validators=[FileValidator(allowed_extensions=UPLOAD_EXTENSIONS,
allowed_mimetypes=UPLOAD_MIME_TYPES)])
year = IntegerRangeField(db_index=True, min_value=1900, max_value=3000)
month = IntegerRangeField(min_value=1, max_value=12, blank=True, null=True)
day = IntegerRangeField(min_value=1, max_value=31, blank=True, null=True)
branch = models.ForeignKey('Branch', blank=True, null=True)
keywords = TaggableManager(verbose_name="keywords",
help_text=("A comma-separated list of keywords"),
blank=True)
user = models.ForeignKey(User)
anonymous = models.BooleanField(default=False)
public_approved = models.BooleanField(default=False)
def clean(self):
try:
day = self.day if self.day else 1
month = self.month if self.month else 1
date = "%s/%s/%s" % (day, month, self.year)
datetime.datetime.strptime(date, "%d/%m/%Y")
except ValueError:
#TODO: Should make the resulting error clearer
raise ValidationError("Please enter a valid date.")
def __unicode__(self):
return self.title
class Map(models.Model):
class Meta:
verbose_name_plural = "Maps"
base_folder = models.CharField(max_length=MAP_BASE_FOLDER_LEN)
title = models.CharField(max_length=MAP_TITLE_LEN)
author = models.CharField(max_length=MAP_AUTHOR_LEN)
published = IntegerRangeField(min_value=1900, max_value=3000)
start_year = IntegerRangeField(min_value=1900, max_value=3000)
end_year = IntegerRangeField(min_value=1900, max_value=3000)
def clean(self):
if self.start_year > self.end_year:
raise ValidationError("End year must occur after start year.")
def __unicode__(self):
return self.title
class FeaturedStory(models.Model):
class Meta:
verbose_name_plural = "Featured Stories"
story = models.ForeignKey('Story')
def __unicode__(self):
return self.story.title
class TimemapPreferences(Preferences):
class Meta:
verbose_name_plural = "Timemap Preferences"
__module__ = 'preferences.models'
timeline_init_date = models.DateField(default=datetime.date(2013, 1, 1))
timeline_start_date = models.DateField(default=datetime.date(1900, 1, 1))
timeline_end_date = models.DateField(default=datetime.date(2014, 1, 1))
base_url = models.CharField(max_length=BASE_URL_LEN, default="http://serve.ctrlshiftcreate.com/")
facebook_key = models.CharField(max_length=KEY_LEN, default='150662938425048')
google_key = models.CharField(max_length=KEY_LEN, default='AIzaSyA59Z_Kym_voRl--cHJzYkep3Cs-_71')
# Signal setup
from django.dispatch.dispatcher import receiver
from django.db.models.signals import pre_save, pre_delete
@receiver(pre_save)
def validate_model(sender, **kwargs):
"""
    Force a clean call when certain models are saved in order to
    keep model constraints
"""
if sender in [Branch, Story, Map] and 'raw' in kwargs and not kwargs['raw']:
kwargs['instance'].full_clean()
@receiver(pre_delete)
def story_delete(sender, instance, **kwargs):
"""
Delete media files when stories are deleted
"""
if sender in [Story] and instance.media_file:
instance.media_file.delete(False)
|
import unittest
from aws_lambda_decorators.classes import Parameter, SSMParameter, BaseParameter
class ParamTests(unittest.TestCase):
def test_can_create_base_parameter(self):
base_param = BaseParameter("var_name")
self.assertEqual("var_name", base_param.get_var_name())
def test_annotations_from_key_returns_annotation(self):
key = "simple[annotation]"
response = Parameter.get_annotations_from_key(key)
self.assertTrue(response[0] == "simple")
self.assertTrue(response[1] == "annotation")
def test_can_not_add_non_pythonic_var_name_to_ssm_parameter(self):
param = SSMParameter("tests", "with space")
with self.assertRaises(SyntaxError):
param.get_var_name()
|
"""The Bocadillo API class."""
import os
from functools import partial
from typing import Any, Dict, List, Optional, Tuple, Type, Union, Callable
from starlette.middleware.cors import CORSMiddleware
from starlette.middleware.gzip import GZipMiddleware
from starlette.middleware.httpsredirect import HTTPSRedirectMiddleware
from starlette.middleware.trustedhost import TrustedHostMiddleware
from starlette.middleware.wsgi import WSGIResponder
from starlette.testclient import TestClient
from uvicorn.main import get_logger, run
from uvicorn.reloaders.statreload import StatReload
from .cors import DEFAULT_CORS_CONFIG
from .error_handlers import ErrorHandler, convert_exception_to_response
from .events import EventsMixin
from .exceptions import HTTPError
from .hooks import HooksMixin
from .media import Media
from .meta import APIMeta
from .recipes import RecipeBase
from .redirection import Redirection
from .request import Request
from .response import Response
from .routing import RoutingMixin
from .static import static
from .templates import TemplatesMixin
from .types import ASGIApp, ASGIAppInstance, WSGIApp
class API(
TemplatesMixin, RoutingMixin, HooksMixin, EventsMixin, metaclass=APIMeta
):
"""The all-mighty API class.
This class implements the [ASGI](https://asgi.readthedocs.io) protocol.
# Example
```python
>>> import bocadillo
>>> api = bocadillo.API()
```
# Parameters
templates_dir (str):
The name of the directory where templates are searched for,
relative to the application entry point.
Defaults to `"templates"`.
static_dir (str):
The name of the directory containing static files, relative to
the application entry point. Set to `None` to not serve any static
files.
Defaults to `"static"`.
static_root (str):
The path prefix for static assets.
Defaults to `"static"`.
allowed_hosts (list of str, optional):
A list of hosts which the server is allowed to run at.
If the list contains `"*"`, any host is allowed.
Defaults to `["*"]`.
enable_cors (bool):
If `True`, Cross Origin Resource Sharing will be configured according
to `cors_config`. Defaults to `False`.
See also [CORS](../topics/features/cors.md).
cors_config (dict):
A dictionary of CORS configuration parameters.
Defaults to `dict(allow_origins=[], allow_methods=["GET"])`.
enable_hsts (bool):
If `True`, enable HSTS (HTTP Strict Transport Security) and automatically
redirect HTTP traffic to HTTPS.
Defaults to `False`.
See also [HSTS](../topics/features/hsts.md).
enable_gzip (bool):
If `True`, enable GZip compression and automatically
compress responses for clients that support it.
Defaults to `False`.
See also [GZip](../topics/features/gzip.md).
gzip_min_size (int):
If specified, compress only responses that
have more bytes than the specified value.
Defaults to `1024`.
media_type (str):
Determines how values given to `res.media` are serialized.
Can be one of the supported media types.
Defaults to `"application/json"`.
See also [Media](../topics/request-handling/media.md).
"""
_error_handlers: List[Tuple[Type[Exception], ErrorHandler]]
def __init__(
self,
templates_dir: str = "templates",
static_dir: Optional[str] = "static",
static_root: Optional[str] = "static",
allowed_hosts: List[str] = None,
enable_cors: bool = False,
cors_config: dict = None,
enable_hsts: bool = False,
enable_gzip: bool = False,
gzip_min_size: int = 1024,
media_type: Optional[str] = Media.JSON,
):
super().__init__(templates_dir=templates_dir)
self._error_handlers = []
self._extra_apps: Dict[str, Any] = {}
self.client = self._build_client()
if static_dir is not None:
if static_root is None:
static_root = static_dir
self.mount(static_root, static(static_dir))
self._media = Media(media_type=media_type)
self._middleware = []
self._asgi_middleware = []
if allowed_hosts is None:
allowed_hosts = ["*"]
self.add_asgi_middleware(
TrustedHostMiddleware, allowed_hosts=allowed_hosts
)
if enable_cors:
if cors_config is None:
cors_config = {}
cors_config = {**DEFAULT_CORS_CONFIG, **cors_config}
self.add_asgi_middleware(CORSMiddleware, **cors_config)
if enable_hsts:
self.add_asgi_middleware(HTTPSRedirectMiddleware)
if enable_gzip:
self.add_asgi_middleware(GZipMiddleware, minimum_size=gzip_min_size)
def get_template_globals(self):
return {"url_for": self.url_for}
def _build_client(self) -> TestClient:
return TestClient(self)
def mount(self, prefix: str, app: Union[ASGIApp, WSGIApp]):
"""Mount another WSGI or ASGI app at the given prefix.
# Parameters
prefix (str): A path prefix where the app should be mounted, e.g. `"/myapp"`.
app: An object implementing [WSGI](https://wsgi.readthedocs.io) or [ASGI](https://asgi.readthedocs.io) protocol.
"""
if not prefix.startswith("/"):
prefix = "/" + prefix
self._extra_apps[prefix] = app
def recipe(self, recipe: RecipeBase):
recipe.apply(self)
@property
def media_type(self) -> str:
"""The currently configured media type.
When setting it to a value outside of built-in or custom media types,
an `UnsupportedMediaType` exception is raised.
"""
return self._media.type
@media_type.setter
def media_type(self, media_type: str):
self._media.type = media_type
@property
def media_handlers(self) -> dict:
"""The dictionary of supported media handlers.
You can access, edit or replace this at will.
"""
return self._media.handlers
@media_handlers.setter
def media_handlers(self, media_handlers: dict):
self._media.handlers = media_handlers
def add_error_handler(
self, exception_cls: Type[Exception], handler: ErrorHandler
):
"""Register a new error handler.
# Parameters
exception_cls (Exception class):
The type of exception that should be handled.
handler (callable):
The actual error handler, which is called when an instance of
`exception_cls` is caught.
Should accept a `req`, a `res` and an `exc`.
"""
self._error_handlers.insert(0, (exception_cls, handler))
def error_handler(self, exception_cls: Type[Exception]):
"""Register a new error handler (decorator syntax).
# Example
```python
>>> import bocadillo
>>> api = bocadillo.API()
>>> @api.error_handler(KeyError)
... def on_key_error(req, res, exc):
... pass # perhaps set res.content and res.status_code
```
"""
def wrapper(handler):
self.add_error_handler(exception_cls, handler)
return handler
return wrapper
def _find_handler(
self, exception_cls: Type[Exception]
) -> Optional[ErrorHandler]:
for cls, handler in self._error_handlers:
if issubclass(exception_cls, cls):
return handler
return None
def _handle_exception(
self, req: Request, res: Response, exception: Exception
) -> None:
"""Handle an exception raised during dispatch.
If no handler was registered for the exception, it is raised.
"""
handler = self._find_handler(exception.__class__)
if handler is None:
raise exception from None
handler(req, res, exception)
if res.status_code is None:
res.status_code = 500
def redirect(
self,
*,
name: str = None,
url: str = None,
permanent: bool = False,
**kwargs
):
"""Redirect to another route.
# Parameters
name (str): name of the route to redirect to.
        url (str): URL of the route to redirect to, required if `name` is omitted.
permanent (bool):
If `False` (the default), returns a temporary redirection (302).
If `True`, returns a permanent redirection (301).
kwargs (dict):
Route parameters.
# Raises
Redirection: an exception that will be caught by #API.dispatch().
"""
if name is not None:
url = self.url_for(name=name, **kwargs)
else:
assert url is not None, "url is expected if no route name is given"
raise Redirection(url=url, permanent=permanent)
def add_middleware(self, middleware_cls, **kwargs):
"""Register a middleware class.
See also [Middleware](../topics/features/middleware.md).
# Parameters
middleware_cls (Middleware class):
A subclass of #~some.middleware.Middleware.
"""
self._middleware.insert(0, (middleware_cls, kwargs))
def add_asgi_middleware(self, middleware_cls, *args, **kwargs):
"""Register an ASGI middleware class.
# Parameters
middleware_cls (Middleware class):
A class that conforms to ASGI standard.
"""
self._asgi_middleware.insert(0, (middleware_cls, args, kwargs))
async def dispatch(self, req: Request) -> Response:
"""Dispatch a req and return a response.
For the exact algorithm, see
[How are requests processed?](../topics/request-handling/routes-url-design.md#how-are-requests-processed).
# Parameters
req (Request): an inbound HTTP request.
# Returns
response (Response): an HTTP response.
"""
res = Response(req, media=self._media)
try:
match = self._router.match(req.url.path)
if match is None:
raise HTTPError(status=404)
route, params = match.route, match.params
route.raise_for_method(req)
try:
hooks = self.get_hooks().on(route, req, res, params)
async with hooks:
await route(req, res, **params)
except Redirection as redirection:
res = redirection.response
except Exception as e:
self._handle_exception(req, res, e)
return res
def find_app(self, scope: dict) -> ASGIAppInstance:
"""Return the ASGI application suited to the given ASGI scope.
This is also what `API.__call__(self)` returns.
# Parameters
scope (dict):
An ASGI scope.
# Returns
app:
An ASGI application instance
(either `self` or an instance of a sub-app).
"""
if scope["type"] == "lifespan":
return self.handle_lifespan(scope)
path: str = scope["path"]
# Return a sub-mounted extra app, if found
for prefix, app in self._extra_apps.items():
if not path.startswith(prefix):
continue
# Remove prefix from path so that the request is made according
# to the mounted app's point of view.
scope["path"] = path[len(prefix) :]
try:
return app(scope)
except TypeError:
return WSGIResponder(app, scope)
def app(s: dict):
async def asgi(receive, send):
nonlocal s
req = Request(s, receive)
res = await self._get_response(req)
await res(receive, send)
return asgi
app = self._asgi_middleware_chain(app)
return app(scope)
async def _get_response(self, req: Request) -> Response:
error_handler = self._find_handler(HTTPError)
convert = partial(
convert_exception_to_response,
error_handler=error_handler,
media=self._media,
)
dispatch = convert(self.dispatch)
for cls, kwargs in self._middleware:
middleware = cls(dispatch, **kwargs)
dispatch = convert(middleware)
return await dispatch(req)
def _asgi_middleware_chain(self, app: ASGIApp) -> ASGIApp:
for middleware_cls, args, kwargs in self._asgi_middleware:
app: ASGIApp = middleware_cls(app, *args, **kwargs)
return app
def run(
self,
host: str = None,
port: int = None,
debug: bool = False,
log_level: str = "info",
_run: Callable = run,
):
"""Serve the application using [uvicorn](https://www.uvicorn.org).
For further details, refer to
[uvicorn settings](https://www.uvicorn.org/settings/).
# Parameters
host (str):
The host to bind to.
Defaults to `"127.0.0.1"` (localhost).
If not given and `$PORT` is set, `"0.0.0.0"` will be used to
serve to all known hosts.
port (int):
The port to bind to.
Defaults to `8000` or (if set) the value of the `$PORT` environment
variable.
debug (bool):
Whether to serve the application in debug mode. Defaults to `False`.
log_level (str):
A logging level for the debug logger. Must be a logging level
from the `logging` module. Defaults to `"info"`.
"""
if "PORT" in os.environ:
port = int(os.environ["PORT"])
if host is None:
host = "0.0.0.0"
if host is None:
host = "127.0.0.1"
if port is None:
port = 8000
if debug:
reloader = StatReload(get_logger(log_level))
reloader.run(
run,
{
"app": self,
"host": host,
"port": port,
"log_level": log_level,
"debug": debug,
},
)
else:
_run(self, host=host, port=port)
def __call__(self, scope: dict) -> ASGIAppInstance:
return self.find_app(scope)
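# Minimal usage sketch (added for illustration; the `route` decorator and the
# (req, res) handler signature come from RoutingMixin and are assumed here):
#     api = API()
#     @api.route("/")
#     async def index(req, res):
#         res.media = {"hello": "world"}
#     if __name__ == "__main__":
#         api.run()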
|
from .supportr import Supportr
from .predict import predict
|
#!/usr/bin/env python3
import sys
# Functions
def read_numbers():
try:
n = int(sys.stdin.readline())
k = int(sys.stdin.readline())
v = [int(sys.stdin.readline()) for _ in range(n)]
except ValueError:
return 0, 0, None
return n, k, v
def compute_unfairness(n, k, v):
v.sort() # Order from smallest to largest
u = v[n - 1] - v[0] # Initial unfairness
for i in range(0, n - k + 1):
d = v[i + k - 1] - v[i] # Difference between max and min in sequence
u = min(u, d) # Take smallest unfairness
return u
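# Worked example (added for illustration, not part of the original script):
#     >>> compute_unfairness(7, 3, [10, 100, 300, 200, 1000, 20, 30])
#     20
# The sorted list is [10, 20, 30, 100, 200, 300, 1000]; the k=3 window [10, 20, 30]
# has the smallest max-min difference, so the unfairness is 20.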
# Main execution
def main():
n, k, v = read_numbers()
while n:
print(compute_unfairness(n, k, v))
n, k, v = read_numbers()
if __name__ == '__main__':
main()
|
def pourcent(chaîne_de_caractères):
nombre_de_0 = chaîne_de_caractères.count('0')
if len(chaîne_de_caractères):
return nombre_de_0 / len(chaîne_de_caractères)
else:
return 0
def non_lue():
pass
|
#!/usr/bin/env python3
"""
Tolkein.
usage: tolkein [<command>] [<args>...] [-h|--help] [--version]
commands:
-h, --help show this
-v, --version show version number
"""
from docopt import docopt
from ._version import __version__
if __name__ == '__main__':
docopt(__doc__,
version=__version__,
options_first=True)
|
"""Implementation of Rule L012."""
from sqlfluff.core.rules.std.L011 import Rule_L011
class Rule_L012(Rule_L011):
"""Implicit aliasing of column not allowed. Use explicit `AS` clause.
    NB: This rule inherits its functionality from :obj:`Rule_L011` but is
separate so that they can be enabled and disabled separately.
"""
_target_elems = ("select_clause_element",)
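# Illustrative example (added; not part of sqlfluff). Column aliases this rule flags
# versus their fixed form, assuming default configuration (column names are made up):
#     SELECT voyage_id vid FROM voyages     -- implicit alias: L012 violation
#     SELECT voyage_id AS vid FROM voyages  -- explicit AS clause: ok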
|
import argparse
import os
import tempfile
from unittest import TestCase
import unittest
from mimic.utils.filehandling import create_dir_structure
from dataclasses import dataclass
import torch
import torch.optim as optim
from tensorboardX import SummaryWriter
from mimic.networks.classifiers.utils import ExperimentDf, get_models, Callbacks, CallbacksProto
@dataclass
class Args:
dir_fid = None
dataset = 'mimic'
config_path = None
def __init__(self, tmpdirname: str):
self.dir_experiment = tmpdirname
self.dir_clf = os.path.join(tmpdirname, 'clf_dir')
self.dir_logs_clf = tmpdirname
class TestFilehandling(TestCase):
def test_create_dir_structure(self):
"""
        Checks that the function create_dir_structure runs without errors.
"""
with tempfile.TemporaryDirectory() as tmpdirname:
args = Args(tmpdirname)
_ = create_dir_structure(args)
if __name__ == '__main__':
unittest.main()
|
from server.models import *
from django.db import migrations, models
def enable_plugins(apps, schema_editor):
Machine = apps.get_model("server", "Machine")
MachineDetailPlugin = apps.get_model("server", "MachineDetailPlugin")
plugin_count = MachineDetailPlugin.objects.exclude(name='MachineDetailSecurity').count()
if plugin_count == 0:
plugin = MachineDetailPlugin(name='MachineDetailSecurity', order=1)
plugin.save()
class Migration(migrations.Migration):
dependencies = [
('server', '0038_auto_20160704_1005'),
]
operations = [
migrations.CreateModel(
name='MachineDetailPlugin',
fields=[
('id', models.AutoField(verbose_name='ID',
serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=255)),
('description', models.TextField(null=True, blank=True)),
('order', models.IntegerField()),
('type', models.CharField(default=b'builtin', max_length=255, choices=[(b'facter', b'Facter'), (
b'munkicondition', b'Munki Condition'), (b'builtin', b'Built In'), (b'custom', b'Custom Script')])),
],
options={
'ordering': ['order'],
},
),
migrations.AlterField(
model_name='plugin',
name='type',
field=models.CharField(default=b'builtin', max_length=255, choices=[(b'facter', b'Facter'), (
b'munkicondition', b'Munki Condition'), (b'builtin', b'Built In'), (b'custom', b'Custom Script')]),
),
migrations.RunPython(enable_plugins),
]
|
import json
import cgi
from datetime import datetime, timedelta
from uuid import uuid4
from libs.contents.contents import *
from libs.table.table import del_row, update_cell, create_empty_row_, update_row_
from libs.perm.perm import is_admin, user_has_permission
from core.core import *
from core.union import cache, invalidate_cache, response_json
# from libs.captcha.captcha import *
def form_tree_comm(request, docs):
""" forms of documents tree
"""
tree = {doc['_id']: doc for doc in docs}
for doc in docs:
doc['child'] = []
for doc in docs:
parent = doc.get("parent", None)
if parent and parent != '_':
tree[parent]['child'].append(doc)
docss = {"_id": "_", "child": [doc for doc in docs if "parent" not in doc or doc['parent'] == '_']}
return docss
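# Illustrative example (added): form_tree_comm links each doc to its parent via the
# 'parent' field and returns a synthetic root with id '_', e.g. (fields shortened):
#     docs = [{'_id': 'a', 'parent': '_'}, {'_id': 'b', 'parent': 'a'}]
#     form_tree_comm(request, docs)
#     -> {'_id': '_', 'child': [{'_id': 'a', 'parent': '_', 'child': [{'_id': 'b', ...}]}]}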
def str_date_j():
return time.strftime("%Y-%m-%d %H:%M:%S")
async def add_comm_post(request):
# return json.dumps(current_id, title, link, proc_id)
"""current_id это id ветки"""
# ip = request.environ.get('REMOTE_ADDR')
data = await request.post(); ip = None
print('data->', data)
#get ip address client
peername = request.transport.get_extra_info('peername'); host=None
if peername is not None:
host, port = peername
ip = host
# print ('host, port->', host, port)
user = get_current_user(request, True)
if check_ban(request, host, user):
return response_json(request, {"result":"fail", "error":"Ваш ip или аккаунт забанен на этом сайте, свяжитесь с администрацией"})
else: title = data.get('title')
if not user_has_permission(request, 'des:obj', 'add_com') and not user_has_permission(request, 'des:obj', 'add_com_pre'):
return response_json(request, {"result":"fail", "error":"no comment"})
if not check_user_rate(request, user):
return response_json(request, {"result":"fail", "error":"Вы не можете оставлять сообщения слишком часто, из-за отрицательной кармы"})
doc_id = data.get('comm_id')
id = data.get('id')
if user_is_logged_in(request): title = get_current_user(request)
# tle = get_doc(request, doc_id )
# print( doc_id )
# print( tle )
# tle = get_doc(request, doc_id )['doc']['title']
title_ = ct(request, title )
title = no_script( title ) if title else 'Аноним'
parent = data.get('parent', "_")
descr = data.get( 'descr')
descr = no_script( descr )
descr = descr.replace('\n', '<br/>')
    # return if neither permission is present; if only one is missing, the check below handles it
pre = 'true' if not user_has_permission(request, 'des:obj', 'add_com') else 'false'
date = str( time.strftime("%Y-%m-%d %H:%M:%S") )
user_ = get_current_user_name(request, title ) or title
our = "true" if user_is_logged_in(request) else "false"
body = re.sub(r'(http?://([a-z0-9-]+([.][a-z0-9-]+)+)+(/([0-9a-z._%?#]+)+)*/?)', r'<a href="\1">\1</a>', descr)
    # add the child to its parent document
request.db.doc.update({ "_id": parent }, { "$addToSet": { "child": doc_id } } )
    # store the comment in the comments reference table
doc_id_comm, updated = create_empty_row_(request, 'des:comments', parent, '', { "user":'user:'+title })
data = {"id":doc_id_comm, "title":title_, "date":date, "body":body, "parent":parent, "owner":id, 'ip':ip, 'name':user_, "our":our, 'pre':pre }
update_row_(request, 'des:comments', doc_id_comm, data, parent)
if 'notify_user' in dir(settings) and settings.notify_user:
# if 'notify_user' in settings and settings.notify_user:
# link = make_link('show_object', {'doc_id':doc_id }, True)+'#comm_'+ str( id )
link = settings.domain+'/news/'+doc_id+'#comm_'+ str( id )
subject = 'User {} add comment'.format( title )
sub('user:'+title, link, subject)
print('id1', id)
id = get_doc(request, id)['_id']
print('id2', id)
invalidate_cache('single_page', id=id)
# rev = get_doc(request, doc_id)['doc']['rev']
# reset_cache(type="doc", doc_id=rev)
    # update the comment counter kept in a separate document
request.db.doc.update({ "_id": doc_id }, { "$inc": { "count_branch":1 } } )
# return json.dumps({"result":"ok", "content":data.update({"title":title}), "hash":""})
return response_json(request, {"result":"ok", "content":data, "hash":""})
async def edit_comm_post(request):
if not user_has_permission(request, 'des:obj', 'add_com'): return {"result":"fail", "error":"no comment"}
if not user_is_logged_in(request): return response_json(request, {"result":"fail", "error":"no comment"})
data = await request.post()
comm_id = data.get('comm_id')
body = data.get('body')
user = data.get('user')
if user == get_current_user(request) or is_admin(request):
if 'child' in get_doc(request, comm_id) and not is_admin(request):
return response_json(request, {"result":"fail", "error":"comment already answered"})
doc = request.db.doc.update({'_id': comm_id}, {"$set": {"doc.body." + cur_lang(request): body}})
from core.union import invalidate_cache
invalidate_cache('single_page')
return response_json(request, {"result":"ok", "id":comm_id})
else:
return response_json(request, {"result":"fail", "error":"access denied"})
async def del_comm_post(request):
""" doc_id - id самого коментария """
data = await request.post()
print(data)
comm_id = data.get( 'comm_id')
doc = get_doc(request, comm_id)
if is_admin(request) or user_has_permission(request, 'des:obj', 'del_comm'):
        # update the comment counter kept in a separate document
request.db.doc.update({ "_id": doc['doc']['owner'] }, { "$inc": { "count_branch":-1 } } )
if 'child' in doc:
if len(doc['child']):
request.db.doc.update({"_id":comm_id}, {"$set":{'doc.is_del':'true'}})
return response_json(request, {"result":"ok", "action":"del_dom", "id":comm_id})
else:
del_row(request, 'des:comments', { comm_id:comm_id })
return response_json(request, {"result":"ok", "id":comm_id})
else: return response_json(request, {"result":"fail", "error":"error sequrity"})
def add_vote_comm_post(request):
"""Вычисляем данные в посте сколько проголосовало и тд."""
data = request.post()
vote = data.get('vote')
comm_id = data.get('comm_id')
    comm = get_doc(request, comm_id)
    # doc = db.tree.find_one({'owner':doc_id})
    user = get_current_user(request, True)
from libs.sites.sites import check_time
# comm = doc['tree'][comm_id]
if check_time( comm['doc']['date'], 'days', int( get_const_value(request, 'vote_timeout') ) ):
return response_json(request, {"result":"fail", "error":"Голосование уже закончилось"})
if not 'vote' in comm : comm['vote'] = {"score":0,"votes_count":0, "votes_count_plus":0,"votes_count_minus":0, "voted":{}}
if not user_has_permission(request, 'des:obj', 'vote_com'): return response_json(request, {"result":"fail","error":"Не имеете права голоса"})
if not is_admin(request) and user in comm['vote']['voted'] : return response_json(request, {"result":"fail","error":"Повторное голосование запрещено"})
if not is_admin(request) and user == 'user:'+ct(request, comm['title']): return response_json(request, {"result":"fail","error":"Голосовать за себя запрещено"})
dt = datetime.today().strftime('%Y-%m-%d')
user_f = get_doc(request, user)
if not 'vote' in user_f : user_f['vote'] = {}
if not dt in user_f['vote'] : user_f['vote'][dt] = {'up': 0, 'down': 0}
if not is_admin(request) and int(user_f['vote'][dt]['up']) + int(user_f['vote'][dt]['down']) >= int(float(user_f['doc']['rate'])+1.25):
return response_json(request, {"result":"fail","error":"Лимит голосов за сегодня исчерпан"})
user_f['vote'][dt][vote] += 1
request.db.doc.save(user_f)
comm['vote']['voted'][user] = vote
if vote == 'up':
comm['vote']['score'] += 1
comm['vote']['votes_count_plus'] += 1
else:
comm['vote']['score'] -= 1
comm['vote']['votes_count_minus'] += 1
comm['vote']['votes_count'] += 1
request.db.doc.save(comm)
comm_vote = comm['vote']
    # award rating points to the user
# u_id = 'user:'+ct(comm['title'])
u_id = ct(request, comm['doc']['user'] )
u = get_doc(request, u_id)
if u:
if not 'rate' in u['doc']:
u['doc']['rate'] = '0'
request.db.doc.save(u)
if float(u['doc']['rate']) >= 17:
rate = float(u['doc']['rate']) + (0.02 if vote == 'up' else -0.1)
else: rate = float(u['doc']['rate']) + (0.2 if vote == 'up' else -0.1)
# rate =+ 1 if vote == 'up' else -1
update_cell(request, str(u_id), 'des:users', 'rate', str(rate) )
return response_json(request, {"result":"ok", "score":comm_vote["score"],"votes_count":comm_vote["score"],"charge_string":"","sign":"positive",
"votes_count_plus":comm_vote["votes_count_plus"],"votes_count_minus":comm_vote["votes_count_minus"],"is_positive":True})
def ban_comm_post(request):
if not is_admin(request): return response_json(request, {"result":"fail", "error":"no ban"})
if not user_is_logged_in(request): return response_json(request, {"result":"fail", "error":"no comment"})
data = request.post()
proc_id = data.get('proc_id')
id_comm = data.get('branch_id')
doc = request.db.doc.find_one({'_id':id_comm})
doc = doc['doc']
ip = doc['ip'] if 'ip' in doc else ''
# try:
lst = [x.strip() for x in get_const_value(request, 'ban_comm', '').split(',')]
# die([lst, ip, branch])
if not ip in lst:
lst.append(ip)
set_const_value(request, 'ban_comm', ','.join(lst))
user_name = ct(request, doc['user'])
user = get_doc(request, 'user:'+user_name)
if user:
user['doc']['ban'] = 'true'
request.db.doc.save(user)
return response_json(request, {"result":"ok", "user":user_name})
#===========================================================================================================
def conf_menu_get(request, action):
sss = '/tree/data/'+action
return templ('libs.admin:conf_menu', request, dict(proc_id=action, url='/tree/data/'+action))
def count_branch(doc):
# return 1
ctr = 0
tree = doc['tree']
for res in tree.keys():
ctr +=1
return ctr-1
def get_doc_tree(request, owner, tree_id):
def make_doc_tree(request):
doc = {"_id": uuid4().hex, "type": "tree", "tree": { "_": { "children": [ ], "parent": "",
"title": { "ru":"корень", "en": "root" } }}, "seq_id_tree": 1, "owner": owner, "sub_type": tree_id}
request.db.tree.save(doc)
return doc
if not owner:
doc = request.db.tree.find_one({'_id':tree_id})
if not doc: doc = make_doc_tree(request)
return doc
doc = request.db.tree.find_one({'owner':owner})
if doc: return doc
return make_doc_tree(request)
def tree_post(request, proc_id):
if proc_id.startswith('tree:'):
if not user_has_permission(request, proc_id[5:], 'view'):
return response_json(request, {"result": "fail", "error": ct(request, "You have no permission.")})
return tree_data(request, proc_id, False)
else:
data = request.post()
owner = data.get('owner', False)
proc_id2 = data.get('proc_id', False)
return tree_data(request, proc_id, owner) if owner else tree_data(request, proc_id2, False)
def tree_data(request, proc_id, owner):
""" Берет из базы данные формирует из них json и возвращает в нужный шаблон"""
    # proc_id = com:des:obj on the first call
doc = get_doc_tree(request, owner, proc_id)
proc_id = doc['_id']
def translate_(branch):
new_children = []
branch['title'] = ct(request, branch['title'])
branch['descr'] = ct(request, branch['descr'] )if 'descr' in branch else ''
for child in branch['children']:
new_children.append(translate_(child))
branch['children'] = new_children
return branch
docs = [res for res in request.db.doc.find({'doc_type':'des:comments', 'doc.owner':owner}).sort('doc.date', -1) ]
from libs.sites.sites import get_full_docs
docs = get_full_docs(request, docs)
value = form_tree_comm(request, docs )
# value = translate_(form_tree(db.tree.find_one({'_id':proc_id})))
return response_json(request, {"result":"ok", "content":value, "proc_id":proc_id})
def check_ban(request, ip, user):
lst = [x.strip() for x in get_const_value(request, 'ban_comm', '').split(',')]
user = get_doc(request, user)
return (user and 'ban' in user['doc'] and user['doc']['ban'] == 'true') or ip in lst
def accept_comm_post(request):
if not is_admin(request) and not user_has_permission(request, 'des:comments', 'edit'):
return response_json(request, {"result":"fail", "error":"no has permission"})
data = get_post(request)
doc_id = data.get('doc_id')
doc = request.db.doc.find_one({'_id':doc_id})
doc['doc']['pre'] = 'false'
request.db.doc.save(doc)
owner = doc['doc']['doc_id']
comm_id = str(doc['doc']['comm_id'])
tree = request.db.tree.find_one({'_id': owner})
# die(tree['tree'].keys())
tree['tree'][comm_id]['pre'] = 'false'
request.db.tree.save(tree)
return response_json(request, {"result":"ok"})
def check_user_rate(request, user):
user_rate = request.db.doc.find_one({'_id':user}, {'doc.rate':1})
if not user_rate or not 'rate' in user_rate['doc']: return True
user_rate = float(user_rate['doc']['rate'])
if user_rate > -5: return True
for res in request.db.doc.find({'doc_type':'des:comments', 'doc.user':user}, {'doc.date':1}).sort('doc.date', -1).limit(1):
last_time = res['doc']['date']
today = datetime.today()
        # check the most negative rating thresholds first, otherwise the stricter timeouts can never apply
        if user_rate < -15: delta = (today + timedelta(hours=-24)).strftime("%Y-%m-%d %H:%M:%S")
        elif user_rate < -10: delta = (today + timedelta(hours=-12)).strftime("%Y-%m-%d %H:%M:%S")
        else: delta = (today + timedelta(hours=-8)).strftime("%Y-%m-%d %H:%M:%S")
        if last_time < delta: return True
return False
return True
def form_tree(request, doc, is_comm=False):
tree = doc['tree']
revers = get_const_value(request, 'comm_reversed') == "true"
def get_children(parent_id):
new_tree = []
children = tree[parent_id]['children']
if revers and is_comm: children.reverse()
for branch_id in children:
branch = tree[branch_id]
new_tree.append( get_children(branch_id))
title = ct(request, tree[parent_id]['title'])
name = get_current_user_name(request, title)
ip = None
if 'ip' in tree[parent_id] and get_settings('is_comm_ip', False):
ip = tree[parent_id]['ip'][-6:]
new_branch = {
'id': parent_id,
'title': name if is_comm and parent_id != '_' and title.startswith('user:') else title ,
'descr': ct(request, tree[parent_id]['descr']) if 'descr' in tree[parent_id] else None,
'date': tree[parent_id]['date'] if 'date' in tree[parent_id] else None,
'name': name,
'ip': ip,
'our': tree[parent_id]['our'] if 'our' in tree[parent_id] else None,
'is_del': tree[parent_id]['is_del'] if 'is_del' in tree[parent_id] else None,
'link': tree[parent_id]['link'] if 'link' in tree[parent_id] else None,
'link2': tree[parent_id]['link2'] if 'link2' in tree[parent_id] else None,
'for_owner': tree[parent_id]['for_owner'] if 'for_owner' in tree[parent_id] else None,
'children': new_tree,
'pre': tree[parent_id]['pre'] if 'pre' in tree[parent_id] else None,
'vote': tree[parent_id]['vote'] if 'vote' in tree[parent_id] else {"score":0,"votes_count":0, "votes_count_plus":0,
"votes_count_minus":0, "voted":{}}
}
return new_branch
return get_children('_')
def get_current_user_name(request, user_id=None):
if not user_id:
user_id = get_current_user(request)
if user_id:
doc = request.db.doc.find_one({"_id":'user:'+user_id}, {"doc.name":1})
if doc: return doc['doc']['name']
else: return 'Guest'
|
from datetime import datetime
import math
import requests
import json
API_URL = 'http://api-ubervest.rhcloud.com'
API_DEVICE_BPM_URL = '%s/devices/%s/bpm' % (API_URL, '%s')
now = int(math.floor(datetime.now().timestamp() * 1000))
print(now)
for i in range(1000):
timestamp = 101 + i
bpm = 60 + (i % 30)
device_id = 12
r = requests.post(API_DEVICE_BPM_URL % (device_id), data={
'timestamp': timestamp,
'bpm': bpm,
'device': device_id
})
print(r)
|
import gym
from dm_control import suite
from dm_control.rl.control import flatten_observation, FLAT_OBSERVATION_KEY
from gym import spaces
from gym.envs.registration import register
import numpy as np
from gym_dmcontrol.viewer import Viewer
class DMControlEnv(gym.Env):
"""
Wrapper for dm_control suite task environments
"""
metadata = {'render.modes': ['human', 'rgb_array']}
def __init__(self, domain, task, task_kwargs=None, visualize_reward=False):
self._dmenv = suite.load(domain, task, task_kwargs, visualize_reward)
self._viewer = None
@property
def observation_space(self):
obs = flatten_observation(
self._dmenv.task.get_observation(self._dmenv.physics))[FLAT_OBSERVATION_KEY]
return spaces.Box(-np.inf, np.inf, shape=obs.shape)
@property
def action_space(self):
aspec = self._dmenv.action_spec()
return spaces.Box(aspec.minimum, aspec.maximum)
def seed(self, seed=None):
self._dmenv.task._random = np.random.RandomState(seed)
def step(self, action):
ts = self._dmenv.step(action)
obs = flatten_observation(ts.observation)[FLAT_OBSERVATION_KEY]
reward = ts.reward
done = ts.step_type.last()
return obs, reward, done, {}
def reset(self):
ts = self._dmenv.reset()
obs = flatten_observation(ts.observation)
return obs[FLAT_OBSERVATION_KEY]
def render(self, mode='human', close=False):
if close:
if self._viewer is not None:
self._viewer.close()
self._viewer = None
return
pixels = self._dmenv.physics.render(width=320, height=240)
if mode == 'rgb_array':
return pixels
elif mode == 'human':
self.viewer.update(pixels)
else:
raise NotImplementedError(mode)
@property
def viewer(self):
if self._viewer is None:
self._viewer = Viewer(width=320, height=240)
return self._viewer
for domain_name, task_name in suite.BENCHMARKING:
register(id='DMBench{}{}-v0'.format(domain_name.capitalize(), task_name.capitalize()),
entry_point='gym_dmcontrol:DMControlEnv',
kwargs={'domain': domain_name,
'task': task_name})
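# Usage sketch (added for illustration): the registration loop above produces ids of
# the form 'DMBench<Domain><Task>-v0'; 'DMBenchCartpoleSwingup-v0' assumes that
# ('cartpole', 'swingup') is in suite.BENCHMARKING and that this package is installed.
if __name__ == '__main__':
    env = gym.make('DMBenchCartpoleSwingup-v0')
    obs = env.reset()
    obs, reward, done, info = env.step(env.action_space.sample())
    print(obs.shape, reward, done)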
|
from django.test import LiveServerTestCase
from selenium import webdriver
class AdminFuncTests(LiveServerTestCase):
fixtures = ['functest_users.json']
def setUp(self):
self.browser = webdriver.Firefox()
self.browser.implicitly_wait(3)
def tearDown(self):
self.browser.quit()
def test_admin_site_up(self):
self.browser.get(self.live_server_url + '/admin/')
body = self.browser.find_element_by_tag_name('body')
self.assertIn('Django administration', body.text)
|
import os
class Settings():
env_list = [
{'name':'PORTAINER_ACCESSKEY', 'mandatory': True, 'default':''},
{'name':'PORTAINER_URL', 'mandatory': True, 'default':''},
{'name':'BACKUP_USERNAME', 'mandatory': True, 'default':''},
{'name':'BACKUP_HOST', 'mandatory': True, 'default':''},
{'name':'PORTAINER_VOLUME_MOUNT', 'mandatory': True, 'default':''},
{'name':'BACKUP_REMOTE_DIR', 'mandatory': True, 'default':''},
{'name':'LOGLEVEL', 'mandatory': False, 'default':'INFO'},
{'name':'BACKUP_STACK_EXCLUDE', 'mandatory': False, 'default':'-avzP'},
{'name':'RSYNC_OPTIONS', 'mandatory': False, 'default':'-avzP'},
{'name':'PORTAINER_EXPORT_PW', 'mandatory': False, 'default':''}
]
for env_item in env_list:
if os.environ.get(env_item['name']) is None and env_item['mandatory'] == True:
print("The enviroment variable '" + env_item['name'] + "' must be set. Aborting")
exit()
elif os.environ.get(env_item['name']) is None and not env_item['mandatory']:
vars()[env_item['name']] = env_item['default']
else:
vars()[env_item['name']] = os.environ[env_item['name']]
settings = Settings()
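# Usage sketch (added for illustration; the importing module name is hypothetical):
# the loop in the class body stores each value as a class attribute, so optional
# variables fall back to their defaults, e.g. settings.LOGLEVEL defaults to 'INFO'.
#     from settings import settings
#     print(settings.LOGLEVEL, settings.RSYNC_OPTIONS)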
|
# View more python tutorials on my Youtube and Youku channel!!!
# Youtube video tutorial: https://www.youtube.com/channel/UCdyjiB5H8Pu7aDTNVXTTpcg
# Youku video tutorial: http://i.youku.com/pythontutorial
# 9 - tick_visibility
"""
Please note, this script is for python3+.
If you are using python2+, please modify it accordingly.
Tutorial reference:
http://www.scipy-lectures.org/intro/matplotlib/matplotlib.html
"""
import matplotlib.pyplot as plt
import numpy as np
x = np.linspace(-3, 3, 50)
y = 0.1*x
plt.figure()
plt.plot(x, y, linewidth=10, zorder=1) # set zorder for ordering the plot in plt 2.0.2 or higher
plt.ylim(-2, 2)
ax = plt.gca()
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.spines['bottom'].set_position(('data', 0))
ax.yaxis.set_ticks_position('left')
ax.spines['left'].set_position(('data', 0))
for label in ax.get_xticklabels() + ax.get_yticklabels():
label.set_fontsize(12)
# set zorder for ordering the plot in plt 2.0.2 or higher
label.set_bbox(dict(facecolor='white', edgecolor='none', alpha=0.8, zorder=2))
plt.show()
|
# Copyright 2013 Leighton Pritchard. All rights reserved.
#
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
"""Classes and functions to visualise a KGML Pathway Map.
The KGML definition is as of release KGML v0.7.1
(http://www.kegg.jp/kegg/xml/docs/)
Classes:
"""
import os
import tempfile
from io import BytesIO
try:
from reportlab.lib import colors
from reportlab.pdfgen import canvas
except ImportError:
from Bio import MissingPythonDependencyError
raise MissingPythonDependencyError("Install reportlab if you want to use KGML_vis.")
try:
from PIL import Image
except ImportError:
from Bio import MissingPythonDependencyError
raise MissingPythonDependencyError("Install pillow if you want to use KGML_vis.")
from urllib.request import urlopen
from Bio.KEGG.KGML.KGML_pathway import Pathway
def darken(color, factor=0.7):
"""Return darkened color as a ReportLab RGB color.
Take a passed color and returns a Reportlab color that is darker by the
factor indicated in the parameter.
"""
newcol = color_to_reportlab(color)
for a in ["red", "green", "blue"]:
setattr(newcol, a, factor * getattr(newcol, a))
return newcol
def color_to_reportlab(color):
"""Return the passed color in Reportlab Color format.
We allow colors to be specified as hex values, tuples, or Reportlab Color
objects, and with or without an alpha channel. This function acts as a
Rosetta stone for conversion of those formats to a Reportlab Color
object, with alpha value.
Any other color specification is returned directly
"""
# Reportlab Color objects are in the format we want already
if isinstance(color, colors.Color):
return color
elif isinstance(color, str): # String implies hex color
if color.startswith("0x"): # Standardise to octothorpe
color.replace("0x", "#")
if len(color) == 7:
return colors.HexColor(color)
else:
try:
return colors.HexColor(color, hasAlpha=True)
except TypeError: # Catch pre-2.7 Reportlab
raise RuntimeError(
"Your reportlab seems to be too old, try 2.7 onwards"
)
elif isinstance(color, tuple): # Tuple implies RGB(alpha) tuple
return colors.Color(*color)
return color
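# Illustrative conversions (added for clarity), following the branches above:
#     color_to_reportlab("#ff0000")            -> colors.HexColor("#ff0000")
#     color_to_reportlab("0xff0000")           -> colors.HexColor("#ff0000")  (after "0x" -> "#")
#     color_to_reportlab((1.0, 0.0, 0.0, 0.5)) -> colors.Color(1.0, 0.0, 0.0, 0.5)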
def get_temp_imagefilename(url):
"""Return filename of temporary file containing downloaded image.
Create a new temporary file to hold the image file at the passed URL
and return the filename.
"""
img = urlopen(url).read()
im = Image.open(BytesIO(img))
# im.transpose(Image.FLIP_TOP_BOTTOM)
f = tempfile.NamedTemporaryFile(delete=False, suffix=".png")
fname = f.name
f.close()
im.save(fname, "PNG")
return fname
class KGMLCanvas:
"""Reportlab Canvas-based representation of a KGML pathway map."""
def __init__(
self,
pathway,
import_imagemap=False,
label_compounds=True,
label_orthologs=True,
label_reaction_entries=True,
label_maps=True,
show_maps=False,
fontname="Helvetica",
fontsize=6,
draw_relations=True,
show_orthologs=True,
show_compounds=True,
show_genes=True,
show_reaction_entries=True,
margins=(0.02, 0.02),
):
"""Initialize."""
self.pathway = pathway
self.show_maps = show_maps
self.show_orthologs = show_orthologs
self.show_compounds = show_compounds
self.show_genes = show_genes
self.show_reaction_entries = show_reaction_entries
self.label_compounds = label_compounds
self.label_orthologs = label_orthologs
self.label_reaction_entries = label_reaction_entries
self.label_maps = label_maps
self.fontname = fontname
self.fontsize = fontsize
self.draw_relations = draw_relations
self.non_reactant_transparency = 0.3
self.import_imagemap = import_imagemap # Import the map .png from URL
        # Fraction of the canvas that will be margin on either side in the
        # X and Y directions
self.margins = margins
def draw(self, filename):
"""Add the map elements to the drawing."""
# Instantiate the drawing, first
# size x_max, y_max for now - we can add margins, later
if self.import_imagemap:
# We're drawing directly on the image, so we set the canvas to the
# same size as the image
if os.path.isfile(self.pathway.image):
imfilename = self.pathway.image
else:
imfilename = get_temp_imagefilename(self.pathway.image)
im = Image.open(imfilename)
cwidth, cheight = im.size
else:
# No image, so we set the canvas size to accommodate visible
# elements
cwidth, cheight = (self.pathway.bounds[1][0], self.pathway.bounds[1][1])
# Instantiate canvas
self.drawing = canvas.Canvas(
filename,
bottomup=0,
pagesize=(
cwidth * (1 + 2 * self.margins[0]),
cheight * (1 + 2 * self.margins[1]),
),
)
self.drawing.setFont(self.fontname, self.fontsize)
# Transform the canvas to add the margins
self.drawing.translate(
self.margins[0] * self.pathway.bounds[1][0],
self.margins[1] * self.pathway.bounds[1][1],
)
# Add the map image, if required
if self.import_imagemap:
self.drawing.saveState()
self.drawing.scale(1, -1)
self.drawing.translate(0, -cheight)
self.drawing.drawImage(imfilename, 0, 0)
self.drawing.restoreState()
# Add the reactions, compounds and maps
# Maps go on first, to be overlaid by more information.
# By default, they're slightly transparent.
if self.show_maps:
self.__add_maps()
if self.show_reaction_entries:
self.__add_reaction_entries()
if self.show_orthologs:
self.__add_orthologs()
if self.show_compounds:
self.__add_compounds()
if self.show_genes:
self.__add_genes()
# TODO: complete draw_relations code
# if self.draw_relations:
# self.__add_relations()
# Write the pathway map to PDF
self.drawing.save()
def __add_maps(self):
"""Add maps to the drawing of the map (PRIVATE).
We do this first, as they're regional labels to be overlaid by
information. Also, we want to set the color to something subtle.
We're using Hex colors because that's what KGML uses, and
Reportlab doesn't mind.
"""
for m in self.pathway.maps:
for g in m.graphics:
self.drawing.setStrokeColor("#888888")
self.drawing.setFillColor("#DDDDDD")
self.__add_graphics(g)
if self.label_maps:
self.drawing.setFillColor("#888888")
self.__add_labels(g)
def __add_graphics(self, graphics):
"""Add the passed graphics object to the map (PRIVATE).
        Text is added after the graphics object, for sane Z-ordering.
"""
if graphics.type == "line":
p = self.drawing.beginPath()
x, y = graphics.coords[0]
# There are optional settings for lines that aren't necessarily
# part of the KGML DTD
if graphics.width is not None:
self.drawing.setLineWidth(graphics.width)
else:
self.drawing.setLineWidth(1)
p.moveTo(x, y)
for (x, y) in graphics.coords:
p.lineTo(x, y)
self.drawing.drawPath(p)
self.drawing.setLineWidth(1) # Return to default
# KGML defines the (x, y) coordinates as the centre of the circle/
# rectangle/roundrectangle, but Reportlab uses the co-ordinates of the
        # lower-left corner when drawing rectangles.
if graphics.type == "circle":
self.drawing.circle(
graphics.x, graphics.y, graphics.width * 0.5, stroke=1, fill=1
)
elif graphics.type == "roundrectangle":
self.drawing.roundRect(
graphics.x - graphics.width * 0.5,
graphics.y - graphics.height * 0.5,
graphics.width,
graphics.height,
min(graphics.width, graphics.height) * 0.1,
stroke=1,
fill=1,
)
elif graphics.type == "rectangle":
self.drawing.rect(
graphics.x - graphics.width * 0.5,
graphics.y - graphics.height * 0.5,
graphics.width,
graphics.height,
stroke=1,
fill=1,
)
def __add_labels(self, graphics):
"""Add labels for the passed graphics objects to the map (PRIVATE).
We don't check that the labels fit inside objects such as circles/
rectangles/roundrectangles.
"""
if graphics.type == "line":
# We use the midpoint of the line - sort of - we take the median
# line segment (list-wise, not in terms of length), and use the
# midpoint of that line. We could have other options here,
# maybe even parameterising it to a proportion of the total line
# length.
mid_idx = len(graphics.coords) * 0.5
if not int(mid_idx) == mid_idx:
idx1, idx2 = int(mid_idx - 0.5), int(mid_idx + 0.5)
else:
idx1, idx2 = int(mid_idx - 1), int(mid_idx)
x1, y1 = graphics.coords[idx1]
x2, y2 = graphics.coords[idx2]
x, y = 0.5 * (x1 + x2), 0.5 * (y1 + y2)
elif graphics.type == "circle":
x, y = graphics.x, graphics.y
elif graphics.type in ("rectangle", "roundrectangle"):
x, y = graphics.x, graphics.y
        # How big do we want the text, and how many characters?
if graphics._parent.type == "map":
text = graphics.name
self.drawing.setFont(self.fontname, self.fontsize + 2)
elif len(graphics.name) < 15:
text = graphics.name
else:
text = graphics.name[:12] + "..."
self.drawing.drawCentredString(x, y, text)
self.drawing.setFont(self.fontname, self.fontsize)
def __add_orthologs(self):
"""Add 'ortholog' Entry elements to the drawing of the map (PRIVATE).
In KGML, these are typically line objects, so we render them
before the compound circles to cover the unsightly ends/junctions.
"""
for ortholog in self.pathway.orthologs:
for g in ortholog.graphics:
self.drawing.setStrokeColor(color_to_reportlab(g.fgcolor))
self.drawing.setFillColor(color_to_reportlab(g.bgcolor))
self.__add_graphics(g)
if self.label_orthologs:
# We want the label color to be slightly darker
# (where possible), so it can be read
self.drawing.setFillColor(darken(g.fgcolor))
self.__add_labels(g)
def __add_reaction_entries(self):
"""Add Entry elements for Reactions to the map drawing (PRIVATE).
In KGML, these are typically line objects, so we render them
before the compound circles to cover the unsightly ends/junctions
"""
for reaction in self.pathway.reaction_entries:
for g in reaction.graphics:
self.drawing.setStrokeColor(color_to_reportlab(g.fgcolor))
self.drawing.setFillColor(color_to_reportlab(g.bgcolor))
self.__add_graphics(g)
if self.label_reaction_entries:
# We want the label color to be slightly darker
# (where possible), so it can be read
self.drawing.setFillColor(darken(g.fgcolor))
self.__add_labels(g)
def __add_compounds(self):
"""Add compound elements to the drawing of the map (PRIVATE)."""
for compound in self.pathway.compounds:
for g in compound.graphics:
# Modify transparency of compounds that don't participate
# in reactions
fillcolor = color_to_reportlab(g.bgcolor)
if not compound.is_reactant:
fillcolor.alpha *= self.non_reactant_transparency
self.drawing.setStrokeColor(color_to_reportlab(g.fgcolor))
self.drawing.setFillColor(fillcolor)
self.__add_graphics(g)
if self.label_compounds:
if not compound.is_reactant:
t = 0.3
else:
t = 1
self.drawing.setFillColor(colors.Color(0.2, 0.2, 0.2, t))
self.__add_labels(g)
def __add_genes(self):
"""Add gene elements to the drawing of the map (PRIVATE)."""
for gene in self.pathway.genes:
for g in gene.graphics:
self.drawing.setStrokeColor(color_to_reportlab(g.fgcolor))
self.drawing.setFillColor(color_to_reportlab(g.bgcolor))
self.__add_graphics(g)
                if self.label_compounds:  # gene labels reuse the compound-label setting
self.drawing.setFillColor(darken(g.fgcolor))
self.__add_labels(g)
def __add_relations(self):
"""Add relations to the map (PRIVATE).
This is tricky. There is no defined graphic in KGML for a
relation, and the corresponding entries are typically defined
as objects 'to be connected somehow'. KEGG uses KegSketch, which
is not public, and most third-party software draws straight line
arrows, with heads to indicate the appropriate direction
(at both ends for reversible reactions), using solid lines for
ECrel relation types, and dashed lines for maplink relation types.
The relation has:
- entry1: 'from' node
- entry2: 'to' node
- subtype: what the relation refers to
Typically we have entry1 = map/ortholog; entry2 = map/ortholog,
subtype = compound.
"""
# Dashed lines for maplinks, solid for everything else
for relation in list(self.pathway.relations):
if relation.type == "maplink":
self.drawing.setDash(6, 3)
else:
self.drawing.setDash()
for s in relation.subtypes:
subtype = self.pathway.entries[s[1]]
# Our aim is to draw an arrow from the entry1 object to the
# entry2 object, via the subtype object.
# 1) Entry 1 to subtype
self.__draw_arrow(relation.entry1, subtype)
# 2) subtype to Entry 2
self.__draw_arrow(subtype, relation.entry2)
def __draw_arrow(self, g_from, g_to):
"""Draw an arrow between given Entry objects (PRIVATE).
Draws an arrow from the g_from Entry object to the g_to
Entry object; both must have Graphics objects.
"""
        # Centre and bounds co-ordinates for the 'from' and 'to' objects
bounds_from, bounds_to = g_from.bounds, g_to.bounds
centre_from = (
0.5 * (bounds_from[0][0] + bounds_from[1][0]),
0.5 * (bounds_from[0][1] + bounds_from[1][1]),
)
centre_to = (
0.5 * (bounds_to[0][0] + bounds_to[1][0]),
0.5 * (bounds_to[0][1] + bounds_to[1][1]),
)
p = self.drawing.beginPath()
# print(True, g_from.name, g_to.name, bounds_to, bounds_from)
# If the 'from' and 'to' graphics are vertically-aligned, draw a line
# from the 'from' to the 'to' entity
if bounds_to[0][0] < centre_from[0] < bounds_to[1][0]:
# print(True, g_from.name, g_to.name, bounds_to, bounds_from)
if centre_to[1] > centre_from[1]: # to above from
p.moveTo(centre_from[0], bounds_from[1][1])
p.lineTo(centre_from[0], bounds_to[0][1])
# Draw arrow point - TODO
else: # to below from
p.moveTo(centre_from[0], bounds_from[0][1])
p.lineTo(centre_from[0], bounds_to[1][1])
# Draw arrow point - TODO
elif bounds_from[0][0] < centre_to[0] < bounds_from[1][0]:
# print(True, g_from.name, g_to.name, bounds_to, bounds_from)
if centre_to[1] > centre_from[1]: # to above from
p.moveTo(centre_to[0], bounds_from[1][1])
p.lineTo(centre_to[0], bounds_to[0][1])
# Draw arrow point - TODO
else: # to below from
p.moveTo(centre_to[0], bounds_from[0][1])
p.lineTo(centre_to[0], bounds_to[1][1])
# Draw arrow point - TODO
self.drawing.drawPath(p) # Draw arrow shaft
# print(g_from)
# print(bounds_from)
# print(g_to)
# print(bounds_to)
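# Minimal usage sketch, kept as comments so the module's behaviour is unchanged.
# It assumes a pathway already parsed from KGML, e.g. with
# Bio.KEGG.KGML.KGML_parser.read() (not imported here), and a hypothetical
# input file name:
#
#   pathway = KGML_parser.read(open("ko00010.xml"))
#   canvas = KGMLCanvas(pathway, import_imagemap=True)
#   canvas.draw("ko00010.pdf")   # renders the map to PDF via reportlab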
|
from CRABClient.UserUtilities import config, getUsernameFromSiteDB
config = config()
config.General.requestName = 'w01_hijing8tev_gensimtreeproduction'
config.General.workArea = 'project_w01_hijing8tev_gensimtreeproduction'
config.General.transferOutputs = True
config.General.transferLogs = False
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'ConfFile_cfg.py'
config.Data.inputDataset = '/HIJING_pPb_8160_DataBS/pPb816Summer16DR-MB_80X_mcRun2_pA_v4-v2/AODSIM'
config.Data.inputDBS = 'global'
#config.Data.splitting = 'FileBased'
config.Data.splitting = 'Automatic'
#config.Data.unitsPerJob = 1
config.Data.outLFNDirBase = '/store/user/tuos/loops/cumulants/hijing/w01_hijing8tev_gensimtreeproduction'
config.Data.publication = False
config.Data.outputDatasetTag = 'w01_hijing8tev_gensimtreeproduction'
config.Site.storageSite = 'T2_US_Vanderbilt'
|
"""Database URL parser."""
from typing import Any, NewType
from urllib import parse as urlparse
from .utils import is_truthy
DBConfig = NewType('DBConfig', dict[str, Any])
DBConfig.__qualname__ = 'yaenv.db.DBConfig'
# Supported schemes.
SCHEMES: dict[str, str] = {
'mysql': 'django.db.backends.mysql',
'oracle': 'django.db.backends.oracle',
'pgsql': 'django.db.backends.postgresql',
'sqlite': 'django.db.backends.sqlite3',
}
# Scheme aliases.
SCHEMES['postgres'] = SCHEMES['pgsql']
SCHEMES['postgresql'] = SCHEMES['pgsql']
SCHEMES['sqlite3'] = SCHEMES['sqlite']
# Register database schemes in URLs.
urlparse.uses_netloc += list(SCHEMES)
def add_scheme(scheme: str, backend: str) -> None:
"""
Extend the dictionary of supported schemes.
Parameters
----------
    scheme : str
The scheme of the database.
backend : str
The backend of the database.
Examples
--------
>>> add_scheme('mysql-connector', 'mysql.connector.django')
"""
SCHEMES[scheme] = backend
urlparse.uses_netloc.append(scheme)
def parse(url: str) -> DBConfig:
"""
Parse a database URL.
Parameters
----------
url : str
The database URL to be parsed.
Returns
-------
DBConfig
A dictionary that can be used in
:dj:`django.settings.DATABASES <databases>`.
Examples
--------
>>> parse('mysql://user:pass@127.0.0.1:3306/django')
{
'ENGINE': 'django.db.backends.mysql',
'NAME': 'django',
'USER': 'user',
'PASSWORD': 'pass',
'HOST': '127.0.0.1',
'PORT': '3306',
'OPTIONS': {}
}
"""
# Special case: https://www.sqlite.org/inmemorydb.html.
if url == 'sqlite://:memory:':
return DBConfig({
'ENGINE': SCHEMES['sqlite'],
'NAME': ':memory:',
})
# Parse the given URL.
uri = urlparse.urlparse(url)
# Update with environment configuration.
config = DBConfig({
'ENGINE': SCHEMES[uri.scheme],
'NAME': urlparse.unquote(uri.path[1:] or ''),
'USER': urlparse.unquote(uri.username or ''),
'PASSWORD': urlparse.unquote(uri.password or ''),
'HOST': uri.hostname or '',
'PORT': str(uri.port or ''),
})
# Pass the query string into OPTIONS.
options: dict[str, Any] = {}
qs = urlparse.parse_qs(uri.query)
for key, values in qs.items():
if key == 'isolation':
options['isolation_level'] = {
'uncommitted': 4,
'serializable': 3,
'repeatable': 2,
'committed': 1,
'autocommit': 0,
}.get(values[-1], None)
continue
if key == 'search_path':
options['options'] = f'-c search_path={values[-1]}'
continue
if key in ('autocommit', 'atomic_requests'):
config[key.upper()] = is_truthy(values[-1])
continue
if key == 'conn_max_age':
config['CONN_MAX_AGE'] = int(values[-1])
continue
options[key] = values[-1]
if options:
config['OPTIONS'] = options
return config
__all__ = ['DBConfig', 'add_scheme', 'parse']
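# Usage sketch (the parse() docstring above shows the full MySQL example):
#   parse('sqlite://:memory:')
#       # -> {'ENGINE': 'django.db.backends.sqlite3', 'NAME': ':memory:'}
#   parse('pgsql://user:pass@localhost/mydb?conn_max_age=600')
#       # -> PostgreSQL config with CONN_MAX_AGE=600 taken from the query string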
|
import os
import sys
import simplejson as json
import logging
from io import open
from dogpile.cache import make_region
from swag_client.backend import SWAGManager
from swag_client.util import append_item, remove_item
logger = logging.getLogger(__name__)
try:
from json.errors import JSONDecodeError
except ImportError:
JSONDecodeError = ValueError
file_region = make_region()
def load_file(data_file):
"""Tries to load JSON from data file."""
try:
with open(data_file, 'r', encoding='utf-8') as f:
return json.loads(f.read())
except JSONDecodeError as e:
return []
def save_file(data_file, data, dry_run=None):
"""Writes JSON data to data file."""
if dry_run:
return
with open(data_file, 'w', encoding='utf-8') as f:
if sys.version_info > (3, 0):
f.write(json.dumps(data))
else:
f.write(json.dumps(data).decode('utf-8'))
class FileSWAGManager(SWAGManager):
def __init__(self, namespace, **kwargs):
"""Create a file based SWAG backend."""
self.namespace = namespace
self.version = kwargs['schema_version']
if not file_region.is_configured:
file_region.configure(
'dogpile.cache.memory',
expiration_time=kwargs['cache_expires']
)
if not kwargs.get('data_file'):
self.data_file = os.path.join(kwargs['data_dir'], self.namespace + '.json')
else:
self.data_file = kwargs['data_file']
if not os.path.isfile(self.data_file):
logger.warning(
'Backend file does not exist, creating... Path: {data_file}'.format(data_file=self.data_file)
)
save_file(self.data_file, [])
def create(self, item, dry_run=None):
"""Creates a new item in file."""
logger.debug('Creating new item. Item: {item} Path: {data_file}'.format(
item=item,
data_file=self.data_file
))
items = load_file(self.data_file)
items = append_item(self.namespace, self.version, item, items)
save_file(self.data_file, items, dry_run=dry_run)
return item
def delete(self, item, dry_run=None):
"""Deletes item in file."""
logger.debug('Deleting item. Item: {item} Path: {data_file}'.format(
item=item,
data_file=self.data_file
))
items = load_file(self.data_file)
items = remove_item(self.namespace, self.version, item, items)
save_file(self.data_file, items, dry_run=dry_run)
return item
def update(self, item, dry_run=None):
"""Updates item info in file."""
logger.debug('Updating item. Item: {item} Path: {data_file}'.format(
item=item,
data_file=self.data_file
))
self.delete(item, dry_run=dry_run)
return self.create(item, dry_run=dry_run)
@file_region.cache_on_arguments()
def get_all(self):
"""Gets all items in file."""
logger.debug('Fetching items. Path: {data_file}'.format(
data_file=self.data_file
))
return load_file(self.data_file)
def health_check(self):
"""Checks to make sure the file is there."""
logger.debug('Health Check on file for: {namespace}'.format(
namespace=self.namespace
))
return os.path.isfile(self.data_file)
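# Usage sketch, assuming the keyword arguments read in __init__ above
# (schema_version, cache_expires and data_dir or data_file); the item shape
# is illustrative and depends on append_item/remove_item:
#
#   mgr = FileSWAGManager('accounts', schema_version=2,
#                         cache_expires=60, data_dir='/tmp/swag')
#   mgr.create({'id': 'example'})   # appends the item to /tmp/swag/accounts.json
#   mgr.get_all()                   # returns the (memory-cached) list of items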
|
from sqlalchemy.dialects import mssql
from sqlalchemy.engine import default
from sqlalchemy.exc import CompileError
from sqlalchemy.sql import and_
from sqlalchemy.sql import bindparam
from sqlalchemy.sql import column
from sqlalchemy.sql import exists
from sqlalchemy.sql import func
from sqlalchemy.sql import literal
from sqlalchemy.sql import select
from sqlalchemy.sql import table
from sqlalchemy.sql.elements import quoted_name
from sqlalchemy.sql.visitors import cloned_traverse
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
class CTETest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = "default_enhanced"
def test_nonrecursive(self):
orders = table(
"orders",
column("region"),
column("amount"),
column("product"),
column("quantity"),
)
regional_sales = (
select(
[
orders.c.region,
func.sum(orders.c.amount).label("total_sales"),
]
)
.group_by(orders.c.region)
.cte("regional_sales")
)
top_regions = (
select([regional_sales.c.region])
.where(
regional_sales.c.total_sales
> select([func.sum(regional_sales.c.total_sales) / 10])
)
.cte("top_regions")
)
s = (
select(
[
orders.c.region,
orders.c.product,
func.sum(orders.c.quantity).label("product_units"),
func.sum(orders.c.amount).label("product_sales"),
]
)
.where(orders.c.region.in_(select([top_regions.c.region])))
.group_by(orders.c.region, orders.c.product)
)
# needs to render regional_sales first as top_regions
# refers to it
self.assert_compile(
s,
"WITH regional_sales AS (SELECT orders.region AS region, "
"sum(orders.amount) AS total_sales FROM orders "
"GROUP BY orders.region), "
"top_regions AS (SELECT "
"regional_sales.region AS region FROM regional_sales "
"WHERE regional_sales.total_sales > "
"(SELECT sum(regional_sales.total_sales) / :sum_1 AS "
"anon_1 FROM regional_sales)) "
"SELECT orders.region, orders.product, "
"sum(orders.quantity) AS product_units, "
"sum(orders.amount) AS product_sales "
"FROM orders WHERE orders.region "
"IN (SELECT top_regions.region FROM top_regions) "
"GROUP BY orders.region, orders.product",
)
def test_recursive(self):
parts = table(
"parts", column("part"), column("sub_part"), column("quantity")
)
included_parts = (
select([parts.c.sub_part, parts.c.part, parts.c.quantity])
.where(parts.c.part == "our part")
.cte(recursive=True)
)
incl_alias = included_parts.alias()
parts_alias = parts.alias()
included_parts = included_parts.union(
select(
[
parts_alias.c.sub_part,
parts_alias.c.part,
parts_alias.c.quantity,
]
).where(parts_alias.c.part == incl_alias.c.sub_part)
)
s = (
select(
[
included_parts.c.sub_part,
func.sum(included_parts.c.quantity).label(
"total_quantity"
),
]
)
.select_from(
included_parts.join(
parts, included_parts.c.part == parts.c.part
)
)
.group_by(included_parts.c.sub_part)
)
self.assert_compile(
s,
"WITH RECURSIVE anon_1(sub_part, part, quantity) "
"AS (SELECT parts.sub_part AS sub_part, parts.part "
"AS part, parts.quantity AS quantity FROM parts "
"WHERE parts.part = :part_1 UNION "
"SELECT parts_1.sub_part AS sub_part, "
"parts_1.part AS part, parts_1.quantity "
"AS quantity FROM parts AS parts_1, anon_1 AS anon_2 "
"WHERE parts_1.part = anon_2.sub_part) "
"SELECT anon_1.sub_part, "
"sum(anon_1.quantity) AS total_quantity FROM anon_1 "
"JOIN parts ON anon_1.part = parts.part "
"GROUP BY anon_1.sub_part",
)
# quick check that the "WITH RECURSIVE" varies per
# dialect
self.assert_compile(
s,
"WITH anon_1(sub_part, part, quantity) "
"AS (SELECT parts.sub_part AS sub_part, parts.part "
"AS part, parts.quantity AS quantity FROM parts "
"WHERE parts.part = :part_1 UNION "
"SELECT parts_1.sub_part AS sub_part, "
"parts_1.part AS part, parts_1.quantity "
"AS quantity FROM parts AS parts_1, anon_1 AS anon_2 "
"WHERE parts_1.part = anon_2.sub_part) "
"SELECT anon_1.sub_part, "
"sum(anon_1.quantity) AS total_quantity FROM anon_1 "
"JOIN parts ON anon_1.part = parts.part "
"GROUP BY anon_1.sub_part",
dialect=mssql.dialect(),
)
def test_recursive_inner_cte_unioned_to_alias(self):
parts = table(
"parts", column("part"), column("sub_part"), column("quantity")
)
included_parts = (
select([parts.c.sub_part, parts.c.part, parts.c.quantity])
.where(parts.c.part == "our part")
.cte(recursive=True)
)
incl_alias = included_parts.alias("incl")
parts_alias = parts.alias()
included_parts = incl_alias.union(
select(
[
parts_alias.c.sub_part,
parts_alias.c.part,
parts_alias.c.quantity,
]
).where(parts_alias.c.part == incl_alias.c.sub_part)
)
s = (
select(
[
included_parts.c.sub_part,
func.sum(included_parts.c.quantity).label(
"total_quantity"
),
]
)
.select_from(
included_parts.join(
parts, included_parts.c.part == parts.c.part
)
)
.group_by(included_parts.c.sub_part)
)
self.assert_compile(
s,
"WITH RECURSIVE incl(sub_part, part, quantity) "
"AS (SELECT parts.sub_part AS sub_part, parts.part "
"AS part, parts.quantity AS quantity FROM parts "
"WHERE parts.part = :part_1 UNION "
"SELECT parts_1.sub_part AS sub_part, "
"parts_1.part AS part, parts_1.quantity "
"AS quantity FROM parts AS parts_1, incl "
"WHERE parts_1.part = incl.sub_part) "
"SELECT incl.sub_part, "
"sum(incl.quantity) AS total_quantity FROM incl "
"JOIN parts ON incl.part = parts.part "
"GROUP BY incl.sub_part",
)
def test_recursive_union_no_alias_one(self):
s1 = select([literal(0).label("x")])
cte = s1.cte(name="cte", recursive=True)
cte = cte.union_all(select([cte.c.x + 1]).where(cte.c.x < 10))
s2 = select([cte])
self.assert_compile(
s2,
"WITH RECURSIVE cte(x) AS "
"(SELECT :param_1 AS x UNION ALL "
"SELECT cte.x + :x_1 AS anon_1 "
"FROM cte WHERE cte.x < :x_2) "
"SELECT cte.x FROM cte",
)
def test_recursive_union_alias_one(self):
s1 = select([literal(0).label("x")])
cte = s1.cte(name="cte", recursive=True)
cte = cte.union_all(select([cte.c.x + 1]).where(cte.c.x < 10)).alias(
"cr1"
)
s2 = select([cte])
self.assert_compile(
s2,
"WITH RECURSIVE cte(x) AS "
"(SELECT :param_1 AS x UNION ALL "
"SELECT cte.x + :x_1 AS anon_1 "
"FROM cte WHERE cte.x < :x_2) "
"SELECT cr1.x FROM cte AS cr1",
)
def test_recursive_union_no_alias_two(self):
"""
pg's example::
WITH RECURSIVE t(n) AS (
VALUES (1)
UNION ALL
SELECT n+1 FROM t WHERE n < 100
)
SELECT sum(n) FROM t;
"""
# I know, this is the PG VALUES keyword,
# we're cheating here. also yes we need the SELECT,
# sorry PG.
t = select([func.values(1).label("n")]).cte("t", recursive=True)
t = t.union_all(select([t.c.n + 1]).where(t.c.n < 100))
s = select([func.sum(t.c.n)])
self.assert_compile(
s,
"WITH RECURSIVE t(n) AS "
"(SELECT values(:values_1) AS n "
"UNION ALL SELECT t.n + :n_1 AS anon_1 "
"FROM t "
"WHERE t.n < :n_2) "
"SELECT sum(t.n) AS sum_1 FROM t",
)
def test_recursive_union_alias_two(self):
"""
"""
# I know, this is the PG VALUES keyword,
# we're cheating here. also yes we need the SELECT,
# sorry PG.
t = select([func.values(1).label("n")]).cte("t", recursive=True)
t = t.union_all(select([t.c.n + 1]).where(t.c.n < 100)).alias("ta")
s = select([func.sum(t.c.n)])
self.assert_compile(
s,
"WITH RECURSIVE t(n) AS "
"(SELECT values(:values_1) AS n "
"UNION ALL SELECT t.n + :n_1 AS anon_1 "
"FROM t "
"WHERE t.n < :n_2) "
"SELECT sum(ta.n) AS sum_1 FROM t AS ta",
)
def test_recursive_union_no_alias_three(self):
# like test one, but let's refer to the CTE
# in a sibling CTE.
s1 = select([literal(0).label("x")])
cte = s1.cte(name="cte", recursive=True)
# can't do it here...
# bar = select([cte]).cte('bar')
cte = cte.union_all(select([cte.c.x + 1]).where(cte.c.x < 10))
bar = select([cte]).cte("bar")
s2 = select([cte, bar])
self.assert_compile(
s2,
"WITH RECURSIVE cte(x) AS "
"(SELECT :param_1 AS x UNION ALL "
"SELECT cte.x + :x_1 AS anon_1 "
"FROM cte WHERE cte.x < :x_2), "
"bar AS (SELECT cte.x AS x FROM cte) "
"SELECT cte.x, bar.x FROM cte, bar",
)
def test_recursive_union_alias_three(self):
# like test one, but let's refer to the CTE
# in a sibling CTE.
s1 = select([literal(0).label("x")])
cte = s1.cte(name="cte", recursive=True)
# can't do it here...
# bar = select([cte]).cte('bar')
cte = cte.union_all(select([cte.c.x + 1]).where(cte.c.x < 10)).alias(
"cs1"
)
bar = select([cte]).cte("bar").alias("cs2")
s2 = select([cte, bar])
self.assert_compile(
s2,
"WITH RECURSIVE cte(x) AS "
"(SELECT :param_1 AS x UNION ALL "
"SELECT cte.x + :x_1 AS anon_1 "
"FROM cte WHERE cte.x < :x_2), "
"bar AS (SELECT cs1.x AS x FROM cte AS cs1) "
"SELECT cs1.x, cs2.x FROM cte AS cs1, bar AS cs2",
)
def test_recursive_union_no_alias_four(self):
# like test one and three, but let's refer
# previous version of "cte". here we test
# how the compiler resolves multiple instances
# of "cte".
s1 = select([literal(0).label("x")])
cte = s1.cte(name="cte", recursive=True)
bar = select([cte]).cte("bar")
cte = cte.union_all(select([cte.c.x + 1]).where(cte.c.x < 10))
# outer cte rendered first, then bar, which
# includes "inner" cte
s2 = select([cte, bar])
self.assert_compile(
s2,
"WITH RECURSIVE cte(x) AS "
"(SELECT :param_1 AS x UNION ALL "
"SELECT cte.x + :x_1 AS anon_1 "
"FROM cte WHERE cte.x < :x_2), "
"bar AS (SELECT cte.x AS x FROM cte) "
"SELECT cte.x, bar.x FROM cte, bar",
)
# bar rendered, only includes "inner" cte,
# "outer" cte isn't present
s2 = select([bar])
self.assert_compile(
s2,
"WITH RECURSIVE cte(x) AS "
"(SELECT :param_1 AS x), "
"bar AS (SELECT cte.x AS x FROM cte) "
"SELECT bar.x FROM bar",
)
# bar rendered, but then the "outer"
# cte is rendered.
s2 = select([bar, cte])
self.assert_compile(
s2,
"WITH RECURSIVE bar AS (SELECT cte.x AS x FROM cte), "
"cte(x) AS "
"(SELECT :param_1 AS x UNION ALL "
"SELECT cte.x + :x_1 AS anon_1 "
"FROM cte WHERE cte.x < :x_2) "
"SELECT bar.x, cte.x FROM bar, cte",
)
def test_recursive_union_alias_four(self):
# like test one and three, but let's refer
# previous version of "cte". here we test
# how the compiler resolves multiple instances
# of "cte".
s1 = select([literal(0).label("x")])
cte = s1.cte(name="cte", recursive=True)
bar = select([cte]).cte("bar").alias("cs1")
cte = cte.union_all(select([cte.c.x + 1]).where(cte.c.x < 10)).alias(
"cs2"
)
# outer cte rendered first, then bar, which
# includes "inner" cte
s2 = select([cte, bar])
self.assert_compile(
s2,
"WITH RECURSIVE cte(x) AS "
"(SELECT :param_1 AS x UNION ALL "
"SELECT cte.x + :x_1 AS anon_1 "
"FROM cte WHERE cte.x < :x_2), "
"bar AS (SELECT cte.x AS x FROM cte) "
"SELECT cs2.x, cs1.x FROM cte AS cs2, bar AS cs1",
)
# bar rendered, only includes "inner" cte,
# "outer" cte isn't present
s2 = select([bar])
self.assert_compile(
s2,
"WITH RECURSIVE cte(x) AS "
"(SELECT :param_1 AS x), "
"bar AS (SELECT cte.x AS x FROM cte) "
"SELECT cs1.x FROM bar AS cs1",
)
# bar rendered, but then the "outer"
# cte is rendered.
s2 = select([bar, cte])
self.assert_compile(
s2,
"WITH RECURSIVE bar AS (SELECT cte.x AS x FROM cte), "
"cte(x) AS "
"(SELECT :param_1 AS x UNION ALL "
"SELECT cte.x + :x_1 AS anon_1 "
"FROM cte WHERE cte.x < :x_2) "
"SELECT cs1.x, cs2.x FROM bar AS cs1, cte AS cs2",
)
def test_conflicting_names(self):
"""test a flat out name conflict."""
s1 = select([1])
c1 = s1.cte(name="cte1", recursive=True)
s2 = select([1])
c2 = s2.cte(name="cte1", recursive=True)
s = select([c1, c2])
assert_raises_message(
CompileError,
"Multiple, unrelated CTEs found " "with the same name: 'cte1'",
s.compile,
)
def test_union(self):
orders = table("orders", column("region"), column("amount"))
regional_sales = select([orders.c.region, orders.c.amount]).cte(
"regional_sales"
)
s = select([regional_sales.c.region]).where(
regional_sales.c.amount > 500
)
self.assert_compile(
s,
"WITH regional_sales AS "
"(SELECT orders.region AS region, "
"orders.amount AS amount FROM orders) "
"SELECT regional_sales.region "
"FROM regional_sales WHERE "
"regional_sales.amount > :amount_1",
)
s = s.union_all(
select([regional_sales.c.region]).where(
regional_sales.c.amount < 300
)
)
self.assert_compile(
s,
"WITH regional_sales AS "
"(SELECT orders.region AS region, "
"orders.amount AS amount FROM orders) "
"SELECT regional_sales.region FROM regional_sales "
"WHERE regional_sales.amount > :amount_1 "
"UNION ALL SELECT regional_sales.region "
"FROM regional_sales WHERE "
"regional_sales.amount < :amount_2",
)
def test_union_cte_aliases(self):
orders = table("orders", column("region"), column("amount"))
regional_sales = (
select([orders.c.region, orders.c.amount])
.cte("regional_sales")
.alias("rs")
)
s = select([regional_sales.c.region]).where(
regional_sales.c.amount > 500
)
self.assert_compile(
s,
"WITH regional_sales AS "
"(SELECT orders.region AS region, "
"orders.amount AS amount FROM orders) "
"SELECT rs.region "
"FROM regional_sales AS rs WHERE "
"rs.amount > :amount_1",
)
s = s.union_all(
select([regional_sales.c.region]).where(
regional_sales.c.amount < 300
)
)
self.assert_compile(
s,
"WITH regional_sales AS "
"(SELECT orders.region AS region, "
"orders.amount AS amount FROM orders) "
"SELECT rs.region FROM regional_sales AS rs "
"WHERE rs.amount > :amount_1 "
"UNION ALL SELECT rs.region "
"FROM regional_sales AS rs WHERE "
"rs.amount < :amount_2",
)
cloned = cloned_traverse(s, {}, {})
self.assert_compile(
cloned,
"WITH regional_sales AS "
"(SELECT orders.region AS region, "
"orders.amount AS amount FROM orders) "
"SELECT rs.region FROM regional_sales AS rs "
"WHERE rs.amount > :amount_1 "
"UNION ALL SELECT rs.region "
"FROM regional_sales AS rs WHERE "
"rs.amount < :amount_2",
)
def test_cloned_alias(self):
entity = table(
"entity", column("id"), column("employer_id"), column("name")
)
tag = table("tag", column("tag"), column("entity_id"))
tags = (
select([tag.c.entity_id, func.array_agg(tag.c.tag).label("tags")])
.group_by(tag.c.entity_id)
.cte("unaliased_tags")
)
entity_tags = tags.alias(name="entity_tags")
employer_tags = tags.alias(name="employer_tags")
q = (
select([entity.c.name])
.select_from(
entity.outerjoin(
entity_tags, tags.c.entity_id == entity.c.id
).outerjoin(
employer_tags, tags.c.entity_id == entity.c.employer_id
)
)
.where(entity_tags.c.tags.op("@>")(bindparam("tags")))
.where(employer_tags.c.tags.op("@>")(bindparam("tags")))
)
self.assert_compile(
q,
"WITH unaliased_tags AS "
"(SELECT tag.entity_id AS entity_id, array_agg(tag.tag) AS tags "
"FROM tag GROUP BY tag.entity_id)"
" SELECT entity.name "
"FROM entity "
"LEFT OUTER JOIN unaliased_tags AS entity_tags ON "
"unaliased_tags.entity_id = entity.id "
"LEFT OUTER JOIN unaliased_tags AS employer_tags ON "
"unaliased_tags.entity_id = entity.employer_id "
"WHERE (entity_tags.tags @> :tags) AND "
"(employer_tags.tags @> :tags)",
)
cloned = q.params(tags=["tag1", "tag2"])
self.assert_compile(
cloned,
"WITH unaliased_tags AS "
"(SELECT tag.entity_id AS entity_id, array_agg(tag.tag) AS tags "
"FROM tag GROUP BY tag.entity_id)"
" SELECT entity.name "
"FROM entity "
"LEFT OUTER JOIN unaliased_tags AS entity_tags ON "
"unaliased_tags.entity_id = entity.id "
"LEFT OUTER JOIN unaliased_tags AS employer_tags ON "
"unaliased_tags.entity_id = entity.employer_id "
"WHERE (entity_tags.tags @> :tags) AND "
"(employer_tags.tags @> :tags)",
)
def test_reserved_quote(self):
orders = table("orders", column("order"))
s = select([orders.c.order]).cte("regional_sales", recursive=True)
s = select([s.c.order])
self.assert_compile(
s,
'WITH RECURSIVE regional_sales("order") AS '
'(SELECT orders."order" AS "order" '
"FROM orders)"
' SELECT regional_sales."order" '
"FROM regional_sales",
)
def test_multi_subq_quote(self):
cte = select([literal(1).label("id")]).cte(name="CTE")
s1 = select([cte.c.id]).alias()
s2 = select([cte.c.id]).alias()
s = select([s1, s2])
self.assert_compile(
s,
'WITH "CTE" AS (SELECT :param_1 AS id) '
"SELECT anon_1.id, anon_2.id FROM "
'(SELECT "CTE".id AS id FROM "CTE") AS anon_1, '
'(SELECT "CTE".id AS id FROM "CTE") AS anon_2',
)
def test_multi_subq_alias(self):
cte = select([literal(1).label("id")]).cte(name="cte1").alias("aa")
s1 = select([cte.c.id]).alias()
s2 = select([cte.c.id]).alias()
s = select([s1, s2])
self.assert_compile(
s,
"WITH cte1 AS (SELECT :param_1 AS id) "
"SELECT anon_1.id, anon_2.id FROM "
"(SELECT aa.id AS id FROM cte1 AS aa) AS anon_1, "
"(SELECT aa.id AS id FROM cte1 AS aa) AS anon_2",
)
def test_cte_refers_to_aliased_cte_twice(self):
# test issue #4204
a = table("a", column("id"))
b = table("b", column("id"), column("fid"))
c = table("c", column("id"), column("fid"))
cte1 = select([a.c.id]).cte(name="cte1")
aa = cte1.alias("aa")
cte2 = (
select([b.c.id])
.select_from(b.join(aa, b.c.fid == aa.c.id))
.cte(name="cte2")
)
cte3 = (
select([c.c.id])
.select_from(c.join(aa, c.c.fid == aa.c.id))
.cte(name="cte3")
)
stmt = select([cte3.c.id, cte2.c.id]).select_from(
cte2.join(cte3, cte2.c.id == cte3.c.id)
)
self.assert_compile(
stmt,
"WITH cte1 AS (SELECT a.id AS id FROM a), "
"cte2 AS (SELECT b.id AS id FROM b "
"JOIN cte1 AS aa ON b.fid = aa.id), "
"cte3 AS (SELECT c.id AS id FROM c "
"JOIN cte1 AS aa ON c.fid = aa.id) "
"SELECT cte3.id, cte2.id FROM cte2 JOIN cte3 ON cte2.id = cte3.id",
)
def test_named_alias_no_quote(self):
cte = select([literal(1).label("id")]).cte(name="CTE")
s1 = select([cte.c.id]).alias(name="no_quotes")
s = select([s1])
self.assert_compile(
s,
'WITH "CTE" AS (SELECT :param_1 AS id) '
"SELECT no_quotes.id FROM "
'(SELECT "CTE".id AS id FROM "CTE") AS no_quotes',
)
def test_named_alias_quote(self):
cte = select([literal(1).label("id")]).cte(name="CTE")
s1 = select([cte.c.id]).alias(name="Quotes Required")
s = select([s1])
self.assert_compile(
s,
'WITH "CTE" AS (SELECT :param_1 AS id) '
'SELECT "Quotes Required".id FROM '
'(SELECT "CTE".id AS id FROM "CTE") AS "Quotes Required"',
)
def test_named_alias_disable_quote(self):
cte = select([literal(1).label("id")]).cte(
name=quoted_name("CTE", quote=False)
)
s1 = select([cte.c.id]).alias(
name=quoted_name("DontQuote", quote=False)
)
s = select([s1])
self.assert_compile(
s,
"WITH CTE AS (SELECT :param_1 AS id) "
"SELECT DontQuote.id FROM "
"(SELECT CTE.id AS id FROM CTE) AS DontQuote",
)
def test_positional_binds(self):
orders = table("orders", column("order"))
s = select([orders.c.order, literal("x")]).cte("regional_sales")
s = select([s.c.order, literal("y")])
dialect = default.DefaultDialect()
dialect.positional = True
dialect.paramstyle = "numeric"
self.assert_compile(
s,
'WITH regional_sales AS (SELECT orders."order" '
'AS "order", :1 AS anon_2 FROM orders) SELECT '
'regional_sales."order", :2 AS anon_1 FROM regional_sales',
checkpositional=("x", "y"),
dialect=dialect,
)
self.assert_compile(
s.union(s),
'WITH regional_sales AS (SELECT orders."order" '
'AS "order", :1 AS anon_2 FROM orders) SELECT '
'regional_sales."order", :2 AS anon_1 FROM regional_sales '
'UNION SELECT regional_sales."order", :3 AS anon_1 '
"FROM regional_sales",
checkpositional=("x", "y", "y"),
dialect=dialect,
)
s = (
select([orders.c.order])
.where(orders.c.order == "x")
.cte("regional_sales")
)
s = select([s.c.order]).where(s.c.order == "y")
self.assert_compile(
s,
'WITH regional_sales AS (SELECT orders."order" AS '
'"order" FROM orders WHERE orders."order" = :1) '
'SELECT regional_sales."order" FROM regional_sales '
'WHERE regional_sales."order" = :2',
checkpositional=("x", "y"),
dialect=dialect,
)
def test_positional_binds_2(self):
orders = table("orders", column("order"))
s = select([orders.c.order, literal("x")]).cte("regional_sales")
s = select([s.c.order, literal("y")])
dialect = default.DefaultDialect()
dialect.positional = True
dialect.paramstyle = "numeric"
s1 = (
select([orders.c.order])
.where(orders.c.order == "x")
.cte("regional_sales_1")
)
s1a = s1.alias()
s2 = (
select(
[
orders.c.order == "y",
s1a.c.order,
orders.c.order,
s1.c.order,
]
)
.where(orders.c.order == "z")
.cte("regional_sales_2")
)
s3 = select([s2])
self.assert_compile(
s3,
'WITH regional_sales_1 AS (SELECT orders."order" AS "order" '
'FROM orders WHERE orders."order" = :1), regional_sales_2 AS '
'(SELECT orders."order" = :2 AS anon_1, '
'anon_2."order" AS "order", '
'orders."order" AS "order", '
'regional_sales_1."order" AS "order" FROM orders, '
"regional_sales_1 "
"AS anon_2, regional_sales_1 "
'WHERE orders."order" = :3) SELECT regional_sales_2.anon_1, '
'regional_sales_2."order" FROM regional_sales_2',
checkpositional=("x", "y", "z"),
dialect=dialect,
)
def test_positional_binds_2_asliteral(self):
orders = table("orders", column("order"))
s = select([orders.c.order, literal("x")]).cte("regional_sales")
s = select([s.c.order, literal("y")])
dialect = default.DefaultDialect()
dialect.positional = True
dialect.paramstyle = "numeric"
s1 = (
select([orders.c.order])
.where(orders.c.order == "x")
.cte("regional_sales_1")
)
s1a = s1.alias()
s2 = (
select(
[
orders.c.order == "y",
s1a.c.order,
orders.c.order,
s1.c.order,
]
)
.where(orders.c.order == "z")
.cte("regional_sales_2")
)
s3 = select([s2])
self.assert_compile(
s3,
"WITH regional_sales_1 AS "
'(SELECT orders."order" AS "order" '
"FROM orders "
"WHERE orders.\"order\" = 'x'), "
"regional_sales_2 AS "
"(SELECT orders.\"order\" = 'y' AS anon_1, "
'anon_2."order" AS "order", orders."order" AS "order", '
'regional_sales_1."order" AS "order" '
"FROM orders, regional_sales_1 AS anon_2, regional_sales_1 "
"WHERE orders.\"order\" = 'z') "
'SELECT regional_sales_2.anon_1, regional_sales_2."order" '
"FROM regional_sales_2",
checkpositional=(),
dialect=dialect,
literal_binds=True,
)
def test_all_aliases(self):
orders = table("order", column("order"))
s = select([orders.c.order]).cte("regional_sales")
r1 = s.alias()
r2 = s.alias()
s2 = select([r1, r2]).where(r1.c.order > r2.c.order)
self.assert_compile(
s2,
'WITH regional_sales AS (SELECT "order"."order" '
'AS "order" FROM "order") '
'SELECT anon_1."order", anon_2."order" '
"FROM regional_sales AS anon_1, "
'regional_sales AS anon_2 WHERE anon_1."order" > anon_2."order"',
)
s3 = select([orders]).select_from(
orders.join(r1, r1.c.order == orders.c.order)
)
self.assert_compile(
s3,
"WITH regional_sales AS "
'(SELECT "order"."order" AS "order" '
'FROM "order")'
' SELECT "order"."order" '
'FROM "order" JOIN regional_sales AS anon_1 '
'ON anon_1."order" = "order"."order"',
)
def test_suffixes(self):
orders = table("order", column("order"))
s = select([orders.c.order]).cte("regional_sales")
s = s.suffix_with("pg suffix", dialect="postgresql")
s = s.suffix_with("oracle suffix", dialect="oracle")
stmt = select([orders]).where(orders.c.order > s.c.order)
self.assert_compile(
stmt,
'WITH regional_sales AS (SELECT "order"."order" AS "order" '
'FROM "order") SELECT "order"."order" FROM "order", '
'regional_sales WHERE "order"."order" > regional_sales."order"',
)
self.assert_compile(
stmt,
'WITH regional_sales AS (SELECT "order"."order" AS "order" '
'FROM "order") oracle suffix '
'SELECT "order"."order" FROM "order", '
'regional_sales WHERE "order"."order" > regional_sales."order"',
dialect="oracle",
)
self.assert_compile(
stmt,
'WITH regional_sales AS (SELECT "order"."order" AS "order" '
'FROM "order") pg suffix SELECT "order"."order" FROM "order", '
'regional_sales WHERE "order"."order" > regional_sales."order"',
dialect="postgresql",
)
def test_upsert_from_select(self):
orders = table(
"orders",
column("region"),
column("amount"),
column("product"),
column("quantity"),
)
upsert = (
orders.update()
.where(orders.c.region == "Region1")
.values(amount=1.0, product="Product1", quantity=1)
.returning(*(orders.c._all_columns))
.cte("upsert")
)
insert = orders.insert().from_select(
orders.c.keys(),
select(
[
literal("Region1"),
literal(1.0),
literal("Product1"),
literal(1),
]
).where(~exists(upsert.select())),
)
self.assert_compile(
insert,
"WITH upsert AS (UPDATE orders SET amount=:amount, "
"product=:product, quantity=:quantity "
"WHERE orders.region = :region_1 "
"RETURNING orders.region, orders.amount, "
"orders.product, orders.quantity) "
"INSERT INTO orders (region, amount, product, quantity) "
"SELECT :param_1 AS anon_1, :param_2 AS anon_2, "
":param_3 AS anon_3, :param_4 AS anon_4 WHERE NOT (EXISTS "
"(SELECT upsert.region, upsert.amount, upsert.product, "
"upsert.quantity FROM upsert))",
)
def test_anon_update_cte(self):
orders = table("orders", column("region"))
stmt = (
orders.update()
.where(orders.c.region == "x")
.values(region="y")
.returning(orders.c.region)
.cte()
)
self.assert_compile(
stmt.select(),
"WITH anon_1 AS (UPDATE orders SET region=:region "
"WHERE orders.region = :region_1 RETURNING orders.region) "
"SELECT anon_1.region FROM anon_1",
)
def test_anon_insert_cte(self):
orders = table("orders", column("region"))
stmt = (
orders.insert().values(region="y").returning(orders.c.region).cte()
)
self.assert_compile(
stmt.select(),
"WITH anon_1 AS (INSERT INTO orders (region) "
"VALUES (:region) RETURNING orders.region) "
"SELECT anon_1.region FROM anon_1",
)
def test_pg_example_one(self):
products = table("products", column("id"), column("date"))
products_log = table("products_log", column("id"), column("date"))
moved_rows = (
products.delete()
.where(
and_(products.c.date >= "dateone", products.c.date < "datetwo")
)
.returning(*products.c)
.cte("moved_rows")
)
stmt = products_log.insert().from_select(
products_log.c, moved_rows.select()
)
self.assert_compile(
stmt,
"WITH moved_rows AS "
"(DELETE FROM products WHERE products.date >= :date_1 "
"AND products.date < :date_2 "
"RETURNING products.id, products.date) "
"INSERT INTO products_log (id, date) "
"SELECT moved_rows.id, moved_rows.date FROM moved_rows",
)
def test_pg_example_two(self):
products = table("products", column("id"), column("price"))
t = (
products.update()
.values(price="someprice")
.returning(*products.c)
.cte("t")
)
stmt = t.select()
assert "autocommit" not in stmt._execution_options
eq_(stmt.compile().execution_options["autocommit"], True)
self.assert_compile(
stmt,
"WITH t AS "
"(UPDATE products SET price=:price "
"RETURNING products.id, products.price) "
"SELECT t.id, t.price "
"FROM t",
)
def test_pg_example_three(self):
parts = table("parts", column("part"), column("sub_part"))
included_parts = (
select([parts.c.sub_part, parts.c.part])
.where(parts.c.part == "our part")
.cte("included_parts", recursive=True)
)
pr = included_parts.alias("pr")
p = parts.alias("p")
included_parts = included_parts.union_all(
select([p.c.sub_part, p.c.part]).where(p.c.part == pr.c.sub_part)
)
stmt = (
parts.delete()
.where(parts.c.part.in_(select([included_parts.c.part])))
.returning(parts.c.part)
)
# the outer RETURNING is a bonus over what PG's docs have
self.assert_compile(
stmt,
"WITH RECURSIVE included_parts(sub_part, part) AS "
"(SELECT parts.sub_part AS sub_part, parts.part AS part "
"FROM parts "
"WHERE parts.part = :part_1 "
"UNION ALL SELECT p.sub_part AS sub_part, p.part AS part "
"FROM parts AS p, included_parts AS pr "
"WHERE p.part = pr.sub_part) "
"DELETE FROM parts WHERE parts.part IN "
"(SELECT included_parts.part FROM included_parts) "
"RETURNING parts.part",
)
def test_insert_in_the_cte(self):
products = table("products", column("id"), column("price"))
cte = (
products.insert()
.values(id=1, price=27.0)
.returning(*products.c)
.cte("pd")
)
stmt = select([cte])
assert "autocommit" not in stmt._execution_options
eq_(stmt.compile().execution_options["autocommit"], True)
self.assert_compile(
stmt,
"WITH pd AS "
"(INSERT INTO products (id, price) VALUES (:id, :price) "
"RETURNING products.id, products.price) "
"SELECT pd.id, pd.price "
"FROM pd",
)
def test_update_pulls_from_cte(self):
products = table("products", column("id"), column("price"))
cte = products.select().cte("pd")
assert "autocommit" not in cte._execution_options
stmt = products.update().where(products.c.price == cte.c.price)
eq_(stmt.compile().execution_options["autocommit"], True)
self.assert_compile(
stmt,
"WITH pd AS "
"(SELECT products.id AS id, products.price AS price "
"FROM products) "
"UPDATE products SET id=:id, price=:price FROM pd "
"WHERE products.price = pd.price",
)
|
from dtl.dag import *
|
from .LocalDatabricksConfig import LocalDatabricksConfig
from .secret_lookup import *
|
from buzzard._actors.message import Msg
import collections
class ActorProducer(object):
"""Actor that takes care of waiting for cache tiles reads and launching resamplings"""
def __init__(self, raster):
self._raster = raster
self._alive = True
self._produce_per_query = collections.defaultdict(dict) # type: Mapping[CachedQueryInfos, Mapping[int, _ProdArray]]
self.address = '/Raster{}/Producer'.format(self._raster.uid)
@property
def alive(self):
return self._alive
# ******************************************************************************************* **
def receive_make_this_array(self, qi, prod_idx):
"""Receive message: Start making this array
Parameters
----------
qi: _actors.cached.query_infos.QueryInfos
prod_idx: int
"""
msgs = []
pi = qi.prod[prod_idx] # type: CacheProduceInfos
assert pi.share_area is (len(pi.cache_fps) != 0)
if pi.share_area:
# If this prod_idx requires some cache file reads (this is the case most of the time)
msgs += [Msg(
'CacheExtractor', 'sample_those_cache_files_to_an_array', qi, prod_idx,
)]
for resample_fp in pi.resample_fps:
sample_fp = pi.resample_sample_dep_fp[resample_fp]
if sample_fp is None:
# Start the 'resampling' step of the resample_fp fully outside of raster
assert (
resample_fp not in pi.resample_cache_deps_fps or
len(pi.resample_cache_deps_fps[resample_fp]) == 0
)
msgs += [Msg(
'Resampler', 'resample_and_accumulate',
qi, prod_idx, None, resample_fp, None,
)]
self._produce_per_query[qi][prod_idx] = _ProdArray(pi)
return msgs
def receive_sampled_a_cache_file_to_the_array(self, qi, prod_idx, cache_fp, array):
"""Receive message: A cache file was read for that output array
Parameters
----------
qi: _actors.cached.query_infos.QueryInfos
prod_idx: int
cache_fp: Footprint
The cache_fp that was just read by the reader
array: ndarray
The array onto which the reader fills rectangles one by one
"""
msgs = []
pr = self._produce_per_query[qi][prod_idx]
pi = qi.prod[prod_idx]
# The constraints on `cache_fp` are now satisfied
for resample_fp, cache_fps in pr.resample_needs.items():
if cache_fp in cache_fps:
cache_fps.remove(cache_fp)
resample_ready = [
resample_fp
for resample_fp, cache_fps in pr.resample_needs.items()
if len(cache_fps) == 0
]
for resample_fp in resample_ready:
del pr.resample_needs[resample_fp]
subsample_fp = pi.resample_sample_dep_fp[resample_fp]
assert subsample_fp is not None
subsample_array = array[subsample_fp.slice_in(pi.sample_fp)]
assert subsample_array.shape[:2] == tuple(subsample_fp.shape)
msgs += [Msg(
'Resampler', 'resample_and_accumulate',
qi, prod_idx, subsample_fp, resample_fp, subsample_array,
)]
return msgs
def receive_made_this_array(self, qi, prod_idx, array):
"""Receive message: Done creating an output array"""
del self._produce_per_query[qi][prod_idx]
if len(self._produce_per_query[qi]) == 0:
del self._produce_per_query[qi]
return [Msg(
'QueriesHandler', 'made_this_array', qi, prod_idx, array
)]
def receive_cancel_this_query(self, qi):
"""Receive message: One query was dropped
Parameters
----------
qi: _actors.cached.query_infos.QueryInfos
"""
if qi in self._produce_per_query:
del self._produce_per_query[qi]
return []
def receive_die(self):
"""Receive message: The raster was killed"""
assert self._alive
self._alive = False
self._produce_per_query.clear()
self._raster = None
return []
# ******************************************************************************************* **
class _ProdArray(object):
def __init__(self, pi):
self.resample_needs = {
resample_fp: set(cache_fps)
for resample_fp, cache_fps in pi.resample_cache_deps_fps.items()
}
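# Message-flow sketch, as comments (actor names are those used in the Msg(...)
# calls above; which actors send the incoming messages is an assumption about
# the wider buzzard actor system):
#   make_this_array -> Producer asks CacheExtractor to
#       'sample_those_cache_files_to_an_array', and sends
#       'resample_and_accumulate' straight to Resampler for footprints that
#       fall fully outside the raster.
#   sampled_a_cache_file_to_the_array -> once a resample_fp has received all of
#       its cache footprints, Producer sends 'resample_and_accumulate' to
#       Resampler with the sub-sampled array.
#   made_this_array -> Producer forwards 'made_this_array' (with the finished
#       array) to QueriesHandler.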
|
# pylint: disable=invalid-name,pointless-statement
def f() -> None:
4 # noqa: B018
def g() -> int:
return 4
|
# Time + 15 minutes
# Write a program that reads an hour and minutes from a 24-hour day and
# calculates what the time will be 15 minutes later.
# Print the result in the format hh:mm. Hours are always between 0 and 23,
# and minutes are always between 0 and 59.
# Hours are printed with one or two digits.
# Minutes are always printed with two digits, with a leading zero when needed.
current_h = int(input())
current_m = int(input())
current_minutes = current_h * 60 + current_m
future_minutes = current_minutes + 15
future_h = future_minutes // 60
future_m = future_minutes % 60
if future_h == 24:
future_h = 0
print(f'{future_h}:{future_m:02d}')
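# Worked example (hour and minutes are read on separate lines):
#   23 and 59 -> 23*60 + 59 + 15 = 1454 minutes -> 24 h 14 min -> wraps to "0:14"
#    8 and  7 ->  8*60 +  7 + 15 =  502 minutes -> printed as "8:22"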
|
# Code based on https://github.com/OpenNMT/OpenNMT-py/blob/master/preprocess.py
import pickle
import numpy as np
import argparse
import sys
import dsol
from keras.preprocessing import text
__author__ = 'Sameer Khurana'
__email__ = 'sameerkhurana10@gmail.com'
__version__ = '0.2'
parser = argparse.ArgumentParser(description='preprocess.py')
# Preprocess options
parser.add_argument('-config', help="Read options from this file")
parser.add_argument('-train_src', required=True,
help="Path to the training source data")
parser.add_argument('-train_src_bio', required=True,
help="Path to the training bio source data")
parser.add_argument('-train_tgt', required=True,
help="Path to the training target data")
parser.add_argument('-valid_src', required=False,
help="Path to the validation source data")
parser.add_argument('-valid_src_bio', required=False,
help="Path to the validation bio source data")
parser.add_argument('-valid_tgt', required=False,
help="Path to the validation target data")
parser.add_argument('-test_src', required=False,
help="Path to the test source data")
parser.add_argument('-test_src_bio', required=False,
help="Path to the test bio source data")
parser.add_argument('-test_tgt', required=False,
help="Path to the target data")
parser.add_argument('-save_data', required=True,
help="Output file for the prepared data")
parser.add_argument('-shuffle', type=int, default=1,
help="Shuffle data")
parser.add_argument('-seed', type=int, default=3435,
help="Random seed")
parser.add_argument('-lower', default=False,
help="Lower case dataset")
parser.add_argument('-start_char', default=1,
help="label for the start of the sequence")
parser.add_argument('-oov_char', default=2,
help="label for the out of vocab words")
parser.add_argument('-index_from', default=3,
help="start the words indices from")
parser.add_argument('-skip_top', default=0,
                    help="treat the N most frequent word indices as out-of-vocabulary")
parser.add_argument('-char_level', default=True,
help="whether to have character level features")
parser.add_argument('-lowercase', default=False,
help="whether to lowercase the data")
parser.add_argument('-num_words', default=None,
                    help="restrict the vocab to the number of words")
opt = parser.parse_args()
np.random.seed(opt.seed)
def save_vocabulary(name, vocab, file):
print('Saving ' + name + ' vocabulary to \'' + file + '\'...')
vocab.writeFile(file)
def make_vocabulary(filename):
vocab = dsol.Dict([dsol.Constants.PAD_WORD, dsol.Constants.UNK_WORD,
dsol.Constants.BOS_WORD, dsol.Constants.EOS_WORD],
lower=opt.lower)
with open(filename) as f:
for sent in f.readlines():
for char in sent.rstrip():
vocab.add(char)
print('Created dictionary of size %d' %
(vocab.size()))
save_vocabulary('voc', vocab, 'data/vocabulary')
return vocab
def init_vocabulary(name, dataFile):
vocab = None
print('Building ' + name + ' vocabulary...')
gen_word_vocab = make_vocabulary(dataFile)
vocab = gen_word_vocab
print()
return vocab
def make_data(src_file, src_file_bio, tgt_file, train=False):
"""
"""
src, tgt = [], []
sizes = []
count = 0
print('Processing %s & %s & %s ...' % (src_file, src_file_bio, tgt_file))
srcF = open(src_file, 'r')
tgtF = open(tgt_file, 'r')
all_lines = []
all_tgts = []
while True:
sline = srcF.readline()
tline = tgtF.readline()
# end of file
if sline == "" and tline == "":
break
# source or target does not have same number of lines
if sline == "" or tline == "":
print('Error: source and target do not have the same number of sentences')
sys.exit(-1)
break
        count += 1  # running line number, used by the empty-line warning below
        sline = sline.strip()
        tline = tline.strip()
        # source and/or target are empty
        if sline == "" or tline == "":
            print('WARNING: ignoring an empty line (' + str(count) + ')')
            continue
all_lines.append(sline)
all_tgts.append(tline)
srcF.close()
tgtF.close()
if train:
vectorizer = text.Tokenizer(lower=opt.lower, split=" ", num_words=opt.num_words, char_level=opt.char_level)
vectorizer.fit_on_texts(all_lines)
opt.vectorizer = vectorizer
# a list of lists of indices
X = opt.vectorizer.texts_to_sequences(all_lines)
# adding start of sequence character
X = [[opt.start_char] + [w + opt.index_from for w in x] for x in X]
nb_words = opt.num_words
if nb_words is None:
nb_words = max([max(x) for x in X])
# replace indices with oov index if the word_idx in the list exceed num_words
src = [[opt.oov_char if (w >= nb_words or w < opt.skip_top) else w for w in x] for x in X]
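    # Worked example of the indexing above (values illustrative): if the
    # tokenizer maps a sequence to [5, 2, 9], then with start_char=1 and
    # index_from=3 it becomes [1, 8, 5, 12]; any index >= nb_words or below
    # skip_top is then replaced by oov_char.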
tgt = all_tgts
src_bio = np.loadtxt(src_file_bio)
if opt.shuffle == 1:
if (len(src)>1):
print('... shuffling sequences')
perm = np.random.permutation(len(src))
src = [src[idx] for idx in perm]
src_bio = [src_bio[idx] for idx in perm]
tgt = [tgt[idx] for idx in perm]
else:
src_bio = [src_bio]
else:
if (len(src)==1):
src_bio = [src_bio]
print('Prepared %d sentences' %
(len(src)))
return src, src_bio, tgt
def main():
print('Preparing training ...')
train = {}
train['src'], train['src_bio'], train['tgt'] = make_data(opt.train_src, opt.train_src_bio, opt.train_tgt,
train=True)
valid = {}
if (opt.valid_src!=None and opt.valid_src_bio!=None and opt.valid_tgt!=None):
print('Preparing validation ...')
valid['src'], valid['src_bio'], valid['tgt'] = make_data(opt.valid_src, opt.valid_src_bio, opt.valid_tgt)
test = {}
if (opt.test_src!=None and opt.test_src_bio!=None and opt.test_tgt!=None):
print('Preparing Test ...')
test['src'], test['src_bio'], test['tgt'] = make_data(opt.test_src, opt.test_src_bio, opt.test_tgt)
print('Saving data to \'' + opt.save_data + '\'...')
save_data = {'train': train,
'valid': valid,
'test': test}
with open(opt.save_data, 'wb') as handle:
pickle.dump(save_data, handle)
if __name__ == "__main__":
main()
|
from __future__ import unicode_literals
from django.apps import AppConfig
class EncryptiburConfig(AppConfig):
name = 'Encryptibur'
|
import numpy as np
import cv2
import subprocess
import sys
import shutil
import os
import argparse
import configparser
import midi
import note
def is_note_on(event):
"""
Sometimes Note Offs are marked by
event.name = "Note On" and velocity = 0.
That's why we have to check both event.name and
velocity.
"""
velocity = event.data[1]
return event.name == "Note On" and velocity > 0
def read_midi(filename):
"""
Returns a list of tracks.
Each track is a list containing 128 lists of notes.
"""
midi_tracks = midi.read_midifile(filename)
resolution = midi_tracks.resolution
tempo_bpm = 120.0 # may be changed repeatedly in the loop
note_tracks = []
for t_index, t in enumerate(midi_tracks):
notes_pitchwise = [[] for i in range(128)]
total_ticks = 0
for elem in t:
total_ticks += elem.tick
if elem.name in ["Note On", "Note Off"]:
pitch = elem.data[0]
if is_note_on(elem):
n = note.Note(
velocity=elem.data[1],
pitch=pitch,
start_ticks=total_ticks,
track=t_index)
notes_pitchwise[pitch].append(n)
else:
for n in reversed(notes_pitchwise[pitch]):
if not n.finished:
n.end_ticks = total_ticks
n.finished = True
else:
break
elif elem.name == "Set Tempo":
tempo_bpm = elem.get_bpm()
note_tracks.append(notes_pitchwise)
return note_tracks, tempo_bpm, resolution
def calculate_note_times(note_tracks, tempo_bpm, resolution):
"""
Calculate start_time and end_time for all notes.
This only works if the MIDI file does not contain
any tempo changes.
"""
for t in note_tracks:
for pl in t:
for n in pl:
n.calculate_start_and_end_time(tempo_bpm, resolution)
def get_maximum_time(note_tracks):
"""
Determines the largest value of end_time
among all notes. This is required to know
when the video should end.
"""
maximum_time = -999999.9
for t in note_tracks:
for pitch_list in t:
if pitch_list != []:
if pitch_list[-1].end_time > maximum_time:
maximum_time = pitch_list[-1].end_time
return maximum_time
def get_pitch_min_max(note_tracks):
"""
In order not to waste space,
we may want to know in advance what the highest and lowest
pitches of the MIDI notes are.
"""
pitch_min = 128
pitch_max = 0
for t in note_tracks:
for pitch_list in t:
            for n in pitch_list:  # 'n' avoids shadowing the imported `note` module
                pitch = n.pitch
if pitch > pitch_max:
pitch_max = pitch
if pitch < pitch_min:
pitch_min = pitch
return pitch_min, pitch_max
def print_progress(msg, current, total):
"""
This keeps the output on the same line.
"""
text = "\r" + msg + " {:9.1f}/{:.1f}".format(current, total)
sys.stdout.write(text)
sys.stdout.flush()
def create_video(note_tracks, config):
frame_rate = float(config["frame_rate"])
waiting_time_before_end = float(config["waiting_time_before_end"])
start_time = float(config["start_time"])
time_before_current = float(config["time_before_current"])
time_after_current = float(config["time_after_current"])
pitch_min, pitch_max = get_pitch_min_max(note_tracks)
if config["pitch_min"] != "auto":
pitch_min = int(config["pitch_min"])
if config["pitch_max"] != "auto":
pitch_max = int(config["pitch_max"])
if config["end_time"] == "auto":
end_time = get_maximum_time(note_tracks) + waiting_time_before_end
else:
end_time = float(config["end_time"])
current_note_indices = [
[0 for i in range(128)] for k in range(len(note_tracks))]
img_index = 0
dt = 1.0 / frame_rate
time = start_time
while time < end_time:
time_left = time - time_before_current
time_right = time + time_after_current
current_notes = []
for track_index, track in enumerate(note_tracks):
for pitch_index in range(128):
min_note_index = current_note_indices[track_index][pitch_index]
max_note_index = len(track[pitch_index])
for note_index in range(min_note_index, max_note_index):
note = track[pitch_index][note_index]
if note.end_time < time_left:
current_note_indices[track_index][pitch_index] += 1
elif note.start_time < time_right:
current_notes.append(note)
else:
break
img = create_image(current_notes, time, time_left, time_right, time_before_current,
time_after_current, pitch_min, pitch_max, config)
cv2.imwrite("./tmp_images/%08i.png" % img_index, img)
time += dt
img_index += 1
print_progress("Current time:", time, end_time)
print("")
size_x = int(config["size_x"])
size_y = int(config["size_y"])
run_ffmpeg(frame_rate, size_x, size_y)
def run_ffmpeg(frame_rate, size_x, size_y):
"""
Convert all images into a video.
"""
call_list = []
call_list.append("ffmpeg")
call_list.append("-r")
call_list.append("{:f}".format(frame_rate))
call_list.append("-f")
call_list.append("image2")
call_list.append("-s")
call_list.append("{:d}x{:d}".format(size_x, size_y))
call_list.append("-i")
call_list.append("./tmp_images/%08d.png")
call_list.append("-vcodec")
call_list.append("libx264")
call_list.append("-crf")
call_list.append("25")
call_list.append("-pix_fmt")
call_list.append("yuv420p")
call_list.append("./output/final.mp4")
subprocess.call(call_list)
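# For reference, the assembled call is equivalent to running (illustrative values: 30 fps, 1920x1080):
#   ffmpeg -r 30.000000 -f image2 -s 1920x1080 -i ./tmp_images/%08d.png \
#          -vcodec libx264 -crf 25 -pix_fmt yuv420p ./output/final.mp4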
def create_empty_image(bg_color, size_x=1920, size_y=1080):
"""
This returns the array on which will be drawn.
"""
bg = np.array(bg_color, dtype=np.uint8)
    img = bg * np.ones((size_y, size_x, 3), dtype=np.uint8)
return img
def get_color_from_string(color_str):
"""
This converts the colors from the options file
to a list of ints: [b,g,r].
"""
return [int(c) for c in color_str.split(",")]
def create_image(current_notes, time, time_left, time_right, time_before_current,
time_after_current, pitch_min, pitch_max, config):
"""
For each frame, this function is called.
The notes which appear in this image (current_notes) have
already been selected.
"""
margin_y = int(config["margin_y"])
size_x = int(config["size_x"])
size_y = int(config["size_y"])
color_active = get_color_from_string(config["color_active"])
color_silent = get_color_from_string(config["color_silent"])
bg_color = get_color_from_string(config["bg_color"])
pixels_to_remove_from_notes_x = float(
config["pixels_to_remove_from_notes_x"])
pixels_to_remove_from_notes_y = float(
config["pixels_to_remove_from_notes_y"])
no_of_rows = pitch_max - pitch_min + 1
row_height = (size_y - 2.0 * margin_y) / no_of_rows
pixels_per_second = size_x / (time_before_current + time_after_current)
note_height = int(
round(max(1, row_height - pixels_to_remove_from_notes_y)))
note_pos_y_offset = 0.5 * (row_height - note_height)
img = create_empty_image(bg_color, size_x, size_y)
for note in current_notes:
row_no = note.pitch - pitch_min
y_pos = int(round(size_y - margin_y - (row_no + 1)
* row_height + note_pos_y_offset))
x_pos = int(round((note.start_time - time_left) * pixels_per_second))
x_length = int(round((note.end_time - note.start_time)
* pixels_per_second - pixels_to_remove_from_notes_x))
p1 = (x_pos, y_pos)
p2 = (x_pos + x_length, y_pos + note_height)
if is_note_active(note, time):
note_color = color_active
else:
note_color = color_silent
cv2.rectangle(img, p1, p2, note_color, -1)
return img
def is_note_active(note, time):
"""
Notes that are currently playing may be treated differently.
"""
if note.start_time < time and note.end_time >= time:
return True
else:
return False
def delete_and_create_folders():
"""
Clean everything up first.
"""
foldernames = ["./output", "./tmp_images"]
for f in foldernames:
if os.path.isdir(f):
shutil.rmtree(f)
os.mkdir(f)
def get_config(filename):
"""
All settings are stored in an external text file.
"""
config = configparser.ConfigParser()
config.read(filename)
return config
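# Hypothetical options file (the key names are those read via config["..."] throughout this
# script; the values below are only illustrative):
#
#   [DEFAULT]
#   midi_filename = input.mid
#   frame_rate = 30
#   size_x = 1920
#   size_y = 1080
#   margin_y = 20
#   start_time = 0.0
#   end_time = auto
#   waiting_time_before_end = 2.0
#   time_before_current = 2.0
#   time_after_current = 4.0
#   pitch_min = auto
#   pitch_max = auto
#   color_active = 0,255,0
#   color_silent = 128,128,128
#   bg_color = 0,0,0
#   pixels_to_remove_from_notes_x = 1
#   pixels_to_remove_from_notes_y = 1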
def main():
delete_and_create_folders()
parser = argparse.ArgumentParser()
parser.add_argument(
"-c",
"--config",
required=False,
default="options.cfg",
help="path to program options file")
arguments = vars(parser.parse_args())
filename = arguments["config"]
config = get_config(filename)["DEFAULT"]
note_tracks, tempo_bpm, resolution = read_midi(config["midi_filename"])
calculate_note_times(note_tracks, tempo_bpm, resolution)
create_video(note_tracks, config)
shutil.rmtree("./tmp_images")
if __name__ == '__main__':
main()
|
from rest_framework import viewsets
from rest_framework.permissions import IsAuthenticated
from api.grades.models import Grade, Module
from rest_framework.response import Response
from rest_framework.decorators import action
from api.grades.serializers import ModuleSerializer
class ModuleViewSet(viewsets.ModelViewSet):
permission_classes = [IsAuthenticated]
    def get_serializer_class(self):
        # ModuleSerializer is the only serializer used here; return it unconditionally so
        # non-list actions do not receive None from DRF.
        return ModuleSerializer
def get_queryset(self):
queryset = Module.objects.all()
return queryset
def destroy(self, request, *args, **kwargs):
user = request.user
if user.is_supervisor:
return Response({"detail": "Only students can delete grades."}, status=403)
# Check if grade exists, delete it if it does.
grade = Grade.objects.filter(student=user, module=self.get_object())
if grade:
grade.delete()
return Response(status=200)
return Response({"detail": "No grade for this module."}, status=400)
@action(
detail=True,
methods=["POST"],
url_path="",
        permission_classes=[IsAuthenticated],
)
def create_grade(self, request, pk):
user = request.user
if user.is_supervisor:
return Response({"detail": "Only students can delete grades."}, status=403)
score = request.data.get("score")
        if score is None or not 0 <= score <= 100:
return Response({"detail": "Wrong body."}, status=400)
# Update or create grade with score.
Grade.objects.update_or_create(
            student=user, module=self.get_object(), defaults={"score": score}
)
return Response(status=200)
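# Hypothetical requests against this viewset (the URL prefix depends on the project's router):
#   POST   <prefix>/<module_pk>/  with body {"score": 85}  -> create or update the caller's grade
#   DELETE <prefix>/<module_pk>/                            -> delete the caller's grade, if any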
|
import os
import shutil
def parse_lines_in_file(filename):
    lines = list()
    with open(filename, 'r') as file:
        for line in file.readlines():
            # strip only the trailing newline, not an arbitrary final character
            lines.append(line.rstrip('\n'))
    return lines
del_files = parse_lines_in_file('delete_ciks.txt')
for del_file in del_files:
f = os.path.join(r'D:\dataset\edgar\temp\data', del_file)
if os.path.exists(f):
print('deleting: {}'.format(f))
shutil.rmtree(f)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created Nov 2020
@author: hassi
"""
from qiskit import QuantumCircuit, Aer, execute
from qiskit.visualization import plot_histogram
from IPython.core.display import display
print("Ch 4: Upside down quantum coin toss")
print("-----------------------------------")
qc = QuantumCircuit(1, 1)
initial_vector = [0.+0.j, 1.+0.j]  # start in the |1> state (the "upside down" coin)
qc.initialize(initial_vector,0)
#qc.x(0)
qc.h(0)
qc.measure(0, 0)
print(qc)
#display(qc.draw())
backend = Aer.get_backend('qasm_simulator')
counts = execute(qc, backend, shots=1).result().get_counts(qc)
display(plot_histogram(counts))
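# Because the qubit starts in |1>, the Hadamard gate produces (|0> - |1>)/sqrt(2), so a
# measurement yields '0' or '1' with equal probability; with shots=1 the plotted histogram
# simply shows whichever single outcome occurred.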
|
# Copyright (C) 2017 Seeed Technology Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy
from apa102 import APA102
from gpiozero import LED
from threading import Thread
from time import sleep
try:
from queue import Queue
except ImportError:
from Queue import Queue
class AlexaLedPattern(object):
def __init__(self, show=None, number=12):
self.pixels_number = number
self.pixels = [0] * 4 * number
if not show or not callable(show):
def dummy(data):
pass
show = dummy
self.show = show
self.stop = False
def wakeup(self, direction=0):
position = int((direction + 15) /
(360 / self.pixels_number)) % self.pixels_number
pixels = [0, 0, 0, 24] * self.pixels_number
pixels[position * 4 + 2] = 48
self.show(pixels)
def listen(self):
pixels = [0, 0, 0, 24] * self.pixels_number
self.show(pixels)
def think(self):
pixels = [0, 0, 12, 12, 0, 0, 0, 24] * self.pixels_number
while not self.stop:
self.show(pixels)
sleep(0.2)
pixels = pixels[-4:] + pixels[:-4]
def speak(self):
step = 1
position = 12
while not self.stop:
pixels = [0, 0, position, 24 - position] * self.pixels_number
self.show(pixels)
sleep(0.01)
if position <= 0:
step = 1
sleep(0.4)
elif position >= 12:
step = -1
sleep(0.4)
position += step
def off(self):
self.show([0] * 4 * 12)
class GoogleHomeLedPattern(object):
def __init__(self, show=None):
self.basis = numpy.array([0] * 4 * 12)
self.basis[0 * 4 + 1] = 2
self.basis[3 * 4 + 1] = 1
self.basis[3 * 4 + 2] = 1
self.basis[6 * 4 + 2] = 2
self.basis[9 * 4 + 3] = 2
self.pixels = self.basis * 24
if not show or not callable(show):
def dummy(data):
pass
show = dummy
self.show = show
self.stop = False
def wakeup(self, direction=0):
position = int((direction + 15) / 30) % 12
basis = numpy.roll(self.basis, position * 4)
for i in range(1, 25):
pixels = basis * i
self.show(pixels)
sleep(0.005)
pixels = numpy.roll(pixels, 4)
self.show(pixels)
sleep(0.1)
for i in range(2):
new_pixels = numpy.roll(pixels, 4)
self.show(new_pixels * 0.5 + pixels)
pixels = new_pixels
sleep(0.1)
self.show(pixels)
self.pixels = pixels
def listen(self):
pixels = self.pixels
for i in range(1, 25):
self.show(pixels * i / 24)
sleep(0.01)
def think(self):
pixels = self.pixels
while not self.stop:
pixels = numpy.roll(pixels, 4)
self.show(pixels)
sleep(0.2)
t = 0.1
for i in range(0, 5):
pixels = numpy.roll(pixels, 4)
self.show(pixels * (4 - i) / 4)
sleep(t)
t /= 2
self.pixels = pixels
def speak(self):
pixels = self.pixels
step = 1
brightness = 5
while not self.stop:
self.show(pixels * brightness / 24)
sleep(0.02)
if brightness <= 5:
step = 1
sleep(0.4)
elif brightness >= 24:
step = -1
sleep(0.4)
brightness += step
def off(self):
self.show([0] * 4 * 12)
class Pixels:
NUM_LEDS = 12
PIN_MOSI = 10
PIN_SCLK = 11
PIN_SEL = 7 # CE1
brightness = 5
def __init__(self, pattern=AlexaLedPattern):
self.pattern = pattern(show=self.show)
self.dev = APA102(num_led=self.NUM_LEDS)
self.power = LED(5)
self.power.on()
self.queue = Queue()
self.thread = Thread(target=self._run)
self.thread.daemon = True
self.thread.start()
self.last_direction = None
def wakeup(self, direction=0):
self.last_direction = direction
def f():
self.pattern.wakeup(direction)
self.put(f)
def listen(self):
if self.last_direction:
def f():
self.pattern.wakeup(self.last_direction)
self.put(f)
else:
self.put(self.pattern.listen)
def think(self):
self.put(self.pattern.think)
def speak(self):
self.put(self.pattern.speak)
def off(self):
self.put(self.pattern.off)
def put(self, func):
self.pattern.stop = True
self.queue.put(func)
def _run(self):
while True:
func = self.queue.get()
self.pattern.stop = False
func()
def show(self, data):
for i in range(self.NUM_LEDS):
self.dev.set_pixel(i, int(data[4*i + 1]), int(data[4*i + 2]), int(data[4*i + 3]))
self.dev.show()
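# Hypothetical usage on an APA102-based LED ring (hardware assumptions: 12 LEDs, power on GPIO 5):
#   pixels = Pixels(pattern=GoogleHomeLedPattern)
#   pixels.wakeup(direction=180)
#   pixels.think()
#   pixels.speak()
#   pixels.off()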
|
import os
import numpy as np
import torch
from .helper_func import tensor_to_np
class Dataset:
def __init__(self, path):
self.path = path
self.data = dict()
self.files = set(os.listdir(path))
def __getattr__(self, item):
xname = item + "_x"
yname = item + "_y"
x = self.__get_helper(xname)
y = self.__get_helper(yname)
return x, y
def __get_helper(self, fname, type='np', device='cpu'):
if fname not in self.data:
            if fname + '.npy' in self.files:
                self.data[fname] = np.load(os.path.join(self.path, fname + '.npy'))
            elif fname + '.csv' in self.files:
                self.data[fname] = np.loadtxt(os.path.join(self.path, fname + '.csv'), delimiter=',')
if type == 'torch':
self.data[fname] = torch.Tensor(self.data[fname])
if device != 'cpu':
self.data[fname] = self.data[fname].to(device)
return self.data[fname]
def save(self, path=None, verbose=False):
path = self.path if path is None else path
for fname in self.data:
file_path = os.path.join(path, fname + '.npy')
odata = tensor_to_np(self.data[fname])
np.save(file_path, odata)
if verbose:
print('File {} saved with shape {}.'.format(fname, odata.shape))
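# Hypothetical usage (assumes files such as train_x.npy and train_y.npy exist under ./data):
#   ds = Dataset('./data')
#   x, y = ds.train   # __getattr__ resolves this to the train_x / train_y arrays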
|
import os
import codecs
from collections import OrderedDict
import l20n.format.lol.parser as parser
import l20n.format.lol.serializer as serializer
import l20n.format.lol.ast as ast
import pyast
import l10ndiff
def read_file(path):
with codecs.open(path, 'r', encoding='utf-8') as file:
text = file.read()
return text
def write_file(path, s):
f = codecs.open(path, encoding='utf_8', mode='w+')
f.write(s)
f.close()
def reduce_complex_string(s):
if isinstance(s, ast.ComplexString):
return unicode(s)
elif isinstance(s, ast.String):
return s.content
elif s is None:
return s
raise Exception("FUCK!")
def add_entity(lol, k, value):
id = ast.Identifier(k)
entity = ast.Entity(id)
entity.value = value
lol.body.append(entity)
def remove_entity(lol, id):
for n,elem in enumerate(lol.body):
if isinstance(elem, ast.Entity):
if elem.id.name == id:
del lol.body[n]
def update_entity(lol, id, entity):
pass
def get_entity_dict(lol):
res = OrderedDict()
for entry in lol.body:
if isinstance(entry, ast.Entity):
res[entry.id.name] = entry
return res
def get_entity_pos(lol, eid):
pos = -1
i = -1
for entry in lol.body:
i += 1
if isinstance(entry, ast.Entity):
if entry.id.name == eid:
pos = i
break
return pos
def locate_pos(lol, pos):
after = get_entity_pos(lol, pos['after'])
if after == -1:
before = get_entity_pos(lol, pos['before'])
return before
return after+1
def apply_ediff(lol, ediff):
pass
def apply_ldiff(lol, ldiff, source=0, result=1):
for key, hunk in ldiff.items():
#print(key)
#print(hunk)
if 'added' in hunk['flags']:
if hunk['elem'][source] is None:
#inject new entity
pos = locate_pos(lol, hunk['pos'])
lol.body.insert(pos, hunk['elem'][result])
#print(pos)
pass
if hunk['elem'][result] is None:
#removing obsolete entity
pos = get_entity_pos(lol, key)
del lol.body[pos]
del lol._template_body[pos]
if 'present' in hunk['flags']:
print(hunk)
if 'value' in hunk['elem']:
pos = get_entity_pos(lol, key)
if lol.body[pos].value is None:
pass
else:
lol.body[pos].value.content = hunk['elem']['value']['content'][result]
print(lol.body[pos].value.content)
return
def update_locale():
source_locale = 'en-US'
locale = 'pl'
module = 'homescreen'
mpath = '/Users/zbraniecki/projects/mozilla/gaia/apps/homescreen'
orig_file = read_file(os.path.join('data', locale, '%s.lol.orig' % module))
trans_file = read_file(os.path.join('data', locale, '%s.lol' % module))
source_file = read_file(os.path.join(mpath, 'locale', '%s.lol' % source_locale))
result = {
'nottranslated': {},
'outdated': {},
'obsolete': {},
'added': {},
'uptodate': {},
}
p = parser.Parser()
s = serializer.Serializer()
orig_lol = p.parse(orig_file)
trans_lol = p.parse(trans_file)
source_lol = p.parse(source_file)
orig_dict = get_entity_dict(orig_lol)
trans_dict = get_entity_dict(trans_lol)
source_dict = get_entity_dict(source_lol)
# deal with added/removed entities
ldiff = l10ndiff.lists(trans_dict, source_dict, values=False)
apply_ldiff(trans_lol, ldiff)
# deal with modified entities
ldiff = l10ndiff.lists(orig_dict, source_dict, values=True)
ldiff2 = {}
for key in ldiff:
if 'present' in ldiff[key]['flags']:
ldiff2[key] = ldiff[key]
#print('%s: %s' % (key, ldiff2[key]))
print('---')
print(trans_lol)
print('---')
apply_ldiff(trans_lol, ldiff2)
print('====')
print(trans_lol)
print('====')
#new_trans_lol = s.serialize(trans_lol)
#new_orig_lol = s.serialize(orig_lol)
#write_file(os.path.join('data', locale, '%s.lol.orig2' % module), new_orig_lol)
#write_file(os.path.join('data', locale, '%s.lol2' % module), new_trans_lol)
#print_result('homescreen', result)
class Example(pyast.Node):
seq = pyast.seq(pyast.re("[a-z]{2}"))
if __name__ == '__main__':
e = Example(['foo'])
#update_locale()
|
from importlib import import_module, metadata
from inspect import isclass, stack, getmodule
from django.templatetags.static import static
from django.utils.safestring import mark_safe
class SourceBase:
static_path = None
cdn_path = None
filename = None
js_filename = None
css_filename = None
legacy_js = None
js_path = cdn_js_path = 'js/'
css_path = cdn_css_path = 'css/'
cdn_scheme = 'https://'
def __init__(self, version, legacy=False):
self.root = None
self._cdn = None
self.version = version
self._version = None
self.legacy = legacy
@property
def version_qs(self):
return self._version
@property
def cdn(self):
return self._cdn
@cdn.setter
def cdn(self, cdn):
if cdn != self._cdn:
self.root = (self.cdn_scheme + self.cdn_path) if cdn else static(self.static_path)
self._version = '?v=' + self.version if not cdn and self.version else ''
self._cdn = cdn
def combine_filename(self, path, filename):
if not isinstance(filename, (tuple, list)):
filename = [filename]
return [path + f + self.version_qs for f in filename]
def _js_filename(self):
if self.legacy and self.legacy_js:
filename = self.legacy_js
elif self.js_filename:
filename = self.js_filename
elif isinstance(self.filename, (tuple, list)):
filename = [f + '.js' for f in self.filename]
elif self.filename:
filename = self.filename + '.js'
else:
return []
return self.combine_filename(self.cdn_js_path if self.cdn else self.js_path, filename)
def _css_filename(self):
if self.css_filename:
filename = self.css_filename
elif isinstance(self.filename, (tuple, list)):
filename = [f + '.css' for f in self.filename]
elif self.filename:
filename = self.filename + '.css'
else:
return []
return self.combine_filename(self.cdn_css_path if self.cdn else self.css_path, filename)
def javascript(self):
return ''.join([f'<script src="{self.root + f}"></script>' for f in self._js_filename()])
def css(self):
return ''.join([f'<link href="{self.root + f}" rel="stylesheet">' for f in self._css_filename()])
def includes(self, cdn=False):
cdn = cdn or False
if self.static_path is None:
self.cdn = True
elif self.cdn_path is None:
self.cdn = False
else:
self.cdn = cdn
return self.javascript() + self.css()
def html_include(library=None, cdn=False, module=None, legacy=False):
"""
Returns a string with javascript and css includes defined in a subclass of SourceBase in the calling module or
defined in passed module as a module or string.
"""
if isinstance(module, str):
module = import_module(module)
elif not module:
module = getmodule(stack()[1][0])
if not library:
library = 'DefaultInclude'
version = getattr(module, 'version', '')
packages = getattr(module, 'packages', None)
if packages and library in packages:
return mark_safe('\n'.join([lib(version, legacy).includes(cdn) for lib in packages[library]]))
source_class = getattr(module, library, None)
if isclass(source_class) and issubclass(source_class, SourceBase):
return mark_safe(source_class(version, legacy).includes(cdn))
return ''
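# Illustrative sketch (hypothetical module contents) of what html_include() expects to find in
# the calling module:
#
#   version = '1.2.3'
#
#   class DefaultInclude(SourceBase):
#       static_path = 'myapp/'
#       cdn_path = 'cdn.example.com/libs/'
#       filename = 'widgets'   # expands to js/widgets.js and css/widgets.css
#
# A template helper can then call html_include(cdn=True) to receive the combined <script> and
# <link> tags for those files.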
def pip_version(package):
try:
return metadata.version(package)
except metadata.PackageNotFoundError:
return 'local'
|
import librosa
import numpy as np
import sklearn
from tqdm import tqdm
import itertools
# helper Function
def normalize(x, axis=0):
return sklearn.preprocessing.minmax_scale(x, axis=axis)
def Extract_Mfcc(DataFrame):
features = []
for audio_data in tqdm(DataFrame['File_List'].to_list()):
x , sr = librosa.load(audio_data)
mfccs = librosa.feature.mfcc(x, sr=sr,n_mfcc=40)
features.append(normalize(np.mean(mfccs.T, axis = 0)))
return features,DataFrame['Label'].to_list()
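# Hypothetical usage (assumes a pandas DataFrame with 'File_List' audio paths and 'Label' values):
#   df = pd.DataFrame({'File_List': ['a.wav', 'b.wav'], 'Label': [0, 1]})
#   X, y = Extract_Mfcc(df)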
def Extract_Spectral_Centroids(DataFrame):
features = []
for audio_data in tqdm(DataFrame['File_List'].to_list()):
x , sr = librosa.load(audio_data)
spectral_centroids = librosa.feature.spectral_centroid(x, sr=sr)[0]
features.append(spectral_centroids)
features_New = np.array(list(itertools.zip_longest(*features, fillvalue=0))).T
return features_New,DataFrame['Label'].to_list()
def Extract_Spectral_Rolloff(DataFrame):
features = []
for audio_data in tqdm(DataFrame['File_List'].to_list()):
x , sr = librosa.load(audio_data)
spectral_rolloff = librosa.feature.spectral_rolloff(x+0.01, sr=sr)[0]
features.append(spectral_rolloff)
features_New = np.array(list(itertools.zip_longest(*features, fillvalue=0))).T
return features_New,DataFrame['Label'].to_list()
# helper Function
def zero_crossings_helper(val):
    return 1 if val else 0
def Extract_Zero_Crossings(DataFrame):
features = []
n0 = 9000
n1 = 9100
for audio_data in tqdm(DataFrame['File_List'].to_list()):
x , sr = librosa.load(audio_data)
zero_crossings = librosa.zero_crossings(x[n0:n1], pad=False)
features.append(np.array([zero_crossings_helper(i) for i in zero_crossings]))
features_New = np.array(list(itertools.zip_longest(*features, fillvalue=0))).T
return features_New,DataFrame['Label'].to_list()
def Extract_Spectral_Bandwidth(DataFrame):
features = []
for audio_data in tqdm(DataFrame['File_List'].to_list()):
x , sr = librosa.load(audio_data)
spectral_bandwidth = librosa.feature.spectral_bandwidth(x+0.01, sr=sr)[0]
features.append(spectral_bandwidth)
features_New = np.array(list(itertools.zip_longest(*features, fillvalue=0))).T
return features_New,DataFrame['Label'].to_list()
def Extract_Chromagram(DataFrame):
features = []
for audio_data in tqdm(DataFrame['File_List'].to_list()):
x , sr = librosa.load(audio_data)
chromagram = librosa.feature.chroma_stft(x, sr=sr)
features.append(normalize(np.mean(chromagram.T, axis = 0)))
return features,DataFrame['Label'].to_list()
def Extract_Stft(DataFrame):
features = []
for audio_data in tqdm(DataFrame['File_List'].to_list()):
x , sr = librosa.load(audio_data)
stft = np.abs(librosa.stft(x))
features.append(normalize(np.mean(stft.T, axis = 0)))
return features,DataFrame['Label'].to_list()
|
import errno
import os
import os.path as osp
def makedirs(path: str):
r"""Recursive directory creation function."""
try:
os.makedirs(osp.expanduser(osp.normpath(path)))
except OSError as e:
        # Re-raise unless the failure is just that the directory already exists.
        if e.errno != errno.EEXIST or not osp.isdir(path):
            raise
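# Hypothetical usage: makedirs('~/experiments/run1') expands the home directory and creates
# any missing intermediate directories, staying silent if the path already exists.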
|
import json
from django.conf import settings
from go.api.go_api import client
from go.base.tests.helpers import GoDjangoTestCase
from mock import patch
class TestClient(GoDjangoTestCase):
@patch('requests.post')
def test_rpc(self, mock_req):
client.rpc('123', 'do_something', ['foo', 'bar'], id='abc')
mock_req.assert_called_with(
settings.GO_API_URL,
auth=('session_id', '123'),
data=json.dumps({
'jsonrpc': '2.0',
'id': 'abc',
'params': ['foo', 'bar'],
'method': 'do_something',
}))
|
import stomp
import time
host_and_ports = [('0.0.0.0', 61613)]
conn = stomp.Connection(host_and_ports=host_and_ports)
conn.start()
conn.connect('admin', 'password', wait=True)
counter = 0
while counter <= 5:
test_msg = "BookmarkMessage" + str(counter) + "{status=status, userId=userId, element=element, rate=rate, vol=vol, num=num, page=page, comment='comment'}"
conn.send(body=test_msg, destination='/queue/messages')
counter += 1
time.sleep(0.1)
time.sleep(1)
# messages = base_listener.message_list
# print(messages)
conn.disconnect()
|
# File: oletools_connector.py
# Copyright (c) 2021 Splunk Inc.
#
# Licensed under Apache 2.0 (https://www.apache.org/licenses/LICENSE-2.0.txt)
# Python 3 Compatibility imports
from __future__ import print_function, unicode_literals
# Phantom App imports
import phantom.app as phantom
from phantom.base_connector import BaseConnector
from phantom.action_result import ActionResult
from phantom import vault
from oletools_consts import *
import requests
import json
import oletools.oleid
from oletools.mraptor import MacroRaptor
from oletools import olevba
class RetVal(tuple):
def __new__(cls, val1, val2=None):
return tuple.__new__(RetVal, (val1, val2))
class OletoolsConnector(BaseConnector):
def __init__(self):
# Call the BaseConnectors init first
super(OletoolsConnector, self).__init__()
self._state = None
def _get_error_message_from_exception(self, e):
"""
Get appropriate error message from the exception.
:param e: Exception object
:return: error message
"""
error_code = ERR_CODE_MSG
error_msg = ERR_MSG_UNAVAILABLE
try:
if hasattr(e, "args"):
if len(e.args) > 1:
error_code = e.args[0]
error_msg = e.args[1]
elif len(e.args) == 1:
error_code = ERR_CODE_MSG
error_msg = e.args[0]
except:
pass
try:
if error_code in ERR_CODE_MSG:
error_text = "Error Message: {}".format(error_msg)
else:
error_text = "Error Code: {}. Error Message: {}".format(error_code, error_msg)
except:
self.debug_print(PARSE_ERR_MSG)
error_text = PARSE_ERR_MSG
return error_text
def _handle_test_connectivity(self, param):
# Add an action result object to self (BaseConnector) to represent the action for this param
action_result = self.add_action_result(ActionResult(dict(param)))
self.save_progress("Test Connectivity Passed")
return action_result.set_status(phantom.APP_SUCCESS)
def _handle_mraptor_scan(self, param):
self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
action_result = self.add_action_result(ActionResult(dict(param)))
summary = action_result.update_summary({})
vault_id = param['vault_id']
try:
success, message, info = vault.vault_info(vault_id=vault_id, container_id=self.get_container_id(), trace=True)
if phantom.is_fail(success):
return action_result.set_status(phantom.APP_ERROR, message)
info = list(info)
except Exception as e:
error_msg = self._get_error_message_from_exception(e)
return action_result.set_status(phantom.APP_ERROR, error_msg)
# phantom vault file path
vault_path = info[0].get("path")
if not vault_path:
return action_result.set_status(phantom.APP_ERROR, OLETOOLS_ERR_UNABLE_TO_FETCH_FILE)
try:
oid = oletools.oleid.OleID(vault_path)
indicators = oid.check()
result = {
"oleid": {},
"mraptor": {}
}
for i in indicators:
result["oleid"][i.id] = {"id": i.id, "name": i.name, "value": str(i.value)}
summary["ftype"] = result["oleid"].get("ftype", {}).get("value")
vba_parser = olevba.VBA_Parser(filename=vault_path)
if vba_parser.detect_vba_macros():
vba_code_all_modules = ''
vba_code_all_modules = vba_parser.get_vba_code_all_modules()
mraptor = MacroRaptor(vba_code_all_modules)
mraptor.scan()
result["mraptor"] = mraptor.__dict__
summary['suspicious'] = mraptor.suspicious
except Exception as e:
error_msg = self._get_error_message_from_exception(e)
return action_result.set_status(phantom.APP_ERROR, error_msg)
action_result.add_data(result)
# Add a dictionary that is made up of the most important values from data into the summary
if not summary.get('suspicious'):
summary["suspicious"] = False
# Return success, no need to set the message, only the status
# BaseConnector will create a textual message based off of the summary dictionary
return action_result.set_status(phantom.APP_SUCCESS)
def handle_action(self, param):
ret_val = phantom.APP_SUCCESS
# Get the action that we are supposed to execute for this App Run
action_id = self.get_action_identifier()
self.debug_print("action_id: {}".format(self.get_action_identifier()))
if action_id == 'test_connectivity':
ret_val = self._handle_test_connectivity(param)
elif action_id == 'mraptor_scan':
ret_val = self._handle_mraptor_scan(param)
return ret_val
def initialize(self):
# Load the state in initialize, use it to store data
# that needs to be accessed across actions
self._state = self.load_state()
return phantom.APP_SUCCESS
def finalize(self):
# Save the state, this data is saved across actions and app upgrades
self.save_state(self._state)
return phantom.APP_SUCCESS
def main():
import pudb
import argparse
pudb.set_trace()
argparser = argparse.ArgumentParser()
argparser.add_argument('input_test_json', help='Input Test JSON file')
argparser.add_argument('-u', '--username', help='username', required=False)
argparser.add_argument('-p', '--password', help='password', required=False)
args = argparser.parse_args()
session_id = None
username = args.username
password = args.password
if username is not None and password is None:
# User specified a username but not a password, so ask
import getpass
password = getpass.getpass("Password: ")
if username and password:
try:
login_url = OletoolsConnector._get_phantom_base_url() + '/login'
print("Accessing the Login page")
r = requests.get(login_url, verify=False)
csrftoken = r.cookies['csrftoken']
data = dict()
data['username'] = username
data['password'] = password
data['csrfmiddlewaretoken'] = csrftoken
headers = dict()
headers['Cookie'] = 'csrftoken=' + csrftoken
headers['Referer'] = login_url
print("Logging into Platform to get the session id")
r2 = requests.post(login_url, verify=False, data=data, headers=headers)
session_id = r2.cookies['sessionid']
except Exception as e:
print("Unable to get session id from the platform. Error: " + str(e))
exit(1)
with open(args.input_test_json) as f:
in_json = f.read()
in_json = json.loads(in_json)
print(json.dumps(in_json, indent=4))
connector = OletoolsConnector()
connector.print_progress_message = True
if session_id is not None:
in_json['user_session_token'] = session_id
connector._set_csrf_info(csrftoken, headers['Referer'])
ret_val = connector._handle_action(json.dumps(in_json), None)
print(json.dumps(json.loads(ret_val), indent=4))
exit(0)
if __name__ == '__main__':
main()
|
from django.shortcuts import redirect, render
from .models import userlist
# Create your views here.
def database():
return ()
def login_check(request):
if request.method == "POST":
uid = request.POST.get('email')
passwd = request.POST.get('pass')
context = {}
ulist = userlist.objects.filter(email = uid, password = passwd).values()
if (len(ulist)==0):
return redirect('/login')
else:
for key in ulist:
request.session['name'] = key['name'].split(" ")[0]
request.session['logged'] = 1
            return redirect('/dashboard')
    # Fall back to the login page for non-POST requests instead of returning None.
    return redirect('/login')
def newUser(request):
if request.method == "POST":
uname = request.POST.get('name')
ugender = request.POST.get('gender')
udob = request.POST.get('dob')
ucontact = request.POST.get('contact')
ucountry = request.POST.get('Country')
uemail = request.POST.get('email')
npass = request.POST.get('npass')
ulist = userlist.objects.create(name = uname, gender = ugender, dob = udob, contact = ucontact, country = ucountry, email = uemail, password = npass)
ulist.save()
request.session['name'] = ulist.name.split(" ")[0]
request.session['logged'] = 1
print(request.session['name'])
return redirect('/dashboard')
|
from os.path import dirname, basename, isfile, abspath
import glob
modules = glob.glob(dirname(abspath(__file__))+"/*/*.py")
print '1', dirname(abspath(__file__))
print '2', modules
__all__ = [ basename(f)[:-3] for f in modules if isfile(f)]
print '3', __all__
# import pkgutil
# import sys
# def load_all_modules_from_dir(dirname):
# print 'here'
# for importer, package_name, _ in pkgutil.iter_modules([dirname]):
# print importer, package_name
# full_package_name = '%s.%s' % (dirname, package_name)
# print full_package_name
# if full_package_name not in sys.modules:
# module = importer.find_module(package_name
# ).load_module(full_package_name)
# print module
# load_all_modules_from_dir('firmbase_ticket')
|