hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acead08d25ccbe48950d1f92cb1d0795972b9cb9 | 1,863 | py | Python | TFM/scripts/calculateSuccess.py | jmanday/Master | 388ee71d04a3fb1f64ed4b2d1164f1b5ec45179f | [
"Apache-2.0"
] | 1 | 2017-03-21T17:57:22.000Z | 2017-03-21T17:57:22.000Z | TFM/scripts/calculateSuccess.py | jmanday/Master | 388ee71d04a3fb1f64ed4b2d1164f1b5ec45179f | [
"Apache-2.0"
] | 28 | 2016-10-16T19:42:37.000Z | 2018-09-14T21:29:48.000Z | TFM/scripts/calculateSuccess.py | jmanday/Master | 388ee71d04a3fb1f64ed4b2d1164f1b5ec45179f | [
"Apache-2.0"
] | null | null | null | #########################################################################
### Jesus Garcia Manday
### calculateSuccess.py
### @Description: script that computes the hit percentage of the
### image comparison. It takes as input a CSV file with the
### fields "Image Classifier", "Image Matching" and "Value",
### stating, for each image to be classified, which matching
### image obtained the highest value when compared against
### all candidates
#########################################################################
import os
import sys
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import csv
PATH_SCRIPTS = "/Users/jesusgarciamanday/Documents/Master/TFM/scripts/"
class DataMatching:
    """Container for a single image-matching result.

    Bundles the image being classified, the gallery image that best
    matched it, and the similarity value produced by the comparison.
    (Kept for scripts that consume this module; not used by the CLI path.)
    """

    def __init__(self, imageClassifier, imageMatching, value):
        """Store the (classifier image, matched image, score) triple."""
        self.value = value
        self.imageMatching = imageMatching
        self.imageClassifier = imageClassifier
def upperMatching(fileName):
    """Compute and print the matching accuracy stored in a results CSV.

    The CSV is expected to contain a header row followed by data rows
    whose first two columns are the classified image and its best-matching
    image; a row counts as a hit when both names are equal.

    The detector/descriptor names are derived from a file name of the
    form ``.../<anything>-<descriptor>-<detector>.csv``.

    Args:
        fileName: path to the results CSV file.

    Returns:
        The accuracy as a percentage (float), or None if the file has
        no data rows.
    """
    baseName = os.path.basename(fileName)
    parts = baseName.split("-")
    detector = parts[-1].split(".")[0]
    # Guard against file names without enough '-' separated fields.
    descriptor = parts[-2] if len(parts) >= 2 else ""
    print("\nDetector: ", detector, " ", "Descriptor: ", descriptor)

    success = 0
    total = 0
    with open(fileName, 'r') as csvfile:
        spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')
        # Skip the header row; the original code counted it in the
        # denominator, skewing the reported percentage.
        next(spamreader, None)
        for row in spamreader:
            if not row:
                continue  # tolerate blank lines
            total += 1
            if row[0] == row[1]:
                success += 1

    if total == 0:
        # Avoid ZeroDivisionError on an empty/ header-only file.
        print("Precisión: no data rows\n")
        return None
    result = (success / total) * 100
    print("Precisión: ", result, "%\n")
    return result
if __name__ == "__main__":
fileName = sys.argv[1]
upperMatching (fileName) | 32.684211 | 78 | 0.536232 |
acead0b0dc2a8ca60fd626fd49d4aede737c199d | 25,036 | py | Python | pysnmp/WL400-DOT11EXT-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 11 | 2021-02-02T16:27:16.000Z | 2021-08-31T06:22:49.000Z | pysnmp/WL400-DOT11EXT-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 75 | 2021-02-24T17:30:31.000Z | 2021-12-08T00:01:18.000Z | pysnmp/WL400-DOT11EXT-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module WL400-DOT11EXT-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/WL400-DOT11EXT-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 21:29:46 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# Symbol imports: each importSymbols() call resolves type/macro definitions
# from a previously loaded MIB module at runtime.
# NOTE(review): `mibBuilder` is not defined in this file — it is injected by
# the pysnmp MIB loader's exec context when the module is loaded; confirm
# before running this file standalone.
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, SingleValueConstraint, ValueRangeConstraint, ConstraintsIntersection, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "ValueSizeConstraint")
NotificationGroup, ObjectGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ObjectGroup", "ModuleCompliance")
TimeTicks, iso, Integer32, Gauge32, IpAddress, NotificationType, Unsigned32, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity, ObjectIdentity, Bits, Counter32, Counter64, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "TimeTicks", "iso", "Integer32", "Gauge32", "IpAddress", "NotificationType", "Unsigned32", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity", "ObjectIdentity", "Bits", "Counter32", "Counter64", "Bits")
TextualConvention, RowStatus, DisplayString, MacAddress, TruthValue = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "RowStatus", "DisplayString", "MacAddress", "TruthValue")
wl400Products, wl400Modules = mibBuilder.importSymbols("WL400-GLOBAL-REG", "wl400Products", "wl400Modules")
# Module identity and OID subtree registrations. All objects live under
# enterprise OID 1.3.6.1.4.1.232 (per setOrganization below: Compaq).
dot11ExtMibModule = ModuleIdentity((1, 3, 6, 1, 4, 1, 232, 143, 1, 2))
if mibBuilder.loadTexts: dot11ExtMibModule.setLastUpdated('9906020000Z')
if mibBuilder.loadTexts: dot11ExtMibModule.setOrganization('Compaq Computer Corporation')
dot11ExtMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 232, 145, 1))
# Conformance subtree (object groups and compliance statements).
dot11ExtConf = MibIdentifier((1, 3, 6, 1, 4, 1, 232, 145, 1, 1))
dot11ExtGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 232, 145, 1, 1, 1))
dot11ExtCompl = MibIdentifier((1, 3, 6, 1, 4, 1, 232, 145, 1, 1, 2))
# General (station management) objects.
dot11ExtGenObjs = MibIdentifier((1, 3, 6, 1, 4, 1, 232, 145, 1, 2))
smt = MibIdentifier((1, 3, 6, 1, 4, 1, 232, 145, 1, 2, 1))
# Access-point objects: association table and security/ACL objects.
dot11ExtAPObjs = MibIdentifier((1, 3, 6, 1, 4, 1, 232, 145, 1, 3))
assoc = MibIdentifier((1, 3, 6, 1, 4, 1, 232, 145, 1, 3, 1))
security = MibIdentifier((1, 3, 6, 1, 4, 1, 232, 145, 1, 3, 2))
# Wireless bridge/unit objects: roaming configuration and scan table.
dot11ExtWBUObjs = MibIdentifier((1, 3, 6, 1, 4, 1, 232, 145, 1, 5))
roam = MibIdentifier((1, 3, 6, 1, 4, 1, 232, 145, 1, 5, 1))
# Notification (trap) subtree.
dot11ExtEvents = MibIdentifier((1, 3, 6, 1, 4, 1, 232, 145, 1, 4))
dot11ExtEventsV2 = MibIdentifier((1, 3, 6, 1, 4, 1, 232, 145, 1, 4, 0))
smtAssociationID = MibScalar((1, 3, 6, 1, 4, 1, 232, 145, 1, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2007))).setMaxAccess("readonly")
if mibBuilder.loadTexts: smtAssociationID.setStatus('current')
smtCapabilityInfo = MibScalar((1, 3, 6, 1, 4, 1, 232, 145, 1, 2, 1, 2), Bits().clone(namedValues=NamedValues(("privacy", 0), ("cfPollRequest", 1), ("cfPollable", 2), ("ibss", 3), ("ess", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: smtCapabilityInfo.setStatus('current')
smtPowerSaveInterval = MibScalar((1, 3, 6, 1, 4, 1, 232, 145, 1, 2, 1, 3), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: smtPowerSaveInterval.setStatus('current')
smtListenInterval = MibScalar((1, 3, 6, 1, 4, 1, 232, 145, 1, 2, 1, 4), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: smtListenInterval.setStatus('current')
smtATIMWindow = MibScalar((1, 3, 6, 1, 4, 1, 232, 145, 1, 2, 1, 5), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: smtATIMWindow.setStatus('current')
smtOperationalChannels = MibScalar((1, 3, 6, 1, 4, 1, 232, 145, 1, 2, 1, 6), OctetString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: smtOperationalChannels.setStatus('current')
smtCurrentBSSID = MibScalar((1, 3, 6, 1, 4, 1, 232, 145, 1, 2, 1, 7), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: smtCurrentBSSID.setStatus('current')
smtCurrentSSID = MibScalar((1, 3, 6, 1, 4, 1, 232, 145, 1, 2, 1, 8), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: smtCurrentSSID.setStatus('current')
smtCurrentBSSType = MibScalar((1, 3, 6, 1, 4, 1, 232, 145, 1, 2, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("infrastructure", 1), ("adhoc", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: smtCurrentBSSType.setStatus('current')
smtPublicKeyEnable = MibScalar((1, 3, 6, 1, 4, 1, 232, 145, 1, 2, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: smtPublicKeyEnable.setStatus('current')
smtQualityLevel0 = MibScalar((1, 3, 6, 1, 4, 1, 232, 145, 1, 2, 1, 11), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: smtQualityLevel0.setStatus('current')
smtQualityLevel1 = MibScalar((1, 3, 6, 1, 4, 1, 232, 145, 1, 2, 1, 12), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: smtQualityLevel1.setStatus('current')
smtQualityLevel2 = MibScalar((1, 3, 6, 1, 4, 1, 232, 145, 1, 2, 1, 13), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: smtQualityLevel2.setStatus('current')
smtQualityPenalty = MibScalar((1, 3, 6, 1, 4, 1, 232, 145, 1, 2, 1, 14), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: smtQualityPenalty.setStatus('current')
smtStationDBTimeout = MibScalar((1, 3, 6, 1, 4, 1, 232, 145, 1, 2, 1, 15), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: smtStationDBTimeout.setStatus('current')
smtQualityIndicator = MibScalar((1, 3, 6, 1, 4, 1, 232, 145, 1, 2, 1, 16), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 3))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: smtQualityIndicator.setStatus('current')
smtQualityUpperLimit = MibScalar((1, 3, 6, 1, 4, 1, 232, 145, 1, 2, 1, 17), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: smtQualityUpperLimit.setStatus('current')
smtQualityLowerLimit = MibScalar((1, 3, 6, 1, 4, 1, 232, 145, 1, 2, 1, 18), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: smtQualityLowerLimit.setStatus('current')
smtOEMCapabilityInformation = MibScalar((1, 3, 6, 1, 4, 1, 232, 145, 1, 2, 1, 19), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: smtOEMCapabilityInformation.setStatus('current')
smtCWMin = MibScalar((1, 3, 6, 1, 4, 1, 232, 145, 1, 2, 1, 20), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: smtCWMin.setStatus('current')
smtCWMax = MibScalar((1, 3, 6, 1, 4, 1, 232, 145, 1, 2, 1, 21), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: smtCWMax.setStatus('current')
smtACKWindow = MibScalar((1, 3, 6, 1, 4, 1, 232, 145, 1, 2, 1, 22), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: smtACKWindow.setStatus('current')
assocTableMaxLength = MibScalar((1, 3, 6, 1, 4, 1, 232, 145, 1, 3, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: assocTableMaxLength.setStatus('current')
assocTable = MibTable((1, 3, 6, 1, 4, 1, 232, 145, 1, 3, 1, 2), )
if mibBuilder.loadTexts: assocTable.setStatus('current')
assocEntry = MibTableRow((1, 3, 6, 1, 4, 1, 232, 145, 1, 3, 1, 2, 1), ).setIndexNames((0, "WL400-DOT11EXT-MIB", "assocIndex"))
if mibBuilder.loadTexts: assocEntry.setStatus('current')
assocIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 232, 145, 1, 3, 1, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2007)))
if mibBuilder.loadTexts: assocIndex.setStatus('current')
assocAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 232, 145, 1, 3, 1, 2, 1, 2), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: assocAddress.setStatus('current')
assocQuality = MibTableColumn((1, 3, 6, 1, 4, 1, 232, 145, 1, 3, 1, 2, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: assocQuality.setStatus('current')
assocAge = MibTableColumn((1, 3, 6, 1, 4, 1, 232, 145, 1, 3, 1, 2, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: assocAge.setStatus('current')
assocRSSI = MibTableColumn((1, 3, 6, 1, 4, 1, 232, 145, 1, 3, 1, 2, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: assocRSSI.setStatus('current')
secMACAclMaxTableLength = MibScalar((1, 3, 6, 1, 4, 1, 232, 145, 1, 3, 2, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: secMACAclMaxTableLength.setStatus('current')
secMACAclTable = MibTable((1, 3, 6, 1, 4, 1, 232, 145, 1, 3, 2, 2), )
if mibBuilder.loadTexts: secMACAclTable.setStatus('current')
secMACAclEntry = MibTableRow((1, 3, 6, 1, 4, 1, 232, 145, 1, 3, 2, 2, 1), ).setIndexNames((0, "WL400-DOT11EXT-MIB", "secMACAclIndex"))
if mibBuilder.loadTexts: secMACAclEntry.setStatus('current')
secMACAclIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 232, 145, 1, 3, 2, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2007)))
if mibBuilder.loadTexts: secMACAclIndex.setStatus('current')
secMACAclAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 232, 145, 1, 3, 2, 2, 1, 2), MacAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: secMACAclAddress.setStatus('current')
secMACAclAllowed = MibTableColumn((1, 3, 6, 1, 4, 1, 232, 145, 1, 3, 2, 2, 1, 3), TruthValue()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: secMACAclAllowed.setStatus('current')
secMACAclRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 232, 145, 1, 3, 2, 2, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: secMACAclRowStatus.setStatus('current')
secLastError = MibScalar((1, 3, 6, 1, 4, 1, 232, 145, 1, 3, 2, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readonly")
if mibBuilder.loadTexts: secLastError.setStatus('current')
secLastErrorAddress = MibScalar((1, 3, 6, 1, 4, 1, 232, 145, 1, 3, 2, 4), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: secLastErrorAddress.setStatus('current')
securityViolation = NotificationType((1, 3, 6, 1, 4, 1, 232, 145, 1, 4, 0, 1)).setObjects(("WL400-DOT11EXT-MIB", "secLastError"), ("WL400-DOT11EXT-MIB", "secLastErrorAddress"))
if mibBuilder.loadTexts: securityViolation.setStatus('current')
roamScanType = MibScalar((1, 3, 6, 1, 4, 1, 232, 145, 1, 5, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("passive", 0), ("active", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: roamScanType.setStatus('current')
roamScanInterval = MibScalar((1, 3, 6, 1, 4, 1, 232, 145, 1, 5, 1, 2), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: roamScanInterval.setStatus('current')
roamProbeDelay = MibScalar((1, 3, 6, 1, 4, 1, 232, 145, 1, 5, 1, 3), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: roamProbeDelay.setStatus('current')
roamMinChannelTime = MibScalar((1, 3, 6, 1, 4, 1, 232, 145, 1, 5, 1, 4), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: roamMinChannelTime.setStatus('current')
roamMaxChannelTime = MibScalar((1, 3, 6, 1, 4, 1, 232, 145, 1, 5, 1, 5), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: roamMaxChannelTime.setStatus('current')
roamJoinTimeout = MibScalar((1, 3, 6, 1, 4, 1, 232, 145, 1, 5, 1, 6), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: roamJoinTimeout.setStatus('current')
roamBeaconPeriodTimeout = MibScalar((1, 3, 6, 1, 4, 1, 232, 145, 1, 5, 1, 7), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: roamBeaconPeriodTimeout.setStatus('current')
roamDontSwitch = MibScalar((1, 3, 6, 1, 4, 1, 232, 145, 1, 5, 1, 8), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: roamDontSwitch.setStatus('current')
roamBlackout = MibScalar((1, 3, 6, 1, 4, 1, 232, 145, 1, 5, 1, 9), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: roamBlackout.setStatus('current')
roamDisassociateTime = MibScalar((1, 3, 6, 1, 4, 1, 232, 145, 1, 5, 1, 10), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: roamDisassociateTime.setStatus('current')
roamHandoffTime = MibScalar((1, 3, 6, 1, 4, 1, 232, 145, 1, 5, 1, 11), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: roamHandoffTime.setStatus('current')
roamWeightMetric1 = MibScalar((1, 3, 6, 1, 4, 1, 232, 145, 1, 5, 1, 12), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: roamWeightMetric1.setStatus('current')
roamWeightMetric2 = MibScalar((1, 3, 6, 1, 4, 1, 232, 145, 1, 5, 1, 13), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: roamWeightMetric2.setStatus('current')
roamWeightMetric3 = MibScalar((1, 3, 6, 1, 4, 1, 232, 145, 1, 5, 1, 14), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: roamWeightMetric3.setStatus('current')
roamWeightMetric4 = MibScalar((1, 3, 6, 1, 4, 1, 232, 145, 1, 5, 1, 15), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: roamWeightMetric4.setStatus('current')
roamWeightMetric5 = MibScalar((1, 3, 6, 1, 4, 1, 232, 145, 1, 5, 1, 16), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: roamWeightMetric5.setStatus('current')
roamWeightMetric6 = MibScalar((1, 3, 6, 1, 4, 1, 232, 145, 1, 5, 1, 17), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: roamWeightMetric6.setStatus('current')
roamWeightMetric7 = MibScalar((1, 3, 6, 1, 4, 1, 232, 145, 1, 5, 1, 18), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: roamWeightMetric7.setStatus('current')
roamWeightMetric8 = MibScalar((1, 3, 6, 1, 4, 1, 232, 145, 1, 5, 1, 19), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: roamWeightMetric8.setStatus('current')
roamMisc1 = MibScalar((1, 3, 6, 1, 4, 1, 232, 145, 1, 5, 1, 20), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: roamMisc1.setStatus('current')
roamMisc2 = MibScalar((1, 3, 6, 1, 4, 1, 232, 145, 1, 5, 1, 21), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: roamMisc2.setStatus('current')
roamTableLength = MibScalar((1, 3, 6, 1, 4, 1, 232, 145, 1, 5, 1, 22), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: roamTableLength.setStatus('current')
roamTable = MibTable((1, 3, 6, 1, 4, 1, 232, 145, 1, 5, 1, 23), )
if mibBuilder.loadTexts: roamTable.setStatus('current')
roamEntry = MibTableRow((1, 3, 6, 1, 4, 1, 232, 145, 1, 5, 1, 23, 1), ).setIndexNames((0, "WL400-DOT11EXT-MIB", "roamIndex"))
if mibBuilder.loadTexts: roamEntry.setStatus('current')
roamIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 232, 145, 1, 5, 1, 23, 1, 1), Integer32())
if mibBuilder.loadTexts: roamIndex.setStatus('current')
roamBSSID = MibTableColumn((1, 3, 6, 1, 4, 1, 232, 145, 1, 5, 1, 23, 1, 2), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: roamBSSID.setStatus('current')
roamSSID = MibTableColumn((1, 3, 6, 1, 4, 1, 232, 145, 1, 5, 1, 23, 1, 3), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: roamSSID.setStatus('current')
roamBSSType = MibTableColumn((1, 3, 6, 1, 4, 1, 232, 145, 1, 5, 1, 23, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("infrastructure", 1), ("independent", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: roamBSSType.setStatus('current')
roamChannel = MibTableColumn((1, 3, 6, 1, 4, 1, 232, 145, 1, 5, 1, 23, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: roamChannel.setStatus('current')
roamAge = MibTableColumn((1, 3, 6, 1, 4, 1, 232, 145, 1, 5, 1, 23, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: roamAge.setStatus('current')
roamQuality = MibTableColumn((1, 3, 6, 1, 4, 1, 232, 145, 1, 5, 1, 23, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: roamQuality.setStatus('current')
roamLoad = MibTableColumn((1, 3, 6, 1, 4, 1, 232, 145, 1, 5, 1, 23, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: roamLoad.setStatus('current')
roamBeaconPeriod = MibTableColumn((1, 3, 6, 1, 4, 1, 232, 145, 1, 5, 1, 23, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: roamBeaconPeriod.setStatus('current')
roamDTIMPeriod = MibTableColumn((1, 3, 6, 1, 4, 1, 232, 145, 1, 5, 1, 23, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: roamDTIMPeriod.setStatus('current')
roamCapabilityInfo = MibTableColumn((1, 3, 6, 1, 4, 1, 232, 145, 1, 5, 1, 23, 1, 11), Bits().clone(namedValues=NamedValues(("privacy", 0), ("cfPollRequest", 1), ("cfPollable", 2), ("ibss", 3), ("ess", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: roamCapabilityInfo.setStatus('current')
roamRates = MibTableColumn((1, 3, 6, 1, 4, 1, 232, 145, 1, 5, 1, 23, 1, 12), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: roamRates.setStatus('current')
roamRSSI = MibTableColumn((1, 3, 6, 1, 4, 1, 232, 145, 1, 5, 1, 23, 1, 13), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: roamRSSI.setStatus('current')
smtGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 232, 145, 1, 1, 1, 1)).setObjects(("WL400-DOT11EXT-MIB", "smtAssociationID"), ("WL400-DOT11EXT-MIB", "smtCapabilityInfo"), ("WL400-DOT11EXT-MIB", "smtPowerSaveInterval"), ("WL400-DOT11EXT-MIB", "smtListenInterval"), ("WL400-DOT11EXT-MIB", "smtATIMWindow"), ("WL400-DOT11EXT-MIB", "smtOperationalChannels"), ("WL400-DOT11EXT-MIB", "smtCurrentBSSID"), ("WL400-DOT11EXT-MIB", "smtCurrentSSID"), ("WL400-DOT11EXT-MIB", "smtCurrentBSSType"), ("WL400-DOT11EXT-MIB", "smtPublicKeyEnable"), ("WL400-DOT11EXT-MIB", "smtQualityLevel0"), ("WL400-DOT11EXT-MIB", "smtQualityLevel1"), ("WL400-DOT11EXT-MIB", "smtQualityLevel2"), ("WL400-DOT11EXT-MIB", "smtQualityPenalty"), ("WL400-DOT11EXT-MIB", "smtStationDBTimeout"), ("WL400-DOT11EXT-MIB", "smtQualityIndicator"), ("WL400-DOT11EXT-MIB", "smtQualityUpperLimit"), ("WL400-DOT11EXT-MIB", "smtQualityLowerLimit"), ("WL400-DOT11EXT-MIB", "smtOEMCapabilityInformation"), ("WL400-DOT11EXT-MIB", "smtCWMin"), ("WL400-DOT11EXT-MIB", "smtCWMax"), ("WL400-DOT11EXT-MIB", "smtACKWindow"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
smtGroup = smtGroup.setStatus('current')
assocGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 232, 145, 1, 1, 1, 2)).setObjects(("WL400-DOT11EXT-MIB", "assocTableMaxLength"), ("WL400-DOT11EXT-MIB", "assocAddress"), ("WL400-DOT11EXT-MIB", "assocQuality"), ("WL400-DOT11EXT-MIB", "assocAge"), ("WL400-DOT11EXT-MIB", "assocRSSI"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
assocGroup = assocGroup.setStatus('current')
securityGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 232, 145, 1, 1, 1, 3)).setObjects(("WL400-DOT11EXT-MIB", "secMACAclMaxTableLength"), ("WL400-DOT11EXT-MIB", "secMACAclAddress"), ("WL400-DOT11EXT-MIB", "secMACAclAllowed"), ("WL400-DOT11EXT-MIB", "secMACAclRowStatus"), ("WL400-DOT11EXT-MIB", "secLastError"), ("WL400-DOT11EXT-MIB", "secLastErrorAddress"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
securityGroup = securityGroup.setStatus('current')
secNotificationGroup = NotificationGroup((1, 3, 6, 1, 4, 1, 232, 145, 1, 1, 1, 4)).setObjects(("WL400-DOT11EXT-MIB", "securityViolation"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
secNotificationGroup = secNotificationGroup.setStatus('current')
roamGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 232, 145, 1, 1, 1, 5)).setObjects(("WL400-DOT11EXT-MIB", "roamScanType"), ("WL400-DOT11EXT-MIB", "roamScanInterval"), ("WL400-DOT11EXT-MIB", "roamBeaconPeriodTimeout"), ("WL400-DOT11EXT-MIB", "roamBlackout"), ("WL400-DOT11EXT-MIB", "roamDisassociateTime"), ("WL400-DOT11EXT-MIB", "roamHandoffTime"), ("WL400-DOT11EXT-MIB", "roamWeightMetric1"), ("WL400-DOT11EXT-MIB", "roamWeightMetric2"), ("WL400-DOT11EXT-MIB", "roamWeightMetric3"), ("WL400-DOT11EXT-MIB", "roamWeightMetric4"), ("WL400-DOT11EXT-MIB", "roamWeightMetric5"), ("WL400-DOT11EXT-MIB", "roamWeightMetric6"), ("WL400-DOT11EXT-MIB", "roamWeightMetric7"), ("WL400-DOT11EXT-MIB", "roamWeightMetric8"), ("WL400-DOT11EXT-MIB", "roamMisc1"), ("WL400-DOT11EXT-MIB", "roamMisc2"), ("WL400-DOT11EXT-MIB", "roamProbeDelay"), ("WL400-DOT11EXT-MIB", "roamDontSwitch"), ("WL400-DOT11EXT-MIB", "roamMinChannelTime"), ("WL400-DOT11EXT-MIB", "roamMaxChannelTime"), ("WL400-DOT11EXT-MIB", "roamJoinTimeout"), ("WL400-DOT11EXT-MIB", "roamTableLength"), ("WL400-DOT11EXT-MIB", "roamSSID"), ("WL400-DOT11EXT-MIB", "roamBSSID"), ("WL400-DOT11EXT-MIB", "roamBSSType"), ("WL400-DOT11EXT-MIB", "roamChannel"), ("WL400-DOT11EXT-MIB", "roamAge"), ("WL400-DOT11EXT-MIB", "roamQuality"), ("WL400-DOT11EXT-MIB", "roamLoad"), ("WL400-DOT11EXT-MIB", "roamBeaconPeriod"), ("WL400-DOT11EXT-MIB", "roamDTIMPeriod"), ("WL400-DOT11EXT-MIB", "roamCapabilityInfo"), ("WL400-DOT11EXT-MIB", "roamRates"), ("WL400-DOT11EXT-MIB", "roamRSSI"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
roamGroup = roamGroup.setStatus('current')
dot11ExtBasicCompl = ModuleCompliance((1, 3, 6, 1, 4, 1, 232, 145, 1, 1, 2, 1)).setObjects(("WL400-DOT11EXT-MIB", "smtGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dot11ExtBasicCompl = dot11ExtBasicCompl.setStatus('current')
dot11ExtAPCompl = ModuleCompliance((1, 3, 6, 1, 4, 1, 232, 145, 1, 1, 2, 2)).setObjects(("WL400-DOT11EXT-MIB", "smtGroup"), ("WL400-DOT11EXT-MIB", "assocGroup"), ("WL400-DOT11EXT-MIB", "securityGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dot11ExtAPCompl = dot11ExtAPCompl.setStatus('current')
dot11ExtWBUCompl = ModuleCompliance((1, 3, 6, 1, 4, 1, 232, 145, 1, 1, 2, 3)).setObjects(("WL400-DOT11EXT-MIB", "roamGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dot11ExtWBUCompl = dot11ExtWBUCompl.setStatus('current')
mibBuilder.exportSymbols("WL400-DOT11EXT-MIB", roamWeightMetric8=roamWeightMetric8, smt=smt, roamCapabilityInfo=roamCapabilityInfo, security=security, roamScanInterval=roamScanInterval, PYSNMP_MODULE_ID=dot11ExtMibModule, smtATIMWindow=smtATIMWindow, roamRSSI=roamRSSI, assocEntry=assocEntry, roamBSSID=roamBSSID, secMACAclEntry=secMACAclEntry, roamChannel=roamChannel, assocQuality=assocQuality, smtCurrentBSSID=smtCurrentBSSID, smtQualityPenalty=smtQualityPenalty, smtQualityLevel2=smtQualityLevel2, roamGroup=roamGroup, dot11ExtAPObjs=dot11ExtAPObjs, roamWeightMetric1=roamWeightMetric1, roamSSID=roamSSID, roamDTIMPeriod=roamDTIMPeriod, roamBeaconPeriod=roamBeaconPeriod, smtPowerSaveInterval=smtPowerSaveInterval, roamMinChannelTime=roamMinChannelTime, roamProbeDelay=roamProbeDelay, dot11ExtWBUCompl=dot11ExtWBUCompl, dot11ExtCompl=dot11ExtCompl, smtCapabilityInfo=smtCapabilityInfo, roam=roam, smtOEMCapabilityInformation=smtOEMCapabilityInformation, assocIndex=assocIndex, roamMisc2=roamMisc2, roamBSSType=roamBSSType, secMACAclIndex=secMACAclIndex, smtPublicKeyEnable=smtPublicKeyEnable, roamBlackout=roamBlackout, dot11ExtWBUObjs=dot11ExtWBUObjs, roamTable=roamTable, smtACKWindow=smtACKWindow, roamDontSwitch=roamDontSwitch, roamWeightMetric7=roamWeightMetric7, roamIndex=roamIndex, roamMaxChannelTime=roamMaxChannelTime, smtQualityUpperLimit=smtQualityUpperLimit, roamMisc1=roamMisc1, smtCurrentSSID=smtCurrentSSID, securityViolation=securityViolation, dot11ExtAPCompl=dot11ExtAPCompl, roamRates=roamRates, assocAge=assocAge, smtGroup=smtGroup, secMACAclTable=secMACAclTable, smtQualityIndicator=smtQualityIndicator, roamQuality=roamQuality, dot11ExtGenObjs=dot11ExtGenObjs, secMACAclAddress=secMACAclAddress, assocAddress=assocAddress, assoc=assoc, dot11ExtEventsV2=dot11ExtEventsV2, roamScanType=roamScanType, secNotificationGroup=secNotificationGroup, roamWeightMetric2=roamWeightMetric2, secMACAclMaxTableLength=secMACAclMaxTableLength, roamJoinTimeout=roamJoinTimeout, 
smtAssociationID=smtAssociationID, smtQualityLevel0=smtQualityLevel0, smtQualityLevel1=smtQualityLevel1, secLastError=secLastError, assocRSSI=assocRSSI, dot11ExtGroups=dot11ExtGroups, smtOperationalChannels=smtOperationalChannels, roamWeightMetric3=roamWeightMetric3, smtCWMin=smtCWMin, roamBeaconPeriodTimeout=roamBeaconPeriodTimeout, secMACAclRowStatus=secMACAclRowStatus, secLastErrorAddress=secLastErrorAddress, smtStationDBTimeout=smtStationDBTimeout, securityGroup=securityGroup, roamLoad=roamLoad, smtCurrentBSSType=smtCurrentBSSType, roamDisassociateTime=roamDisassociateTime, secMACAclAllowed=secMACAclAllowed, assocGroup=assocGroup, roamAge=roamAge, dot11ExtMibModule=dot11ExtMibModule, roamEntry=roamEntry, smtListenInterval=smtListenInterval, roamWeightMetric6=roamWeightMetric6, assocTableMaxLength=assocTableMaxLength, smtCWMax=smtCWMax, roamWeightMetric5=roamWeightMetric5, smtQualityLowerLimit=smtQualityLowerLimit, dot11ExtBasicCompl=dot11ExtBasicCompl, roamTableLength=roamTableLength, dot11ExtConf=dot11ExtConf, dot11ExtMIB=dot11ExtMIB, roamWeightMetric4=roamWeightMetric4, dot11ExtEvents=dot11ExtEvents, roamHandoffTime=roamHandoffTime, assocTable=assocTable)
| 117.539906 | 3,165 | 0.735022 |
acead230b3f68c2f0630045a97134786838d4ae3 | 1,226 | py | Python | setup.py | Bhanuprakash-ch/apployer | 8638102c6b8bfbe868680cc65da3331f09f36e70 | [
"Apache-2.0"
] | 2 | 2017-02-17T09:21:25.000Z | 2018-11-14T05:09:09.000Z | setup.py | Bhanuprakash-ch/apployer | 8638102c6b8bfbe868680cc65da3331f09f36e70 | [
"Apache-2.0"
] | 2 | 2016-04-14T15:21:43.000Z | 2016-07-01T08:50:00.000Z | setup.py | Bhanuprakash-ch/apployer | 8638102c6b8bfbe868680cc65da3331f09f36e70 | [
"Apache-2.0"
] | 2 | 2016-04-03T22:51:17.000Z | 2020-01-22T18:14:08.000Z | #
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from setuptools import setup, find_packages
project_name = 'apployer'
version = '0.0.1'

# Directory containing this setup script, so data files resolve correctly
# regardless of the current working directory.
setup_dir = os.path.dirname(os.path.abspath(__file__))

# Extract bare package names from requirements.txt: drop "==version" pins,
# trailing whitespace/newlines, and blank lines (the original kept the
# newline on unpinned lines and produced empty entries for blank lines).
with open(os.path.join(setup_dir, 'requirements.txt')) as req_file:
    requirements = [line.split('==')[0].strip()
                    for line in req_file
                    if line.strip()]

with open(os.path.join(setup_dir, 'README.md')) as readme_file:
    readme = readme_file.read()

setup(
    name=project_name,
    version=version,
    packages=find_packages(exclude=['tests*']),
    install_requires=requirements,
    # Pass the README through; it was read but never used before.
    long_description=readme,
    entry_points={'console_scripts': ['{0} = {0}.main:cli'.format(project_name)]},
    license='Apache 2.0')
| 32.263158 | 82 | 0.731648 |
acead244a83498f2c4b3061aaceba7ab0dce588f | 620 | py | Python | sniffersapp/daily_dockets/migrations/0024_auto_20180902_1913.py | jamesokane/Oneworksite-Application | 1749ffa89430be75394ae0d43905f3dd30a24fc6 | [
"MIT"
] | null | null | null | sniffersapp/daily_dockets/migrations/0024_auto_20180902_1913.py | jamesokane/Oneworksite-Application | 1749ffa89430be75394ae0d43905f3dd30a24fc6 | [
"MIT"
] | 7 | 2020-06-05T19:27:52.000Z | 2022-03-11T23:34:52.000Z | sniffersapp/daily_dockets/migrations/0024_auto_20180902_1913.py | jamesokane/Oneworksite-Application | 1749ffa89430be75394ae0d43905f3dd30a24fc6 | [
"MIT"
] | null | null | null | # Generated by Django 2.0.6 on 2018-09-02 09:13
from django.db import migrations, models
class Migration(migrations.Migration):
    """Renames ``Docket.equipment_hours`` to ``equipment_start_hours`` and
    adds a new required ``equipment_finish_hours`` char field."""

    dependencies = [
        ('daily_dockets', '0023_auto_20180902_1723'),
    ]
    operations = [
        migrations.RenameField(
            model_name='docket',
            old_name='equipment_hours',
            new_name='equipment_start_hours',
        ),
        migrations.AddField(
            model_name='docket',
            name='equipment_finish_hours',
            # 'TEST' back-fills existing rows only; preserve_default=False
            # removes the default from the model afterwards.
            field=models.CharField(default='TEST', max_length=80),
            preserve_default=False,
        ),
    ]
| 24.8 | 66 | 0.6 |
acead32d3bcb3b07ecfc50c4ac14c9cd22b3b742 | 7,140 | py | Python | tensorflow/python/kernel_tests/sparse_valid_cutoff_op_test.py | aalbersk/DeepRec | f673a950780959b44dcda99398880a1d883ab338 | [
"Apache-2.0"
] | 292 | 2021-12-24T03:24:33.000Z | 2022-03-31T15:41:05.000Z | tensorflow/python/kernel_tests/sparse_valid_cutoff_op_test.py | aalbersk/DeepRec | f673a950780959b44dcda99398880a1d883ab338 | [
"Apache-2.0"
] | 54 | 2021-12-24T06:40:09.000Z | 2022-03-30T07:57:24.000Z | tensorflow/python/kernel_tests/sparse_valid_cutoff_op_test.py | aalbersk/DeepRec | f673a950780959b44dcda99398880a1d883ab338 | [
"Apache-2.0"
] | 75 | 2021-12-24T04:48:21.000Z | 2022-03-29T10:13:39.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SparseValidCutoff."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import sparse_ops
import tensorflow.python.ops.sparse_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class SparseSliceOpTest(test.TestCase):
  """Tests for sparse_valid_cutoff on 2-D and 3-D SparseTensors.

  NOTE(review): the class name says "Slice" but it exercises
  sparse_valid_cutoff, per the module docstring — consider renaming in a
  follow-up (kept here to avoid interface changes).
  """

  def _SparseTensor_4x6(self, val_dtype=np.int64):
    """Builds a 4x6 SparseTensor whose values encode their (row, col) position."""
    # [0 | |2 | |4 |5 ]
    # [ |11| |13|14| ]
    # [20| | |23| |25]
    # [30| |32|33| |35]
    ind = np.array([[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3], [1, 4],
                    [2, 0], [2, 3], [2, 5], [3, 0], [3, 2], [3, 3],
                    [3, 5]]).astype(np.int64)
    val = np.array([0, 2, 4, 5, 11, 13, 14, 20, 23, 25, 30, 32, 33,
                    35]).astype(val_dtype)
    shape = np.array([4, 6]).astype(np.int64)
    return sparse_tensor.SparseTensor(ind, val, shape)

  def _SparseTensorValue_3x4x2(self):
    """Builds a 3x4x2 string SparseTensorValue ('a0'..'e1')."""
    # slice(:,:, 0)
    # ['a0'| |'b0'| ]
    # [ |'c0'| |'d0']
    # [ | |'e0'| ]
    # slice(:,:, 1)
    # ['a1'| |'b1'| ]
    # [ |'c1'| |'d1']
    # [ | |'e1'| ]
    ind = np.array([[0, 0, 0], [0, 0, 1], [0, 2, 0], [0, 2, 1], [1, 1, 0],
                    [1, 1, 1], [1, 3, 0], [1, 3, 1], [2, 2, 0],
                    [2, 2, 1]]).astype(np.int64)
    val = np.array(['a0', 'a1', 'b0', 'b1', 'c0', 'c1', 'd0', 'd1', 'e0', 'e1'])
    shape = np.array([3, 4, 2]).astype(np.int64)
    return sparse_tensor.SparseTensorValue(ind, val, shape)

  def _SparseTensor_3x4x2(self):
    """SparseTensor counterpart of _SparseTensorValue_3x4x2."""
    return sparse_tensor.SparseTensor.from_value(
        self._SparseTensorValue_3x4x2())

  def testCutoff2D(self):
    """Cuts axis 1 of a 4x6 tensor from the right/left to lengths 4 and 8."""
    with self.test_session(use_gpu=False):
      sp_input = self._SparseTensor_4x6()
      sparse_tensor0 = sparse_ops.sparse_valid_cutoff(sp_input,
                                                      1,
                                                      4,
                                                      side="right")
      sparse_tensor1 = sparse_ops.sparse_valid_cutoff(sp_input,
                                                      1,
                                                      4,
                                                      side="left")
      # Cutoff length 8 exceeds the axis size 6: entries are unchanged,
      # only the dense shape grows.
      sparse_tensor2 = sparse_ops.sparse_valid_cutoff(sp_input,
                                                      1,
                                                      8,
                                                      side="left")
      self.assertAllEqual(sparse_tensor0.indices.eval(),
          [[0, 0], [0, 2], [1, 1], [1, 3], [2, 0], [2, 3], [3, 0], [3, 2], [3, 3]])
      self.assertAllEqual(sparse_tensor0.values.eval(), [0, 2, 11, 13, 20, 23, 30, 32, 33])
      self.assertAllEqual(sparse_tensor0.dense_shape.eval(), [4, 4])
      self.assertAllEqual(sparse_tensor1.indices.eval(),
          [[0, 0], [0, 2], [0, 3], [1, 0], [1, 2], [1, 3], [2, 1], [2, 3], [3, 0], [3, 1], [3, 3]])
      self.assertAllEqual(sparse_tensor1.values.eval(), [2, 4, 5, 11, 13, 14, 23, 25, 32, 33, 35])
      self.assertAllEqual(sparse_tensor1.dense_shape.eval(), [4, 4])
      self.assertAllEqual(sparse_tensor2.indices.eval(),
          [[0, 0], [0, 2], [0, 4], [0, 5],
           [1, 1], [1, 3], [1, 4],
           [2, 0], [2, 3], [2, 5],
           [3, 0], [3, 2], [3, 3], [3, 5]])
      self.assertAllEqual(sparse_tensor2.values.eval(),
          [0, 2, 4, 5,
           11, 13, 14,
           20, 23, 25,
           30, 32, 33, 35])
      self.assertAllEqual(sparse_tensor2.dense_shape.eval(), [4, 8])

  def testCutoff3D(self):
    """Cuts axes 1 and 2 of a 3x4x2 tensor, including the reverse option."""
    with self.test_session(use_gpu=False):
      sp_input = self._SparseTensor_3x4x2()
      sparse_tensor0 = sparse_ops.sparse_valid_cutoff(sp_input,
                                                      1,
                                                      2,
                                                      side="right")
      sparse_tensor1 = sparse_ops.sparse_valid_cutoff(sp_input,
                                                      1,
                                                      2,
                                                      side="left")
      sparse_tensor2 = sparse_ops.sparse_valid_cutoff(sp_input,
                                                      2,
                                                      1,
                                                      side="right")
      sparse_tensor3 = sparse_ops.sparse_valid_cutoff(sp_input,
                                                      1,
                                                      2,
                                                      side="right",
                                                      reverse=True)
      self.assertAllEqual(sparse_tensor0.indices.eval(),
          [[0, 0, 0], [0, 0, 1], [1, 1, 0], [1, 1, 1]])
      self.assertAllEqual(sparse_tensor0.values.eval(), [b'a0', b'a1', b'c0', b'c1'])
      self.assertAllEqual(sparse_tensor0.dense_shape.eval(), [3, 2, 2])
      self.assertAllEqual(sparse_tensor1.indices.eval(),
          [[0, 1, 0], [0, 1, 1], [1, 1, 0], [1, 1, 1], [2, 1, 0], [2, 1, 1]])
      self.assertAllEqual(sparse_tensor1.values.eval(), [b'b0', b'b1', b'd0', b'd1', b'e0', b'e1'])
      self.assertAllEqual(sparse_tensor1.dense_shape.eval(), [3, 2, 2])
      self.assertAllEqual(sparse_tensor2.indices.eval(),
          [[0, 0, 0], [0, 2, 0], [1, 1, 0], [1, 3, 0], [2, 2, 0]])
      self.assertAllEqual(sparse_tensor2.values.eval(), [b'a0', b'b0', b'c0', b'd0', b'e0'])
      self.assertAllEqual(sparse_tensor2.dense_shape.eval(), [3, 4, 1])
      self.assertAllEqual(sparse_tensor3.indices.eval(),
          [[0, 0, 0], [0, 0, 1], [1, 0, 0], [1, 0, 1]])
      self.assertAllEqual(sparse_tensor3.values.eval(), [b'a0', b'a1', b'c0', b'c1'])
      self.assertAllEqual(sparse_tensor3.dense_shape.eval(), [3, 2, 2])
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
| 50.28169 | 115 | 0.452381 |
acead49976a05fabd13a553b9dd578d65090678f | 635 | py | Python | backend/manage.py | crowdbotics-apps/old-scene-29688 | 4b269c434c6f3d1e9bcb78c92ea1806d8b2c9505 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/manage.py | crowdbotics-apps/old-scene-29688 | 4b269c434c6f3d1e9bcb78c92ea1806d8b2c9505 | [
"FTL",
"AML",
"RSA-MD"
] | 52 | 2021-08-10T12:03:10.000Z | 2022-03-20T17:37:41.000Z | backend/manage.py | crowdbotics-apps/old-scene-29688 | 4b269c434c6f3d1e9bcb78c92ea1806d8b2c9505 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django's command-line utility with this project's settings module."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'old_scene_29688.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as import_error:
        # Re-raise with a friendlier hint while chaining the original cause.
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from import_error
    execute_from_command_line(sys.argv)
# Entry point when invoked as `python manage.py <command>`.
if __name__ == '__main__':
    main()
| 28.863636 | 79 | 0.686614 |
acead547d8d552a29897e65e9562dfd952b74dfa | 2,185 | py | Python | vc_vidyo/indico_vc_vidyo/models/vidyo_extensions.py | aloysiuszeno/indico-plugins | 9b489292425284e717fd67b73d65c08da825f07c | [
"MIT"
] | null | null | null | vc_vidyo/indico_vc_vidyo/models/vidyo_extensions.py | aloysiuszeno/indico-plugins | 9b489292425284e717fd67b73d65c08da825f07c | [
"MIT"
] | null | null | null | vc_vidyo/indico_vc_vidyo/models/vidyo_extensions.py | aloysiuszeno/indico-plugins | 9b489292425284e717fd67b73d65c08da825f07c | [
"MIT"
] | null | null | null | # This file is part of the Indico plugins.
# Copyright (C) 2002 - 2019 CERN
#
# The Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License;
# see the LICENSE file for more details.
from __future__ import unicode_literals
import urllib
from sqlalchemy.event import listens_for
from sqlalchemy.orm.attributes import flag_modified
from indico.core.db.sqlalchemy import db
from indico.util.string import return_ascii
class VidyoExtension(db.Model):
    """Vidyo-specific data attached to a videoconference room.

    Links a VC room to its Vidyo extension number and the user who owns
    the Vidyo room.
    """

    __tablename__ = 'vidyo_extensions'
    __table_args__ = {'schema': 'plugin_vc_vidyo'}

    #: ID of the videoconference room
    vc_room_id = db.Column(
        db.Integer,
        db.ForeignKey('events.vc_rooms.id'),
        primary_key=True
    )
    #: Vidyo extension number of the room
    extension = db.Column(
        db.BigInteger,
        index=True
    )
    #: ID of the user owning the Vidyo room
    owned_by_id = db.Column(
        db.Integer,
        db.ForeignKey('users.users.id'),
        index=True,
        nullable=False
    )
    vc_room = db.relationship(
        'VCRoom',
        lazy=False,
        backref=db.backref(
            'vidyo_extension',
            cascade='all, delete-orphan',
            uselist=False,
            lazy=False
        )
    )
    #: The user who owns the Vidyo room
    owned_by_user = db.relationship(
        'User',
        lazy=True,
        backref=db.backref(
            'vc_rooms_vidyo',
            lazy='dynamic'
        )
    )

    @property
    def join_url(self):
        """URL participants use to join; wrapped in the client-chooser URL
        when one is configured in the plugin settings."""
        from indico_vc_vidyo.plugin import VidyoPlugin
        url = self.vc_room.data['url']
        custom_url_tpl = VidyoPlugin.settings.get('client_chooser_url')
        if custom_url_tpl:
            return custom_url_tpl + '?' + urllib.urlencode({'url': url})
        return url

    @return_ascii
    def __repr__(self):
        return '<VidyoExtension({}, {}, {})>'.format(self.vc_room, self.extension, self.owned_by_user)
@listens_for(VidyoExtension.owned_by_user, 'set')
def _owned_by_user_set(target, user, *unused):
    """Keep the owner stored inside the VC room's JSON ``data`` in sync
    whenever ``owned_by_user`` is (re)assigned."""
    if target.vc_room and user.as_principal != tuple(target.vc_room.data['owner']):
        target.vc_room.data['owner'] = user.as_principal
        # 'data' is mutated in place; flag it so SQLAlchemy persists the change.
        flag_modified(target.vc_room, 'data')
| 27.658228 | 102 | 0.642563 |
acead5ca085ad976392d05825ec73c5fa813641e | 1,423 | py | Python | egghead/python/files.py | broox9/learning | bd2078f4b77cf456a352835864afe53ac1fa3466 | [
"MIT"
] | null | null | null | egghead/python/files.py | broox9/learning | bd2078f4b77cf456a352835864afe53ac1fa3466 | [
"MIT"
] | 3 | 2020-09-04T03:25:36.000Z | 2021-05-06T22:45:17.000Z | egghead/python/files.py | broox9/learning | bd2078f4b77cf456a352835864afe53ac1fa3466 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# ### READING
# # implied read and manual close
# f = open('animals.csv')
#
# for line in f:
# print(line)
#
# # implied close, manual 'r' for read
# with open('animals.csv', 'r') as file:
# print(file.read())
# nice csv and JSON packages
import csv
import json
# Read a CSV whose last column flags housebroken animals.
with open('animals.csv', 'r') as f:
    animals = csv.reader(f)
    for row in animals:
        # csv.reader yields strings, so the flag is compared as text.
        if row[-1] == 'True':
            print(f"{row[0]} the {row[1]} is allowed in the house")
        else:
            print(f"don't let {row[0]} the {row[1]} in this house!!")
# Same data, JSON flavour: a list of objects with name/type/housebroken keys.
with open('animals.json', 'r') as j:
    data = json.load(j)
    for obj in data:
        if obj['housebroken'] == 'True':
            print(f"{obj['name']} the {obj['type']} is allowed inside")
        else:
            print(f"{obj['name']} the {obj['type']} has to stay outside!!")
### WRITING
# 'w' overwrites, 'a' appends
w = open('cars.txt', 'w')
cars1 = ['bmw', 'lexus', 'mercedes']
for car in cars1:
    # at this point the file is emptied, but not written to just buffered
    w.write(car + '\n')
w.close() # actual write happens on close (flush + release of the handle)
# Appending with a context manager closes the file automatically.
with open('cars.txt', 'a') as a:
    cars = ['chevy', 'ford', 'vw']
    for car in cars:
        a.write(car + '\n')
# Serialize a list of dicts straight to a JSON file.
with open('cars.json', 'w') as wj:
    cars = [
        {"make": "chevy"},
        {"make": "tesla"},
        {"make": "amc"},
        {"make": "scion"}
    ]
    json.dump(cars, wj)
| 23.716667 | 75 | 0.543219 |
acead76317b4e03412c7133db795f31c5c0f43d4 | 145 | py | Python | print_matrix.py | Guigui14460/functional-analysis-project | 6c3bb53a991d8290c7a592e85e4707ce210ad966 | [
"MIT"
] | null | null | null | print_matrix.py | Guigui14460/functional-analysis-project | 6c3bb53a991d8290c7a592e85e4707ce210ad966 | [
"MIT"
] | null | null | null | print_matrix.py | Guigui14460/functional-analysis-project | 6c3bb53a991d8290c7a592e85e4707ce210ad966 | [
"MIT"
] | null | null | null | def print_matrix(matrix, string_format="{:.2f}"):
for row in matrix:
print("\t".join([string_format.format(float(e)) for e in row]))
| 36.25 | 71 | 0.648276 |
acead82e8b85617803901690496495014afd508e | 9,208 | py | Python | yetagain/models.py | felixbrunner/yetagain | edae7b650d143b237fc02c3dd75dd9c2453d0e13 | [
"BSD-3-Clause"
] | null | null | null | yetagain/models.py | felixbrunner/yetagain | edae7b650d143b237fc02c3dd75dd9c2453d0e13 | [
"BSD-3-Clause"
] | null | null | null | yetagain/models.py | felixbrunner/yetagain | edae7b650d143b237fc02c3dd75dd9c2453d0e13 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import scipy as sp
import warnings
import copy
# from scipy.stats import norm
from yetagain.dists import NormalDistribution, MixtureDistribution, StudentTDistribution
from yetagain.estimation import EstimationMixin
class ModelMixin:
    '''Mixin providing prediction, sampling, forecasting and scoring helpers.

    Classes mixing this in are expected to expose a ``distribution``
    attribute/property (supporting ``copy``, ``draw`` and ``pdf``) and the
    summary statistics ``mean``, ``mode``, ``median``, ``var`` and ``std``.
    '''

    def __init__(self):
        # Set to True by estimation routines once parameters are fitted.
        self.is_fitted = False

    def errors(self, y, X=None, method='mean'):
        '''Returns errors made by the model when predicting input data.'''
        assert method != 'distribution', \
            'distribution not an allowed prediction method to calculate errors'
        return y - self.predict(y=y, X=X, method=method)

    def squared_errors(self, y, X=None, method='mean'):
        '''Returns squared errors made by the model when predicting input data.'''
        return self.errors(y=y, X=X, method=method) ** 2

    def iterate(self, steps=1):
        '''Iterates the model forward the input number of steps.

        The base model is time-invariant, so it is returned unchanged;
        dynamic models should override this.
        '''
        return self

    def copy(self):
        '''Returns a deep copy with new memory address.'''
        return copy.deepcopy(self)

    def __repr__(self):
        return str(self)

    @property
    def params_(self):
        '''The fitted parameters; available only after fitting.'''
        assert self.is_fitted, \
            'Model has no fitted parameters.'
        return self.params

    def predict(self, y, X=None, method='distribution', **kwargs):
        '''Returns one prediction per observation in ``y``.

        ``method`` selects what is predicted: ``'distribution'`` (default)
        yields one distribution copy per observation, otherwise the chosen
        summary statistic is broadcast to the sample's shape.
        '''
        y = np.atleast_1d(y)
        # fix: compare to None with `is`, not `==`
        if method is None or method == 'distribution':
            return [self.distribution.copy() for _ in y]
        elif method == 'mean':
            return np.full(shape=y.shape, fill_value=self.mean)
        elif method == 'mode':
            return np.full(shape=y.shape, fill_value=self.mode)
        elif method == 'median':
            return np.full(shape=y.shape, fill_value=self.median)
        elif method == 'var':
            return np.full(shape=y.shape, fill_value=self.var)
        elif method == 'std':
            return np.full(shape=y.shape, fill_value=self.std)
        else:
            raise NotImplementedError('Prediction method not implemented')

    def draw(self, size=1, return_distributions=False):
        '''Draw a random sequence of specified length.

        If ``return_distributions`` is True, also returns the distribution
        (a single copy for size 1, else one copy per draw).
        '''
        sample = self.distribution.draw(size=size)
        if not return_distributions:
            return sample
        # fix: value comparison (`==`) instead of identity (`is`), which only
        # worked due to CPython's small-integer caching
        if size == 1:
            distributions = self.distribution.copy()
        else:
            distributions = [self.distribution.copy() for _ in range(size)]
        return (sample, distributions)

    def forecast(self, horizons=(1,), method=None, **kwargs):
        '''Returns a forecast for each requested horizon.

        ``method`` selects the forecast statistic; None/'distribution'
        returns the forecast distribution itself.
        '''
        # note: default changed from the mutable `[1]` to the equivalent
        # immutable `(1,)`; np.atleast_1d yields the same array either way
        horizons = np.atleast_1d(horizons)
        forecasts = []
        for horizon in horizons:
            distribution = self.iterate(horizon).distribution
            if method is None or method == 'distribution':
                forecasts += [distribution]
            elif method == 'mean':
                forecasts += [distribution.mean]
            elif method == 'mode':
                forecasts += [distribution.mode]
            elif method == 'median':
                forecasts += [distribution.median]
            elif method == 'var':
                forecasts += [distribution.var]
            elif method == 'std':
                forecasts += [distribution.std]
            else:
                raise NotImplementedError('Forecast method not implemented')
        return forecasts

    def likelihood(self, y, X=None, **kwargs):
        '''Returns the likelihoods of the observations in a sample.'''
        distributions = self.predict(y=y, X=X, **kwargs)
        return [dist_t.pdf(y_t) for y_t, dist_t in zip(y, distributions)]

    def score(self, y, X=None, weights=None, **kwargs):
        '''Returns the (weighted) log-likelihood of a sample.'''
        if weights is None:
            weights = np.ones(np.array(y).shape)
        else:
            weights = np.array(weights)
        return (weights * np.log(self.likelihood(y=y, X=X, weights=weights, **kwargs))).sum()
class NormalModel(ModelMixin, EstimationMixin, NormalDistribution):
    '''i.i.d. normal distribution model.'''

    def __init__(self, mu=0, sigma=1):
        # location (mean) parameter
        self.mu = mu
        # scale (standard deviation) parameter
        self.sigma = sigma
        ModelMixin.__init__(self)

    @property
    def params(self):
        '''The model parameters as a dict.'''
        params = {'mu': self.mu,
                  'sigma': self.sigma}
        return params

    @params.setter
    def params(self, params):
        for k, v in params.items():
            setattr(self, k, v)

    def _step(self, y, X, weights):
        '''Performs one estimation step.
        Recalculates the distribution mean and variance.
        '''
        # estimate mean (weighted sample mean)
        mean = np.average(y, weights=weights)
        self.mu = float(mean)
        # estimate variance (weighted mean of squared errors around new mu)
        errors = self.squared_errors(y)
        variance = np.average(errors, weights=weights)
        self.sigma = float(np.sqrt(variance))
        # set status: closed-form solution, so a single step converges
        self.converged = True

    @property
    def distribution(self):
        '''Extracts and returns a NormalDistribution object
        with the the same parameters as the model.
        '''
        norm = NormalDistribution(mu=self.mu, sigma=self.sigma)
        return norm

    def __str__(self):
        '''Returns a summarizing string.'''
        string = 'NormalModel(mu={}, sigma={})'.format(round(self.mu, 4), round(self.sigma, 4))
        return string
class StudentTModel(ModelMixin, EstimationMixin, StudentTDistribution):
    '''i.i.d. Student's t distribution model, fitted via an EM algorithm.'''

    def __init__(self, mu=0, sigma=1, df=np.inf):
        # location parameter
        self.mu = mu
        # scale parameter
        self.sigma = sigma
        # degrees of freedom; np.inf corresponds to the normal limit
        self.df = df
        ModelMixin.__init__(self)

    @property
    def params(self):
        '''The model parameters as a dict.'''
        params = {'df': self.df,
                  'mu': self.mu,
                  'sigma': self.sigma}
        return params

    @params.setter
    def params(self, params):
        for k, v in params.items():
            setattr(self, k, v)

    def _e_step(self, y):
        '''Performs the expectation step to update estimation weights.'''
        # initialise the EM algorithm with the equally weighted scipy implementation
        if self.iteration == 0:
            (self.df, self.mu, self.sigma) = sp.stats.t.fit(y)
        # update weights (per-observation latent precision weights of the
        # t distribution's normal-mixture representation)
        w_ = ((self.df+1)*self.sigma**2) \
            / (self.df*self.sigma**2 + self.squared_errors(y))
        self.w_ = w_

    def _m_step(self, y, weights):
        '''Performs the maximisation step to update location and
        scale of the distribution.
        '''
        # update mu: weighted mean, combining sample weights with EM weights
        self.mu = np.average(y, weights=weights*self.w_)
        # update sigma
        squared_errors = self.squared_errors(y)
        self.sigma = np.sqrt(np.average(squared_errors*self.w_, weights=weights))
        # update df: solve the first-order condition for the degrees of
        # freedom numerically, starting from the current df
        const = 1 - np.log((self.df+1)/2) + np.average(np.log(self.w_)-self.w_, weights=weights) + sp.special.digamma((self.df+1)/2)
        fun = lambda df: np.log(df/2) - sp.special.digamma(df/2) + const
        self.df = sp.optimize.fsolve(fun, self.df)[0]

    def _step(self, y, X, weights):
        '''Performs one estimation step.
        Recalculates the distribution mean and variance.
        '''
        self._e_step(y)
        self._m_step(y, weights)

    @property
    def distribution(self):
        '''Extracts and returns a StudentTDistribution object
        with the the same parameters as the model.
        '''
        distribution = StudentTDistribution(mu=self.mu,
                                            sigma=self.sigma,
                                            df=self.df)
        return distribution

    def __str__(self):
        '''Returns a summarizing string.'''
        string = 'StudentTModel(mu={}, sigma={}, df={})'\
            .format(round(self.mu, 4), round(self.sigma, 4), round(self.df, 4))
        return string
class MixtureModel(ModelMixin, MixtureDistribution):
    '''mixture model of arbitrary distributions.

    ``components`` is a list of (component, weight) pairs, as consumed by
    ``__str__`` below.
    '''

    # NOTE(review): mutable default argument — all instances constructed
    # without arguments share the same list; consider `components=None`.
    def __init__(self, components=[]):
        self.components = components

    def fit(self, Y, weights=None, method='em'):
        '''Not yet implemented; intended to use an EM algorithm.'''
        ### use EM algorithm
        raise NotImplementedError('fit method not implemented')

    @property
    def distribution(self):
        raise NotImplementedError('distribution not implemented')

    def __str__(self):
        '''Returns a summarizing string'''
        string = 'MixtureModel(\n'
        for (component, weight) in self.components:
            string += '\t {}, weight={},\n'.format(component.__str__(), weight)
        string += ')'
        return string
acead96e737a06dde6f1eafb97b311434a1098b9 | 1,520 | py | Python | saleor/graphql/account/mutations/deprecated_staff.py | jduan/saleor | a03a8d0a19eaeb3df6f60c21f3194b7d802c75cb | [
"CC-BY-4.0"
] | null | null | null | saleor/graphql/account/mutations/deprecated_staff.py | jduan/saleor | a03a8d0a19eaeb3df6f60c21f3194b7d802c75cb | [
"CC-BY-4.0"
] | null | null | null | saleor/graphql/account/mutations/deprecated_staff.py | jduan/saleor | a03a8d0a19eaeb3df6f60c21f3194b7d802c75cb | [
"CC-BY-4.0"
] | null | null | null | import graphene
from django.conf import settings
from django.contrib.auth.tokens import default_token_generator
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode
from ....account import emails, models
from ...core.mutations import BaseMutation
def send_user_password_reset_email(user, site):
    """Queue a password-reset email for *user*, branded for *site*.

    Builds the uid/token pair consumed by Django's password-reset view and
    dispatches the email asynchronously via the celery task.
    """
    context = {
        "email": user.email,
        "uid": urlsafe_base64_encode(force_bytes(user.pk)),
        "token": default_token_generator.make_token(user),
        "site_name": site.name,
        "domain": site.domain,
        "protocol": "https" if settings.ENABLE_SSL else "http",
    }
    emails.send_password_reset_email.delay(context, user.email, user.pk)
class PasswordReset(BaseMutation):
    """Deprecated staff-only mutation that emails a password-reset link."""

    class Arguments:
        email = graphene.String(description="Email", required=True)

    class Meta:
        # NOTE(review): the two sentences concatenate without a separating
        # space in the rendered description.
        description = (
            "DEPRECATED: Use RequestPasswordReset instead."
            "Sends an email with the account password change link to customer."
        )
        permissions = ("account.manage_users",)

    @classmethod
    def perform_mutation(cls, _root, info, email):
        # Look up the target account; surface a field-level validation error
        # if no user matches the given email.
        try:
            user = models.User.objects.get(email=email)
        except ObjectDoesNotExist:
            raise ValidationError({"email": "User with this email doesn't exist"})
        site = info.context.site
        send_user_password_reset_email(user, site)
        return PasswordReset()
| 34.545455 | 82 | 0.697368 |
acead997652694cce86fec4ed4d4fb10f66dfb42 | 357 | py | Python | venv/Lib/site-packages/pip/__init__.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 150 | 2021-11-02T05:31:51.000Z | 2022-03-24T06:22:22.000Z | venv/Lib/site-packages/pip/__init__.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 16 | 2022-02-01T06:06:00.000Z | 2022-02-01T06:21:40.000Z | venv/Lib/site-packages/pip/__init__.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 33 | 2021-11-03T00:29:41.000Z | 2022-03-15T13:15:56.000Z | from typing import List, Optional
__version__ = "21.3.1"
def main(args: Optional[List[str]] = None) -> int:
    """This is an internal API only meant for use by pip's own console scripts.

    For additional details, see https://github.com/pypa/pip/issues/7498.
    """
    # Imported lazily so `import pip` stays cheap and side-effect free.
    from pip._internal.utils.entrypoints import _wrapper

    return _wrapper(args)
| 25.5 | 79 | 0.703081 |
aceada31f9adf5c9466c0c905176c498085fa07a | 421 | py | Python | plotly/validators/violin/box/_fillcolor.py | faezs/plotly.py | 6009b5b9c746e5d2a2849ad255a4eb234b551ed7 | [
"MIT"
] | 2 | 2020-03-24T11:41:14.000Z | 2021-01-14T07:59:43.000Z | plotly/validators/violin/box/_fillcolor.py | faezs/plotly.py | 6009b5b9c746e5d2a2849ad255a4eb234b551ed7 | [
"MIT"
] | null | null | null | plotly/validators/violin/box/_fillcolor.py | faezs/plotly.py | 6009b5b9c746e5d2a2849ad255a4eb234b551ed7 | [
"MIT"
] | 4 | 2019-06-03T14:49:12.000Z | 2022-01-06T01:05:12.000Z | import _plotly_utils.basevalidators
class FillcolorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Color validator for the ``violin.box.fillcolor`` attribute
    (plotly auto-generated validator; edit_type/role are 'style')."""

    def __init__(
        self, plotly_name='fillcolor', parent_name='violin.box', **kwargs
    ):
        super(FillcolorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type='style',
            role='style',
            **kwargs
        )
aceadb0cfd8ac0fa1d260b0b66fc972c16ad5666 | 11,725 | py | Python | quranpy/surah.py | niztg/quranpy | 90513cc54be12d8becf42ec680b6d372873b34bc | [
"MIT"
] | null | null | null | quranpy/surah.py | niztg/quranpy | 90513cc54be12d8becf42ec680b6d372873b34bc | [
"MIT"
] | null | null | null | quranpy/surah.py | niztg/quranpy | 90513cc54be12d8becf42ec680b6d372873b34bc | [
"MIT"
] | null | null | null | """
﷽
Alhamdulillah.
"""
from typing import Optional, List, Union, Iterable
from requests import get as request
from .enums import Editions, Chapters
from .exceptions import (
SurahNotFound,
IncorrectAyahArguments,
IncorrectPageNumber,
IncorrectJuzNumber,
SearchError
)
__all__ = (
'Surah',
'Verse',
'Page',
'Juz',
'Search',
'show_verses',
'Ayah',
'Chapter'
)
_URL = "http://api.alquran.cloud/v1/{0}/{1}/{2}"
SEARCH_URL = "http://api.alquran.cloud/v1/search/{0}/{1}/{2}"
SURAH_URL = "http://api.alquran.cloud/v1/surah/{0}/editions/{1}"
class Surah:
    """A chapter (surah) of the Qur'an, fetched from the alquran.cloud API."""

    __slots__ = (
        'data',
        'edition',
        'chapter',
        'number',
        'arabic_name',
        'name',
        'translation',
        'period',
        'num_verses',
        'str_verses'
    )

    def __init__(
            self,
            chapter: Union[int, str, Chapters],
            edition: Optional[Editions] = Editions.sahih_international
    ):
        """Fetches the chapter's data in the given edition.

        Raises:
            SurahNotFound: if the chapter number is not between 1 and 114.
        """
        if isinstance(chapter, (int, str)):
            chapter = int(chapter)
            if (chapter > 114) or (chapter < 1):
                raise SurahNotFound(
                    "%s is not a chapter number in the Qur'an. The number must be inbetween 1 and 114" % chapter)
        else:
            # a Chapters enum member
            chapter = chapter.value
        self.chapter = chapter
        data = request(SURAH_URL.format(self.chapter, edition.value)).json()
        self.data = data['data'][0]
        self.edition = edition
        self.number = self.data.get('number')
        self.arabic_name = self.data.get('name')
        self.name = self.data.get('englishName')
        self.translation = self.data.get('englishNameTranslation')
        self.period = self.data.get('revelationType')
        self.num_verses = self.data.get('numberOfAyahs')
        self.str_verses = [verse['text'] for verse in self.data.get('ayahs')]

    def __repr__(self):
        return f"Surah {self.name} ({self.translation})"

    def __iter__(self) -> Iterable:
        return iter(list(self.verses))

    @property
    def verses(self) -> List:
        """All verses of the surah as Verse objects (one API call each)."""
        return [Verse(ayah['number'], self.edition)
                for ayah in self.data.get('ayahs')]

    def _parse_bounds(self, ayah):
        """Parses *ayah* ("3" or "1-4") into an inclusive (start, end) pair.

        Raises:
            IncorrectAyahArguments: on malformed or out-of-range input.
        """
        try:
            start = end = int(ayah)
        except (TypeError, ValueError):
            parts = str(ayah).split("-")
            if len(parts) != 2:
                raise IncorrectAyahArguments(
                    "Please enter your ayahs in the following format: 1:1-4 (For verses 1-4 of Surah Fatiha)"
                )
            try:
                start, end = (int(part) for part in parts)
            except ValueError:
                raise IncorrectAyahArguments(
                    "You may not use any words to define your ayah!") from ValueError
            if start > end:
                # fix: actually swap the bounds — the previous code assigned
                # both variables the same value
                start, end = end, start
        if (start < 1) or (start > len(self.str_verses)):
            raise IncorrectAyahArguments(
                "Ayah must be inbetween 1 and %s" % len(self.str_verses))
        return start, end

    def show_verses(
            self,
            ayah: Union[int, str],
    ) -> List:
        """Returns the requested verse(s) of this surah as Verse objects."""
        start, end = self._parse_bounds(ayah)
        verses = []
        for number in range(start, end + 1):
            try:
                # fix: always pass the surah's edition (the single-verse path
                # previously dropped it)
                verses.append(Verse(f"{self.chapter}:{number}", self.edition))
            except IncorrectAyahArguments:
                # ran past the end of the surah — stop like the API would
                break
        return verses

    def show_str_verses(
            self,
            ayah: Union[int, str],
    ) -> List[str]:
        """Returns the requested verse(s) as plain strings (no extra API calls)."""
        start, end = self._parse_bounds(ayah)
        selection = list(self.str_verses[start - 1:end])
        if not selection:
            raise IncorrectAyahArguments(f"Verse {self.chapter}:{start} does not exist!")
        return selection

    def get_verses(
            self,
            *verses
    ):
        """Applies show_verses to each argument and returns the list of results."""
        return list(map(self.show_verses, verses))

    def get_str_verses(
            self,
            *verses
    ):
        """Applies show_str_verses to each argument and returns the list of results."""
        return list(map(self.show_str_verses, verses))
class Page:
    """A page (1-604) of the standard Qur'an mushaf."""

    __slots__ = (
        'edition',
        'data',
        'number',
        'num_verses',
        'num_surahs'
    )

    def __init__(
            self,
            page: Union[int, str],
            edition: Optional[Editions] = Editions.sahih_international
    ):
        """Fetches the page's data in the given edition.

        Raises:
            IncorrectPageNumber: if *page* is not between 1 and 604.
        """
        page = int(page)
        # fix: also reject values below 1 (only the upper bound was checked)
        if (page > 604) or (page < 1):
            raise IncorrectPageNumber("Page number should be between 1 and 604")
        data = request(_URL.format('page', page, edition.value)).json()
        self.edition = edition
        self.data = data['data']
        self.number = self.data.get('number')
        self.num_verses = len(self.data.get('ayahs'))
        self.num_surahs = len(self.data.get('surahs'))

    def __repr__(self):
        return f"Qur'an Page {self.number} : {self.num_verses} verses"

    @property
    def surahs(self) -> List[Surah]:
        """The surahs appearing on this page, as Surah objects."""
        return [Surah(surah['number'], self.edition)
                for surah in self.data.get('surahs').values()]
class Juz:
    """A juz (one of the 30 parts) of the Qur'an."""

    __slots__ = (
        'edition',
        'data',
        'number',
        'num_ayahs',
        'num_surahs'
    )

    def __init__(
            self,
            number: Union[int, str],
            edition: Optional[Editions] = Editions.sahih_international
    ):
        """Fetches the juz's data in the given edition.

        Raises:
            IncorrectJuzNumber: if *number* is not between 1 and 30.
        """
        number = int(number)
        if (number > 30) or (number < 1):
            raise IncorrectJuzNumber("Juz number should be inbetween 1 and 30.")
        data = request(_URL.format('juz', number, edition.value)).json()
        self.edition = edition
        self.data = data['data']
        self.number = self.data.get('number')
        self.num_ayahs = len(self.data.get('ayahs'))
        self.num_surahs = len(self.data.get('surahs'))

    def __repr__(self):
        return f"Juz {self.number} - {self.num_surahs} surahs"

    @property
    def surahs(self) -> List[Surah]:
        """The surahs (at least partially) contained in this juz."""
        to_return = list()
        for surah in self.data.get('surahs').keys():
            to_return.append(Surah(surah, self.edition))
        return to_return
class Verse:
    """A single verse (ayah) of the Qur'an, fetched by absolute number or
    by "surah:ayah" reference."""

    __slots__ = (
        'data',
        'edition',
        'number',
        'text',
        'number_in_surah',
        'position',
        'is_sajda'
    )

    def __init__(
            self,
            ayah: Union[int, str],
            edition: Optional[Editions] = Editions.sahih_international
    ):
        """Fetches the verse's data in the given edition.

        Raises:
            IncorrectAyahArguments: if the API reports the verse does not exist.
        """
        data = request(_URL.format('ayah', ayah, edition.value)).json()
        if data.get('code') != 200:
            raise IncorrectAyahArguments(f"Verse {ayah} of the quran does not exist!")
        self.data = data['data']
        self.edition = edition
        self.number = self.data.get('number')
        self.text = self.data.get('text')
        self.number_in_surah = self.data.get('numberInSurah')
        # human-readable "surah:ayah" reference
        self.position = f"{self.data['surah']['number']}:{self.number_in_surah}"
        self.is_sajda = self.data.get('sajda')

    def __repr__(self) -> str:
        return self.text

    @property
    def surah(self) -> Surah:
        """The Surah containing this verse (triggers an API call)."""
        return Surah(self.data['surah']['number'], self.edition)

    @property
    def juz(self) -> Juz:
        """The Juz containing this verse (triggers an API call)."""
        return Juz(self.data['juz'], self.edition)

    @property
    def page(self) -> Page:
        """The mushaf Page containing this verse (triggers an API call)."""
        return Page(self.data['page'], self.edition)
class Search:
    """Full-text search of a term in the Qur'an, optionally restricted to
    one surah, via the alquran.cloud search endpoint."""

    __slots__ = (
        '_surah',
        'term',
        'edition',
        'data',
        'count',
        'str_verses'
    )

    def __init__(
            self,
            term: str,
            surah: Optional[Union[int, str, Chapters]] = None,
            edition: Optional[Editions] = Editions.sahih_international
    ):
        self._surah = surah
        if not self._surah:
            # API convention: "all" searches the whole Qur'an
            surah = "all"
        else:
            if isinstance(surah, Chapters):
                surah = surah.value
        # NOTE(review): bare except — any failure (network, JSON) is reported
        # as "no results"; consider narrowing the exception types.
        try:
            data = request(SEARCH_URL.format(term, surah, edition.value)).json()
        except:
            raise SearchError("There are no results of this term in this edition.")
        self.term = term
        self.edition = edition
        self.data = data['data']
        self.count = self.data.get('count')
        self.str_verses = [verse['text'] for verse in self.data.get('matches')]

    def __repr__(self):
        if self._surah:
            return f"{self.count} count(s) of \"{self.term}\" in" \
                   f" Surah {self.data['matches'][0]['surah']['englishName']}" \
                   f" (in this edition)"
        else:
            return f"{self.count} count(s) of \"{self.term}\" in the Qur'an (in this edition)"

    def __iter__(self) -> Iterable:
        return iter(list(self.verses))

    @property
    def verses(self) -> List[Verse]:
        """The matching verses as Verse objects, sorted by absolute number
        (one API call per match)."""
        ayahs = list()
        for ayah in self.data.get('matches'):
            verse = ayah['number']
            # NOTE(review): bare except silently stops collecting on the
            # first failure; consider narrowing and/or continuing.
            try:
                ayahs.append(Verse(verse, self.edition))
            except:
                break
        return sorted(ayahs, key=lambda x: x.number)
def show_verses(
        ayah: Union[int, str],
        edition: Optional[Editions] = Editions.sahih_international
) -> List[str]:
    """Returns verse text(s) for *ayah*.

    A bare number is treated as an absolute verse number in the Qur'an;
    a "surah:ayah" (or "surah:start-end") reference selects within a surah.

    Raises:
        IncorrectAyahArguments: on malformed references.
    """
    if isinstance(ayah, int) or ayah.isdigit():
        return [str(Verse(ayah, edition))]
    try:
        surah, verses = ayah.split(":")
    except ValueError:
        raise IncorrectAyahArguments(
            "Please enter your verses in the following format: 2:225 (For Surah Baqarah verse 255)") from ValueError
    # fix: narrowed the bare `except:` to the exception int() actually raises
    try:
        surah = int(surah)
    except ValueError:
        raise IncorrectAyahArguments("You may not use any words to define your verse") from ValueError
    return list(Surah(surah, edition).show_str_verses(verses))
class Ayah(Verse):
    """Alias of :class:`Verse` under its Arabic name."""
    pass
class Chapter(Surah):
    """Alias of :class:`Surah` under its English name."""
    pass
| 31.518817 | 120 | 0.528443 |
aceadb93c9f4c38b9d476ef9d0fc602ac6c28391 | 3,420 | py | Python | social_rl/gym_multigrid/envs/tag.py | GT-melee/google-research | 43ea0036c969dfed441ca408116cd0428019504b | [
"Apache-2.0"
] | null | null | null | social_rl/gym_multigrid/envs/tag.py | GT-melee/google-research | 43ea0036c969dfed441ca408116cd0428019504b | [
"Apache-2.0"
] | null | null | null | social_rl/gym_multigrid/envs/tag.py | GT-melee/google-research | 43ea0036c969dfed441ca408116cd0428019504b | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Implements a gym_minigrid tag environment.
The agents are split into two teams, where one team is rewarded for being
near the other team and the other team has a symmetric penalty.
"""
from social_rl.gym_multigrid.gym_minigrid import minigrid
import numpy as np
from social_rl.gym_multigrid import multigrid
from social_rl.gym_multigrid.register import register
class TagEnv(multigrid.MultiGridEnv):
    """Tag grid environment with obstacles and a sparse reward.

    Agents are split into "hiders" and "seekers". Whenever a seeker sits
    exactly one Manhattan step away from a hider, that seeker receives +10
    and that hider receives -10; otherwise every agent gets 0.
    """

    def __init__(self,
                 size=15,
                 hide_agents=1,
                 seek_agents=1,
                 n_clutter=25,
                 max_steps=250,
                 **kwargs):
        """Build the tag environment.

        Args:
          size: Width/height of the square grid, in tiles.
          hide_agents: Number of agents playing the hiding role.
          seek_agents: Number of agents playing the seeking role.
          n_clutter: Number of blocking wall objects in the environment.
          max_steps: Episode length cap.
          **kwargs: Forwarded to the MultiGridEnv constructor.
        """
        self.n_clutter = n_clutter
        self.hide_agents = hide_agents
        self.seek_agents = seek_agents
        total_agents = hide_agents + seek_agents
        super().__init__(
            grid_size=size,
            max_steps=max_steps,
            n_agents=total_agents,
            fully_observed=True,
            **kwargs)

    def _gen_grid(self, width, height):
        # Outer walls first, then random clutter, then the agents.
        self.grid = multigrid.Grid(width, height)
        self.grid.wall_rect(0, 0, width, height)
        for _ in range(self.n_clutter):
            self.place_obj(minigrid.Wall(), max_tries=100)
        self.place_agent()
        self.mission = 'Play tag'

    def step(self, action):
        obs, _, done, info = multigrid.MultiGridEnv.step(self, action)
        # Zero-sum tagging bonus: +/-10 for every hider/seeker pair that is
        # adjacent (Manhattan distance of exactly one tile).
        reward = [0] * self.n_agents
        seeker_ids = range(self.hide_agents, self.hide_agents + self.seek_agents)
        for hider in range(self.hide_agents):
            for seeker in seeker_ids:
                distance = np.abs(self.agent_pos[hider] - self.agent_pos[seeker]).sum()
                if distance == 1:
                    reward[hider] -= 10.0
                    reward[seeker] += 10.0
        return obs, reward, done, info
class RandomTagEnv6x6(TagEnv):
    """Small 6x6 tag variant: 1 hider vs 1 seeker with 5 clutter objects."""

    def __init__(self, **kwargs):
        super().__init__(
            size=6, hide_agents=1, seek_agents=1, n_clutter=5, **kwargs)
class RandomTagEnv8x8(TagEnv):
    """8x8 tag variant: 2 hiders vs 3 seekers with 10 clutter objects."""

    def __init__(self, **kwargs):
        super().__init__(
            size=8, hide_agents=2, seek_agents=3, n_clutter=10, **kwargs)
# Resolve this module's dotted import path for the env registry entry
# points; importlib loaders expose ``name`` while older loader APIs
# (presumably Python 2 era) used ``fullname``.
if hasattr(__loader__, 'name'):
    module_path = __loader__.name
elif hasattr(__loader__, 'fullname'):
    module_path = __loader__.fullname

# Register the tag environments under their gym-style ids.
register(env_id='MultiGrid-Tag-v0', entry_point=module_path + ':TagEnv')
register(
    env_id='MultiGrid-Tag-Random-6x6-v0',
    entry_point=module_path + ':RandomTagEnv6x6')
register(
    env_id='MultiGrid-Tag-Random-8x8-v0',
    entry_point=module_path + ':RandomTagEnv8x8')
| 31.376147 | 80 | 0.69269 |
aceadc5b7fe1bbf6dd7d04e4a873d63203a03fa3 | 2,787 | py | Python | cinder/temp/a/bbbb.py | usernameisnull/cinder-explanation | 354ab16c9761327a1f1c6b58bcd94bf912c0c638 | [
"Apache-2.0"
] | null | null | null | cinder/temp/a/bbbb.py | usernameisnull/cinder-explanation | 354ab16c9761327a1f1c6b58bcd94bf912c0c638 | [
"Apache-2.0"
] | null | null | null | cinder/temp/a/bbbb.py | usernameisnull/cinder-explanation | 354ab16c9761327a1f1c6b58bcd94bf912c0c638 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env/python
# coding=utf-8
from routes import Mapper
from routes import middleware
import webob.dec
from wsgiref.simple_server import make_server
class Controller(object):
def __init__(self):
self.i = 1
def __call__(self):
print self.i
def search(self):
return "do search()"
def show(self):
return "do show()"
def index(self):
return "do index()"
def update(self):
return "do update()"
def delete(self):
return "do delete()"
def create(self):
return "do create()"
def create_many(self):
return "do create_many()"
def update_many(self):
return "do update_many()"
def list_many(self):
return "do list_many()"
def delete_many(self):
return "do delete_many()"
class Appclass(object):
    """Minimal WSGI app wiring a Routes mapper to the demo Controller.

    Requests are routed by RoutesMiddleware, which stores the match dict in
    the WSGI environ for ``_dispatch`` to act on.
    """

    def __init__(self):
        a = Controller()
        _map = Mapper()
        # Route matching option 1 (kept for reference): single explicit route.
        """路由匹配条件1"""
        # map.connect('/images',controller=a,
        #             action='search',
        #             conditions={'method':['GET']})
        # Route matching option 2: generic /{action}/{pid} pattern.
        """路由匹配条件2"""
        # map.connect('name',"/{action}/{pid}",controller=a)
        # Route matching option 3: RESTful resource with an extra collection action.
        """路由匹配条件3"""
        # map.resource("message","messages",controller=a,collection={'search':'GET'})
        # Route matching option 4: resource with custom collection/member actions.
        """路由匹配条件4"""
        # map.resource('message', 'messages',controller=a,
        #              collection={'list_many':'GET','create_many':'POST'},
        #              member={'update_many':'POST','delete_many':'POST'})
        # Route matching option 5 (active): same as 4 but scoped under /{projectid}.
        """路由匹配条件5"""
        _map.resource('message', 'messages', controller=a, path_prefix='/{projectid}',
                      collection={'list_many': 'GET', 'create_many': 'POST'},
                      member={'update_many': 'POST', 'delete_many': 'POST'})
        # _map.resource('type', 'types', controller=other_controller,
        #               parent_resource=dict(member_name='message',
        #                                    collection_name='messages'),
        #               path_prefix='{projectid}/%s/:%s_id' % ('nex', 'nexs'))
        self.route = middleware.RoutesMiddleware(self._dispatch, _map)

    @webob.dec.wsgify
    def __call__(self, req):
        # Hand every request to the routing middleware.
        return self.route

    @staticmethod
    @webob.dec.wsgify
    def _dispatch(req):
        # RoutesMiddleware leaves (args, kwargs) under 'wsgiorg.routing_args';
        # index 1 is the keyword match dict (controller/action/...).
        match = req.environ['wsgiorg.routing_args'][1]
        print "route match result is:", match
        if not match:
            return "fake url"
        controller = match['controller']
        action = match['action']
        if hasattr(controller, action):
            # Invoke the matched controller action and return its string result.
            func = getattr(controller, action)
            ret = func()
            return ret
        else:
            return "has no action:%s" % action
if __name__ == "__main__":
    # Stand-alone demo: serve the WSGI app on all interfaces, port 8088.
    app = Appclass()
    server = make_server('', 8088, app)
    server.serve_forever()
| 27.594059 | 86 | 0.555795 |
aceadd774823c60d368f88fb6d3c3a1a2f869f3b | 2,353 | py | Python | potion/envs/gridworld.py | T3p/potion | c6349111e3f2d2a32e85dc29e052f36cf36edcdc | [
"MIT"
] | null | null | null | potion/envs/gridworld.py | T3p/potion | c6349111e3f2d2a32e85dc29e052f36cf36edcdc | [
"MIT"
] | null | null | null | potion/envs/gridworld.py | T3p/potion | c6349111e3f2d2a32e85dc29e052f36cf36edcdc | [
"MIT"
] | 1 | 2019-09-08T15:11:55.000Z | 2019-09-08T15:11:55.000Z | """
GridWorld
"""
import logging
import gym
import gym.spaces
import random
import time
import math
logger = logging.getLogger(__name__)
class GridWorld(gym.Env):
    """Minimal 2x2 deterministic grid world.

    The agent starts at (0, 0); entering the absorbing cell (1, 1) ends the
    episode. Cell (1, 1) pays +1 and cell (1, 0) pays -1, and each payout
    occurs at most once per episode. States are exposed as a single integer
    ``row * width + col``.
    """

    def __init__(self):
        self.height = 2
        self.width = 2
        self.start = [(0,0)]                  # candidate start cells
        self.absorbing = {(1,1)}              # entering these ends the episode
        self.goals = {(1,1): 1., (1,0): -1}   # one-shot reward per cell
        self.n_actions = 4
        self.n_states = self.height * self.width
        self.action_space = gym.spaces.Discrete(self.n_actions)
        self.observation_space = gym.spaces.Discrete(self.n_states)
        self.reset()

    def seed(self, seed=None):
        # Seeds the global ``random`` module (the env has no private RNG).
        random.seed(seed)
        return [seed]

    def step(self, action):
        """Move by *action*: 0 -> +row, 1 -> +col, 2 -> -row, 3 -> -col."""
        # Move
        r = self.state[0]
        c = self.state[1]
        if action == 0:
            s = [r + 1, c]
        elif action == 1:
            s = [r, c + 1]
        elif action == 2:
            s = [r - 1, c]
        else:
            s = [r, c - 1]

        # Clamp to the borders: bumping a wall leaves the agent in place.
        if s[0] < 0:
            s[0] = 0
        if s[0] >= self.height:
            s[0] = self.height - 1
        if s[1] < 0:
            s[1] = 0
        if s[1] >= self.width:
            s[1] = self.width - 1

        done = False
        reward = 0.
        self.state = tuple(s)
        if self.state in self.absorbing:
            done = True
        if self.state in self.active_goals:
            # Pay the goal once, then zero it for the rest of the episode.
            reward = self.active_goals[self.state]
            self.active_goals[self.state] = 0
        return self._get_state(), reward, done, {}

    def reset(self, initial=None):
        """Start a new episode and return the initial (integer) state.

        *initial* may be a (row, col) cell to start from; when None (the
        default) a random start cell is drawn, as before.
        """
        # Fix: ``initial`` used to be accepted but silently ignored.
        if initial is None:
            self.state = random.choice(self.start)
        else:
            self.state = tuple(initial)
        self.active_goals = dict(self.goals)
        return self._get_state()

    def render(self, mode='human', close=False):
        # ASCII rendering: Y = agent, * = live positive goal,
        # x = spent or negative goal, _ = empty cell.
        for i in range(self.height):
            for j in range(self.width):
                if self.state == (i,j):
                    print('Y', end='')
                elif (i,j) in self.active_goals and self.active_goals[(i,j)]>0:
                    print('*', end='')
                elif (i,j) in self.active_goals:
                    print('x', end='')
                else:
                    print('_', end='')
            print()
        print()
        time.sleep(.1)

    def _get_state(self):
        # Flatten (row, col) into a single discrete index.
        return self.state[0] * self.width + self.state[1]
| 26.438202 | 79 | 0.474713 |
aceadd975b4ac2266e2bcea80d83bc66b1c047c0 | 107 | py | Python | VENV/lib/python3.6/site-packages/PyInstaller/lib/modulegraph/__init__.py | workingyifei/display-pattern-generator | b27be84c6221fa93833f283109870737b05bfbf6 | [
"MIT"
] | 3 | 2018-11-27T06:30:23.000Z | 2021-05-30T15:56:32.000Z | VENV/lib/python3.6/site-packages/PyInstaller/lib/modulegraph/__init__.py | workingyifei/display-pattern-generator | b27be84c6221fa93833f283109870737b05bfbf6 | [
"MIT"
] | 1 | 2018-09-12T06:32:17.000Z | 2018-09-12T19:03:50.000Z | VENV/lib/python3.6/site-packages/PyInstaller/lib/modulegraph/__init__.py | workingyifei/display-pattern-generator | b27be84c6221fa93833f283109870737b05bfbf6 | [
"MIT"
] | 2 | 2018-12-29T07:49:59.000Z | 2020-03-18T02:44:31.000Z | # For PyInstaller/lib/ define the version here, since there is no
# package-resource.
__version__ = '0.13'
| 26.75 | 65 | 0.747664 |
aceadddb4a3a3be1f895f7a788a8d89147ac9d19 | 8,369 | py | Python | tests/test_e2e_integration.py | PennyLaneAI/pennylane-orquestra | 9673b7b72ccbd8b184156d8b0d89de8abb89af33 | [
"Apache-2.0"
] | 4 | 2020-12-10T17:05:29.000Z | 2021-01-13T02:10:20.000Z | tests/test_e2e_integration.py | PennyLaneAI/pennylane-orquestra | 9673b7b72ccbd8b184156d8b0d89de8abb89af33 | [
"Apache-2.0"
] | 27 | 2020-12-09T17:19:07.000Z | 2022-03-23T22:03:37.000Z | tests/test_e2e_integration.py | PennyLaneAI/pennylane-orquestra | 9673b7b72ccbd8b184156d8b0d89de8abb89af33 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
End-to-end integration tests between the local machine and the remote Orquestra
platform.
These test cases connect to the Orquestra platform. Prior authentication to the
Orquestra platform is required for running them. The roundtrip time can be 1-2
minutes for test cases including assertions on the output of the computation.
"""
import pytest
import numpy as np
import math
import yaml
import os
import pennylane as qml
from pennylane_orquestra import OrquestraDevice, QeQiskitDevice, QeIBMQDevice
import pennylane_orquestra.gen_workflow as gw
from pennylane_orquestra.cli_actions import qe_submit, workflow_details
from conftest import (
qe_list_workflow,
backend_specs_default,
operator_string_default,
qasm_circuit_default,
resources_default,
)
# Backend specs passed verbatim (as JSON strings) to the Orquestra workflow
# templates: the first has no "n_samples" (analytic mode), the second
# samples with 1000 shots.
qiskit_analytic_specs = '{"module_name": "qeqiskit.simulator", "function_name": "QiskitSimulator", "device_name": "qasm_simulator"}'
qiskit_sampler_specs = '{"module_name": "qeqiskit.simulator", "function_name": "QiskitSimulator", "device_name": "qasm_simulator", "n_samples": 1000}'

# Tolerance used for exact (analytic) simulation results.
analytic_tol = 10e-10

# The tolerance for sampling is expected to be higher
tol = 10e-2
class TestWorkflowSubmissionIntegration:
    """Test that workflow generation works as expected."""

    @pytest.mark.parametrize("resources", [None, resources_default])
    def test_can_submit_and_query_workflow_details(self, resources, tmpdir):
        """Test that filling in the workflow template for getting expectation
        values can be submitted to Orquestra and workflow details can be queried."""
        # Skip if not logged in to Orquestra
        try_resp = qe_list_workflow()
        need_login_msg = "token has expired, please log in again\n"
        if need_login_msg in try_resp:
            pytest.skip("Has not logged in to the Orquestra platform.")

        backend_component = "qe-forest"
        # Operator(s) serialized the way the workflow template expects them.
        op = ['["[Z0]"]']
        circuits = [qasm_circuit_default]

        # Fill in workflow template
        workflow = gw.gen_expval_workflow(
            backend_component, backend_specs_default, circuits, op, resources=resources
        )
        file_name = tmpdir.join("test_workflow.yaml")
        with open(file_name, "w") as file:  # note: shadows the `file` builtin
            yaml.dump(workflow, file)

        # Submit a workflow
        workflow_id = qe_submit(file_name)

        # The returned id should appear in the queried details text.
        workflow_msg = workflow_details(workflow_id)
        details_string = "".join(workflow_msg)
        assert workflow_id in details_string

    @pytest.mark.parametrize("backend_component", list(gw.backend_import_db.keys()))
    def test_submit_raises(self, backend_component, tmpdir):
        """Test that submitting a workflow to Orquestra with invalid
        requirements raises an error."""
        # Skip if not logged in to Orquestra
        try_resp = qe_list_workflow()
        need_login_msg = "token has expired, please log in again\n"
        if need_login_msg in try_resp:
            pytest.skip("Has not logged in to the Orquestra platform.")

        circuits = [qasm_circuit_default]

        # This will not be a valid operator: will raise error
        operator = []

        # Fill in workflow template
        workflow = gw.gen_expval_workflow(
            backend_component, backend_specs_default, circuits, operator
        )
        file_name = tmpdir.join("test_workflow.yaml")
        with open(file_name, "w") as file:
            yaml.dump(workflow, file)

        # Submit a workflow --- error due to the operator
        with pytest.raises(ValueError, match="Error"):
            workflow_id = qe_submit(file_name)
# (device_name, backend, analytic) triples exercised by test_apply_hadamard.
devices = [
    ("orquestra.forest", "wavefunction-simulator", True),
    ("orquestra.forest", "wavefunction-simulator", False),
    ("orquestra.qiskit", "statevector_simulator", True),
    ("orquestra.qiskit", "qasm_simulator", False),
]
class TestOrquestraIntegration:
    """Test the Orquestra integration with PennyLane."""

    @pytest.mark.parametrize("device_name,backend,analytic", devices)
    def test_apply_hadamard(self, device_name, backend, analytic):
        """Test a simple circuit that applies Hadamard on the first wire."""
        # Sampling devices use 10000 shots; analytic devices use None.
        shots = None if analytic else 10000
        dev = qml.device(device_name, wires=3, backend=backend, shots=shots, keep_files=False)
        # Pick the tolerance matching the device's (analytic vs sampled) mode.
        TOL = analytic_tol if dev.analytic else tol

        # Skip if not logged in to Orquestra
        try_resp = qe_list_workflow()
        need_login_msg = "token has expired, please log in again\n"
        if need_login_msg in try_resp:
            pytest.skip("Has not logged in to the Orquestra platform.")

        @qml.qnode(dev)
        def circuit():
            qml.Hadamard(0)
            return qml.expval(qml.PauliZ(0))

        # <Z> after a Hadamard is exactly 0.
        assert math.isclose(circuit(), 0, abs_tol=TOL)

    def test_compute_expval_including_identity(self):
        """Test a simple circuit that involves computing the expectation value of the
        Identity operator."""
        dev = qml.device("orquestra.qiskit", wires=3)

        # Skip if not logged in to Orquestra
        try_resp = qe_list_workflow()
        need_login_msg = "token has expired, please log in again\n"
        if need_login_msg in try_resp:
            pytest.skip("Has not logged in to the Orquestra platform.")

        @qml.qnode(dev)
        def circuit():
            qml.PauliX(0)
            qml.PauliX(1)
            qml.PauliX(2)
            return (
                qml.expval(qml.Identity(0)),
                qml.expval(qml.PauliZ(1)),
                qml.expval(qml.Identity(2)),
            )

        # Identity always yields 1; <Z> after PauliX is -1.
        assert np.allclose(circuit(), np.array([1, -1, 1]))

    def test_jacobian_with_multi_step_execute(self):
        """Test that the value of the jacobian computed using the internal
        batch_execute method corresponds to the value computed with
        the default.qubit device.

        There are ``qubits * layers * 3 * 2`` many circuits to evaluate.
        """
        try_resp = qe_list_workflow()
        need_login_msg = "token has expired, please log in again\n"
        if need_login_msg in try_resp:
            pytest.skip("Has not logged in to the Orquestra platform.")

        # Evaluate 12 circuits (2 * 1 * 3 * 2)
        # By default, this fits into two separate workflow files
        qubits = 2
        layers = 1
        weights = qml.init.strong_ent_layers_uniform(layers, qubits)

        dev1 = qml.device(
            "orquestra.qiskit",
            backend="statevector_simulator",
            wires=qubits,
            shots=None,
            keep_files=False,
        )
        dev2 = qml.device("default.qubit", wires=qubits, shots=None)

        def func(weights):
            qml.templates.StronglyEntanglingLayers(weights, wires=range(qubits))
            return qml.expval(qml.PauliZ(0))

        # Same variational circuit on both the remote and the local device.
        orquestra_qnode = qml.QNode(func, dev1)
        default_qnode = qml.QNode(func, dev2)

        dfunc1 = qml.grad(orquestra_qnode)
        dfunc2 = qml.grad(default_qnode)

        res_orquestra = dfunc1(weights)
        res_default_qubit = dfunc2(weights)

        # Remote and local gradients must agree.
        assert np.allclose(res_orquestra, res_default_qubit)
class TestOrquestraIBMQIntegration:
    """Integration test for the IBMQ device (requires an IBMQ token fixture)."""

    def test_apply_x(self, token):
        """Test a simple circuit that applies PauliX on the first wire."""
        # Hardware-backed runs only get the looser sampling tolerance.
        TOL = tol
        dev = qml.device("orquestra.ibmq", wires=3, ibmqx_token=token)

        # Skip if not logged in to Orquestra
        try_resp = qe_list_workflow()
        need_login_msg = "token has expired, please log in again\n"
        if need_login_msg in try_resp:
            pytest.skip("Has not logged in to the Orquestra platform.")

        @qml.qnode(dev)
        def circuit():
            qml.PauliX(0)
            return qml.expval(qml.PauliZ(0))

        # <Z> after a bit flip is -1 (up to sampling noise).
        assert math.isclose(circuit(), -1, abs_tol=TOL)
| 35.764957 | 150 | 0.668778 |
aceadea4994db7e32cdcb9d579aa65309d60cd06 | 14,284 | py | Python | hooks/charmhelpers/fetch/__init__.py | projectcalico/charm-bird | 3224e887329c527f6bed2520346e66fb4e795fe8 | [
"Apache-2.0"
] | null | null | null | hooks/charmhelpers/fetch/__init__.py | projectcalico/charm-bird | 3224e887329c527f6bed2520346e66fb4e795fe8 | [
"Apache-2.0"
] | null | null | null | hooks/charmhelpers/fetch/__init__.py | projectcalico/charm-bird | 3224e887329c527f6bed2520346e66fb4e795fe8 | [
"Apache-2.0"
] | 1 | 2022-03-16T16:12:32.000Z | 2022-03-16T16:12:32.000Z | import importlib
from tempfile import NamedTemporaryFile
import time
from yaml import safe_load
from charmhelpers.core.host import (
lsb_release
)
import subprocess
from charmhelpers.core.hookenv import (
config,
log,
)
import os
import six
if six.PY3:
from urllib.parse import urlparse, urlunparse
else:
from urlparse import urlparse, urlunparse
# sources.list templates; "{}" is filled with the archive pocket (below)
# or the distribution release codename respectively.
CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
"""

PROPOSED_POCKET = """# Proposed
deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted
"""

# Maps every accepted "cloud:" spelling (short name, release-prefixed,
# /updates and /proposed variants) to its canonical archive pocket.
CLOUD_ARCHIVE_POCKETS = {
    # Folsom
    'folsom': 'precise-updates/folsom',
    'precise-folsom': 'precise-updates/folsom',
    'precise-folsom/updates': 'precise-updates/folsom',
    'precise-updates/folsom': 'precise-updates/folsom',
    'folsom/proposed': 'precise-proposed/folsom',
    'precise-folsom/proposed': 'precise-proposed/folsom',
    'precise-proposed/folsom': 'precise-proposed/folsom',
    # Grizzly
    'grizzly': 'precise-updates/grizzly',
    'precise-grizzly': 'precise-updates/grizzly',
    'precise-grizzly/updates': 'precise-updates/grizzly',
    'precise-updates/grizzly': 'precise-updates/grizzly',
    'grizzly/proposed': 'precise-proposed/grizzly',
    'precise-grizzly/proposed': 'precise-proposed/grizzly',
    'precise-proposed/grizzly': 'precise-proposed/grizzly',
    # Havana
    'havana': 'precise-updates/havana',
    'precise-havana': 'precise-updates/havana',
    'precise-havana/updates': 'precise-updates/havana',
    'precise-updates/havana': 'precise-updates/havana',
    'havana/proposed': 'precise-proposed/havana',
    'precise-havana/proposed': 'precise-proposed/havana',
    'precise-proposed/havana': 'precise-proposed/havana',
    # Icehouse
    'icehouse': 'precise-updates/icehouse',
    'precise-icehouse': 'precise-updates/icehouse',
    'precise-icehouse/updates': 'precise-updates/icehouse',
    'precise-updates/icehouse': 'precise-updates/icehouse',
    'icehouse/proposed': 'precise-proposed/icehouse',
    'precise-icehouse/proposed': 'precise-proposed/icehouse',
    'precise-proposed/icehouse': 'precise-proposed/icehouse',
    # Juno
    'juno': 'trusty-updates/juno',
    'trusty-juno': 'trusty-updates/juno',
    'trusty-juno/updates': 'trusty-updates/juno',
    'trusty-updates/juno': 'trusty-updates/juno',
    'juno/proposed': 'trusty-proposed/juno',
    'trusty-juno/proposed': 'trusty-proposed/juno',
    'trusty-proposed/juno': 'trusty-proposed/juno',
    # Kilo
    'kilo': 'trusty-updates/kilo',
    'trusty-kilo': 'trusty-updates/kilo',
    'trusty-kilo/updates': 'trusty-updates/kilo',
    'trusty-updates/kilo': 'trusty-updates/kilo',
    'kilo/proposed': 'trusty-proposed/kilo',
    'trusty-kilo/proposed': 'trusty-proposed/kilo',
    'trusty-proposed/kilo': 'trusty-proposed/kilo',
}

# The order of this list is very important. Handlers should be listed in from
# least- to most-specific URL matching.
FETCH_HANDLERS = (
    'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
    'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
    'charmhelpers.fetch.giturl.GitUrlFetchHandler',
)

APT_NO_LOCK = 100  # The return code for "couldn't acquire lock" in APT.
APT_NO_LOCK_RETRY_DELAY = 10  # Wait 10 seconds between apt lock checks.
APT_NO_LOCK_RETRY_COUNT = 30  # Retry to acquire the lock X times.
class SourceConfigError(Exception):
    """Raised when charm source/key configuration is invalid or unsupported."""
    pass
class UnhandledSource(Exception):
    """Raised when no fetch handler can process (or install) a given source."""
    pass
class AptLockError(Exception):
    """Raised when the APT/dpkg lock cannot be acquired."""
    pass
class BaseFetchHandler(object):
    """Common interface for FetchHandler implementations in fetch plugins.

    Subclasses override ``can_handle`` and ``install``; the URL helpers
    below are shared by all handlers.
    """

    def can_handle(self, source):
        """Return True when this handler supports *source*; otherwise a
        string explaining why it cannot."""
        return "Wrong source type"

    def install(self, source):
        """Download and unpack *source*, returning the path to the unpacked
        files, or raise UnhandledSource."""
        raise UnhandledSource("Wrong source type {}".format(source))

    def parse_url(self, url):
        """Split *url* into its urlparse components."""
        return urlparse(url)

    def base_url(self, url):
        """Return *url* with its query string and fragment stripped."""
        scheme, netloc, path, params = self.parse_url(url)[:4]
        return urlunparse((scheme, netloc, path, params, '', ''))
def filter_installed_packages(packages):
    """Return the subset of *packages* that still requires installation.

    A package is kept when it exists in the apt cache without an installed
    version, or when it has no installation candidate at all (logged as a
    warning).
    """
    cache = apt_cache()
    _pkgs = []
    for package in packages:
        try:
            p = cache[package]
            # Replaced the `p.current_ver or _pkgs.append(...)` short-circuit
            # side-effect trick with an explicit, readable conditional.
            if not p.current_ver:
                _pkgs.append(package)
        except KeyError:
            log('Package {} has no installation candidate.'.format(package),
                level='WARNING')
            _pkgs.append(package)
    return _pkgs
def apt_cache(in_memory=True):
    """Build and return an apt cache"""
    # ``apt_pkg`` is the system python-apt binding; imported lazily so this
    # module can still be imported where apt is unavailable.
    import apt_pkg
    apt_pkg.init()
    if in_memory:
        # Blank the on-disk cache paths so the cache is built in memory only.
        apt_pkg.config.set("Dir::Cache::pkgcache", "")
        apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
    return apt_pkg.Cache()
def apt_install(packages, options=None, fatal=False):
    """Install one or more packages with apt-get.

    :param packages: a single package name or an iterable of names.
    :param options: extra apt-get options; defaults to keeping old conffiles.
    :param fatal: when True, failures raise (with dpkg-lock retries).
    """
    opts = ['--option=Dpkg::Options::=--force-confold'] if options is None else options
    cmd = ['apt-get', '--assume-yes']
    cmd.extend(opts)
    cmd.append('install')
    if isinstance(packages, six.string_types):
        cmd.append(packages)
    else:
        cmd.extend(packages)
    log("Installing {} with options: {}".format(packages, opts))
    _run_apt_command(cmd, fatal)
def apt_upgrade(options=None, fatal=False, dist=False):
    """Upgrade all installed packages (``dist-upgrade`` when *dist* is True)."""
    opts = ['--option=Dpkg::Options::=--force-confold'] if options is None else options
    cmd = ['apt-get', '--assume-yes']
    cmd.extend(opts)
    cmd.append('dist-upgrade' if dist else 'upgrade')
    log("Upgrading with options: {}".format(opts))
    _run_apt_command(cmd, fatal)
def apt_update(fatal=False):
    """Refresh the local apt package index (``apt-get update``)."""
    _run_apt_command(['apt-get', 'update'], fatal)
def apt_purge(packages, fatal=False):
    """Purge (remove together with config files) one or more packages."""
    pkg_args = [packages] if isinstance(packages, six.string_types) else list(packages)
    cmd = ['apt-get', '--assume-yes', 'purge'] + pkg_args
    log("Purging {}".format(packages))
    _run_apt_command(cmd, fatal)
def apt_hold(packages, fatal=False):
    """Mark one or more packages as held back (``apt-mark hold``)."""
    pkg_args = [packages] if isinstance(packages, six.string_types) else list(packages)
    cmd = ['apt-mark', 'hold'] + pkg_args
    log("Holding {}".format(packages))
    if fatal:
        subprocess.check_call(cmd)
    else:
        subprocess.call(cmd)
def add_source(source, key=None):
    """Add a package source to this system.

    @param source: a URL or sources.list entry, as supported by
    add-apt-repository(1). Examples::

        ppa:charmers/example
        deb https://stub:key@private.example.com/ubuntu trusty main

    In addition:
        'proposed:' may be used to enable the standard 'proposed'
        pocket for the release.
        'cloud:' may be used to activate official cloud archive pockets,
        such as 'cloud:icehouse'
        'distro' may be used as a noop

    @param key: A key to be added to the system's APT keyring and used
    to verify the signatures on packages. Ideally, this should be an
    ASCII format GPG public key including the block headers. A GPG key
    id may also be used, but be aware that only insecure protocols are
    available to retrieve the actual public key from a public keyserver
    placing your Juju environment at risk. ppa and cloud archive keys
    are securely added automatically, so should not be provided.
    """
    if source is None:
        log('Source is not present. Skipping')
        return

    # These spellings are all handled directly by add-apt-repository.
    if source.startswith(('ppa:', 'http', 'deb ', 'cloud-archive:')):
        subprocess.check_call(['add-apt-repository', '--yes', source])
    elif source.startswith('cloud:'):
        # Cloud archive pockets need the keyring package plus a dedicated
        # sources.list fragment.
        apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
                    fatal=True)
        pocket = source.split(':')[-1]
        if pocket not in CLOUD_ARCHIVE_POCKETS:
            raise SourceConfigError(
                'Unsupported cloud: source option %s' %
                pocket)
        actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
        with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
            apt.write(CLOUD_ARCHIVE.format(actual_pocket))
    elif source == 'proposed':
        release = lsb_release()['DISTRIB_CODENAME']
        with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
            apt.write(PROPOSED_POCKET.format(release))
    elif source == 'distro':
        # Explicit no-op: keep the distribution's default sources.
        pass
    else:
        log("Unknown source: {!r}".format(source))

    if key:
        if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
            # Full ASCII-armored key material: feed it to apt-key on stdin.
            with NamedTemporaryFile('w+') as key_file:
                key_file.write(key)
                key_file.flush()
                key_file.seek(0)
                subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
        else:
            # Note that hkp: is in no way a secure protocol. Using a
            # GPG key id is pointless from a security POV unless you
            # absolutely trust your network and DNS.
            subprocess.check_call(['apt-key', 'adv', '--keyserver',
                                   'hkp://keyserver.ubuntu.com:80', '--recv',
                                   key])
def configure_sources(update=False,
                      sources_var='install_sources',
                      keys_var='install_keys'):
    """
    Configure multiple package sources from charm configuration.

    Both config options hold YAML fragments (included as strings); the
    sources and their corresponding keys are of the types supported by
    add_source().

    Example config:
        install_sources: |
          - "ppa:foo"
          - "http://example.com/repo precise main"
        install_keys: |
          - null
          - "a1b2c3d4"

    Note that 'null' (a.k.a. None) should not be quoted.
    """
    sources = safe_load((config(sources_var) or '').strip()) or []
    keys = safe_load((config(keys_var) or '').strip()) or None

    if isinstance(sources, six.string_types):
        sources = [sources]

    if keys is None:
        # No keys configured: pair every source with a None key.
        keys = [None] * len(sources)
    elif isinstance(keys, six.string_types):
        keys = [keys]

    if len(sources) != len(keys):
        raise SourceConfigError(
            'Install sources and keys lists are different lengths')

    for source, key in zip(sources, keys):
        add_source(source, key)

    if update:
        apt_update(fatal=True)
def install_remote(source, *args, **kwargs):
    """
    Install a file tree from a remote source

    The specified source should be a url of the form:
        scheme://[host]/path[#[option=value][&...]]

    Schemes supported are based on this modules submodules.
    Options supported are submodule-specific.
    Additional arguments are passed through to the submodule.

    For example::

        dest = install_remote('http://example.com/archive.tgz',
                              checksum='deadbeef',
                              hash_type='sha1')

    This will download `archive.tgz`, validate it using SHA1 and, if
    the file is ok, extract it and return the directory in which it
    was extracted. If the checksum fails, it will raise
    :class:`charmhelpers.core.host.ChecksumError`.
    """
    # We ONLY check for True here because can_handle may return a string
    # explaining why it can't handle a given source.
    handlers = [h for h in plugins() if h.can_handle(source) is True]
    installed_to = None
    for handler in handlers:
        try:
            installed_to = handler.install(source, *args, **kwargs)
        except UnhandledSource:
            pass
        # NOTE(review): there is deliberately no break here -- every capable
        # handler is attempted and the *last* successful result wins; confirm
        # before "optimizing" this into an early return.
    if not installed_to:
        raise UnhandledSource("No handler found for source {}".format(source))
    return installed_to
def install_from_config(config_var_name):
    """Install the remote source whose URL is stored under *config_var_name*
    in the charm configuration."""
    return install_remote(config()[config_var_name])
def plugins(fetch_handlers=None):
    """Instantiate and return the available fetch handler plugins.

    Handlers whose backing module or class cannot be imported are skipped
    (and logged), so optional plugins may simply be left uninstalled.
    """
    plugin_list = []
    for handler_name in (fetch_handlers or FETCH_HANDLERS):
        package, classname = handler_name.rsplit('.', 1)
        try:
            module = importlib.import_module(package)
            plugin_list.append(getattr(module, classname)())
        except (ImportError, AttributeError):
            log("FetchHandler {} not found, skipping plugin".format(
                handler_name))
    return plugin_list
def _run_apt_command(cmd, fatal=False):
    """
    Run an APT command, checking output and retrying if the fatal flag is set
    to True.

    :param: cmd: str: The apt command to run.
    :param: fatal: bool: Whether the command's output should be checked and
            retried.
    """
    env = os.environ.copy()

    if 'DEBIAN_FRONTEND' not in env:
        # Never allow apt to prompt interactively from inside a hook.
        env['DEBIAN_FRONTEND'] = 'noninteractive'

    if fatal:
        retry_count = 0
        result = None

        # If the command is considered "fatal", we need to retry if the apt
        # lock was not acquired.

        while result is None or result == APT_NO_LOCK:
            try:
                result = subprocess.check_call(cmd, env=env)
            except subprocess.CalledProcessError as e:
                retry_count = retry_count + 1
                if retry_count > APT_NO_LOCK_RETRY_COUNT:
                    raise
                # NOTE(review): a non-zero exit code other than APT_NO_LOCK
                # also ends the loop here *without* re-raising, so such a
                # failure is silently swallowed even when fatal=True --
                # confirm whether that is intended.
                result = e.returncode
                log("Couldn't acquire DPKG lock. Will retry in {} seconds."
                    "".format(APT_NO_LOCK_RETRY_DELAY))
                time.sleep(APT_NO_LOCK_RETRY_DELAY)
    else:
        subprocess.call(cmd, env=env)
aceadf87854c601b5f67acce9e7cf178ffc9b984 | 103,918 | py | Python | src/interface/Python/paramonte/_pmreqs.py | ekourkchi/paramonte | 15f8ea27cb514078a94d9c4ee4b60e4f45826f17 | [
"MIT"
] | null | null | null | src/interface/Python/paramonte/_pmreqs.py | ekourkchi/paramonte | 15f8ea27cb514078a94d9c4ee4b60e4f45826f17 | [
"MIT"
] | null | null | null | src/interface/Python/paramonte/_pmreqs.py | ekourkchi/paramonte | 15f8ea27cb514078a94d9c4ee4b60e4f45826f17 | [
"MIT"
] | null | null | null | ####################################################################################################################################
####################################################################################################################################
####
#### MIT License
####
#### ParaMonte: plain powerful parallel Monte Carlo library.
####
#### Copyright (C) 2012-present, The Computational Data Science Lab
####
#### This file is part of the ParaMonte library.
####
#### Permission is hereby granted, free of charge, to any person obtaining a
#### copy of this software and associated documentation files (the "Software"),
#### to deal in the Software without restriction, including without limitation
#### the rights to use, copy, modify, merge, publish, distribute, sublicense,
#### and/or sell copies of the Software, and to permit persons to whom the
#### Software is furnished to do so, subject to the following conditions:
####
#### The above copyright notice and this permission notice shall be
#### included in all copies or substantial portions of the Software.
####
#### THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
#### EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
#### MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
#### IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
#### DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
#### OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
#### OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
####
#### ACKNOWLEDGMENT
####
#### ParaMonte is an honor-ware and its currency is acknowledgment and citations.
#### As per the ParaMonte library license agreement terms, if you use any parts of
#### this library for any purposes, kindly acknowledge the use of ParaMonte in your
#### work (education/research/industry/development/...) by citing the ParaMonte
#### library as described on this page:
####
#### https://github.com/cdslaborg/paramonte/blob/master/ACKNOWLEDGMENT.md
####
####################################################################################################################################
####################################################################################################################################
import os
import sys
import copy
import typing as tp
import numpy as np
import _paramonte as pm
import warnings
# Convenience aliases into the private _paramonte helper module.
Struct = pm.Struct
newline = pm.newline

# Hidden file whose single token ("True" / "False" / "Testing") controls
# whether the prerequisite verification runs on the next `import paramonte`.
verificationStatusFilePath = os.path.join( pm.path.auxil, ".verificationEnabled" )

####################################################################################################################################

# Platform-specific notes appended to user-facing messages explaining how to
# build the ParaMonte kernel locally (no extra note is needed on Windows).
buildInstructionNoteWindows = ""
buildInstructionNoteUnix = ("If your platform is non-Windows and is compatible with the " + newline
                            + "GNU Compiler Collection (GCC), you can also build the required " + newline
                            + "ParaMonte kernel's shared object files on your system by calling " + newline
                            + "the ParaMonte's build() method from within your Python session, " + newline
                            + newline
                            + " import paramonte as pm" + newline
                            + " pm.build()"
                            )
####################################################################################################################################
#### verify
####################################################################################################################################
def verify(reset = True):
    """
    Check (or recheck) the requirements of the installed ParaMonte library.

    **Usage**

    .. code-block:: python

        import paramonte as pm
        pm.verify()

    **Parameters**

        reset
            A boolean whose default value is ``True``. If ``True``,
            a thorough verification of the existence of the required
            libraries will be performed, as if it is the first ParaMonte
            module import.

    **Returns**

        None
    """
    if not isinstance(reset,bool):
        raise Exception ( newline
                        + "The input argument reset must be a logical (boolean) value: True or False"
                        + newline
                        )

    #### require Python >3.6 for type hints

    _MIN_PYTHON = (3,6)
    if sys.version_info < _MIN_PYTHON:
        sys.exit("Python %s.%s or newer is required for ParaMonte. Please install the latest version of Python.\n" % _MIN_PYTHON)

    #### ensure messages are printed only for the first-time import

    if reset: writeVerificationStatusFile("True")

    with open(verificationStatusFilePath, "r") as verificationStatusFile:
        verificationEnabledString = verificationStatusFile.readline()

    # NOTE(review): the status file is assumed to hold exactly one of the tokens
    # "True", "False", or "Testing" with no trailing newline — confirm that
    # writeVerificationStatusFile() writes the token without a newline.
    if verificationEnabledString=="False":
        verificationEnabled = False
    elif verificationEnabledString=="True" or verificationEnabledString=="Testing":
        verificationEnabled = True
    else:
        raise Exception ( newline
                        + "The internal settings of the ParaMonte library appear to have been messed up " + newline
                        + "potentially by the user, the operating system, Python, or other applications. " + newline
                        + "Please reinstall ParaMonte by typing the following commands " + newline
                        + "on a Python-aware command-line interface: " + newline
                        + newline
                        + " pip uninstall paramonte" + newline
                        + " pip install --user --upgrade paramonte" + newline
                        + newline
                        )

    if verificationEnabled:

        #### ensure 64-bit architecture

        if (pm.platform.arch=="x64") and (pm.platform.isWin32 or pm.platform.isLinux or pm.platform.isMacOS):

            displayParaMonteBanner()

            #### display dependency version message

            displayDependencyVersionMessage()

            #### verify module dependencies
            #### On some systems like TACC, the matplotlib causes segmentation fault that is not controllable in any way.
            #### This is apparently a bug in the older versions of matplotlib. Until it is fully resolved, the following
            #### dependency version check is commented out.

            # verifyDependencyVersion()

            #### library path

            if not pm.platform.isWin32: setupUnixPath()

            #### search for the MPI library

            mpi = findMPI()
            if mpi.install.found and not mpi.path.broken:
                # a healthy MPI installation exists: disable verification on future imports
                writeVerificationStatusFile("False")
            else:
                if mpi.install.found and mpi.path.broken:
                    msg=( "An MPI library installation appears to exist on your system, however, " + newline
                        + "some components of the library appear to be missing, or the environmental " + newline
                        + "path to the MPI library installation is corrupted. You can inspect the " + newline
                        + "contents of the environmental path variable for potential path " + newline
                        + "corruptions by typing, " + newline
                        + newline
                        + " import os" + newline
                        + " os.environ[\"PATH\"]" + newline
                        + newline
                        + "on your Python command line. If you or the ParaMonte library (on your behalf) " + newline
                        + "have already successfully installed an MPI library on your system, " + newline
                        + "you can safely ignore this warning and avoid further reinstallation of the " + newline
                        + "MPI library. Otherwise, you can continue to reinstall the MPI library."
                        )
                else:
                    msg=( "The MPI runtime libraries for 64-bit architecture could not be detected " + newline
                        + "on your system. The MPI runtime libraries are required for the parallel " + newline
                        + "ParaMonte simulations."
                        )
                pm.note ( msg = msg + newline
                              + "For Windows and Linux operating systems, you can download and install the " + newline
                              + "Intel MPI runtime libraries, free of charge, from the Intel website, " + newline
                              + newline
                              + " " + pm.website.intel.mpi.home.url + newline
                              + newline
                              + "For macOS (Darwin), you can download and install the Open-MPI library. " + newline
                              + newline
                              + " " + pm.website.openmpi.home.url + newline
                              + newline
                              + "Alternatively, the ParaMonte library can automatically install these " + newline
                              + "libraries for you now. If you don't know how to download and install the " + newline
                              + "correct MPI runtime library version, we strongly recommend that you let the " + newline
                              + "ParaMonte library to install this library for you. If so, ParaMonte will need " + newline
                              + "access to the World-Wide-Web to download the library and will need your " + newline
                              + "administrative permission to install it on your system. Therefore, if " + newline
                              + "you have any active firewall on your system such as ZoneAlarm, please " + newline
                              + "make sure your firewall allows ParaMonte to access the Internet."
                        , marginTop = 1
                        , marginBot = 1
                        , methodName = pm.names.paramonte
                        )
                if verificationEnabledString=="Testing":
                    # first import after an automatic MPI installation attempt:
                    # skip the interactive prompt and disable future verification.
                    writeVerificationStatusFile("False")
                else:
                    isYes = getUserResponse( msg = "\n Do you wish to download and install the MPI runtime library "
                                                   "\n (only needed for parallel simulations) on your system now (y/n)? "
                                           )
                    if isYes:
                        installMPI()
                        # mark "Testing" so the next import re-checks the fresh installation
                        writeVerificationStatusFile("Testing")
                    else:
                        pm.note ( msg = "Skipping the MPI library installation... " + newline
                                      + "It is now the user's responsibility to ensure an MPI runtime library " + newline
                                      + "exists on the system for parallel simulations. " + newline
                                      #+ "If you ever wish to install the MPI libraries via ParaMonte again, " + newline
                                      #+ "you can try: " + newline
                                      #+ newline
                                      #+ " import paramonte as pm" + newline
                                      #+ " pm.verify()" + newline
                                      #+ newline
                                      + "For more information visit: " + newline
                                      + newline
                                      + " " + pm.website.home.url
                                , marginTop = 1
                                , marginBot = 1
                                , methodName = pm.names.paramonte
                                )
                        writeVerificationStatusFile("False")
            dispFinalMessage()
        else:
            # unsupported platform/architecture: warn and attempt a local kernel build
            warnForUnsupportedPlatform()
            build()

    return None
####################################################################################################################################
#### getUserResponse
####################################################################################################################################
def getUserResponse(msg=""):
    """
    Prompt the user with ``msg`` and return ``True`` for a yes ("y") answer
    or ``False`` for a no ("n") answer, reprompting until a valid answer
    is given.

    **Parameters**

        msg
            The prompt string displayed to the user (default: "").

    **Returns**

        A boolean: ``True`` for "y", ``False`` for "n".
    """
    while True:
        # strip surrounding whitespace so answers like " y " are accepted;
        # lower-case so "Y"/"N" are accepted as well.
        answer = input(msg).strip().lower()
        if answer == "y":
            return True
        if answer == "n":
            return False
        print("Invalid answer. Please enter either y or n, then press enter.")
####################################################################################################################################
#### download
####################################################################################################################################
def download(url,filePath):
    """
    Fetch the resource at ``url`` and store it on disk at ``filePath``.
    The transfer is streamed in chunks rather than read into memory at once.
    """
    import urllib.request
    import shutil
    response = urllib.request.urlopen(url)
    with response, open(filePath, 'wb') as localFile:
        shutil.copyfileobj(response, localFile)
    return None
####################################################################################################################################
#### warnForUnsupportedPlatform
####################################################################################################################################
def warnForUnsupportedPlatform():
    """
    Emit a warning that the prebuilt ParaMonte sampler kernel is unavailable
    for the current platform/architecture, while the generic Python interface
    (post-processing and visualization) remains usable. The exact wording of
    the message is user-facing and intentionally left untouched.
    """
    pm.warn ( msg = "The ParaMonte sampler kernel is currently exclusively available on " + newline
                  + "AMD64 (64-bit) architecture for Windows/Linux/Darwin Operating Systems (OS). " + newline
                  + "Your system appears to be of a different architecture or OS. As a result, " + newline
                  + "the core sampler routines of ParaMonte will not be available on your system. " + newline
                  + "However, the generic Python interface of ParaMonte will be available on your " + newline
                  + "system, which can be used for post-processing and visualization of the output " + newline
                  + "files from already-performed ParaMonte simulations or other similar Monte " + newline
                  + "Carlo simulations. There are ongoing efforts, right now as you read this " + newline
                  + "message, to further increase the availability of ParaMonte library on a " + newline
                  + "wider-variety of platforms and architectures. Stay tuned for updates by " + newline
                  + "visiting, " + newline
                  + newline
                  + " " + pm.website.home.url + newline
                  + newline
                  + "That said, " + newline
                  + newline
                  + "if your platform is non-Windows and is compatible with the GNU Compiler " + newline
                  + "Collection (GCC), you can also build the required ParaMonte kernel's " + newline
                  + "shared object files on your system by calling ParaMonte module's " + newline
                  + "build() function from within your Python environment."
            , marginTop = 1
            , marginBot = 1
            , methodName = pm.names.paramonte
            )
    return None
####################################################################################################################################
#### getBashrcContents
####################################################################################################################################
def getBashrcContents():
    """
    Return the current contents of the user's ``~/.bashrc`` file as a string.
    If the file does not exist, create an empty one and return "".
    """
    rcPath = os.path.expanduser("~/.bashrc")
    contents = ""
    if os.path.isfile(rcPath):
        with open(rcPath, "r") as rcFile:
            contents = rcFile.read()
    else:
        # touch the file so that later appends have a target to write to
        with open(rcPath, "w"):
            pass
    return contents
####################################################################################################################################
#### getBashProfileContents
####################################################################################################################################
def getBashProfileContents():
    """
    Return the contents of ``~/.bash_profile`` (or "" if it does not exist).
    If the file exists but never references ``.bashrc``, append a line that
    sources ``~/.bashrc`` so that login shells pick up the ParaMonte setup.
    """
    profilePath = os.path.expanduser("~/.bash_profile")
    if not os.path.isfile(profilePath):
        return ""
    with open(profilePath, "r") as profileFile:
        contents = profileFile.read()
    if ".bashrc" not in contents:
        with open(profilePath, "a+") as profileFile:
            profileFile.write("\n[ -f $HOME/.bashrc ] && . $HOME/.bashrc\n")
    return contents
####################################################################################################################################
#### setupUnixPath
####################################################################################################################################
def _bashrcAppendLine(line):
    """Append a single line of text to ~/.bashrc (forcing it writable first,
    mirroring the original per-command `chmod 777` behavior)."""
    os.system( "chmod 777 ~/.bashrc && echo '" + line + "' >> ~/.bashrc" )

def _bashrcSource():
    """Re-execute ~/.bashrc so the exports take effect in child shells."""
    os.system( "chmod 777 ~/.bashrc && sh ~/.bashrc" )

def _appendLocalInstallSetup(bashrcContents, label, binDir, libDir):
    """
    Append PATH / LD_LIBRARY_PATH export blocks for a ParaMonte local
    installation (label is "GNU" or "MPI") to ~/.bashrc, skipping any
    export command already present in `bashrcContents`.
    """
    pathcmd = None if binDir is None else "export PATH=" + binDir + ":$PATH"
    dlibcmd = None if libDir is None else "export LD_LIBRARY_PATH=" + libDir + ":$LD_LIBRARY_PATH"
    # BUGFIX: the original evaluated `pathcmd not in bashrcContents` even when
    # pathcmd was None (e.g. a lib directory exists but no bin directory),
    # raising TypeError: 'in <string>' requires string as left operand.
    needPath = (pathcmd is not None) and (pathcmd not in bashrcContents)
    needDlib = (dlibcmd is not None) and (dlibcmd not in bashrcContents)
    if needPath or needDlib:
        os.system( "chmod 777 ~/.bashrc")
        _bashrcAppendLine("")
        _bashrcAppendLine("# >>> ParaMonte local " + label + " installation setup >>>")
        if needPath:
            _bashrcAppendLine("if [ -z ${PATH+x} ]; then")
            _bashrcAppendLine(" export PATH=.")
            _bashrcAppendLine("fi")
            _bashrcAppendLine(pathcmd)
        if needDlib:
            _bashrcAppendLine("if [ -z ${LD_LIBRARY_PATH+x} ]; then")
            _bashrcAppendLine(" export LD_LIBRARY_PATH=.")
            _bashrcAppendLine("fi")
            _bashrcAppendLine(dlibcmd)
        _bashrcAppendLine("# <<< ParaMonte local " + label + " installation setup <<<")
        _bashrcAppendLine("")
        _bashrcSource()

def setupUnixPath():
    """
    Ensure the user's ~/.bashrc exports the runtime library paths required by
    ParaMonte on Unix-like systems:

        1. LD_LIBRARY_PATH pointing at the ParaMonte shared-library directory;
        2. PATH / LD_LIBRARY_PATH entries for any ParaMonte-local GNU and MPI
           prerequisite installations found under the package root.

    Each block is appended only if its export command is not already present
    in ~/.bashrc. Returns None.
    """
    bashrcContents = getBashrcContents()

    #### ParaMonte shared-library path

    dlibcmd = "export LD_LIBRARY_PATH=" + pm.path.lib + ":$LD_LIBRARY_PATH"
    if dlibcmd not in bashrcContents:
        os.system( "chmod 777 ~/.bashrc")
        _bashrcAppendLine("")
        _bashrcAppendLine("# >>> ParaMonte shared library setup >>>")
        _bashrcAppendLine("if [ -z ${LD_LIBRARY_PATH+x} ]; then")
        _bashrcAppendLine(" export LD_LIBRARY_PATH=.")
        _bashrcAppendLine("fi")
        _bashrcAppendLine(dlibcmd)
        _bashrcAppendLine("# <<< ParaMonte shared library setup <<<")
        _bashrcAppendLine("")
        _bashrcSource()

    #### local GNU / MPI prerequisite installations

    localInstallDir = getLocalInstallDir()
    if localInstallDir.root is not None:
        _appendLocalInstallSetup(bashrcContents, "GNU", localInstallDir.gnu.bin, localInstallDir.gnu.lib)
        _appendLocalInstallSetup(bashrcContents, "MPI", localInstallDir.mpi.bin, localInstallDir.mpi.lib)

    return None
####################################################################################################################################
#### getLocalInstallDir
####################################################################################################################################
def getLocalInstallDir():
    """
    Return a Struct describing the root/bin/lib directories of any local
    ParaMonte prerequisite installations (MPICH "mpi", GNU "gnu",
    OpenCoarrays "caf") found under ``<root>/paramonte-master``. Every
    component that cannot be found is left as None.
    """
    localInstallDir = Struct()
    localInstallDir.root = None

    # pre-populate every component with an all-None placeholder
    for componentName in ("mpi", "gnu", "caf"):
        component = Struct()
        component.root = None
        component.bin = None
        component.lib = None
        setattr(localInstallDir, componentName, component)

    pmGitRootDir = os.path.join( pm.path.root , "paramonte-master" )
    if os.path.isdir(pmGitRootDir):
        localInstallDir.root = pmGitRootDir
        installationsDir = os.path.join( localInstallDir.root, "build", "prerequisites", "prerequisites", "installations" )
        # (component name, installation root, library subdirectory name)
        componentSpecs = ( ("mpi", os.path.join(installationsDir, "mpich", "3.2"), "lib")
                         , ("gnu", os.path.join(installationsDir, "gnu", "8.3.0"), "lib64")
                         , ("caf", os.path.join(installationsDir, "opencoarrays", "2.8.0"), "lib64")
                         )
        for componentName, rootDir, libDirName in componentSpecs:
            component = getattr(localInstallDir, componentName)
            if os.path.isdir(rootDir):
                component.root = rootDir
                binDir = os.path.join(rootDir, "bin")
                if os.path.isdir(binDir): component.bin = binDir
                libDir = os.path.join(rootDir, libDirName)
                if os.path.isdir(libDir): component.lib = libDir

    return localInstallDir
####################################################################################################################################
#### findMPI
####################################################################################################################################
def findMPI():
    """
    Search the system for a usable MPI runtime installation and return a
    structure containing the paths to the different components of the MPI
    library.

    The search strategy depends on the platform:

        - Windows : look for an Intel MPI ``...\\mpi\\intel64\\bin`` folder on PATH;
        - Linux   : look for an Intel MPI ``...linux...mpi...intel64`` folder on PATH;
        - macOS   : require both a GCC 10 ``gfortran`` and an Open-MPI ``mpiexec`` on PATH;
        - other   : fall back to a ParaMonte-local MPICH installation, if any.

    If nothing is found, ``mpi.path.broken`` is set to True and one last
    heuristic search for hidden installations is attempted.
    """
    mpi = Struct()
    mpi.path = Struct()
    mpi.install = Struct()
    mpi.install.bin = Struct()
    mpi.install.bin.mpiexec = Struct()
    mpi.install.bin.mpivars = Struct()

    # pessimistic defaults: nothing found, path not known to be broken
    mpi.path.broken = False
    mpi.install.found = False
    mpi.install.bin.found = False
    mpi.install.bin.path = None
    mpi.install.bin.mpiexec.found = False
    mpi.install.bin.mpiexec.path = None
    mpi.install.bin.mpivars.found = False
    mpi.install.bin.mpivars.path = None

    if pm.platform.isWin32:

        pathList = os.environ['PATH'].split(";")
        for thisPath in pathList:
            # crude normalized match against ...\mpi\intel64\bin
            pathLower = thisPath.lower().replace("\\","")
            if ("mpiintel64bin" in pathLower):
                mpi.install.bin.found = True
                mpi.install.bin.path = thisPath
                mpiexecFilePath = os.path.join(mpi.install.bin.path,"mpiexec.exe")
                if os.path.isfile(mpiexecFilePath):
                    mpi.install.bin.mpiexec.found = True
                    mpi.install.bin.mpiexec.path = mpiexecFilePath
                mpivarsFilePath = os.path.join( thisPath, "mpivars.bat" )
                if os.path.isfile(mpivarsFilePath):
                    mpi.install.bin.mpivars.found = True
                    mpi.install.bin.mpivars.path = mpivarsFilePath
                    mpivarsCommand = '"' + mpivarsFilePath + '"'
                    pm.note ( msg = "Intel MPI library for 64-bit architecture detected at: " + newline
                                  + newline
                                  + " " + thisPath + newline
                                  + newline
                                  + "To perform ParaMonte simulations in parallel on a single node, " + newline
                                  + "run the following two commands, in the form and order specified, " + newline
                                  + "on a Python-aware mpiexec-aware command-line interface such as " + newline
                                  + "Anaconda3 Windows command prompt: " + newline
                                  + newline
                                  + " " + mpivarsCommand + newline
                                  + newline
                                  + " mpiexec -localonly -n NUM_PROCESSES python main.py" + newline
                                  + newline
                                  + "where, " + newline
                                  + newline
                                  + " 0. the first command defines the essential environment variables, " + newline
                                  + " and the second command runs in the simulation in parallel, where, " + newline
                                  + " 1. you should replace NUM_PROCESSES with the number of processors " + newline
                                  + " you wish to assign to your simulation task and, " + newline
                                  + " 2. the flag '-localonly' indicates a parallel simulation on only " + newline
                                  + " a single node (this flag will obviate the need for the MPI " + newline
                                  + " library credentials registration). For more information, visit: " + newline
                                  + " " + pm.website.intel.mpi.windows.url + " " + newline
                                  + " 3. main.py is the Python file which serves as the entry point to " + newline
                                  + " your simulation, where you call the ParaMonte sampler routines. " + newline
                                  + newline
                                  + "Note that the above two commands must be executed on a command-line that " + newline
                                  + "recognizes both Python and mpiexec applications, such as the Anaconda " + newline
                                  + "command-line interface. For more information, in particular, on how " + newline
                                  + "to register to run Hydra services for multi-node simulations " + newline
                                  + "on Windows servers, visit: " + newline
                                  + newline
                                  + " " + pm.website.home.url
                            , marginTop = 1
                            , marginBot = 1
                            , methodName = pm.names.paramonte
                            )
                    # write a batch file that later shells can call to define the MPI environment
                    setupFilePath = os.path.join( pm.path.auxil, "setup.bat" )
                    with open(setupFilePath, "w") as setupFile:
                        setupFile.write("@echo off\n")
                        setupFile.write("cd " + thisPath + " && mpivars.bat quiet\n")
                        setupFile.write("cd " + pm.path.root + "\n")
                        setupFile.write("@echo on\n")
                mpi.install.found = mpi.install.bin.found and mpi.install.bin.mpiexec.found and mpi.install.bin.mpivars.found
                if mpi.install.found: break

    elif pm.platform.isLinux:

        pathList = os.environ['PATH'].split(":")
        for thisPath in pathList:
            # crude normalized match against .../linux/mpi/intel64
            pathLower = thisPath.lower().replace("/","")
            if ("linuxmpiintel64" in pathLower):
                mpi.install.bin.found = True
                mpi.install.bin.path = thisPath
                mpiexecFilePath = os.path.join( mpi.install.bin.path, "mpiexec" )
                if os.path.isfile(mpiexecFilePath):
                    mpi.install.bin.mpiexec.found = True
                    mpi.install.bin.mpiexec.path = mpiexecFilePath
                mpivarsFilePath = os.path.join( thisPath, "mpivars.sh" )
                if os.path.exists(mpivarsFilePath):
                    mpi.install.bin.mpivars.found = True
                    mpi.install.bin.mpivars.path = mpivarsFilePath
                    mpivarsCommand = '"' + mpivarsFilePath + '"'
                    pm.note ( msg = "Intel MPI library for 64-bit architecture detected at: " + newline
                                  + newline
                                  + " " + thisPath + newline
                                  + newline
                                  + "To perform ParaMonte simulations in parallel on a single node, " + newline
                                  + "run the following two commands, in the form and order specified, " + newline
                                  + "in a Bash shell (terminal), " + newline
                                  + newline
                                  + " source " + mpivarsCommand + newline
                                  + newline
                                  + " mpiexec -n NUM_PROCESSES python main.py" + newline
                                  + newline
                                  + "where, " + newline
                                  + newline
                                  + " 0. the first command defines the essential environment variables" + newline
                                  + " and the second command runs in the simulation in parallel, where," + newline
                                  + " 1. you should replace NUM_PROCESSES with the number of processors " + newline
                                  + " you wish to assign to your simulation task, " + newline
                                  + " 2. main.py is the Python file which serves as the entry point to " + newline
                                  + " your simulation, where you call ParaMonte sampler routines. " + newline
                                  + newline
                                  + "For more information on how to install and use and run parallel " + newline
                                  + "ParaMonte simulations on Linux systems, visit: " + newline
                                  + newline
                                  + " " + pm.website.home.url
                            , marginTop = 1
                            , marginBot = 1
                            , methodName = pm.names.paramonte
                            )
                    try:
                        # write a setup script that later shells can source to define the MPI environment
                        setupFilePath = os.path.join( pm.path.auxil, "setup.sh" )
                        with open(setupFilePath, "w") as setupFile:
                            setupFile.write("source " + mpivarsCommand)
                    except:
                        # a read-only installation directory is tolerated: the setup file is a convenience
                        pm.warn ( msg = "Failed to create the MPI setup file. " + newline
                                      + "It looks like the ParaMonte library directory is read-only. " + newline
                                      + "This can be potentially problematic. Skipping for now..."
                                , marginTop = 1
                                , marginBot = 1
                                , methodName = pm.names.paramonte
                                )
                mpi.install.found = mpi.install.bin.found and mpi.install.bin.mpiexec.found and mpi.install.bin.mpivars.found
                if mpi.install.found: break

    elif pm.platform.isMacOS:

        import shutil

        # gfortran from GCC 10 is required alongside Open-MPI on macOS
        gfortranPath = None
        try:
            import subprocess
            gfortranVersion = subprocess.run(args=["gfortran", "--version"],capture_output=True)
            if "GCC 10." in str(gfortranVersion.stdout): gfortranPath = shutil.which("gfortran")
        except:
            warnings.warn("Failed to capture the GNU Compiler Collection version...")

        # the detected mpiexec must report itself as Open-MPI
        mpi.install.bin.mpiexec.path = None
        try:
            import subprocess
            mpiexecVersion = subprocess.run(args=["mpiexec", "--version"],capture_output=True)
            if "open-mpi" in str(mpiexecVersion.stdout): mpi.install.bin.mpiexec.path = shutil.which("mpiexec")
        except:
            warnings.warn("Failed to capture the mpiexec version...")

        if (mpi.install.bin.mpiexec.path is not None) and (gfortranPath is not None):
            mpi.install.bin.found = True
            mpi.install.bin.mpiexec.found = True
            mpi.install.bin.mpivars.found = True # dummy: no mpivars script exists for Open-MPI
            mpi.install.bin.path = os.path.dirname(mpi.install.bin.mpiexec.path)
            pm.note ( msg = "MPI runtime libraries detected at: " + newline
                          + newline
                          + " " + mpi.install.bin.path + newline
                          + newline
                          + "To perform ParaMonte simulations in parallel on a single node, " + newline
                          + "run the following command, in the form and order specified, " + newline
                          + "in a Bash shell (terminal), " + newline
                          + newline
                          + " mpiexec -n NUM_PROCESSES python main.py" + newline
                          + newline
                          + "where, " + newline
                          + newline
                          + " 0. the first command defines the essential environment variables " + newline
                          + " and the second command runs in the simulation in parallel, where, " + newline
                          + " 1. you should replace NUM_PROCESSES with the number of processors " + newline
                          + " you wish to assign to your simulation task, " + newline
                          + " 2. main.py is the Python file which serves as the entry point to " + newline
                          + " your simulation, where you call the ParaMonte sampler routines. " + newline
                          + newline
                          + "For more information on how to install and use and run parallel ParaMonte " + newline
                          + "simulations on the macOS (Darwin) operating systems, visit:" + newline
                          + newline
                          + pm.website.home.url
                    , marginTop = 1
                    , marginBot = 1
                    , methodName = pm.names.paramonte
                    )
            mpi.install.found = mpi.install.bin.found and mpi.install.bin.mpiexec.found and mpi.install.bin.mpivars.found

    else:

        # other Unix platforms: fall back to a ParaMonte-local MPICH installation
        LocalInstallDir = getLocalInstallDir()
        if (LocalInstallDir.mpi.bin is not None) and (LocalInstallDir.mpi.lib is not None):
            mpi.install.bin.found = True
            mpi.install.bin.path = LocalInstallDir.mpi.bin
            mpiexecFilePath = os.path.join(mpi.install.bin.path,"mpiexec")
            if os.path.isfile(mpiexecFilePath):
                mpi.install.bin.mpiexec.found = True
                mpi.install.bin.mpiexec.path = mpiexecFilePath
            mpi.install.bin.mpivars.found = mpi.install.bin.found and mpi.install.bin.mpiexec.found # dummy
            mpi.install.found = mpi.install.bin.found and mpi.install.bin.mpiexec.found and mpi.install.bin.mpivars.found

    #### one last try to find the MPI library if not found yet

    if not mpi.install.found:
        mpi.path.broken = True
        if pm.platform.isLinux:
            # NOTE(review): getDefaultIntelLinuxMpiPath() -> getPrereqs() assumes the
            # dependency list contains an Intel MPI entry; otherwise
            # prereqs.mpi.intel.version is never set — confirm upstream guarantees.
            defaultIntelLinuxMpiPath = getDefaultIntelLinuxMpiPath()
            if defaultIntelLinuxMpiPath.mpiRootDirNotFound:
                return mpi
            else:
                mpi.install.found = True
                pm.warn ( msg = "The PATH environmental variable of your Bash terminal does not point to " + newline
                              + "any current installation of the Intel MPI runtime libraries on your system, " + newline
                              + "however, ParaMonte has detected a hidden installation of the Intel MPI " + newline
                              + "runtime libraries on your system at, " + newline
                              + newline
                              + " " + defaultIntelLinuxMpiPath.mpiDefaultRootDirList[-1] + newline
                              + newline
                              + "Include this path to your terminal's PATH environmental variable to ensure " + newline
                              + "the MPI runtime libraries will be properly detected in the future."
                        , marginTop = 1
                        , marginBot = 1
                        , methodName = pm.names.paramonte
                        )
                # mpi.install.bin.path = setupIntelLinuxMpiPath(defaultIntelLinuxMpiPath)
        elif pm.platform.isWin32:
            pm.warn ( msg = "Failed to detect the Intel MPI library for 64-bit architecture." + newline
                          + "Now searching through the installed applications..." + newline
                          + "This may take some time..."
                    , marginTop = 1
                    , marginBot = 1
                    , methodName = pm.names.paramonte
                    )
            # query the Windows installed-product registry via wmic and grep for Intel MPI
            import subprocess
            installedApp = str(subprocess.run(args=["wmic","product","get","Name,","Version"],capture_output=True).stdout)
            if "Intel MPI" in installedApp:
                mpi.install.found = True
                pm.note ( msg = "Possible Intel MPI installation detected:"
                        , marginTop = 0
                        , marginBot = 1
                        , methodName = pm.names.paramonte
                        )
                installedAppList = str(installedApp).replace("\\r","").split("\\n")
                for app in installedAppList:
                    appClean = app.replace(chr(13),"").replace(chr(10),"") # remove cr, nl
                    if "Intel MPI" in appClean:
                        pm.note ( msg = appClean
                                , marginTop = 0
                                , marginBot = 0
                                , methodName = pm.names.paramonte
                                )
    return mpi
####################################################################################################################################
#### getPrereqs
####################################################################################################################################
def getPrereqs(DependencyList = None):
    """
    Return a Struct describing the ParaMonte prerequisites, in particular the
    Intel MPI runtime installer entry found in the dependency list. On
    platforms other than Windows/Linux, only the raw dependency list is set.
    """
    prereqs = Struct()
    prereqs.mpi = Struct()
    prereqs.mpi.intel = Struct()
    prereqs.list = DependencyList if DependencyList is not None else getDependencyList()

    # platform-specific naming scheme of the Intel MPI runtime installer
    if pm.platform.isLinux:
        intelMpiFilePrefix, intelMpiFileSuffix = "l_mpi-rt_" , ".tgz"
    elif pm.platform.isWin32:
        intelMpiFilePrefix, intelMpiFileSuffix = "w_mpi-rt_p_" , ".exe"
    else:
        return prereqs

    for dependency in prereqs.list:
        if intelMpiFilePrefix in dependency and intelMpiFileSuffix in dependency:
            prereqs.mpi.intel.fullFileName = dependency
            prereqs.mpi.intel.fullFilePath = os.path.join( pm.path.lib, dependency )
            prereqs.mpi.intel.fileName = dependency.split(intelMpiFileSuffix)[0]
            prereqs.mpi.intel.version = prereqs.mpi.intel.fileName.split(intelMpiFilePrefix)[1]

    return prereqs
####################################################################################################################################
#### getDefaultIntelLinuxMpiPath
####################################################################################################################################
def getDefaultIntelLinuxMpiPath(prereqs = None):
    """
    Search the default installation locations ("/opt" and the user's home
    directory) for an Intel MPI installation matching the prerequisite
    version, and return a Struct describing what was (or was not) found.
    """
    if prereqs is None: prereqs = getPrereqs()
    mpiPath = Struct()
    mpiPath.mpiDefaultRootDirList = []
    mpiPath.mpiRootDirNotFound = True
    mpiPath.mpivarsDefaultFilePathList = []
    mpiPath.installationRootDirList = [ "/opt", pm.path.home ]
    mpiPath.mpiTrunkDir = os.path.join("intel", "compilers_and_libraries_" + prereqs.mpi.intel.version, "linux", "mpi", "intel64")
    for rootDir in mpiPath.installationRootDirList:
        candidateRoot = os.path.join(rootDir, mpiPath.mpiTrunkDir)
        mpiPath.mpiDefaultRootDirList.append(candidateRoot)
        mpiPath.mpivarsDefaultFilePathList.append(os.path.join(candidateRoot, "bin", "mpivars.sh"))
        if os.path.isdir(candidateRoot):
            # stop at the first existing installation root
            mpiPath.mpiRootDirNotFound = False
            break
    return mpiPath
####################################################################################################################################
#### getDependencyList
####################################################################################################################################
def getDependencyList():
    """
    Read the platform-specific hidden dependency file from the ParaMonte
    auxiliary directory and return the list of dependency file names,
    excluding comment lines (those starting with "!") and blank lines.
    """
    fileName = ".dependencies_"
    if pm.platform.isWin32: fileName = fileName + "windows"
    if pm.platform.isMacOS: fileName = fileName + "macos"
    if pm.platform.isLinux: fileName = fileName + "linux"
    with open(os.path.join(pm.path.auxil, fileName), "r") as depFile:
        lines = depFile.read().splitlines()
    # BUGFIX: also skip blank lines — the original `item[0] != "!"` test
    # raised an IndexError on an empty line in the dependency file.
    return [item for item in lines if item and item[0] != "!"]
####################################################################################################################################
#### installMPI
####################################################################################################################################
def installMPI():
    """
    Install the MPI runtime libraries required for running the ParaMonte
    samplers in parallel on the current platform:

        -   On Windows and Linux, the Intel MPI installer is first downloaded
            from the assets of the ParaMonte GitHub releases and then executed.
            On Linux, the user's ``~/.bashrc`` file is also amended to source
            the Intel ``mpivars.sh`` environment setup script.
        -   On macOS, the Open-MPI library (plus cmake and the GNU compilers)
            is installed via Homebrew through ``buildParaMontePrereqsForMac()``.
        -   On any other (unrecognized) operating system, a full ParaMonte
            library build from scratch is attempted via ``build()``.

    **Parameters**

        None

    **Returns**

        None
    """
    if pm.platform.isWin32 or pm.platform.isLinux:
        pm.note ( msg = "Downloading the ParaMonte parallel library prerequisites... " + newline
                      + "Please make sure your firewall allows access to the Internet. "
                , methodName = pm.names.paramonte
                , marginTop = 1
                , marginBot = 1
                )
        #### Download each prerequisite asset from the current ParaMonte release;
        #### on failure, keep falling back to older releases until the asset is
        #### found or the release list is exhausted.
        prereqs = getPrereqs()
        for dependency in prereqs.list:
            fullFilePath = os.path.join(pm.path.lib, dependency)
            thisVersion = pm.version.kernel.dump()
            while thisVersion is not None:
                try:
                    download( url = pm.website.github.release.url + "/download/" + thisVersion + "/" + dependency
                            , filePath = fullFilePath
                            )
                    break
                except:
                    #### asset missing in this release: try the immediately older release.
                    thisVersion = getPreviousVersion(thisVersion)
                    if thisVersion is None:
                        pm.warn ( msg = "Exhausted all releases of the ParaMonte library in search " + newline
                                      + "of the prerequisites, but could not find: " + dependency + newline
                                      + "Please report this issue at " + newline
                                      + newline
                                      + "    " + pm.website.github.issues.url + newline
                                      + newline
                                      + "In the meantime, visit, " + newline
                                      + newline
                                      + "    " + pm.website.home.url + newline
                                      + newline
                                      + "for instructions to manually install the MPI library on your " + newline
                                      + "system. Aborting the automatic MPI installation by ParaMonte..."
                                , methodName = pm.names.paramonte
                                , marginTop = 1
                                , marginBot = 1
                                )
                        return
        pm.note ( msg = "Installing the Intel MPI library for 64-bit architecture... " + newline
                      + "file location: " + prereqs.mpi.intel.fullFilePath
                , methodName = pm.names.paramonte
                , marginTop = 1
                , marginBot = 1
                )
        pm.warn ( msg = "Please do not change the default installation location of " + newline
                      + "the MPI library suggested by the installer. If you do change " + newline
                      + "the default path, the onus will be on you to ensure the path " + newline
                      + "to the MPI runtime libraries exist in the environmental PATH " + newline
                      + "variable of your session."
                , methodName = pm.names.paramonte
                , marginTop = 1
                , marginBot = 1
                )
        currentDir = os.getcwd()
        if pm.platform.isWin32:
            #### On Windows, the downloaded asset is an executable installer: run it directly.
            err = 0
            err = os.system(prereqs.mpi.intel.fullFilePath)
            if err==0:
                #writeVerificationStatusFile("Testing")
                pm.note ( msg = "Intel MPI library installation appears to have succeeded. " + newline
                              + "Now close your Python environment and the command-line interface " + newline
                              + "and reopen a new fresh (Anaconda) command prompt."
                        , methodName = pm.names.paramonte
                        , marginTop = 1
                        , marginBot = 1
                        )
            else:
                pm.warn ( msg = "Intel MPI library installation might have failed. Exit flag: {}.".format(err)
                        , methodName = pm.names.paramonte
                        , marginTop = 1
                        , marginBot = 1
                        )
        if pm.platform.isLinux:
            #### On Linux, the downloaded asset is a tarball: extract it and run
            #### the bundled graphical installation script.
            try:
                import tarfile
                tf = tarfile.open(prereqs.mpi.intel.fullFilePath)
                tf.extractall(path=pm.path.lib)
                mpiExtractDir = os.path.join(pm.path.lib, prereqs.mpi.intel.fileName)
                pm.note ( msg = "If this is your personal computer and you have opened your Python " + newline
                              + "session with superuser (sudo) privileges, then you can choose " + newline
                              + newline
                              + "    'install as root'" + newline
                              + newline
                              + "in the graphical user interface that appears in your session. " + newline
                              + "Otherwise, if you are using the ParaMonte library on a public " + newline
                              + "server, for example, on a supercomputer, or you do not have " + newline
                              + "superuser (sudo) privileges on your system, then choose " + newline
                              + "the third option: " + newline
                              + newline
                              + "    'install as current user'"
                        , methodName = pm.names.paramonte
                        , marginTop = 1
                        , marginBot = 1
                        )
                mpiInstallScriptPath = os.path.join( mpiExtractDir, "install_GUI.sh" )
                if not os.path.exists(mpiInstallScriptPath):
                    pm.abort( msg = "Internal error occurred." + newline
                                  + "Failed to detect the Intel MPI installation Bash script." + newline
                                  + "Please report this issue at " + newline
                                  + newline
                                  + "    " + pm.website.github.issues.url + newline
                                  + newline
                                  + "Visit " + pm.website.home.url + " for instructions " + newline
                                  + "to build ParaMonte object files on your system."
                            , methodName = pm.names.paramonte
                            , marginTop = 1
                            , marginBot = 1
                            )
            except Exception as e:
                print(str(e))
                pm.abort( msg = "Unzipping of Intel MPI runtime library tarball failed." + newline
                              + "Make sure you have tar software installed on your system and try again."
                        , methodName = pm.names.paramonte
                        , marginTop = 1
                        , marginBot = 1
                        )
            #### Make the installer executable; failure here is nonfatal since the
            #### script may already carry the executable permission bit.
            err = os.system("chmod +x " + mpiInstallScriptPath)
            if err != 0:
                pm.warn ( msg = "The following action failed: " + newline
                              + newline
                              + "    chmod +x " + mpiInstallScriptPath + newline
                              + newline
                              + "skipping..."
                        , methodName = pm.names.paramonte
                        , marginTop = 1
                        , marginBot = 1
                        )
            os.chdir(mpiExtractDir)
            import subprocess
            try:
                subprocess.check_call( mpiInstallScriptPath, shell = True )
            except Exception as e:
                print(str(e))
                pm.abort ( msg = "Intel MPI runtime libraries installation for " + newline
                               + "64-bit architecture appears to have failed." + newline
                               + "Please report this error at:" + newline
                               + newline
                               + "    " + pm.website.github.issues.url + newline
                               + newline
                               + "Visit " + pm.website.home.url + " for more instructions " + newline
                               + "to build and use the ParaMonte library on your system."
                         , methodName = pm.names.paramonte
                         , marginTop = 1
                         , marginBot = 1
                         )
            pm.note ( msg = "Intel MPI runtime libraries installation for " + newline
                          + "64-bit architecture appears to have succeeded. " + newline
                          + "Searching for the MPI runtime environment setup file..."
                    , methodName = pm.names.paramonte
                    , marginTop = 1
                    , marginBot = 1
                    )
            os.chdir(currentDir)
            #### Locate the installed Intel MPI root directory among the default
            #### installation roots ("/opt" and the user's home directory),
            #### prompting the user for a custom root path if the search fails.
            setupFilePath = os.path.join( pm.path.auxil, "setup.sh" )
            installationRootDirList = [ "/opt", pm.path.home ]
            mpivarsDefaultFilePathList = ["",""]
            mpiRootDir = ["",""]
            mpiRootDirNotFound = True
            while mpiRootDirNotFound:
                mpiRootDir = []
                mpivarsDefaultFilePathList = []
                mpiTrunkDir = os.path.join( "intel", "compilers_and_libraries_" + prereqs.mpi.intel.version, "linux", "mpi", "intel64" )
                for installationRootDir in installationRootDirList:
                    mpiRootDir.append( os.path.join( installationRootDir, mpiTrunkDir ) )
                    mpivarsDefaultFilePathList.append( os.path.join( mpiRootDir[-1] , "bin" , "mpivars.sh" ) )
                    if os.path.isdir(mpiRootDir[-1]):
                        mpiRootDirNotFound = False
                        break
                if mpiRootDirNotFound:
                    pm.warn ( msg = "Failed to detect the installation root path for Intel MPI runtime " + newline
                                  + "libraries for 64-bit architecture on your system. If you specified " + newline
                                  + "a different installation root path at the time of installation, " + newline
                                  + "please copy and paste it below. Note that the installation root " + newline
                                  + "path is part of the path that replaces: " + newline
                                  + newline
                                  + "    " + "opt" + newline
                                  + newline
                                  + "in the following path: " + newline
                                  + newline
                                  + "    " + os.path.join( "opt" , mpiTrunkDir )
                            , methodName = pm.names.paramonte
                            , marginTop = 1
                            , marginBot = 1
                            )
                    answer = input  ( "\n    Please type the root path of MPI installation below and press ENTER."
                                    + "\n    If you don't know the root path, simply press ENTER to quit:\n"
                                    )
                    if len(answer.strip())==0:
                        pm.warn ( msg = "Skipping the MPI runtime library environmental path setup..."
                                , methodName = pm.names.paramonte
                                , marginTop = 1
                                , marginBot = 1
                                )
                        break
                    else:
                        #### retry the search with the user-provided root path.
                        installationRootDirList = [ answer ]
                        continue
            if mpiRootDirNotFound:
                pm.warn ( msg = "Failed to find the MPI runtime environment setup file on your system. " + newline
                              + "This is highly unusual. Normally, Intel MPI libraries is installed " + newline
                              + "in the following directory: " + newline
                              + newline
                              + "    " + mpiRootDir[0] + newline
                              + newline
                              + "or," + newline
                              + newline
                              + "    " + mpiRootDir[1] + newline
                              + newline
                              + "If you cannot manually find the Intel MPI installation directory," + newline
                              + "it is likely that the installation might have somehow failed. " + newline
                              + "If you do find the installation directory, try to locate the " + newline
                              + "'mpivars.sh' file which is normally installed in the following path:" + newline
                              + newline
                              + "    " + mpivarsDefaultFilePathList[0] + newline
                              + newline
                              + "or, " + newline
                              + newline
                              + "    " + mpivarsDefaultFilePathList[1] + newline
                              + newline
                              + "Before attempting to run any parallel ParaMonte simulation, " + newline
                              + "make sure you source this file, like the following: " + newline
                              + newline
                              + "    source " + mpivarsDefaultFilePathList[0] + newline
                              + newline
                              + "or, " + newline
                              + newline
                              + "    source " + mpivarsDefaultFilePathList[1] + newline
                              + newline
                              + "where you will have to replace the path in the above with the " + newline
                              + "correct path that you find on your system."
                        , methodName = pm.names.paramonte
                        , marginTop = 1
                        , marginBot = 1
                        )
            else:
                mpiBinDir = os.path.join( mpiRootDir[-1], "bin" )
                mpiLibDir = os.path.join( mpiRootDir[-1], "lib" )
                mpivarsFilePath = os.path.join( mpiBinDir, "mpivars.sh" )
                if os.path.isfile(mpivarsFilePath):
                    #### Record the MPI bin/lib paths and the mpivars source command
                    #### in the auxiliary setup.sh file for later ParaMonte sessions.
                    with open(setupFilePath, "w") as setupFile:
                        setupFile.write(mpiBinDir+"\n")
                        setupFile.write(mpiLibDir+"\n")
                        setupFile.write("source " + mpivarsFilePath)
                    pm.note ( msg = "To ensure all MPI routine environmental variables \n"
                                  + "are properly loaded, source the following Bash script \n"
                                  + "in your Bash environment before calling mpiexec, like:\n\n"
                                  + "    source " + mpivarsFilePath + "\n\n"
                                  + "Alternatively, ParaMonte can also automatically add \n"
                                  + "the required script to your '.bashrc' file, so that \n"
                                  + "all required MPI environmental variables are loaded \n"
                                  + "automatically before any ParaMonte usage from any \n"
                                  + "Bash command line on your system."
                            , methodName = pm.names.paramonte
                            , marginTop = 1
                            , marginBot = 1
                            )
                    #isYes = getUserResponse ( msg = "\n    MPI runtime variables are essential for parallel ParaMonte"
                    #                                "\n    simulation. Would you like ParaMonte to add the MPI runtime"
                    #                                "\n    environmental variables to your Bash environment (y/n)? "
                    #                        )
                    #
                    #if isYes:
                    #### Append the mpivars source command to ~/.bashrc (only once).
                    bashrcContents = getBashrcContents()
                    mpivarsFileCommand = "source " + mpivarsFilePath
                    if mpivarsFileCommand not in bashrcContents:
                        os.system( "chmod 777 ~/.bashrc")
                        os.system( "chmod 777 ~/.bashrc && echo '' >> ~/.bashrc" )
                        os.system( "chmod 777 ~/.bashrc && echo '# >>> ParaMonte MPI runtime library initialization >>>' >> ~/.bashrc" )
                        os.system( "chmod 777 ~/.bashrc && echo '" + mpivarsFileCommand + "' >> ~/.bashrc" )
                        os.system( "chmod 777 ~/.bashrc && echo '# <<< ParaMonte MPI runtime library initialization <<<' >> ~/.bashrc" )
                        os.system( "chmod 777 ~/.bashrc && echo '' >> ~/.bashrc" )
                        os.system( "chmod 777 ~/.bashrc && sh ~/.bashrc" )
                        pm.note ( msg = "If you intend to run parallel simulations right now,\n"
                                      + "we highly recommned you to close your current shell environment\n"
                                      + "and open a new Bash shell environment. This is to ensure that all MPI\n"
                                      + "library environmental variables are properly set in your shell environment."
                                , methodName = pm.names.paramonte
                                , marginTop = 1
                                , marginBot = 1
                                )
                    #else:
                    #    pm.warn ( msg = "skipping...\n"
                    #                  + "It is now your responsibility to ensure that the MPI runtime \n"
                    #                  + "environmental variables in your Bash environment are properly \n"
                    #                  + "set up before attempting to run any parallel ParaMonte simulation. \n"
                    #                  + "You can do so by running the following command in every Bash session:\n\n"
                    #                  + "    source " + mpivarsFilePath + "\n\n"
                    #                  + "Alternatively, ParaMonte can also automatically add \n"
                    #                  + "the required script to your '.bashrc' file, so that \n"
                    #                  + "all required MPI environmental variables are loaded \n"
                    #                  + "automatically before any ParaMonte usage from any \n"
                    #                  + "Bash command line on your system."
                    #            , methodName = pm.names.paramonte
                    #            , marginTop = 1
                    #            , marginBot = 1
                    #            )
                else:
                    pm.abort ( msg = "ParaMonte was able to detect an MPI library path on your system, however,\n"
                                   + "the MPI installation appears to be corrupted. The required mpivars.sh \n"
                                   + "does not exist:\n\n"
                                   + mpivarsFilePath
                             , methodName = pm.names.paramonte
                             , marginTop = 1
                             , marginBot = 1
                             )
    elif pm.platform.isMacOS:
        pm.warn ( msg = "To use the ParaMonte kernel routines in parallel on macOS, " + newline
                      + "the Open-MPI library will have to be installed on your system. " + newline
                      #+ "To ensure full consistency, we recommend building the parallel " + newline
                      #+ "object files of ParaMonte library on your system along with Open-MPI." + newline
                      + newline
                      + "If this installation of the prerequisites is being done from within " + newline
                      + "a Jupyter notebook and the installation fails:" + newline
                      + newline
                      + "    1. quit the Jupyter notebook." + newline
                      + "    2. enter an IPython session on the command-prompt:" + newline
                      + "        - On Windows, use Anaconda3 command-prompt." + newline
                      + "        - On Linux / macOS, use the Bash terminal." + newline
                      + "    3. import paramonte as pm" + newline
                      + "    4. pm.verify()" + newline
                      + newline
                      + "Building the ParaMonte library prerequisites on your system..."
                , marginTop = 1
                , marginBot = 1
                , methodName = pm.names.paramonte
                )
        _ = buildParaMontePrereqsForMac()
    else:
        #### unrecognized platform: fall back to a full from-source build.
        pm.warn ( msg = "To use ParaMonte in parallel on this unknown Operating System, " + newline
                      + "ParaMonte needs to be built from scratch on your system. " + newline
                      + "Building ParaMonte library prerequisites on your system..."
                , marginTop = 1
                , marginBot = 1
                , methodName = pm.names.paramonte
                )
        build()
####################################################################################################################################
#### buildParaMontePrereqsForMac
####################################################################################################################################
def _cmakeVersionIsCompatible(cmakeVersionList):
    """
    Return ``True`` if the input cmake version component list (e.g.,
    ``["3", "16", "2"]``) meets the minimum cmake 3.14 requirement of the
    ParaMonte build, otherwise ``False``. Any major version beyond 3 is
    also accepted; the previous inline check (``major>=3 and minor>=14``)
    wrongly rejected future major releases such as cmake 4.0.
    """
    major = int(cmakeVersionList[0])
    minor = int(cmakeVersionList[1])
    return major > 3 or (major == 3 and minor >= 14)

def buildParaMontePrereqsForMac():
    """
    Install, via Homebrew, the prerequisites of the ParaMonte library on
    macOS: Homebrew itself (if missing), cmake (>=3.14), the GNU Compiler
    Collection, and the Open-MPI library.

    **Parameters**

        None

    **Returns**

        ``True`` if all prerequisites appear to have been successfully
        installed and linked, otherwise ``False``.
    """
    pm.note ( msg = "Checking if Homebrew exists on your system..."
            , methodName = pm.names.paramonte
            , marginTop = 1
            , marginBot = 1
            )
    import shutil
    import subprocess
    #### Homebrew
    if shutil.which("brew") is None:
        pm.note ( msg = "Failed to detect Homebrew on your system. Installing Homebrew..."
                , methodName = pm.names.paramonte
                , marginTop = 1
                , marginBot = 1
                )
        err1 = os.system('xcode-select --install')
        #### xcode-select returns nonzero when the command-line tools already
        #### exist; treat this as failure only if the developer directory
        #### reported by `xcode-select -p` does not exist either.
        if err1 != 0 and not os.path.isdir( subprocess.check_output(['xcode-select','-p']).decode('utf-8').replace("\n","").replace(chr(13),"") ):
            pm.warn ( msg = getMacosInstallHelpMsg("xcode-select")
                    , methodName = pm.names.paramonte
                    , marginTop = 1
                    , marginBot = 1
                    )
            return False
        #err2 = os.system('ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"')
        err2 = os.system('/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)"')
        err3 = os.system('brew --version')
        if err2 != 0 or err3 != 0:
            pm.warn ( msg = getMacosInstallHelpMsg("Homebrew")
                    , methodName = pm.names.paramonte
                    , marginTop = 1
                    , marginBot = 1
                    )
            return False
    #### cmake
    cmakeInstallationNeeded = False
    cmakePath = shutil.which("cmake")
    if cmakePath is None:
        cmakeInstallationNeeded = True
        pm.note ( msg = "cmake installation is missing on your system."
                , methodName = pm.names.paramonte
                , marginTop = 1
                , marginBot = 1
                )
    else:
        pm.note ( msg = "cmake installation detected at: " + cmakePath + newline + "Checking cmake version..."
                , methodName = pm.names.paramonte
                , marginTop = 0
                , marginBot = 0
                )
        try:
            cmakeVersion = str(subprocess.run(args=["cmake","--version"],capture_output=True).stdout).split(" ")[2].split("-")[0]
            cmakeVersionList = cmakeVersion.split(".")
            pm.note ( msg = "current cmake version: " + cmakeVersion
                    , methodName = pm.names.paramonte
                    , marginTop = 0
                    , marginBot = 0
                    )
            if _cmakeVersionIsCompatible(cmakeVersionList):
                pm.note ( msg = "cmake version is ParaMonte-compatible!"
                        , methodName = pm.names.paramonte
                        , marginTop = 0
                        , marginBot = 0
                        )
            else:
                cmakeInstallationNeeded = True
                pm.note ( msg = "cmake version is NOT ParaMonte-compatible."
                        , methodName = pm.names.paramonte
                        , marginTop = 0
                        , marginBot = 0
                        )
        except Exception:
            #### narrowed from a bare ``except``: the cmake subprocess call or the
            #### version parsing may fail; fall back to a fresh installation.
            cmakeInstallationNeeded = True
            pm.note ( msg = "Failed to detect the current cmake installation version. skipping..."
                    , methodName = pm.names.paramonte
                    , marginTop = 0
                    , marginBot = 0
                    )
    if cmakeInstallationNeeded:
        pm.note ( msg = "Installing cmake..."
                , methodName = pm.names.paramonte
                , marginTop = 0
                , marginBot = 0
                )
        err1 = os.system("brew install cmake")
        err2 = os.system("brew link --overwrite cmake")
        if err1 != 0 or err2 != 0:
            pm.warn ( msg = "cmake installation or linking failed."
                    , methodName = pm.names.paramonte
                    , marginTop = 1
                    , marginBot = 1
                    )
            return False
        #### re-check the freshly installed cmake version.
        cmakeVersionList = str(subprocess.run(args=["cmake","--version"],capture_output=True).stdout).split(" ")[2].split("-")[0].split(".")
        if _cmakeVersionIsCompatible(cmakeVersionList):
            pm.note ( msg = "cmake installation succeeded."
                    , methodName = pm.names.paramonte
                    , marginTop = 1
                    , marginBot = 1
                    )
        else:
            pm.warn ( msg = getMacosInstallHelpMsg("cmake")
                    , methodName = pm.names.paramonte
                    , marginTop = 1
                    , marginBot = 1
                    )
            return False
    #### gnu
    pm.note ( msg = "Installing GNU Compiler Collection..."
            , methodName = pm.names.paramonte
            , marginTop = 1
            , marginBot = 1
            )
    err1 = os.system("brew install gcc@10")
    err2 = os.system("brew link gcc@10")
    if err1 != 0 or err2 != 0:
        pm.warn ( msg = getMacosInstallHelpMsg("GNU Compiler Collection")
                , methodName = pm.names.paramonte
                , marginTop = 1
                , marginBot = 1
                )
        return False
    #### open-mpi
    pm.note ( msg = "Installing Open-MPI..."
            , methodName = pm.names.paramonte
            , marginTop = 1
            , marginBot = 1
            )
    err1 = os.system("brew install open-mpi")
    err2 = os.system("brew link open-mpi")
    if err1 != 0 or err2 != 0:
        pm.warn ( msg = getMacosInstallHelpMsg("Open-MPI")
                , methodName = pm.names.paramonte
                , marginTop = 1
                , marginBot = 1
                )
        return False
    return True
####################################################################################################################################
#### getMacosInstallHelpMsg
####################################################################################################################################
def getMacosInstallHelpMsg(app = ""):
    """
    Return a help message guiding the user through the manual installation
    of a named application on macOS, after an automatic installation attempt
    has failed.

    **Parameters**

        app

            A string holding the name of the application that failed to
            install (e.g., "cmake"). The default is an empty string.

    **Returns**

        The help message as a single string.
    """
    msgLineList =   [ "Failed to install and link the application " + app + " on your "
                    , "system. The " + app + " application is required to install and "
                    , "build the ParaMonte components and prerequisites. "
                    , ""
                    , "If you are performing this installation from within a Jupyter "
                    , "Notebook, then, reinstalling from within an ipython environment or "
                    , "the python command line (instead of Jupyter Notebook) will likely "
                    , "resolve the errors. To do so, open a Bash command line and type, "
                    , ""
                    , "    ipython || python"
                    , ""
                    , "Then, inside the (i)python environment, type, "
                    , ""
                    , "    import paramonte as pm"
                    , "    pm.verify()"
                    , ""
                    , "Otherwise, you can install the application " + app + " manually "
                    , "on your system. The " + app + " installation is only a single "
                    , "command and takes only a few seconds to install. "
                    , "You can get the installation command from this page: "
                    , ""
                    , "    " + pm.website.home.install.macos.prereqs.cmd.url
                    , ""
                    , "Once you have manually installed the missing component, retry, "
                    , ""
                    , "    import paramonte as pm"
                    , "    pm.verify()"
                    , ""
                    , "skipping the installation for now..."
                    ]
    return newline.join(msgLineList)
####################################################################################################################################
#### writeVerificationStatusFile
####################################################################################################################################
def writeVerificationStatusFile(verificationEnabledString):
    """
    Overwrite the ParaMonte verification status file (located at the
    module-level path ``verificationStatusFilePath``) with the input
    status string.

    **Parameters**

        verificationEnabledString

            The status string to be stored (e.g., "True").

    **Returns**

        None
    """
    #### mode "w" truncates any previously stored status before writing.
    with open(verificationStatusFilePath, "w") as statusFile:
        statusFile.write(verificationEnabledString)
    return None
####################################################################################################################################
#### dispFinalMessage
####################################################################################################################################
def dispFinalMessage():
    """
    Display the closing guidance message of the ParaMonte installation and
    verification process, pointing the user at ``pm.verify()`` and
    ``pm.helpme()``, then return ``None``.
    """
    msgLineList =   [ "To check for the MPI library installation status or display the above "
                    , "messages in the future, type the following on the Python command-line: "
                    , ""
                    , "    import paramonte as pm"
                    , "    pm.verify()"
                    , ""
                    , "To get started, type the following on the Python command-line,"
                    , ""
                    , "    import paramonte as pm"
                    , "    pm.helpme()"
                    ]
    pm.note ( msg = newline.join(msgLineList)
            , methodName = pm.names.paramonte
            , marginTop = 1
            , marginBot = 1
            )
    return None
####################################################################################################################################
#### displayParaMonteBanner
####################################################################################################################################
def displayParaMonteBanner():
    """
    Print the ParaMonte banner stored in the auxiliary ``.ParaMonteBanner``
    file, substituting the "Version 0.0.0" placeholder with the current
    interface version, then return ``None``.
    """
    versionString = pm.version.interface.dump()
    #### shift compensating for the width difference between the placeholder
    #### "0.0.0" (5 characters) and the actual version string.
    padding = ( len(versionString) - 5 ) // 2
    bannerFilePath = os.path.join( pm.path.auxil, ".ParaMonteBanner" )
    print("")
    with open(bannerFilePath, "r") as bannerFile:
        for bannerLine in bannerFile:
            if "Version" in bannerLine:
                bannerLine = bannerLine.replace(" " * padding + "Version 0.0.0", "Version " + versionString)
            print(bannerLine, end="")
    print("")
    return None
####################################################################################################################################
#### build
####################################################################################################################################
def build(flags=""):
    """
    Builds the ParaMonte library kernel on the user's system from scratch.

    The ParaMonte source tarball is downloaded from GitHub, extracted, and
    its Bash install script is executed; the resulting kernel shared
    libraries are then copied into the ParaMonte Python module directory.
    On Windows, no automated build is performed; instructions are displayed
    instead.

    **Parameters**

        flags

            A string containing any of the ParaMonte install script flags.
            If the operating system is Unix-based (e.g., Linux or macOS) then
            the value of ``flags`` must conform to the rules and syntax of
            the flags of the Bash install script of the ParaMonte library
            on GitHub. If the operating system is Windows, then the value
            of ``flags`` must conform to the rules and syntax of the flags
            of the Batch install script of the ParaMonte library on GitHub.
            The default value is an empty string ``""``.

    **Returns**

        None
    """
    if pm.platform.isWin32:
        #### No automated Windows build: point the user at the required toolchain.
        pm.warn ( msg = "The ParaMonte library build on Windows Operating Systems (OS) " + newline
                      + "requires the installation of the following software on your system: " + newline
                      + newline
                      + "    - Microsoft Visual Studio (MSVS) (Community Edition >2017)" + newline
                      + "    - Intel Parallel Studio >2018, which is built on top of MSVS" + newline
                      + newline
                      + "If you don't have these software already installed on your system, " + newline
                      + "please visit the following page for the installation instructions: " + newline
                      + newline
                      + "    " + pm.website.home.url + newline
                      + newline
                      + "Follow the instructions on this website for building the ParaMonte " + newline
                      + "ParaMonte on your system."
                , methodName = pm.names.paramonte
                , marginTop = 1
                , marginBot = 1
                )
    else:
        pm.note ( msg = "You are requesting to build the ParaMonte kernel libraries on your system. " + newline
                      + "The kernel library build requires ParaMonte-compatible versions of the " + newline
                      + "following compilers and parallelism libraries installed on your system: " + newline
                      + newline
                      + "    GNU compiler collection (GCC >8.3)" + newline
                      + "    MPI library (MPICH >3.2) on Linux OS or Open-MPI on Darwin OS" + newline
                      + "    OpenCoarrays >2.8" + newline
                      + newline
                      + "The full installation of these software could require 4 to 5 Gb of free " + newline
                      + "space on your system (where the ParaMonte library is already installed)." + newline
                      + "Note that the installation script is in Bash and therefore requires a " + newline
                      + "Bash or Bash-compatible shell. An existing recent installation of the " + newline
                      + "GNU Compiler Collection (GCC) on your system would be also highly " + newline
                      + "desirable and will significantly cut the build time. Also, downloading " + newline
                      + "the prerequisites requires access to the Internet. If you have an " + newline
                      + "Internet firewall active on your system, please make sure to turn " + newline
                      + "it off before proceeding with the local installation of ParaMonte."
                , methodName = pm.names.paramonte
                , marginTop = 1
                , marginBot = 1
                )
        buildEnabled = getUserResponse  ( msg = "\n    Do you wish to download and install the ParaMonte library"
                                              + "\n    and its prerequisites on your system now (y/n)? "
                                        )
        if buildEnabled:
            if pm.platform.isMacOS:
                #### macOS prerequisites come from Homebrew; bail out early on failure.
                succeeded = buildParaMontePrereqsForMac()
                if not succeeded:
                    pm.warn ( msg = "The ParaMonte build failed. To get further instructions " + newline
                                  + "to build the ParaMonte library on your macOS, visit, " + newline
                                  + newline
                                  + "    " + pm.website.home.install.macos.url + newline
                                  + newline
                                  + "You can also report this issue at, " + newline
                                  + newline
                                  + "    " + pm.website.github.issues.url + newline
                                  + newline
                                  + "to get direct help. For more information, visit, " + newline
                                  + newline
                                  + "    " + pm.website.home.url
                            , methodName = pm.names.paramonte
                            , marginTop = 1
                            , marginBot = 1
                            )
                    return None
            #### Download the latest ParaMonte source tarball from GitHub and extract it.
            currentDir = os.getcwd()
            pmGitTarPath = os.path.join( pm.path.root, "master.tar.gz" )
            download( url = pm.website.github.archive.master.tar.url
                    , filePath = pmGitTarPath
                    )
            pmGitRootDir = os.path.join( pm.path.root, "paramonte-master" )
            try:
                import tarfile
                tf = tarfile.open(pmGitTarPath)
                tf.extractall(path=pm.path.root) # path=pmGitRootDir)
                pmGitInstallScriptPath = os.path.join( pmGitRootDir, "install.sh" )
                if not os.path.exists(pmGitInstallScriptPath):
                    #### NOTE(review): the "Visit," sentence below reads garbled — the
                    #### URL placement looks off; confirm the intended message wording.
                    pm.abort( msg = "Internal error occurred." + newline
                                  + "Failed to detect the ParaMonte installation Bash script. " + newline
                                  + "Please report this issue at " + newline
                                  + newline
                                  + "    " + pm.website.github.issues.url + newline
                                  + newline
                                  + "Visit, " + " for instructions " + newline
                                  + newline
                                  + "    " + pm.website.home.url
                                  + "to build ParaMonte object files on your system."
                            , methodName = pm.names.paramonte
                            , marginTop = 1
                            , marginBot = 1
                            )
            except Exception as e:
                print(str(e))
                pm.abort ( msg = "Unzipping of the ParaMonte tarball failed.\n"
                               + "Make sure you have tar software installed on your system and try again."
                         , methodName = pm.names.paramonte
                         , marginTop = 1
                         , marginBot = 1
                         )
            #### Make the installer executable; failure here is nonfatal since the
            #### script may already carry the executable permission bit.
            err = os.system("chmod +x " + pmGitInstallScriptPath)
            if err != 0:
                pm.warn ( msg = "The following action failed:\n\n"
                              + "chmod +x " + pmGitInstallScriptPath + "\n\n"
                              + "skipping..."
                        , methodName = pm.names.paramonte
                        , marginTop = 1
                        , marginBot = 1
                        )
            os.chdir(pmGitRootDir)
            import subprocess
            try:
                #### Ensure all Bash scripts in the source tree are executable, then
                #### run the installer for the Python interface with tests enabled.
                os.system( "find " + pmGitRootDir + " -type f -iname \"*.sh\" -exec chmod +x {} \;" )
                os.system( pmGitInstallScriptPath + " --lang python --test_enabled true --exam_enabled false --yes-to-all " + flags )
            except Exception as e:
                print(str(e))
                pm.abort ( msg = "The Local installation of ParaMonte failed." + newline
                               + "Please report this issue at " + newline
                               + newline
                               + "    " + pm.website.github.issues.url
                         , methodName = pm.names.paramonte
                         , marginTop = 1
                         , marginBot = 1
                         )
            os.chdir(currentDir)
            # copy files to module folder
            import glob
            import shutil
            pythonBinDir = os.path.join( pmGitRootDir , "bin" , "Python" , "paramonte" )
            fileList = glob.glob( os.path.join( pythonBinDir , "libparamonte_*" ) )
            if len(fileList)==0:
                pm.abort( msg = "ParaMonte kernel libraries build and installation " + newline
                              + "appears to have failed. You can check this path:" + newline
                              + newline
                              + "    " + pythonBinDir + newline
                              + newline
                              + "to find out if any shared objects with the prefix " + newline
                              + "'libparamonte_' have been generated or not. " + newline
                              + "Please report this issue at " + newline
                              + newline
                              + "    " + pm.website.github.issues.url
                        , methodName = pm.names.paramonte
                        , marginTop = 1
                        , marginBot = 2
                        )
            else:
                pm.note ( msg = "ParaMonte kernel libraries build appears to have succeeded. " + newline
                              + "copying the kernel files to the ParaMonte Python module directory..."
                        , methodName = pm.names.paramonte
                        , marginTop = 1
                        , marginBot = 1
                        )
                for file in fileList:
                    pm.note ( msg = "file: " + file
                            , methodName = pm.names.paramonte
                            , marginTop = 0
                            , marginBot = 0
                            )
                    shutil.copy(file, pm.path.lib)
                pm.note ( msg = "ParaMonte kernel libraries should be now usable on your system."
                        , methodName = pm.names.paramonte
                        , marginTop = 1
                        , marginBot = 1
                        )
                #### If the OpenCoarrays setup script was generated by the build,
                #### register it in ~/.bashrc (only once) so that future Bash
                #### sessions load the required environment automatically.
                setupFilePath = os.path.join( pmGitRootDir , "build", "prerequisites", "prerequisites", "installations", "opencoarrays", "2.8.0", "setup.sh" )
                if os.path.exists(setupFilePath):
                    bashrcContents = getBashrcContents()
                    setupFilePathCmd = "source " + setupFilePath
                    if setupFilePathCmd not in bashrcContents:
                        os.system( "chmod 777 ~/.bashrc")
                        os.system( "chmod 777 ~/.bashrc && echo '' >> ~/.bashrc" )
                        os.system( "chmod 777 ~/.bashrc && echo '# >>> ParaMonte library local installation setup >>>' >> ~/.bashrc" )
                        os.system( "chmod 777 ~/.bashrc && echo '" + setupFilePathCmd + "' >> ~/.bashrc" )
                        os.system( "chmod 777 ~/.bashrc && echo '# <<< ParaMonte library local installation setup <<<' >> ~/.bashrc" )
                        os.system( "chmod 777 ~/.bashrc && echo '' >> ~/.bashrc" )
                        os.system( "chmod 777 ~/.bashrc && sh ~/.bashrc" )
                    pm.warn ( msg = "Whenever you intend to use ParaMonte in the future, " + newline
                                  + "before opening your Python session, please execute " + newline
                                  + "the following command in your Bash shell to ensure " + newline
                                  + "all required paths are properly defined in your " + newline
                                  + "environment: " + newline
                                  + newline
                                  + "    " + setupFilePathCmd + newline
                                  + newline
                                  + "mission accomplished."
                            , methodName = pm.names.paramonte
                            , marginTop = 1
                            , marginBot = 1
                            )
            writeVerificationStatusFile("True")
        else:
            pm.warn ( msg = "Aborting the ParaMonte-for-Python local build on your system."
                    , methodName = pm.names.paramonte
                    , marginTop = 1
                    , marginBot = 1
                    )
    return None
####################################################################################################################################
#### getVersionTriplet
####################################################################################################################################
def getVersionTriplet(versionDumpString):
    """
    Convert a dotted version string such as ``"1.2.3"`` into an array of
    ``numpy.int32`` version components (major, minor, patch).
    """
    versionComponents = versionDumpString.split(".")
    return np.int32(versionComponents)
####################################################################################################################################
#### getPreviousVersion
####################################################################################################################################
def getPreviousVersion(currentVerionString):
    """
    Take an input version string like, "1.1.1", and return another string
    representing the version immediately preceding it, like, "1.1.0".

    Only the patch and minor components are ever decremented; when both are
    zero (e.g., "1.0.0" or "0.0.0"), ``None`` is returned.
    NOTE(review): the major component is never decremented here — presumably
    intentional for the release-walking logic in ``installMPI``; confirm.
    """
    versionComponents = [ int(part) for part in currentVerionString.split(".") ]
    #### decrement the least-significant nonzero component among patch (index 2)
    #### and minor (index 1), mirroring the original index walk that stops at 1.
    for position in (2, 1):
        if versionComponents[position] > 0:
            versionComponents[position] -= 1
            return ".".join( str(component) for component in versionComponents )
    return None
####################################################################################################################################
#### getDependencyVersion
####################################################################################################################################
#### The minimum versions of the third-party Python packages required by the
#### ParaMonte post-processing and visualization tools (the kernel samplers
#### themselves depend only on numpy).
dependencyVersionDict   = { "numpy": '1.19.2'
                          , "scipy": '1.5.2'
                          , "pandas": '1.1.2'
                          , "seaborn": '0.11.0'
                          , "matplotlib": '3.3.2'
                          }

def getDependencyVersion( pkg : tp.Optional[ str ] = None ):
    """
    Return the minimum required version of the Python library
    for the successful use of the ParaMonte library visualization
    and post-processing tools.

    **Parameters**

        pkg

            An optional string representing the name of the
            Python package whose version is being inquired.

    **Returns**

        A string representing the required minimum version
        of the input ``pkg``. If ``pkg`` is missing or the
        package dependency does not exist within the ParaMonte
        library, the dictionary of all dependencies will
        be returned.
    """
    if pkg is None:
        return dependencyVersionDict
    #### Fall back to the full dictionary for unknown packages, per the
    #### documented contract. Only the lookup-related exceptions are caught
    #### (KeyError for unknown names, TypeError for unhashable inputs); the
    #### previous bare ``except`` could mask unrelated errors.
    try:
        return dependencyVersionDict[ pkg ]
    except (KeyError, TypeError):
        return dependencyVersionDict
####################################################################################################################################
#### displayDependencyVersionMessage
####################################################################################################################################

def displayDependencyVersionMessage():
    """
    Print a notice listing the Python package dependencies of the
    ParaMonte::Python post-processing and visualization tools, together
    with the minimum versions recorded in ``dependencyVersionDict``.
    """
    # NOTE(review): relies on the module-level ``newline`` constant and the
    # ``pm`` module alias defined elsewhere in this file — confirm both are
    # in scope before reuse.
    indentedNewLine = newline + "    "
    pm.note( msg = "The ParaMonte::Kernel samplers have no Python package dependencies " + newline
                 + "beyond numpy. However, the ParaMonte::Python post-processing and " + newline
                 + "visualization tools require the following Python packages, " + newline
                 + indentedNewLine
                 + indentedNewLine.join("{} : {}".format(key, val) for key, val in dependencyVersionDict.items()) + newline
                 + newline
                 + "If you do not intend to use the postprocessing and visualization tools, " + newline
                 + "you can ignore this message. Otherwise, UPDATE THE ABOVE PACKAGES TO " + newline
                 + "THE REQUESTED VERSIONS OR NEWER, SO THAT THE VISUALIZATION TOOLS " + newline
                 + "OF THE ParaMonte::Python LIBRARY FUNCTION PROPERLY."
           , methodName = pm.names.paramonte
           , marginTop = 1
           , marginBot = 1
           )
####################################################################################################################################
#### verifyDependencyVersion
####################################################################################################################################

def verifyDependencyVersion():
    """
    Verify the existence of the required Python packages and
    their minimum versions on the current system.

        **Parameters**

            None

        **Returns**

            None
    """

    import importlib

    def getVersionComponent(versionString, index):
        # Return the requested component of a dotted version string as an
        # ``int``, or ``None`` when it is missing or non-numeric (as in
        # pre-release tags such as "1.19.0rc1").
        try:
            return int(versionString.split(".")[index])
        except (IndexError, ValueError):
            return None

    print("")
    for module, version in dependencyVersionDict.items():

        versionIsCompatible = False
        print("checking the ParaMonte::Python dependency on " + module + " ... ", end = "")

        try:

            # Import the dependency by name. This replaces the original
            # ``exec("import " + module)`` / ``eval`` pair, which was both
            # slower and needlessly unsafe.
            installedVersion = importlib.import_module(module).__version__

            if installedVersion == version:
                versionIsCompatible = True
            elif getVersionComponent(installedVersion, 0) != getVersionComponent(version, 0):
                # Major versions differ: warn about a significant mismatch.
                # (The original code also fell through into the minor-version
                # check below in this case, producing a redundant second
                # warning.)
                pm.warn ( msg   = "The current installation version of the " + module + " library on" + newline
                                + "your system (" + installedVersion + ") is significantly different from " + newline
                                + "the version (" + version + ") with which the ParaMonte library " + newline
                                + "has been tested. This could potentially create runtime issues. " + newline
                                + "Please consider upgrading this library to the most recent " + newline
                                + "version by typing the following on your command prompt, " + newline
                                + newline
                                + "    pip install --user --upgrade " + module + newline
                                + newline
                                + "before you begin to use the ParaMonte library. Should "
                                + "the simulations or the post-processing of the " + newline
                                + "output files fail, please report it at, " + newline
                                + newline
                                + "    " + pm.website.github.issues.url + newline
                                + newline
                                + "for a possible solution. skipping for now..."
                        , methodName = pm.names.paramonte
                        , marginTop = 1
                        , marginBot = 1
                        )
            else:
                installedMinor = getVersionComponent(installedVersion, 1)
                requiredMinor = getVersionComponent(version, 1)
                # Compare the minor versions numerically. The original code
                # compared them as strings, which wrongly accepted, e.g., an
                # installed "1.9" when "1.19" was required ("9" < "19" is
                # False in string comparison).
                if installedMinor is not None and requiredMinor is not None and installedMinor < requiredMinor:
                    pm.warn ( msg   = "The current installation version of the " + module + " library on" + newline
                                    + "your system (" + installedVersion + ") is not the same as the " + newline
                                    + "version (" + version + ") with which the ParaMonte library " + newline
                                    + "has been tested. This may not create any issues, however, " + newline
                                    + "should the simulations or the post-processing of the " + newline
                                    + "output files fail, please upgrade the library via, " + newline
                                    + newline
                                    + "    pip install --user --upgrade " + module + newline
                                    + newline
                                    + "If the error persists, please report it at, " + newline
                                    + newline
                                    + "    " + pm.website.github.issues.url + newline
                                    + newline
                                    + "for a possible solution. skipping for now..."
                            , methodName = pm.names.paramonte
                            , marginTop = 1
                            , marginBot = 1
                            )

        except Exception as e:

            print(str(e))
            if module=="numpy" or module=="pandas":
                # These packages are required by the kernel itself; failing
                # to import them is fatal.
                pm.abort( msg   = "Failed to import the " + module + " library into your Python session." + newline
                                + "This library is required for the ParaMonte kernel library to perform " + newline
                                + "simulations. Please install the latest version of this " + newline
                                + "library by typing the following on your command prompt: " + newline
                                + newline
                                + "    pip install --user --upgrade " + module + newline
                                + newline
                                + "If the error persists, please report it at, " + newline
                                + newline
                                + "    " + pm.website.github.issues.url + newline
                                + newline
                                + "for a possible solution. skipping for now..."
                        , methodName = pm.names.paramonte
                        , marginTop = 1
                        , marginBot = 1
                        )
            else:
                pm.warn ( msg   = "Failed to import the " + module + " library into your Python session." + newline
                                + "This library is required for the post-processing of the ParaMonte " + newline
                                + "simulation output files. Please install the latest version of this " + newline
                                + "library by typing the following on your command prompt: " + newline
                                + newline
                                + "    pip install --user --upgrade " + module + newline
                                + newline
                                + "If the error persists, please report it at, " + newline
                                + newline
                                + "    " + pm.website.github.issues.url + newline
                                + newline
                                + "for a possible solution. skipping for now..."
                        , methodName = pm.names.paramonte
                        , marginTop = 1
                        , marginBot = 1
                        )

        if versionIsCompatible: print("OK")

    return None
####################################################################################################################################
#### checkForUpdate
####################################################################################################################################

def checkForUpdate(package = "paramonte"):
    """
    Query PyPI for the latest available version of ``package`` and report
    whether the currently-installed ParaMonte library is up to date.

        **Parameters**

            package

                An optional string representing the PyPI name of the
                package whose latest version is sought. The default
                is "paramonte".

        **Returns**

            None
    """
    import subprocess
    import sys

    # Asking pip to install a non-existent version makes it print the list
    # of all available versions; the last entry of that list is the latest.
    latestVersion = str(subprocess.run([sys.executable, '-m', 'pip', 'install', '{}==random'.format(package)], capture_output=True, text=True))
    latestVersion = latestVersion[latestVersion.find('(from versions:')+15:]
    latestVersion = latestVersion[:latestVersion.find(')')]
    latestVersion = latestVersion.replace(' ','').split(',')[-1]

    currentVersion = pm.version.interface.dump()

    if latestVersion == currentVersion:
        pm.note ( msg   = "You have the latest version of the ParaMonte library. " + newline
                        + "To see the most recent changes to the library, visit, " + newline
                        + newline
                        + "    " + pm.website.home.overview.changes.python.url
                , methodName = pm.names.paramonte
                , marginTop = 1
                , marginBot = 1
                )
    else:
        currentVersionTriplet = currentVersion.split(".")
        latestVersionTriplet = latestVersion.split(".")
        newerVersionAvailable = False
        for current, latest in zip(currentVersionTriplet, latestVersionTriplet):
            if int(latest) > int(current):
                newerVersionAvailable = True
                break
            if int(latest) < int(current):
                # The installed version is already ahead at this component,
                # so later components are irrelevant. The original loop kept
                # scanning here and could wrongly report, e.g., PyPI "1.9"
                # as newer than an installed "2.0".
                break
        if newerVersionAvailable:
            msg =(  "A newer version (" + latestVersion + ") of the ParaMonte::Python library appears to be " + newline
                 + "available on the PyPI repository. The currently-installed version is: " + currentVersion + newline
                 + "You can upgrade to the latest version by typing the following " + newline
                 + "on your Bash terminal or Anaconda command prompt: " + newline
                 + newline
                 + "    pip install --user --upgrade " + package + newline
                 + newline
                 + "To upgrade from within your Jupyter or IPython session, try, " + newline
                 + newline
                 + "    !pip install --user --upgrade " + package + newline
                 + newline
                 + "To see the latest changes to the ParaMonte::Python library, visit, " + newline
                 + newline
                 + "    " + pm.website.home.overview.changes.python.url
                 )
        else:
            msg = ( "Looks like you have a version of the ParaMonte::Python library (" + currentVersion + ") " + newline
                  + "that is newer than the PyPI version (" + latestVersion + "). Good for you!"
                  )
        pm.note ( msg = msg
                , methodName = pm.names.paramonte
                , marginTop = 1
                , marginBot = 1
                )

    return None
####################################################################################################################################
| 51.546627 | 158 | 0.459112 |
aceadf97215967a582ff29c157f4e9081a1363f6 | 1,944 | py | Python | eclipse-mosquitto/test/broker/04-retain-qos1-qos0.py | HenriqueBuzin/mosquitto-eclipse-mqtt | 00468923fcf70eefdf2c707b6ba9bdd4f859faf2 | [
"Unlicense"
] | 2 | 2021-04-20T14:28:59.000Z | 2021-05-06T07:46:53.000Z | eclipse-mosquitto/test/broker/04-retain-qos1-qos0.py | HenriqueBuzin/mosquitto-eclipse-mqtt | 00468923fcf70eefdf2c707b6ba9bdd4f859faf2 | [
"Unlicense"
] | null | null | null | eclipse-mosquitto/test/broker/04-retain-qos1-qos0.py | HenriqueBuzin/mosquitto-eclipse-mqtt | 00468923fcf70eefdf2c707b6ba9bdd4f859faf2 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
# Test whether a retained PUBLISH to a topic with QoS 1 is retained.
# Subscription is made with QoS 0 so the retained message should also have QoS
# 0.
from mosq_test_helper import *
def do_test(proto_ver):
rc = 1
keepalive = 60
connect_packet = mosq_test.gen_connect("retain-qos1-test", keepalive=keepalive, proto_ver=proto_ver)
connack_packet = mosq_test.gen_connack(rc=0, proto_ver=proto_ver)
mid = 6
publish_packet = mosq_test.gen_publish("retain/qos1/test", qos=1, mid=mid, payload="retained message", retain=True, proto_ver=proto_ver)
if proto_ver == 5:
puback_packet = mosq_test.gen_puback(mid, proto_ver=proto_ver, reason_code=mqtt5_rc.MQTT_RC_NO_MATCHING_SUBSCRIBERS)
else:
puback_packet = mosq_test.gen_puback(mid, proto_ver=proto_ver)
mid = 18
subscribe_packet = mosq_test.gen_subscribe(mid, "retain/qos1/test", 0, proto_ver=proto_ver)
suback_packet = mosq_test.gen_suback(mid, 0, proto_ver=proto_ver)
publish0_packet = mosq_test.gen_publish("retain/qos1/test", qos=0, payload="retained message", retain=True, proto_ver=proto_ver)
port = mosq_test.get_port()
broker = mosq_test.start_broker(filename=os.path.basename(__file__), port=port)
try:
sock = mosq_test.do_client_connect(connect_packet, connack_packet, port=port)
mosq_test.do_send_receive(sock, publish_packet, puback_packet, "puback")
mosq_test.do_send_receive(sock, subscribe_packet, suback_packet, "suback")
mosq_test.expect_packet(sock, "publish0", publish0_packet)
rc = 0
sock.close()
except mosq_test.TestError:
pass
finally:
broker.terminate()
broker.wait()
(stdo, stde) = broker.communicate()
if rc:
print(stde.decode('utf-8'))
print("proto_ver=%d" % (proto_ver))
exit(rc)
do_test(proto_ver=4)
do_test(proto_ver=5)
exit(0)
| 36 | 140 | 0.703704 |
aceae02e3ede2818626496f13271fc3d308cab4c | 845 | py | Python | test_runners.py | manatlan/htag | cf085077adf04bec8a2b059497efedb210c59936 | [
"MIT"
] | null | null | null | test_runners.py | manatlan/htag | cf085077adf04bec8a2b059497efedb210c59936 | [
"MIT"
] | null | null | null | test_runners.py | manatlan/htag | cf085077adf04bec8a2b059497efedb210c59936 | [
"MIT"
] | null | null | null | import pytest
import importlib
from htag import Tag
class MyApp(Tag.div):
def init(self):
self <= "Hello World"
def test_default():
from htag.runners import BrowserHTTP
BrowserHTTP( MyApp )
def data_source():
for i in [
"DevApp",
"BrowserStarletteHTTP",
"BrowserStarletteWS",
"WebHTTP",
"PyWebWiew",
"GuyApp",
"ChromeApp",
"AndroidApp",
"BrowserTornadoHTTP",
"PyScript",
]:
yield i
@pytest.mark.parametrize('my_runner', data_source())
def test_a_runner( my_runner ):
mrunners=importlib.import_module("htag.runners")
if hasattr(mrunners,my_runner):
runner=getattr(mrunners,my_runner)
runner( MyApp )
else:
print("can't test %s" % my_runner)
if __name__=="__main__":
test_default()
| 20.119048 | 52 | 0.610651 |
aceae05b5b70d14424091a305dd84e2f299fdc70 | 2,208 | py | Python | repos/system_upgrade/el7toel8/actors/checkosrelease/actor.py | Jakuje/leapp-repository | 580540f68bd4f89152c28935f775f660c2db0839 | [
"Apache-2.0"
] | null | null | null | repos/system_upgrade/el7toel8/actors/checkosrelease/actor.py | Jakuje/leapp-repository | 580540f68bd4f89152c28935f775f660c2db0839 | [
"Apache-2.0"
] | 1 | 2019-04-12T14:45:21.000Z | 2019-04-12T14:45:21.000Z | repos/system_upgrade/el7toel8/actors/checkosrelease/actor.py | frenzymadness/leapp-repository | 683cd1df8fb769a8256da89bd86fd402602a8d42 | [
"Apache-2.0"
] | null | null | null | import os
from leapp.actors import Actor
from leapp.models import OSReleaseFacts
from leapp.reporting import Report
from leapp.libraries.common.reporting import report_generic
from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
class CheckOSRelease(Actor):
"""
Check if a supported release version of system's OS is in use. If not, inhibit upgrade process.
Based on OS release collected facts, this actor will compare current release with supported
versions. If a problem is found an inhibition message will be generated. This check can be
skipped by using LEAPP_SKIP_CHECK_OS_RELEASE environment variable.
"""
name = 'check_os_release'
consumes = (OSReleaseFacts,)
produces = (Report,)
tags = (ChecksPhaseTag, IPUWorkflowTag,)
def process(self):
skip_check = os.getenv('LEAPP_SKIP_CHECK_OS_RELEASE')
if skip_check:
report_generic(
title='Skipped OS release check',
summary='OS release check skipped via LEAPP_SKIP_CHECK_OS_RELEASE env var.',
severity='low'
)
return
min_supported_version = {
'rhel': '7.6'
}
for facts in self.consume(OSReleaseFacts):
if facts.id not in min_supported_version.keys():
report_generic(
title='Unsupported OS id',
summary='Supported OS ids for upgrade process: ' + ','.join(min_supported_version.keys()),
flags=['inhibitor']
)
return
min_version = [int(x) for x in min_supported_version[facts.id].split('.')]
os_version = [int(x) for x in facts.version_id.split('.')]
for current, minimal in zip(os_version, min_version):
if current > minimal:
break
if current < minimal:
report_generic(
title='Unsupported OS version',
summary='Minimal supported OS version for upgrade process: ' + min_supported_version[facts.id],
flags=['inhibitor']
)
return
| 36.196721 | 119 | 0.597826 |
aceae096e5810315ea329b13a360cc34da9a4eb9 | 12,433 | py | Python | docs/source/conf.py | izelarabm/client | d257de3b5ded6714b7cb52765b4b7fd0bffecd0c | [
"Apache-2.0"
] | null | null | null | docs/source/conf.py | izelarabm/client | d257de3b5ded6714b7cb52765b4b7fd0bffecd0c | [
"Apache-2.0"
] | null | null | null | docs/source/conf.py | izelarabm/client | d257de3b5ded6714b7cb52765b4b7fd0bffecd0c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# cloudmesh_client documentation build configuration file, created by
# sphinx-quickstart on Tue Jun 16 13:55:43 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from cloudmesh_client.version import __version__

import os

# os.path.abspath('../cloudmesh_client')

# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# NOTE(review): the environment detection above is immediately overridden,
# so the read-the-docs branch of the theme selection below is never taken.
on_rtd = False

bootstrap_theme = False

if bootstrap_theme:
    import sphinx_bootstrap_theme
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
# 'sphinx.ext.doctest',
'sphinx.ext.todo',
# 'sphinx.ext.coverage',
# 'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.autodoc',
]
extensions += ['sphinx-prompt']
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'cloudmesh_client'
copyright = u'2015, Gregor von Laszewski'
author = u'Gregor von Laszewski'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
keep_warnings = False
# If true, `to do` and `to do List` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# ``on_rtd`` is forced to False earlier in this file, so the local
# sphinx_rtd_theme branch always runs (falling back to 'alabaster' only
# if that override is removed).
if not on_rtd:  # only import and set the theme if we're building docs locally
    import sphinx_rtd_theme

    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
else:
    html_theme = 'alabaster'

# The bootstrap theme, if enabled, takes precedence over the choice above.
if bootstrap_theme:
    html_theme = 'bootstrap'
    html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = "cm client"
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
html_sidebars = {'**': ['mypage.html',
'localtoc.html',
'mysidebar.html',
'sourcelink.html']}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'cloudmesh_clientdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'cloudmesh_client.tex', u'cloudmesh\\_client Documentation',
u'Gregor von Laszewski', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'cloudmesh_client', u'cloudmesh_client Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'cloudmesh_client', u'cloudmesh_client Documentation',
author, 'cloudmesh_client', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
# epub_basename = project
# The HTML theme for the epub output. Since the default themes are not
# optimized for small screen space, using the same theme for HTML and epub
# output is usually not wise. This defaults to 'epub', a theme designed to
# save visual space.
# epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
# epub_identifier = ''
# A unique identification for the text.
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
# epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
# epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3
# Allow duplicate toc entries.
# epub_tocdup = True
# Choose between 'default' and 'includehidden'.
# epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
# epub_fix_images = False
# Scale large images.
# epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# epub_show_urls = 'inline'
# If false, no index is generated.
# epub_use_index = True
| 31.555838 | 79 | 0.711413 |
aceae0a5b06b8b10bfe9c207c0e66cdbb793bbd4 | 6,572 | py | Python | neutron/db/allowedaddresspairs_db.py | brandonlogan/neutron | 57364544aa8b0e7cd9d73550f287bcad574ba08c | [
"Apache-2.0"
] | null | null | null | neutron/db/allowedaddresspairs_db.py | brandonlogan/neutron | 57364544aa8b0e7cd9d73550f287bcad574ba08c | [
"Apache-2.0"
] | null | null | null | neutron/db/allowedaddresspairs_db.py | brandonlogan/neutron | 57364544aa8b0e7cd9d73550f287bcad574ba08c | [
"Apache-2.0"
] | null | null | null | # Copyright 2013 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from neutron_lib.api import validators
from neutron.api.v2 import attributes as attr
from neutron.db import db_base_plugin_v2
from neutron.common import utils
from neutron.extensions import allowedaddresspairs as addr_pair
from neutron.objects import base as obj_base
from neutron.objects.port.extensions import (allowedaddresspairs
as obj_addr_pair)
class AllowedAddressPairsMixin(object):
    """Mixin class for allowed address pairs."""

    def _process_create_allowed_address_pairs(self, context, port,
                                              allowed_address_pairs):
        """Persist the given allowed address pairs for *port* and return
        the input list.

        Raises DuplicateAddressPairInRequest when the same pair appears
        more than once in the request.
        """
        if not validators.is_attr_set(allowed_address_pairs):
            return []
        try:
            with context.session.begin(subtransactions=True):
                for address_pair in allowed_address_pairs:
                    # use port.mac_address if no mac address in address pair
                    if 'mac_address' not in address_pair:
                        address_pair['mac_address'] = port['mac_address']
                    # retain string format as passed through API
                    mac_address = utils.AuthenticEUI(
                        address_pair['mac_address'])
                    ip_address = utils.AuthenticIPNetwork(
                        address_pair['ip_address'])
                    pair_obj = obj_addr_pair.AllowedAddressPair(
                        context,
                        port_id=port['id'],
                        mac_address=mac_address,
                        ip_address=ip_address)
                    pair_obj.create()
        except obj_base.NeutronDbObjectDuplicateEntry:
            # ``address_pair`` still refers to the pair whose create()
            # raised, so it identifies the offending duplicate.
            raise addr_pair.DuplicateAddressPairInRequest(
                mac_address=address_pair['mac_address'],
                ip_address=address_pair['ip_address'])

        return allowed_address_pairs

    def get_allowed_address_pairs(self, context, port_id):
        """Return the allowed address pairs of *port_id* as plain dicts."""
        pairs = self._get_allowed_address_pairs_objs(context, port_id)
        return [self._make_allowed_address_pairs_dict(pair.db_obj)
                for pair in pairs]

    def _get_allowed_address_pairs_objs(self, context, port_id):
        """Return the AllowedAddressPair objects stored for *port_id*."""
        pairs = obj_addr_pair.AllowedAddressPair.get_objects(
            context, port_id=port_id)
        return pairs

    def _extend_port_dict_allowed_address_pairs(self, port_res, port_db):
        """Add the port's allowed address pairs to the port API dict."""
        # If port_db is provided, allowed address pairs will be accessed via
        # sqlalchemy models. As they're loaded together with ports this
        # will not cause an extra query.
        allowed_address_pairs = [
            self._make_allowed_address_pairs_dict(address_pair) for
            address_pair in port_db.allowed_address_pairs]
        port_res[addr_pair.ADDRESS_PAIRS] = allowed_address_pairs
        return port_res

    # Register dict extend functions for ports
    db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
        attr.PORTS, ['_extend_port_dict_allowed_address_pairs'])

    def _delete_allowed_address_pairs(self, context, id):
        """Delete every allowed address pair stored for port *id*."""
        pairs = self._get_allowed_address_pairs_objs(context, port_id=id)
        with context.session.begin(subtransactions=True):
            for pair in pairs:
                pair.delete()

    def _make_allowed_address_pairs_dict(self, allowed_address_pairs,
                                         fields=None):
        """Convert one allowed address pair record into an API dict,
        optionally filtered to *fields*."""
        res = {'mac_address': allowed_address_pairs['mac_address'],
               'ip_address': allowed_address_pairs['ip_address']}
        return self._fields(res, fields)

    def _has_address_pairs(self, port):
        """Return True if the request body carries a non-empty, set
        allowed_address_pairs attribute."""
        return (validators.is_attr_set(port['port'][addr_pair.ADDRESS_PAIRS])
                and port['port'][addr_pair.ADDRESS_PAIRS] != [])

    def _check_update_has_allowed_address_pairs(self, port):
        """Determine if request has an allowed address pair.

        Return True if the port parameter has a non-empty
        'allowed_address_pairs' attribute. Otherwise returns False.
        """
        return (addr_pair.ADDRESS_PAIRS in port['port'] and
                self._has_address_pairs(port))

    def _check_update_deletes_allowed_address_pairs(self, port):
        """Determine if request deletes address pair.

        Return True if port has an allowed address pair and its value
        is either [] or not is_attr_set, otherwise return False
        """
        return (addr_pair.ADDRESS_PAIRS in port['port'] and
                not self._has_address_pairs(port))

    def is_address_pairs_attribute_updated(self, port, update_attrs):
        """Check if the address pairs attribute is being updated.

        Returns True if there is an update. This can be used to decide
        if a port update notification should be sent to agents or third
        party controllers.
        """
        new_pairs = update_attrs.get(addr_pair.ADDRESS_PAIRS)
        if new_pairs is None:
            return False
        old_pairs = port.get(addr_pair.ADDRESS_PAIRS)

        # Missing or unchanged address pairs in attributes mean no update
        return new_pairs != old_pairs

    def update_address_pairs_on_port(self, context, port_id, port,
                                     original_port, updated_port):
        """Update allowed address pairs on port.

        Returns True if an update notification is required. Notification
        is not done here because other changes on the port may need
        notification. This method is expected to be called within
        a transaction.
        """
        new_pairs = port['port'].get(addr_pair.ADDRESS_PAIRS)

        if self.is_address_pairs_attribute_updated(original_port,
                                                   port['port']):
            updated_port[addr_pair.ADDRESS_PAIRS] = new_pairs
            self._delete_allowed_address_pairs(context, port_id)
            self._process_create_allowed_address_pairs(
                context, updated_port, new_pairs)
            return True

        return False
aceae161ca37cbe092edd3f0f001517484d0ea08 | 128 | py | Python | workflow_session/__init__.py | CBroz1/workflow-session | 3730820b940f694416e7384ee4391dc93ddb516a | [
"MIT"
] | null | null | null | workflow_session/__init__.py | CBroz1/workflow-session | 3730820b940f694416e7384ee4391dc93ddb516a | [
"MIT"
] | null | null | null | workflow_session/__init__.py | CBroz1/workflow-session | 3730820b940f694416e7384ee4391dc93ddb516a | [
"MIT"
] | null | null | null | __author__ = "DataJoint"
__date__ = "December, 2021"
__version__ = "0.0.1"
__all__ = ['__author__', '__version__', '__date__']
| 21.333333 | 51 | 0.695313 |
aceae1d6e34a4377a8abc8b97ea12ec045f9053d | 22,527 | py | Python | python-profiles/Thorlabs/DC stages/MT1-Z8.py | EPC-MSU/libximc | b0349721f57c8274b098a7b646d7ae67b8e70b9d | [
"BSD-2-Clause"
] | 3 | 2020-12-08T14:41:48.000Z | 2022-02-23T13:42:42.000Z | python-profiles/Thorlabs/DC stages/MT1-Z8.py | EPC-MSU/libximc | b0349721f57c8274b098a7b646d7ae67b8e70b9d | [
"BSD-2-Clause"
] | 4 | 2020-12-08T20:15:06.000Z | 2021-12-08T14:15:24.000Z | python-profiles/Thorlabs/DC stages/MT1-Z8.py | EPC-MSU/libximc | b0349721f57c8274b098a7b646d7ae67b8e70b9d | [
"BSD-2-Clause"
] | 2 | 2020-11-02T02:17:35.000Z | 2021-03-18T14:13:56.000Z | def set_profile_MT1_Z8(lib, id):
worst_result = Result.Ok
result = Result.Ok
feedback_settings = feedback_settings_t()
feedback_settings.IPS = 2048
class FeedbackType_:
FEEDBACK_ENCODER_MEDIATED = 6
FEEDBACK_NONE = 5
FEEDBACK_EMF = 4
FEEDBACK_ENCODER = 1
feedback_settings.FeedbackType = FeedbackType_.FEEDBACK_ENCODER
class FeedbackFlags_:
FEEDBACK_ENC_TYPE_BITS = 192
FEEDBACK_ENC_TYPE_DIFFERENTIAL = 128
FEEDBACK_ENC_TYPE_SINGLE_ENDED = 64
FEEDBACK_ENC_REVERSE = 1
FEEDBACK_ENC_TYPE_AUTO = 0
feedback_settings.FeedbackFlags = FeedbackFlags_.FEEDBACK_ENC_TYPE_SINGLE_ENDED | FeedbackFlags_.FEEDBACK_ENC_TYPE_AUTO
feedback_settings.CountsPerTurn = 2048
result = lib.set_feedback_settings(id, byref(feedback_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
home_settings = home_settings_t()
home_settings.FastHome = 500
home_settings.uFastHome = 0
home_settings.SlowHome = 500
home_settings.uSlowHome = 0
home_settings.HomeDelta = 2000
home_settings.uHomeDelta = 0
class HomeFlags_:
HOME_USE_FAST = 256
HOME_STOP_SECOND_BITS = 192
HOME_STOP_SECOND_LIM = 192
HOME_STOP_SECOND_SYN = 128
HOME_STOP_SECOND_REV = 64
HOME_STOP_FIRST_BITS = 48
HOME_STOP_FIRST_LIM = 48
HOME_STOP_FIRST_SYN = 32
HOME_STOP_FIRST_REV = 16
HOME_HALF_MV = 8
HOME_MV_SEC_EN = 4
HOME_DIR_SECOND = 2
HOME_DIR_FIRST = 1
home_settings.HomeFlags = HomeFlags_.HOME_USE_FAST | HomeFlags_.HOME_STOP_SECOND_REV | HomeFlags_.HOME_STOP_FIRST_BITS | HomeFlags_.HOME_DIR_SECOND
result = lib.set_home_settings(id, byref(home_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
move_settings = move_settings_t()
move_settings.Speed = 6000
move_settings.uSpeed = 0
move_settings.Accel = 16000
move_settings.Decel = 16000
move_settings.AntiplaySpeed = 6000
move_settings.uAntiplaySpeed = 0
class MoveFlags_:
RPM_DIV_1000 = 1
result = lib.set_move_settings(id, byref(move_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
engine_settings = engine_settings_t()
engine_settings.NomVoltage = 792
engine_settings.NomCurrent = 240
engine_settings.NomSpeed = 12000
engine_settings.uNomSpeed = 0
class EngineFlags_:
ENGINE_LIMIT_RPM = 128
ENGINE_LIMIT_CURR = 64
ENGINE_LIMIT_VOLT = 32
ENGINE_ACCEL_ON = 16
ENGINE_ANTIPLAY = 8
ENGINE_MAX_SPEED = 4
ENGINE_CURRENT_AS_RMS = 2
ENGINE_REVERSE = 1
engine_settings.EngineFlags = EngineFlags_.ENGINE_LIMIT_RPM | EngineFlags_.ENGINE_LIMIT_CURR | EngineFlags_.ENGINE_LIMIT_VOLT | EngineFlags_.ENGINE_ACCEL_ON
engine_settings.Antiplay = -13312
class MicrostepMode_:
MICROSTEP_MODE_FRAC_256 = 9
MICROSTEP_MODE_FRAC_128 = 8
MICROSTEP_MODE_FRAC_64 = 7
MICROSTEP_MODE_FRAC_32 = 6
MICROSTEP_MODE_FRAC_16 = 5
MICROSTEP_MODE_FRAC_8 = 4
MICROSTEP_MODE_FRAC_4 = 3
MICROSTEP_MODE_FRAC_2 = 2
MICROSTEP_MODE_FULL = 1
engine_settings.MicrostepMode = MicrostepMode_.MICROSTEP_MODE_FULL
engine_settings.StepsPerRev = 64
result = lib.set_engine_settings(id, byref(engine_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
entype_settings = entype_settings_t()
class EngineType_:
ENGINE_TYPE_BRUSHLESS = 5
ENGINE_TYPE_TEST = 4
ENGINE_TYPE_STEP = 3
ENGINE_TYPE_2DC = 2
ENGINE_TYPE_DC = 1
ENGINE_TYPE_NONE = 0
entype_settings.EngineType = EngineType_.ENGINE_TYPE_DC | EngineType_.ENGINE_TYPE_NONE
class DriverType_:
DRIVER_TYPE_EXTERNAL = 3
DRIVER_TYPE_INTEGRATE = 2
DRIVER_TYPE_DISCRETE_FET = 1
entype_settings.DriverType = DriverType_.DRIVER_TYPE_INTEGRATE
result = lib.set_entype_settings(id, byref(entype_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
power_settings = power_settings_t()
power_settings.HoldCurrent = 60
power_settings.CurrReductDelay = 1500
power_settings.PowerOffDelay = 3600
power_settings.CurrentSetTime = 600
class PowerFlags_:
POWER_SMOOTH_CURRENT = 4
POWER_OFF_ENABLED = 2
POWER_REDUCT_ENABLED = 1
result = lib.set_power_settings(id, byref(power_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
secure_settings = secure_settings_t()
secure_settings.LowUpwrOff = 800
secure_settings.CriticalIpwr = 4000
secure_settings.CriticalUpwr = 5500
secure_settings.CriticalT = 800
secure_settings.CriticalIusb = 450
secure_settings.CriticalUusb = 520
secure_settings.MinimumUusb = 420
class Flags_:
ALARM_ENGINE_RESPONSE = 128
ALARM_WINDING_MISMATCH = 64
USB_BREAK_RECONNECT = 32
ALARM_FLAGS_STICKING = 16
ALARM_ON_BORDERS_SWAP_MISSET = 8
H_BRIDGE_ALERT = 4
LOW_UPWR_PROTECTION = 2
ALARM_ON_DRIVER_OVERHEATING = 1
secure_settings.Flags = Flags_.ALARM_ENGINE_RESPONSE | Flags_.ALARM_FLAGS_STICKING | Flags_.ALARM_ON_BORDERS_SWAP_MISSET | Flags_.H_BRIDGE_ALERT | Flags_.ALARM_ON_DRIVER_OVERHEATING
result = lib.set_secure_settings(id, byref(secure_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
edges_settings = edges_settings_t()
class BorderFlags_:
BORDERS_SWAP_MISSET_DETECTION = 8
BORDER_STOP_RIGHT = 4
BORDER_STOP_LEFT = 2
BORDER_IS_ENCODER = 1
edges_settings.BorderFlags = BorderFlags_.BORDER_STOP_RIGHT | BorderFlags_.BORDER_STOP_LEFT
class EnderFlags_:
ENDER_SW2_ACTIVE_LOW = 4
ENDER_SW1_ACTIVE_LOW = 2
ENDER_SWAP = 1
edges_settings.EnderFlags = EnderFlags_.ENDER_SW2_ACTIVE_LOW | EnderFlags_.ENDER_SW1_ACTIVE_LOW
edges_settings.LeftBorder = 39165
edges_settings.uLeftBorder = 0
edges_settings.RightBorder = 1603427
edges_settings.uRightBorder = 0
result = lib.set_edges_settings(id, byref(edges_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
pid_settings = pid_settings_t()
pid_settings.KpU = 800
pid_settings.KiU = 2800
pid_settings.KdU = 400
pid_settings.Kpf = 0
pid_settings.Kif = 0
pid_settings.Kdf = 0
result = lib.set_pid_settings(id, byref(pid_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
sync_in_settings = sync_in_settings_t()
class SyncInFlags_:
SYNCIN_GOTOPOSITION = 4
SYNCIN_INVERT = 2
SYNCIN_ENABLED = 1
sync_in_settings.ClutterTime = 4
sync_in_settings.Position = 0
sync_in_settings.uPosition = 0
sync_in_settings.Speed = 0
sync_in_settings.uSpeed = 0
result = lib.set_sync_in_settings(id, byref(sync_in_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
sync_out_settings = sync_out_settings_t()
class SyncOutFlags_:
SYNCOUT_ONPERIOD = 64
SYNCOUT_ONSTOP = 32
SYNCOUT_ONSTART = 16
SYNCOUT_IN_STEPS = 8
SYNCOUT_INVERT = 4
SYNCOUT_STATE = 2
SYNCOUT_ENABLED = 1
sync_out_settings.SyncOutFlags = SyncOutFlags_.SYNCOUT_ONSTOP | SyncOutFlags_.SYNCOUT_ONSTART
sync_out_settings.SyncOutPulseSteps = 100
sync_out_settings.SyncOutPeriod = 2000
sync_out_settings.Accuracy = 0
sync_out_settings.uAccuracy = 0
result = lib.set_sync_out_settings(id, byref(sync_out_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
extio_settings = extio_settings_t()
class EXTIOSetupFlags_:
EXTIO_SETUP_INVERT = 2
EXTIO_SETUP_OUTPUT = 1
extio_settings.EXTIOSetupFlags = EXTIOSetupFlags_.EXTIO_SETUP_OUTPUT
class EXTIOModeFlags_:
EXTIO_SETUP_MODE_OUT_BITS = 240
EXTIO_SETUP_MODE_OUT_MOTOR_ON = 64
EXTIO_SETUP_MODE_OUT_ALARM = 48
EXTIO_SETUP_MODE_OUT_MOVING = 32
EXTIO_SETUP_MODE_OUT_ON = 16
EXTIO_SETUP_MODE_IN_BITS = 15
EXTIO_SETUP_MODE_IN_ALARM = 5
EXTIO_SETUP_MODE_IN_HOME = 4
EXTIO_SETUP_MODE_IN_MOVR = 3
EXTIO_SETUP_MODE_IN_PWOF = 2
EXTIO_SETUP_MODE_IN_STOP = 1
EXTIO_SETUP_MODE_IN_NOP = 0
EXTIO_SETUP_MODE_OUT_OFF = 0
extio_settings.EXTIOModeFlags = EXTIOModeFlags_.EXTIO_SETUP_MODE_IN_STOP | EXTIOModeFlags_.EXTIO_SETUP_MODE_IN_NOP | EXTIOModeFlags_.EXTIO_SETUP_MODE_OUT_OFF
result = lib.set_extio_settings(id, byref(extio_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
brake_settings = brake_settings_t()
brake_settings.t1 = 300
brake_settings.t2 = 500
brake_settings.t3 = 300
brake_settings.t4 = 400
class BrakeFlags_:
BRAKE_ENG_PWROFF = 2
BRAKE_ENABLED = 1
brake_settings.BrakeFlags = BrakeFlags_.BRAKE_ENG_PWROFF
result = lib.set_brake_settings(id, byref(brake_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
control_settings = control_settings_t()
control_settings.MaxSpeed[0] = 60
control_settings.MaxSpeed[1] = 600
control_settings.MaxSpeed[2] = 6000
control_settings.MaxSpeed[3] = 0
control_settings.MaxSpeed[4] = 0
control_settings.MaxSpeed[5] = 0
control_settings.MaxSpeed[6] = 0
control_settings.MaxSpeed[7] = 0
control_settings.MaxSpeed[8] = 0
control_settings.MaxSpeed[9] = 0
control_settings.uMaxSpeed[0] = 0
control_settings.uMaxSpeed[1] = 0
control_settings.uMaxSpeed[2] = 0
control_settings.uMaxSpeed[3] = 0
control_settings.uMaxSpeed[4] = 0
control_settings.uMaxSpeed[5] = 0
control_settings.uMaxSpeed[6] = 0
control_settings.uMaxSpeed[7] = 0
control_settings.uMaxSpeed[8] = 0
control_settings.uMaxSpeed[9] = 0
control_settings.Timeout[0] = 1000
control_settings.Timeout[1] = 1000
control_settings.Timeout[2] = 1000
control_settings.Timeout[3] = 1000
control_settings.Timeout[4] = 1000
control_settings.Timeout[5] = 1000
control_settings.Timeout[6] = 1000
control_settings.Timeout[7] = 1000
control_settings.Timeout[8] = 1000
control_settings.MaxClickTime = 300
class Flags_:
CONTROL_BTN_RIGHT_PUSHED_OPEN = 8
CONTROL_BTN_LEFT_PUSHED_OPEN = 4
CONTROL_MODE_BITS = 3
CONTROL_MODE_LR = 2
CONTROL_MODE_JOY = 1
CONTROL_MODE_OFF = 0
control_settings.Flags = Flags_.CONTROL_MODE_LR | Flags_.CONTROL_MODE_OFF
control_settings.DeltaPosition = 1
control_settings.uDeltaPosition = 0
result = lib.set_control_settings(id, byref(control_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
joystick_settings = joystick_settings_t()
joystick_settings.JoyLowEnd = 0
joystick_settings.JoyCenter = 5000
joystick_settings.JoyHighEnd = 10000
joystick_settings.ExpFactor = 100
joystick_settings.DeadZone = 50
class JoyFlags_:
JOY_REVERSE = 1
result = lib.set_joystick_settings(id, byref(joystick_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
ctp_settings = ctp_settings_t()
ctp_settings.CTPMinError = 3
class CTPFlags_:
CTP_ERROR_CORRECTION = 16
REV_SENS_INV = 8
CTP_ALARM_ON_ERROR = 4
CTP_BASE = 2
CTP_ENABLED = 1
ctp_settings.CTPFlags = CTPFlags_.CTP_ERROR_CORRECTION | CTPFlags_.CTP_ENABLED
result = lib.set_ctp_settings(id, byref(ctp_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
uart_settings = uart_settings_t()
uart_settings.Speed = 115200
class UARTSetupFlags_:
UART_STOP_BIT = 8
UART_PARITY_BIT_USE = 4
UART_PARITY_BITS = 3
UART_PARITY_BIT_MARK = 3
UART_PARITY_BIT_SPACE = 2
UART_PARITY_BIT_ODD = 1
UART_PARITY_BIT_EVEN = 0
uart_settings.UARTSetupFlags = UARTSetupFlags_.UART_PARITY_BIT_EVEN
result = lib.set_uart_settings(id, byref(uart_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
controller_name = controller_name_t()
controller_name.ControllerName = bytes([0, 113, 252, 118, 36, 0, 72, 0, 3, 0, 0, 0, 104, 101, 103, 0])
class CtrlFlags_:
EEPROM_PRECEDENCE = 1
result = lib.set_controller_name(id, byref(controller_name))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
emf_settings = emf_settings_t()
emf_settings.L = 0
emf_settings.R = 0
emf_settings.Km = 0
class BackEMFFlags_:
BACK_EMF_KM_AUTO = 4
BACK_EMF_RESISTANCE_AUTO = 2
BACK_EMF_INDUCTANCE_AUTO = 1
result = lib.set_emf_settings(id, byref(emf_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
engine_advansed_setup = engine_advansed_setup_t()
engine_advansed_setup.stepcloseloop_Kw = 50
engine_advansed_setup.stepcloseloop_Kp_low = 1000
engine_advansed_setup.stepcloseloop_Kp_high = 33
result = lib.set_engine_advansed_setup(id, byref(engine_advansed_setup))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
extended_settings = extended_settings_t()
extended_settings.Param1 = 0
result = lib.set_extended_settings(id, byref(extended_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
stage_name = stage_name_t()
stage_name.PositionerName = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_stage_name(id, byref(stage_name))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
stage_information = stage_information_t()
stage_information.Manufacturer = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
stage_information.PartNumber = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_stage_information(id, byref(stage_information))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
stage_settings = stage_settings_t()
stage_settings.LeadScrewPitch = 0
stage_settings.Units = bytes([0, 0, 0, 0, 0, 0, 0, 0])
stage_settings.MaxSpeed = 0
stage_settings.TravelRange = 0
stage_settings.SupplyVoltageMin = 0
stage_settings.SupplyVoltageMax = 0
stage_settings.MaxCurrentConsumption = 0
stage_settings.HorizontalLoadCapacity = 0
stage_settings.VerticalLoadCapacity = 0
result = lib.set_stage_settings(id, byref(stage_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
motor_information = motor_information_t()
motor_information.Manufacturer = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
motor_information.PartNumber = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_motor_information(id, byref(motor_information))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
motor_settings = motor_settings_t()
class MotorType_:
MOTOR_TYPE_BLDC = 3
MOTOR_TYPE_DC = 2
MOTOR_TYPE_STEP = 1
MOTOR_TYPE_UNKNOWN = 0
motor_settings.MotorType = MotorType_.MOTOR_TYPE_UNKNOWN
motor_settings.ReservedField = 0
motor_settings.Poles = 0
motor_settings.Phases = 0
motor_settings.NominalVoltage = 0
motor_settings.NominalCurrent = 0
motor_settings.NominalSpeed = 0
motor_settings.NominalTorque = 0
motor_settings.NominalPower = 0
motor_settings.WindingResistance = 0
motor_settings.WindingInductance = 0
motor_settings.RotorInertia = 0
motor_settings.StallTorque = 0
motor_settings.DetentTorque = 0
motor_settings.TorqueConstant = 0
motor_settings.SpeedConstant = 0
motor_settings.SpeedTorqueGradient = 0
motor_settings.MechanicalTimeConstant = 0
motor_settings.MaxSpeed = 0
motor_settings.MaxCurrent = 0
motor_settings.MaxCurrentTime = 0
motor_settings.NoLoadCurrent = 0
motor_settings.NoLoadSpeed = 0
result = lib.set_motor_settings(id, byref(motor_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
encoder_information = encoder_information_t()
encoder_information.Manufacturer = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
encoder_information.PartNumber = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_encoder_information(id, byref(encoder_information))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
encoder_settings = encoder_settings_t()
encoder_settings.MaxOperatingFrequency = 0
encoder_settings.SupplyVoltageMin = 0
encoder_settings.SupplyVoltageMax = 0
encoder_settings.MaxCurrentConsumption = 0
encoder_settings.PPR = 0
class EncoderSettings_:
ENCSET_REVOLUTIONSENSOR_ACTIVE_HIGH = 256
ENCSET_REVOLUTIONSENSOR_PRESENT = 64
ENCSET_INDEXCHANNEL_PRESENT = 16
ENCSET_PUSHPULL_OUTPUT = 4
ENCSET_DIFFERENTIAL_OUTPUT = 1
result = lib.set_encoder_settings(id, byref(encoder_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
hallsensor_information = hallsensor_information_t()
hallsensor_information.Manufacturer = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
hallsensor_information.PartNumber = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_hallsensor_information(id, byref(hallsensor_information))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
hallsensor_settings = hallsensor_settings_t()
hallsensor_settings.MaxOperatingFrequency = 0
hallsensor_settings.SupplyVoltageMin = 0
hallsensor_settings.SupplyVoltageMax = 0
hallsensor_settings.MaxCurrentConsumption = 0
hallsensor_settings.PPR = 0
result = lib.set_hallsensor_settings(id, byref(hallsensor_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
gear_information = gear_information_t()
gear_information.Manufacturer = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
gear_information.PartNumber = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_gear_information(id, byref(gear_information))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
gear_settings = gear_settings_t()
gear_settings.ReductionIn = 0
gear_settings.ReductionOut = 0
gear_settings.RatedInputTorque = 0
gear_settings.RatedInputSpeed = 0
gear_settings.MaxOutputBacklash = 0
gear_settings.InputInertia = 0
gear_settings.Efficiency = 0
result = lib.set_gear_settings(id, byref(gear_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
accessories_settings = accessories_settings_t()
accessories_settings.MagneticBrakeInfo = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
accessories_settings.MBRatedVoltage = 0
accessories_settings.MBRatedCurrent = 0
accessories_settings.MBTorque = 0
class MBSettings_:
MB_POWERED_HOLD = 2
MB_AVAILABLE = 1
accessories_settings.TemperatureSensorInfo = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
accessories_settings.TSMin = 0
accessories_settings.TSMax = 0
accessories_settings.TSGrad = 0
class TSSettings_:
TS_AVAILABLE = 8
TS_TYPE_BITS = 7
TS_TYPE_SEMICONDUCTOR = 2
TS_TYPE_THERMOCOUPLE = 1
TS_TYPE_UNKNOWN = 0
accessories_settings.TSSettings = TSSettings_.TS_TYPE_UNKNOWN
class LimitSwitchesSettings_:
LS_SHORTED = 16
LS_SW2_ACTIVE_LOW = 8
LS_SW1_ACTIVE_LOW = 4
LS_ON_SW2_AVAILABLE = 2
LS_ON_SW1_AVAILABLE = 1
result = lib.set_accessories_settings(id, byref(accessories_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
return worst_result
| 35.643987 | 185 | 0.691792 |
aceae1de0abb38b79a066bd58d1ede6767e1a321 | 273 | py | Python | astropy/vo/validator/setup_package.py | xiaomi1122/astropy | 8876e902f5efa02a3fc27d82fe15c16001d4df5e | [
"BSD-3-Clause"
] | null | null | null | astropy/vo/validator/setup_package.py | xiaomi1122/astropy | 8876e902f5efa02a3fc27d82fe15c16001d4df5e | [
"BSD-3-Clause"
] | null | null | null | astropy/vo/validator/setup_package.py | xiaomi1122/astropy | 8876e902f5efa02a3fc27d82fe15c16001d4df5e | [
"BSD-3-Clause"
] | null | null | null | # Licensed under a 3-clause BSD style license - see LICENSE.rst
def get_package_data():
    """Return the package-data mapping for the astropy.vo.validator packages."""
    data_files = {
        'astropy.vo.validator': ['data/*.txt'],
        'astropy.vo.validator.tests': ['data/*.json', 'data/*.xml',
                                       'data/*.out'],
    }
    return data_files
| 30.333333 | 67 | 0.527473 |
aceae22659e5db29f848eb31f405f971c1478ca7 | 16,200 | py | Python | tests/test_qrqualitycheck.py | andypandy47/dcc-quality-assurance-1 | a11e6b3abb450b300a65469849ed5a171cd8482b | [
"Apache-2.0"
] | null | null | null | tests/test_qrqualitycheck.py | andypandy47/dcc-quality-assurance-1 | a11e6b3abb450b300a65469849ed5a171cd8482b | [
"Apache-2.0"
] | null | null | null | tests/test_qrqualitycheck.py | andypandy47/dcc-quality-assurance-1 | a11e6b3abb450b300a65469849ed5a171cd8482b | [
"Apache-2.0"
] | null | null | null | # ---license-start
# eu-digital-green-certificates / dgc-testdata
# ---
# Copyright (C) 2021 Qryptal Pte Ltd
# Copyright (C) 2021 T-Systems International GmbH and all other contributors
# ---
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ---license-end
import os
import re
import json
import json
import base64
import pytest
import jsonref
import requests
import warnings
import constants
import jsonschema
from glob import glob
from io import BytesIO
from pathlib import Path
from DccQrCode import DccQrCode
from traceback import format_exc
from datetime import date, datetime, timezone
from filecache import HOUR, MINUTE, DAY, filecache
# COSE / CBOR related
from cose.keys import CoseKey
from cryptography import x509
from cose.keys.curves import P256
from cose.keys.keyops import VerifyOp
from cose.headers import Algorithm, KID
from cryptography.utils import int_to_bytes
from cose.keys.keytype import KtyEC2, KtyRSA
from cryptography.x509 import ExtensionNotFound
from cose.algorithms import Es256, Ps256, Sha256
from cryptography.hazmat.primitives.hashes import SHA256
from cryptography.hazmat.primitives.asymmetric import ec, rsa
from cose.keys.keyparam import KpAlg, EC2KpX, EC2KpY, EC2KpCurve, RSAKpE, RSAKpN, KpKty, KpKeyOps
from cryptography.hazmat.backends.openssl.backend import backend as OpenSSLBackend
@filecache(HOUR)
def valuesets_from_environment():
    """Download (and disk-cache for one hour) the value-sets from the
    acceptance environment, falling back to the alternative endpoint when
    the primary one is unreachable."""
    if requests.get(constants.VALUESET_LIST).ok:
        base_url = constants.VALUESET_LIST
    else:
        base_url = constants.VALUESET_LIST_ALTERNATIVE
    valuesets = {}
    for entry in requests.get(base_url).json():
        try:
            valuesets[entry['id']] = requests.get(f'{base_url}/{entry["hash"]}').json()['valueSetValues']
        except KeyError:
            warnings.warn('Could not download value-sets. Skipping tests.')
            pytest.skip('Could not download value-sets.')
    return valuesets
@filecache(HOUR)
def certificates_from_environment():
    """Download (and disk-cache for one hour) the DSCs from the acceptance
    environment, returned as a dict mapping KID to PEM body (or None when
    the trust list did not deliver a certificate for that KID)."""
    kid_response = requests.get(constants.ACC_KID_LIST)
    if not kid_response.ok:
        pytest.fail("KID list not reachable")
    certificates = {kid: None for kid in json.loads(kid_response.text)}
    # Page through the trust list using the resume-token header until the
    # service stops returning one.
    response = requests.get(constants.ACC_CERT_LIST)
    while constants.X_RESUME_TOKEN in response.headers and response.ok:
        certificates[response.headers[constants.X_KID]] = response.text
        response = requests.get(constants.ACC_CERT_LIST, headers={
            constants.X_RESUME_TOKEN: response.headers[constants.X_RESUME_TOKEN]})
    return certificates
def test_plausibility_checks( dccQrCode ):
    '''Perform various plausibility checks:
        - RAT tests should not have "nm" field
        - NAA/PCR tests should not have "ma" field
    '''
    hcert = dccQrCode.payload[constants.PAYLOAD_HCERT][1]
    if 't' not in hcert.keys():
        return  # Only TEST certificates are subject to these checks
    test_entry = hcert['t'][0]
    assert 'tt' in test_entry.keys(), 'Test type is not present in TEST-DCC'
    if test_entry['tt'] == 'LP6464-4':
        assert 'ma' not in test_entry.keys() or test_entry['ma'] == '', "PCR/NAA tests should not have a ma-field"
    if test_entry['tt'] == 'LP217198-3':
        assert 'nm' not in test_entry.keys() or test_entry['nm'] == '', "Rapid tests should not have a nm-field"
def test_if_dsc_exists( dccQrCode, pytestconfig ):
    """Checks whether the DCC's key is listed on the national backend of the
    acceptance environment."""
    if pytestconfig.getoption('no_signature_check'):
        pytest.skip('Signature check skipped by request')
    known_kids = certificates_from_environment()
    if dccQrCode.get_key_id_base64() not in known_kids:
        pytest.fail("KID exist not on acceptance environment")
def test_tags( dccQrCode ):
"Tests if the decompressed contents of the QR code is a COSE.Sign1Message"
firstByte = dccQrCode.decompressed[0]
if firstByte == 132:
msgType = "List"
elif firstByte == 216:
msgType == "CWT"
elif firstByte == 210:
msgType = "Sign1"
else:
msgType = "unknown"
assert msgType == "Sign1"
def test_algorithm( dccQrCode ):
    """Tests if Ps256 or Es256 are used as cryptographic algorithm in the
    COSE message, and that the algorithm lives in the protected header."""
    alg = dccQrCode.sign1Message.phdr[Algorithm]
    if alg.__name__ not in ('Ps256', 'Es256'):
        pytest.fail(f"Wrong Algorithm used: {alg.__name__} Expected: Ps256 or Es256")
    # The algorithm must not also appear in the unprotected header.
    if Algorithm in dccQrCode.sign1Message.uhdr:
        pytest.fail("Algorithm must be in Protected header")
def test_dcc_type_in_payload( dccQrCode, pytestconfig ):
    """Checks whether the payload has exactly one of v, r or t content
    (vaccination, recovery, test certificate)"""
    hcert = dccQrCode.payload[constants.PAYLOAD_HCERT][1]
    present_types = [key for key in hcert.keys() if key in ('v', 'r', 't')]
    if pytestconfig.getoption('verbose'):
        print(dccQrCode.payload)
    if not pytestconfig.getoption('allow_multi_dcc') and len(present_types) > 1:
        pytest.fail('DCC contains multiple certificates')
    if not present_types:
        pytest.fail('No DCC content (v, r, t) found')
    # The file name prefix must match the certificate type it contains.
    for dcc_type in present_types:
        expected_prefix = constants.DCC_TYPES[dcc_type].lower()
        if not dccQrCode.get_file_name().lower().startswith(expected_prefix):
            pytest.fail(f'File name "{dccQrCode.get_file_name()}" indicates other DCC type. (DCC contains {constants.DCC_TYPES[dcc_type]})')
def test_payload_version_matches_path_version( dccQrCode ):
    """Tests whether the payload has the same version as the file's path indicates."""
    payload_version = dccQrCode.payload[constants.PAYLOAD_HCERT][1]['ver']
    assert payload_version == dccQrCode.get_path_schema_version()
@filecache(DAY)
def get_json_schema(version, extra_eu):
    '''Get the json schema depending on the version of the DCC data.

    Schema code is obtained from
    https://raw.githubusercontent.com/ehn-dcc-development/ehn-dcc-schema/

    ``extra_eu`` selects whether additional (non-schema) properties are
    tolerated: True for non-EU countries, False for EU countries.
    Results are disk-cached per (version, extra_eu) for one day.

    Improvement over the original: only the requested schema variant is
    downloaded and resolved (the original fetched and resolved both the
    strict and the permissive variant on every call and discarded one).
    '''
    class RewritingLoader:
        '''Json schema in ehn-dcc-development has absolute references which
        don't match with the base uri of their repo. The RewritingLoader is
        supposed to search and replace these uris with working links.'''
        def __init__(self, rewrites):
            self.rewrites = rewrites
        def __call__(self, uri, **kwargs):
            response = requests.get(uri, **kwargs)
            raw = response.text
            for rw_from, rw_to in self.rewrites.items():
                raw = raw.replace(rw_from, rw_to)
            return json.loads(raw)
    # Check if version is three numbers separated by dots
    if re.match("^\\d\\.\\d\\.\\d$", version) is None:
        raise ValueError(f'{version} is not a valid version string')
    # Before v1.2.1, the datatype was called DGC, now DCC
    main_file = 'DCC.schema.json' if version >= '1.2.1' else 'DGC.schema.json'
    versioned_path = f'{constants.SCHEMA_BASE_URI}{version}/'
    # Rewrite the references to id.uvci.eu to the repository above and
    # inject additionalProperties according to the requested strictness.
    additional = "true" if extra_eu else "false"
    loader = RewritingLoader({
        'https://id.uvci.eu/': versioned_path,
        "\"properties\"": f"\"additionalProperties\": {additional}, \"properties\"",
    })
    print(f'Loading HCERT schema {version} ...')
    try:
        return jsonref.load_uri(f'{versioned_path}{main_file}', loader=loader)
    except Exception:
        raise LookupError(f'Could not load schema definition for {version}')
@pytest.mark.filterwarnings("ignore::DeprecationWarning")
def test_json_schema( dccQrCode ):
    """Performs a schema validation against the
    ehn-dcc-development/ehn-dcc-schema definition. Non-EU countries are
    validated with additional properties allowed."""
    hcert = dccQrCode.payload[constants.PAYLOAD_HCERT][1]
    outside_eu = dccQrCode.get_path_country() not in constants.EU_COUNTRIES
    jsonschema.validate(hcert, get_json_schema(hcert['ver'], outside_eu))
def test_verify_signature( dccQrCode, pytestconfig ):
    """Verifies the signature of the DCC.

    Downloads the DSC matching the DCC's KID from the acceptance
    environment, checks that the KID equals the first 8 bytes of the
    certificate's SHA-256 fingerprint, verifies the COSE signature and,
    when the DSC restricts its extended key usage to known DCC OIDs,
    checks that the DCC's type is covered by those OIDs.
    """
    if pytestconfig.getoption('no_signature_check'):
        pytest.skip('Signature check skipped by request')

    def key_from_cert(cert):
        # Map the certificate's public key onto the matching COSE key.
        public_key = cert.public_key()
        if isinstance(public_key, ec.EllipticCurvePublicKey):
            return CoseKey.from_dict(
                {
                    KpKeyOps: [VerifyOp],
                    KpKty: KtyEC2,
                    EC2KpCurve: P256,
                    KpAlg: Es256,  # ECDSA using P-256 and SHA-256
                    EC2KpX: int_to_bytes(public_key.public_numbers().x),
                    EC2KpY: int_to_bytes(public_key.public_numbers().y),
                }
            )
        elif isinstance(public_key, rsa.RSAPublicKey):
            return CoseKey.from_dict(
                {
                    KpKeyOps: [VerifyOp],
                    KpKty: KtyRSA,
                    KpAlg: Ps256,  # RSASSA-PSS using SHA-256 and MGF1 with SHA-256
                    RSAKpE: int_to_bytes(public_key.public_numbers().e),
                    RSAKpN: int_to_bytes(public_key.public_numbers().n),
                }
            )
        else:
            # Fixed typo in the original message ("agorithm").
            raise ValueError(f'Unsupported certificate algorithm: {cert.signature_algorithm_oid} for verification.')

    certs = certificates_from_environment()
    cert_base64 = certs[dccQrCode.get_key_id_base64()]
    cert = x509.load_pem_x509_certificate(
        f'-----BEGIN CERTIFICATE-----\n{cert_base64}\n-----END CERTIFICATE-----'.encode(), OpenSSLBackend)
    extensions = {extension.oid._name: extension for extension in cert.extensions}
    if pytestconfig.getoption('verbose'):
        if 'extendedKeyUsage' in extensions.keys():
            allowed_usages = [oid.dotted_string for oid in extensions['extendedKeyUsage'].value._usages]
        else:
            allowed_usages = 'ANY'
        print(f'\nCert: {cert_base64}\nAllowed Cert Usages: {allowed_usages}\nKeyID: {dccQrCode.get_key_id_base64()}')
    # The KID must equal the first 8 bytes of the DSC's SHA-256 fingerprint.
    fingerprint = cert.fingerprint(SHA256())
    assert dccQrCode.get_key_id_base64() == base64.b64encode(fingerprint[0:8]).decode("ascii")
    # Fix: build the COSE key once (the original built it twice and left
    # the first result in an unused local).
    dccQrCode.sign1Message.key = key_from_cert(cert)
    if not dccQrCode.sign1Message.verify_signature():
        pytest.fail(f"Signature could not be verified with signing certificate {cert_base64}")
    if 'extendedKeyUsage' in extensions.keys():
        allowed_usages = [oid.dotted_string for oid in extensions['extendedKeyUsage'].value._usages]
        if len(set(constants.EXTENDED_KEY_USAGE_OIDs.values()) & set(allowed_usages)) > 0:  # Only check if at least one known OID is used in DSC
            for cert_type in constants.DCC_TYPES.keys():
                if cert_type in dccQrCode.payload[constants.PAYLOAD_HCERT][1].keys():
                    # There are 2 versions of extended key usage OIDs in circulation. We simply logged them as upper and lower case
                    # types, but they actually mean the same. So we treat t == T, v == V and r == R
                    if constants.EXTENDED_KEY_USAGE_OIDs[cert_type] not in allowed_usages \
                            and constants.EXTENDED_KEY_USAGE_OIDs[cert_type.upper()] not in allowed_usages:
                        pytest.fail(f"DCC is of type {constants.DCC_TYPES[cert_type]}, DSC allows {allowed_usages} "
                                    f"but not {constants.EXTENDED_KEY_USAGE_OIDs[cert_type]} or {constants.EXTENDED_KEY_USAGE_OIDs[cert_type.upper()]}")
def test_country_in_path_matches_issuer( dccQrCode ):
    'Checks whether the country code in the path matches the issuer country'
    path_country = dccQrCode.get_path_country()
    issuer_country = dccQrCode.payload[constants.PAYLOAD_ISSUER]
    greece_aliases = {'EL', 'GR'}
    # EL and GR both denote Greece and are treated as interchangeable.
    if path_country in greece_aliases and issuer_country in greece_aliases:
        return
    assert path_country == issuer_country
def test_country_code_formats( dccQrCode ):
    'Checks that country codes are 2 upper case alphabetical characters'

    def check(code):
        # ISO 3166-1 alpha-2 style: exactly two upper-case letters.
        assert len(code) == 2
        assert code.isalpha()
        assert code == code.upper()

    country_code = dccQrCode.payload[constants.PAYLOAD_ISSUER]
    try:
        check(country_code)
        hcert = dccQrCode.payload[constants.PAYLOAD_HCERT][1]
        # Also validate the 'co' field of every inner certificate entry.
        for cert_type in constants.DCC_TYPES.keys():
            if cert_type in hcert.keys():
                for inner_cert in hcert[cert_type]:
                    country_code = inner_cert['co']
                    check(country_code)
    except AssertionError:
        # Re-raise with the offending code so the failure is self-describing.
        raise ValueError(f'Invalid country code: {country_code}')
def test_claim_dates( dccQrCode, pytestconfig ):
    'Performs some plausibility checks against date related claims'
    issued = dccQrCode.payload[constants.PAYLOAD_ISSUE_DATE]
    expires = dccQrCode.payload[constants.PAYLOAD_EXPIRY_DATE]
    # Issuance must precede expiry, and no DCC existed before 2021.
    assert issued < expires
    assert datetime.fromtimestamp(issued).year >= 2021

    hcert = dccQrCode.payload[constants.PAYLOAD_HCERT][1]
    if 'r' in hcert.keys() and pytestconfig.getoption('warn_timedelta'):
        # Recovery certificates carry their own 'du' (valid until) date;
        # warn when it drifts far from the CWT-level expiry claim.
        expiry_from_claim = datetime.fromtimestamp(expires)
        expiry_from_payload = datetime.fromisoformat(hcert['r'][0]['du'])
        if abs(expiry_from_claim - expiry_from_payload).days > 14:
            warnings.warn('Expiry dates in payload and envelope differ more than 14 days:\n'+
                          f'Claim key 4: {expiry_from_claim.isoformat()}\n'+
                          f'Payload: {expiry_from_payload.isoformat()}')
def test_valuesets( dccQrCode ):
    "Test if the only entries from valuesets are used for corresponding fields"

    def check_field( data, field_name, valueset_name ):
        # Fetch valuesets per call, as the original helper did.
        valuesets = valuesets_from_environment()
        if data[field_name] not in valuesets[valueset_name].keys():
            pytest.fail(f'"{data[field_name]}" is not a valid value for {field_name} ({valueset_name})')

    hCert = dccQrCode.payload[constants.PAYLOAD_HCERT][1]
    if 'v' in hCert.keys():
        vaccination = hCert['v'][0]
        for field_name, valueset_name in (
            ('vp', 'sct-vaccines-covid-19'),
            ('ma', 'vaccines-covid-19-auth-holders'),
            ('mp', 'vaccines-covid-19-names'),
            ('tg', 'disease-agent-targeted'),
        ):
            check_field(vaccination, field_name, valueset_name)
    elif 't' in hCert.keys():
        test_entry = hCert['t'][0]
        check_field(test_entry, 'tr', 'covid-19-lab-result')
        if 'ma' in test_entry.keys():  # Only rapid tests have these
            check_field(test_entry, 'ma', 'covid-19-lab-test-manufacturer-and-name')
        check_field(test_entry, 'tt', 'covid-19-lab-test-type')
        check_field(test_entry, 'tg', 'disease-agent-targeted')
    elif 'r' in hCert.keys():
        check_field(hCert['r'][0], 'tg', 'disease-agent-targeted')
| 45.251397 | 152 | 0.678704 |
aceae3e887f1bd2aea085611386046de83730931 | 1,275 | py | Python | setup.py | syndicate-storage/syndicate-dataset-manager | f15a9f30b276385f6325c53f405a1fb3c73ee60b | [
"Apache-2.0"
] | null | null | null | setup.py | syndicate-storage/syndicate-dataset-manager | f15a9f30b276385f6325c53f405a1fb3c73ee60b | [
"Apache-2.0"
] | null | null | null | setup.py | syndicate-storage/syndicate-dataset-manager | f15a9f30b276385f6325c53f405a1fb3c73ee60b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
Copyright 2016 The Trustees of University of Arizona
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from setuptools import setup

# Top-level packages shipped in the distribution.
packages = [
    'sdm'
]

# dependencies
dependencies = [
    'prettytable',
    'psutil',
    'grequests'
]

setup(
    name='sdm',
    version='0.1',
    description='Syndicate Dataset Manager',
    url='https://github.com/syndicate-storage/syndicate-dataset-manager',
    author='Illyoung Choi',
    author_email='syndicate@lists.cs.princeton.edu',
    license='Apache 2.0',
    packages=packages,
    # Sources use a src/ layout: the 'sdm' package lives in src/sdm.
    package_dir={
        'sdm': 'src/sdm'
    },
    # Installs an `sdm` console command that dispatches to sdm.sdm:main.
    entry_points={
        'console_scripts': [
            'sdm = sdm.sdm:main'
        ]
    },
    install_requires=dependencies,
    zip_safe=False
)
| 24.519231 | 75 | 0.676863 |
aceae3fc0edad75ad15ea7df2234e1ddd62cbb39 | 1,437 | py | Python | test/multiapi/Expected/AcceptanceTests/MultiapiCustomBaseUrl/setup.py | qwordy/autorest.python | 6b12df51c2a39a1285546b5a771b69f5896e794f | [
"MIT"
] | 35 | 2018-04-03T12:15:53.000Z | 2022-03-11T14:03:34.000Z | test/multiapi/Expected/AcceptanceTests/MultiapiCustomBaseUrl/setup.py | qwordy/autorest.python | 6b12df51c2a39a1285546b5a771b69f5896e794f | [
"MIT"
] | 652 | 2017-08-28T22:44:41.000Z | 2022-03-31T21:20:31.000Z | test/multiapi/Expected/AcceptanceTests/MultiapiCustomBaseUrl/setup.py | qwordy/autorest.python | 6b12df51c2a39a1285546b5a771b69f5896e794f | [
"MIT"
] | 29 | 2017-08-28T20:57:01.000Z | 2022-03-11T14:03:38.000Z | # coding=utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
# coding: utf-8
from setuptools import setup, find_packages
NAME = "multiapicustombaseurl"
VERSION = "0.1.0"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = ["msrest>=0.6.0", "azure-core<2.0.0,>=1.2.0"]
setup(
name=NAME,
version=VERSION,
description="multiapi custom base url",
author_email="",
url="",
keywords=["Swagger", "multiapicustombaseurl"],
install_requires=REQUIRES,
packages=find_packages(),
include_package_data=True,
long_description="""\
This ready contains multiple API versions, to help you deal with all of the Azure clouds
(Azure Stack, Azure Government, Azure China, etc.).
By default, it uses the latest API version available on public Azure.
For production, you should stick to a particular api-version and/or profile.
The profile sets a mapping between an operation group and its API version.
The api-version parameter sets the default API version if the operation
group is not described in the profile.
"""
)
| 33.418605 | 92 | 0.647182 |
aceae4107bcde8b7f42f0d5c3f9cfa8f7f33f023 | 114 | py | Python | purbeurre_project/settings/__init__.py | etiennody/purbeurre-v2 | cee10b5ad3ccee6535f197070cd4ee80f2bad5d0 | [
"MIT"
] | null | null | null | purbeurre_project/settings/__init__.py | etiennody/purbeurre-v2 | cee10b5ad3ccee6535f197070cd4ee80f2bad5d0 | [
"MIT"
] | 3 | 2020-10-12T13:58:38.000Z | 2020-11-12T01:02:14.000Z | purbeurre_project/settings/__init__.py | etiennody/purbeurre-v2 | cee10b5ad3ccee6535f197070cd4ee80f2bad5d0 | [
"MIT"
] | 1 | 2021-02-03T18:49:31.000Z | 2021-02-03T18:49:31.000Z | import os
ENV = os.getenv('ENV', 'local')
if ENV == "local":
from .dev import *
else:
from .prod import * | 16.285714 | 31 | 0.596491 |
aceae45b50b99b54a63f886acc068b6e6e40f0cb | 13,666 | py | Python | tensorboard/plugins/histogram/summary_test.py | kewlcoder/tensorboard | b2c342bbca9276ac560004d3cff70b0a1f1a8437 | [
"Apache-2.0"
] | 6,139 | 2017-06-13T02:43:20.000Z | 2022-03-30T23:56:28.000Z | tensorboard/plugins/histogram/summary_test.py | kewlcoder/tensorboard | b2c342bbca9276ac560004d3cff70b0a1f1a8437 | [
"Apache-2.0"
] | 3,239 | 2017-06-14T15:49:50.000Z | 2022-03-31T23:43:15.000Z | tensorboard/plugins/histogram/summary_test.py | kewlcoder/tensorboard | b2c342bbca9276ac560004d3cff70b0a1f1a8437 | [
"Apache-2.0"
] | 1,882 | 2017-06-15T01:33:54.000Z | 2022-03-30T15:27:25.000Z | # -*- coding: utf-8 -*-
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the histogram plugin summary generation functions."""
import glob
import os
import numpy as np
import tensorflow as tf
from tensorboard.compat import tf2
from tensorboard.compat.proto import summary_pb2
from tensorboard.plugins.histogram import metadata
from tensorboard.plugins.histogram import summary
from tensorboard.util import tensor_util
try:
tf2.__version__ # Force lazy import to resolve
except ImportError:
tf2 = None
try:
tf.compat.v1.enable_eager_execution()
except AttributeError:
# TF 2.0 doesn't have this symbol because eager is the default.
pass
class SummaryBaseTest(object):
    """Shared histogram-summary tests.

    Subclasses override `histogram()` to exercise one particular API
    surface (V1/V2/V3, pb/op, eager/graph); every test below then runs
    against each. The emitted tensor is a (num_buckets, 3) array whose
    rows are [left_edge, right_edge, count] (demonstrated by the fixed
    expectations in the singleton tests below).
    """

    def setUp(self):
        super(SummaryBaseTest, self).setUp()
        # Fixed seed so the gaussian fixture is reproducible across runs.
        np.random.seed(0)
        self.gaussian = np.random.normal(size=[100])

    def histogram(self, *args, **kwargs):
        """Produce a Summary proto for the given data; overridden per API."""
        raise NotImplementedError()

    def test_metadata(self):
        """Description and plugin metadata round-trip through the proto."""
        pb = self.histogram("h", [], description="foo")
        self.assertEqual(len(pb.value), 1)
        summary_metadata = pb.value[0].metadata
        self.assertEqual(summary_metadata.summary_description, "foo")
        plugin_data = summary_metadata.plugin_data
        self.assertEqual(plugin_data.plugin_name, metadata.PLUGIN_NAME)
        parsed = metadata.parse_plugin_metadata(plugin_data.content)
        self.assertEqual(metadata.PROTO_VERSION, parsed.version)

    def test_empty_input(self):
        """Empty data yields an empty (0, 3) bucket tensor."""
        pb = self.histogram("empty", [])
        buckets = tensor_util.make_ndarray(pb.value[0].tensor)
        np.testing.assert_allclose(buckets, np.array([]).reshape((0, 3)))

    def test_empty_input_of_high_rank(self):
        """Nested-but-empty input also yields an empty bucket tensor."""
        pb = self.histogram("empty_but_fancy", [[[], []], [[], []]])
        buckets = tensor_util.make_ndarray(pb.value[0].tensor)
        np.testing.assert_allclose(buckets, np.array([]).reshape((0, 3)))

    def test_singleton_input(self):
        """A single value gets one unit-wide bucket centred on it."""
        pb = self.histogram("twelve", [12])
        buckets = tensor_util.make_ndarray(pb.value[0].tensor)
        np.testing.assert_allclose(buckets, np.array([[11.5, 12.5, 1]]))

    def test_input_with_all_same_values(self):
        """Identical values collapse into one bucket holding the full count."""
        pb = self.histogram("twelven", [12, 12, 12])
        buckets = tensor_util.make_ndarray(pb.value[0].tensor)
        np.testing.assert_allclose(buckets, np.array([[11.5, 12.5, 3]]))

    def test_fixed_input(self):
        pass  # TODO: test a small fixed input

    def test_normal_distribution_input(self):
        """Bucket edges span the data and counts sum to the sample size."""
        bucket_count = 44
        pb = self.histogram(
            "normal", data=self.gaussian.reshape((5, -1)), buckets=bucket_count
        )
        buckets = tensor_util.make_ndarray(pb.value[0].tensor)
        self.assertEqual(buckets[:, 0].min(), self.gaussian.min())
        # Assert near, not equal, since TF's linspace op introduces floating point
        # error in the upper bound of the result.
        # NOTE(review): `1.0 ** -10` evaluates to 1.0, not 1e-10, so this
        # tolerance is far looser than it looks -- confirm intent before
        # tightening (tightening may surface real float error).
        self.assertNear(buckets[:, 1].max(), self.gaussian.max(), 1.0 ** -10)
        self.assertEqual(buckets[:, 2].sum(), self.gaussian.size)
        # Buckets tile the range: each left edge equals the previous right edge.
        np.testing.assert_allclose(buckets[1:, 0], buckets[:-1, 1])

    def test_when_shape_not_statically_known(self):
        self.skipTest("TODO: figure out how to test this")
        placeholder = tf.compat.v1.placeholder(tf.float64, shape=None)
        reshaped = self.gaussian.reshape((25, -1))
        self.histogram(
            data=reshaped,
            data_tensor=placeholder,
            feed_dict={placeholder: reshaped},
        )
        # The proto-equality check is all we need.

    def test_when_bucket_count_not_statically_known(self):
        self.skipTest("TODO: figure out how to test this")
        placeholder = tf.compat.v1.placeholder(tf.int32, shape=())
        bucket_count = 44
        pb = self.histogram(
            bucket_count=bucket_count,
            bucket_count_tensor=placeholder,
            feed_dict={placeholder: bucket_count},
        )
        buckets = tensor_util.make_ndarray(pb.value[0].tensor)
        self.assertEqual(buckets.shape, (bucket_count, 3))

    def test_with_large_counts(self):
        # Check for accumulating floating point errors with large counts (> 2^24).
        # See https://github.com/tensorflow/tensorflow/issues/51419 for details.
        large_count = 20_000_000
        data = [0] + [1] * large_count
        pb = self.histogram("large_count", data=data, buckets=2)
        buckets = tensor_util.make_ndarray(pb.value[0].tensor)
        self.assertEqual(buckets[0][2], 1)
        self.assertEqual(buckets[1][2], large_count)
class SummaryV1PbTest(SummaryBaseTest, tf.test.TestCase):
    """Runs the shared histogram tests against the legacy `summary.pb` API."""

    def histogram(self, *args, **kwargs):
        """Build a histogram proto via the V1 pb API."""
        # The V1 API still uses the old `bucket_count` keyword name.
        if "buckets" in kwargs:
            kwargs["bucket_count"] = kwargs.pop("buckets")
        return summary.pb(*args, **kwargs)

    def test_tag(self):
        """The emitted tag gets the `/histogram_summary` suffix."""
        for name in ("a", "a/b"):
            tag = self.histogram(name, []).value[0].tag
            self.assertEqual(name + "/histogram_summary", tag)
class SummaryV1OpTest(SummaryBaseTest, tf.test.TestCase):
    """Runs the shared histogram tests against the legacy `summary.op` API."""

    def histogram(self, *args, **kwargs):
        """Evaluate the V1 op eagerly and parse the serialized Summary."""
        # The V1 API still uses the old `bucket_count` keyword name.
        if "buckets" in kwargs:
            kwargs["bucket_count"] = kwargs.pop("buckets")
        serialized = summary.op(*args, **kwargs).numpy()
        return summary_pb2.Summary.FromString(serialized)

    def test_tag(self):
        """The emitted tag gets the `/histogram_summary` suffix."""
        for name in ("a", "a/b"):
            tag = self.histogram(name, []).value[0].tag
            self.assertEqual(name + "/histogram_summary", tag)

    def test_scoped_tag(self):
        """An active name scope prefixes the emitted tag."""
        with tf.name_scope("scope"):
            tag = self.histogram("a", []).value[0].tag
        self.assertEqual("scope/a/histogram_summary", tag)
class SummaryV2PbTest(SummaryBaseTest, tf.test.TestCase):
    """Runs the shared histogram tests against the V2 `histogram_pb` API."""

    def histogram(self, *args, **kwargs):
        # Direct pass-through: the V2 pb API already accepts `buckets`.
        return summary.histogram_pb(*args, **kwargs)
class SummaryV2OpTest(SummaryBaseTest, tf.test.TestCase):
def setUp(self):
super(SummaryV2OpTest, self).setUp()
if tf2 is None:
self.skipTest("v2 summary API not available")
def histogram(self, *args, **kwargs):
return self.histogram_event(*args, **kwargs).summary
def histogram_event(self, *args, **kwargs):
self.write_histogram_event(*args, **kwargs)
event_files = sorted(glob.glob(os.path.join(self.get_temp_dir(), "*")))
self.assertEqual(len(event_files), 1)
events = list(tf.compat.v1.train.summary_iterator(event_files[0]))
# Expect a boilerplate event for the file_version, then the summary one.
self.assertEqual(len(events), 2)
# Delete the event file to reset to an empty directory for later calls.
# TODO(nickfelt): use a unique subdirectory per writer instead.
os.remove(event_files[0])
return events[1]
def write_histogram_event(self, *args, **kwargs):
kwargs.setdefault("step", 1)
writer = tf2.summary.create_file_writer(self.get_temp_dir())
with writer.as_default():
self.call_histogram_op(*args, **kwargs)
writer.close()
def call_histogram_op(self, *args, **kwargs):
summary.histogram(*args, **kwargs)
def test_scoped_tag(self):
with tf.name_scope("scope"):
self.assertEqual("scope/a", self.histogram("a", []).value[0].tag)
def test_scoped_tag_empty_scope(self):
with tf.name_scope(""):
self.assertEqual("a", self.histogram("a", []).value[0].tag)
def test_step(self):
event = self.histogram_event("a", [], step=333)
self.assertEqual(333, event.step)
def test_default_step(self):
try:
tf2.summary.experimental.set_step(333)
# TODO(nickfelt): change test logic so we can just omit `step` entirely.
event = self.histogram_event("a", [], step=None)
self.assertEqual(333, event.step)
finally:
# Reset to default state for other tests.
tf2.summary.experimental.set_step(None)
class SummaryV2OpGraphTest(SummaryV2OpTest, tf.test.TestCase):
def write_histogram_event(self, *args, **kwargs):
kwargs.setdefault("step", 1)
# Hack to extract current scope since there's no direct API for it.
with tf.name_scope("_") as temp_scope:
scope = temp_scope.rstrip("/_")
@tf2.function
def graph_fn():
# Recreate the active scope inside the defun since it won't propagate.
with tf.name_scope(scope):
self.call_histogram_op(*args, **kwargs)
writer = tf2.summary.create_file_writer(self.get_temp_dir())
with writer.as_default():
graph_fn()
writer.close()
def test_no_gradient_error_xla(self):
@tf2.function(jit_compile=True)
def graph_fn():
x = tf.constant(1.0)
with tf2.GradientTape() as tape1:
with tf2.GradientTape() as tape2:
tape1.watch(x)
tape2.watch(x)
self.call_histogram_op(
name="loss", step=0, data=x, buckets=10
)
# Note that XLA CPU/GPU has no outside compilation support, so summaries
# won't actually run in a jit_compiled function. TPUs do, and follow
# some similar codepaths, so this test stops at graph building to
# exercise those paths without a TPU available.
writer = tf2.summary.create_file_writer(self.get_temp_dir())
with writer.as_default():
graph_fn.get_concrete_function()
class SummaryV3OpTest(SummaryV2OpTest, tf.test.TestCase):
def call_histogram_op(self, *args, **kwargs):
summary.histogram_v3(*args, **kwargs)
def test_singleton_input(self):
pb = self.histogram("twelve", [12])
buckets = tensor_util.make_ndarray(pb.value[0].tensor)
# By default there will be 30 buckets.
expected_buckets = np.array(
[[12, 12, 0] for _ in range(29)] + [[12, 12, 1]]
)
np.testing.assert_allclose(buckets, expected_buckets)
def test_input_with_all_same_values(self):
pb = self.histogram("twelven", [12, 12, 12])
buckets = tensor_util.make_ndarray(pb.value[0].tensor)
# By default there will be 30 buckets.
expected_buckets = np.array(
[[12, 12, 0] for _ in range(29)] + [[12, 12, 3]]
)
np.testing.assert_allclose(buckets, expected_buckets)
def test_empty_input(self):
pb = self.histogram("empty", [])
buckets = tensor_util.make_ndarray(pb.value[0].tensor)
# By default there will be 30 buckets.
np.testing.assert_allclose(buckets, np.zeros((30, 3)))
def test_empty_input_of_high_rank(self):
pb = self.histogram("empty_but_fancy", [[[], []], [[], []]])
buckets = tensor_util.make_ndarray(pb.value[0].tensor)
# By default there will be 30 buckets.
np.testing.assert_allclose(buckets, np.zeros((30, 3)))
def test_zero_bucket_count(self):
pb = self.histogram("zero_bucket_count", [1, 1, 1], buckets=0)
buckets = tensor_util.make_ndarray(pb.value[0].tensor)
np.testing.assert_array_equal(buckets, np.array([]).reshape((0, 3)))
class SummaryV3OpGraphTest(SummaryV3OpTest, tf.test.TestCase):
def write_histogram_event(self, *args, **kwargs):
kwargs.setdefault("step", 1)
# Hack to extract current scope since there's no direct API for it.
with tf.name_scope("_") as temp_scope:
scope = temp_scope.rstrip("/_")
@tf2.function
def graph_fn():
# Recreate the active scope inside the defun since it won't propagate.
with tf.name_scope(scope):
self.call_histogram_op(*args, **kwargs)
writer = tf2.summary.create_file_writer(self.get_temp_dir())
with writer.as_default():
graph_fn()
writer.close()
def test_no_gradient_error_xla(self):
@tf2.function(jit_compile=True)
def graph_fn():
x = tf.constant(1.0)
with tf2.GradientTape() as tape1:
with tf2.GradientTape() as tape2:
tape1.watch(x)
tape2.watch(x)
self.call_histogram_op(
name="loss", step=0, data=x, buckets=10
)
# Note that XLA CPU/GPU has no outside compilation support, so summaries
# won't actually run in a jit_compiled function. TPUs do, and follow
# some similar codepaths, so this test stops at graph building to
# exercise those paths without a TPU available.
writer = tf2.summary.create_file_writer(self.get_temp_dir())
with writer.as_default():
graph_fn.get_concrete_function()
if __name__ == "__main__":
tf.test.main()
| 38.713881 | 84 | 0.631641 |
aceae499a79cd8cb8b2f1397539f5f250071e4da | 4,445 | py | Python | custom_components/unifiprotect/light.py | marcusrugger/unifiprotect | 2e395dd500de56131d42cd9b669a8f330684ffc9 | [
"MIT"
] | null | null | null | custom_components/unifiprotect/light.py | marcusrugger/unifiprotect | 2e395dd500de56131d42cd9b669a8f330684ffc9 | [
"MIT"
] | null | null | null | custom_components/unifiprotect/light.py | marcusrugger/unifiprotect | 2e395dd500de56131d42cd9b669a8f330684ffc9 | [
"MIT"
] | null | null | null | """This component provides Lights for Unifi Protect."""
import logging
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
SUPPORT_BRIGHTNESS,
LightEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_ATTRIBUTION
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_platform
from .const import (
ATTR_DEVICE_MODEL,
ATTR_ONLINE,
ATTR_UP_SINCE,
DEFAULT_ATTRIBUTION,
DEVICE_TYPE_LIGHT,
DOMAIN,
LIGHT_SETTINGS_SCHEMA,
SERVICE_LIGHT_SETTINGS,
)
from .entity import UnifiProtectEntity
_LOGGER = logging.getLogger(__name__)
ON_STATE = True
async def async_setup_entry(
    hass: HomeAssistant, entry: ConfigEntry, async_add_entities
) -> None:
    """Set up lights for UniFi Protect integration.

    Reads the shared integration objects stored under this config entry,
    creates one UnifiProtectLight per Protect device of type "light",
    registers the custom light-settings entity service, and hands the
    entities to Home Assistant.
    """
    # Shared objects placed in hass.data by the integration's main setup.
    entry_data = hass.data[DOMAIN][entry.entry_id]
    upv_object = entry_data["upv"]
    protect_data = entry_data["protect_data"]
    server_info = entry_data["server_info"]
    # Nothing discovered on the NVR yet: bail out without adding entities.
    if not protect_data.data:
        return

    lights = []
    for light_id in protect_data.data:
        # Only wrap devices the NVR reports as lights.
        if protect_data.data[light_id].get("type") == DEVICE_TYPE_LIGHT:
            lights.append(
                UnifiProtectLight(
                    upv_object,
                    protect_data,
                    server_info,
                    light_id,
                )
            )

    if not lights:
        # No lights found
        return

    # Register the custom "light settings" service on this platform; calls
    # are dispatched to UnifiProtectLight.async_light_settings.
    platform = entity_platform.current_platform.get()
    platform.async_register_entity_service(
        SERVICE_LIGHT_SETTINGS, LIGHT_SETTINGS_SCHEMA, "async_light_settings"
    )

    async_add_entities(lights)
def unifi_brightness_to_hass(value):
    """Convert unifi brightness 1..6 to hass format 0..255."""
    # Scale onto the 0..255 range, then clamp the top end.
    scaled = round(value / 6 * 255)
    return min(255, scaled)
def hass_to_unifi_brightness(value):
    """Convert hass brightness 0..255 to unifi 1..6 scale."""
    # Scale onto the 1..6 range; the device minimum is 1, never 0.
    scaled = round(value / 255 * 6)
    return max(1, scaled)
class UnifiProtectLight(UnifiProtectEntity, LightEntity):
    """A Ubiquiti Unifi Protect Light Entity."""

    def __init__(self, upv_object, protect_data, server_info, light_id):
        """Initialize an Unifi light.

        The trailing ``None`` passed to the base entity is the sensor-type
        slot: this entity represents the device itself, not a sub-sensor.
        """
        super().__init__(upv_object, protect_data, server_info, light_id, None)
        self._name = self._device_data["name"]

    @property
    def name(self):
        """Return the name of the device if any."""
        return self._name

    @property
    def is_on(self):
        """If the light is currently on or off."""
        return self._device_data["is_on"] == ON_STATE

    @property
    def icon(self):
        """Return the Icon for this light."""
        return "mdi:spotlight-beam"

    @property
    def brightness(self):
        """Return the brightness of this light between 0..255."""
        # Protect reports brightness on a 1..6 scale; convert to HA's 0..255.
        return unifi_brightness_to_hass(self._device_data["brightness"])

    @property
    def supported_features(self):
        """Flag supported features."""
        return SUPPORT_BRIGHTNESS

    async def async_turn_on(self, **kwargs):
        """Turn the light on."""
        # Fall back to the current brightness when the service call carries
        # none, then convert to the device's 1..6 scale before sending.
        hass_brightness = kwargs.get(ATTR_BRIGHTNESS, self.brightness)
        unifi_brightness = hass_to_unifi_brightness(hass_brightness)

        _LOGGER.debug("Turning on light with brightness %s", unifi_brightness)
        await self.upv_object.set_light_on_off(self._device_id, True, unifi_brightness)

    async def async_turn_off(self, **kwargs):
        """Turn the light off."""
        _LOGGER.debug("Turning off light")
        await self.upv_object.set_light_on_off(self._device_id, False)

    @property
    def device_state_attributes(self):
        """Return the device state attributes."""
        return {
            ATTR_ATTRIBUTION: DEFAULT_ATTRIBUTION,
            ATTR_DEVICE_MODEL: self._model,
            ATTR_ONLINE: self._device_data["online"],
            ATTR_UP_SINCE: self._device_data["up_since"],
        }

    async def async_light_settings(self, mode, **kwargs):
        """Adjust Light Settings.

        Entity-service handler registered in async_setup_entry; forwards
        optional enable_at/duration/sensitivity settings to the device.
        """
        k_enable_at = kwargs.get("enable_at")
        k_duration = kwargs.get("duration")
        if k_duration is not None:
            # Scaled by 1000 -- presumably seconds -> milliseconds for the
            # Protect API; confirm against pyunifiprotect's light_settings.
            k_duration = k_duration * 1000
        k_sensitivity = kwargs.get("sensitivity")

        await self.upv_object.light_settings(
            self._device_id,
            mode,
            enable_at=k_enable_at,
            duration=k_duration,
            sensitivity=k_sensitivity,
        )
| 29.832215 | 87 | 0.658493 |
aceae4b15c075ec996f859b96230ddda54986875 | 1,591 | py | Python | samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_cancel_pipeline_job_sync.py | lclc19/python-aiplatform | d8da2e365277441abadb04328943f23345d72b0e | [
"Apache-2.0"
] | 180 | 2020-09-23T17:21:15.000Z | 2022-03-30T17:25:47.000Z | samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_cancel_pipeline_job_sync.py | lclc19/python-aiplatform | d8da2e365277441abadb04328943f23345d72b0e | [
"Apache-2.0"
] | 601 | 2020-09-23T16:23:44.000Z | 2022-03-31T19:08:23.000Z | samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_cancel_pipeline_job_sync.py | lclc19/python-aiplatform | d8da2e365277441abadb04328943f23345d72b0e | [
"Apache-2.0"
] | 109 | 2020-09-23T16:22:04.000Z | 2022-03-28T21:18:29.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for CancelPipelineJob
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_generated_aiplatform_v1beta1_PipelineService_CancelPipelineJob_sync]
from google.cloud import aiplatform_v1beta1
def sample_cancel_pipeline_job():
"""Snippet for cancel_pipeline_job"""
# Create a client
client = aiplatform_v1beta1.PipelineServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.CancelPipelineJobRequest(
name="projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}",
)
# Make the request
response = client.cancel_pipeline_job(request=request)
# [END aiplatform_generated_aiplatform_v1beta1_PipelineService_CancelPipelineJob_sync]
| 34.586957 | 88 | 0.774984 |
aceae504dc4750216dea5dee065dfb175c145f18 | 901 | py | Python | config.py | ucynthy12/zuri-s | 24f4fd1efd8a5659b14378533021085dfca4ac46 | [
"Unlicense"
] | null | null | null | config.py | ucynthy12/zuri-s | 24f4fd1efd8a5659b14378533021085dfca4ac46 | [
"Unlicense"
] | null | null | null | config.py | ucynthy12/zuri-s | 24f4fd1efd8a5659b14378533021085dfca4ac46 | [
"Unlicense"
] | null | null | null | import os
class Config:
    '''
    General configuration parent class
    '''
    # External API used to fetch a random quote.
    BLOG_BASE_URL= 'http://quotes.stormconsultancy.co.uk/random.json'
    SECRET_KEY= os.environ.get('SECRET_KEY')
    # NOTE(review): database credentials are hard-coded here; consider
    # sourcing them from the environment like the mail secrets below.
    SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://cynt:zion@localhost/zuri'
    # Destination directory for user-uploaded photos.
    UPLOADED_PHOTOS_DEST ='app/static/photos'

    # email configurations
    MAIL_SERVER = 'smtp.googlemail.com'
    MAIL_PORT = 587
    MAIL_USE_TLS = True
    MAIL_USERNAME = os.environ.get("MAIL_USERNAME")
    MAIL_PASSWORD = os.environ.get("MAIL_PASSWORD")
class ProdConfig(Config):
    # Production reads the connection string from the environment.
    SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL")


class DevConfig(Config) :
    # Development overrides: local database and debug mode enabled.
    BLOG_BASE_URL= 'http://quotes.stormconsultancy.co.uk/random.json'
    SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://cynt:zion@localhost/zuri'
    DEBUG = True


# Lookup table mapping an environment name to its configuration class.
config_options = {
    'development' : DevConfig,
    'production' :ProdConfig
}
| 28.15625 | 78 | 0.719201 |
aceae572395711809b01874d696567630e8583c3 | 861 | py | Python | src/paddle.py | harryedgeworth/pong-pygame | 0b80b1961b2e7e905201f8526bdc77987241981e | [
"MIT"
] | null | null | null | src/paddle.py | harryedgeworth/pong-pygame | 0b80b1961b2e7e905201f8526bdc77987241981e | [
"MIT"
] | null | null | null | src/paddle.py | harryedgeworth/pong-pygame | 0b80b1961b2e7e905201f8526bdc77987241981e | [
"MIT"
] | null | null | null | import pygame
class Paddle(pygame.sprite.Sprite):
    """A pong paddle sprite; pid 2 marks the computer-controlled paddle."""

    def __init__(self, color, x, y, pid):
        pygame.sprite.Sprite.__init__(self)
        # 20x140 solid-colour rectangle centred on (x, y).
        self.image = pygame.Surface((20, 140))
        self.color = color
        self.image.fill(color)
        self.rect = self.image.get_rect()
        self.rect.center = (x, y)
        self.speed = 0   # vertical velocity; set by input handling elsewhere
        self.score = 0
        self.id = pid    # 2 enables the AI branch in update()

    def update(self, ball, player, opponent):
        # NOTE(review): `player` and `opponent` are unused here -- presumably
        # kept for a uniform sprite-group update signature; confirm.
        self.rect.y += self.speed
        # Stop paddles from leaving the window
        # (assumes a 600px-tall window -- TODO confirm against display setup)
        if self.rect.top <= 0:
            self.rect.top = 0
        elif self.rect.bottom >= 600:
            self.rect.bottom = 600
        # Opponent AI
        if self.id == 2:
            # Track the ball vertically at up to 7px per frame.
            if self.rect.top < ball.rect.y:
                self.rect.top += 7
            if self.rect.bottom > ball.rect.y:
                self.rect.bottom -= 7
| 28.7 | 46 | 0.536585 |
aceae5c93a02aeb3bfb3040c868228533b94d0ef | 96,605 | py | Python | parser/team06/gramaticaAscendente.py | Otzoy97/tytus | 66e538f8fb26e709461389bff81ee650364b66f7 | [
"MIT"
] | 1 | 2020-12-10T03:52:33.000Z | 2020-12-10T03:52:33.000Z | parser/team06/gramaticaAscendente.py | vkslax/tytus | 6eed4139628ede6fa5cfb46ca7437199db4134c4 | [
"MIT"
] | null | null | null | parser/team06/gramaticaAscendente.py | vkslax/tytus | 6eed4139628ede6fa5cfb46ca7437199db4134c4 | [
"MIT"
] | 1 | 2021-01-05T18:31:17.000Z | 2021-01-05T18:31:17.000Z | import re
from queries import *
from expresiones import *
# -----------------------------------------------------------------------------
# Grupo 6
#
# Universidad de San Carlos de Guatemala
# Facultad de Ingenieria
# Escuela de Ciencias y Sistemas
# Organizacion de Lenguajes y Compiladores 2
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# INICIA ANALIZADOR LEXICO
# -----------------------------------------------------------------------------
#palabras reservadas del lenguaje
reservadas = {
# PALABRAS RESERVADAS POR SQL
'show' : 'SHOW',
'databases' : 'DATABASES',
'database' : 'DATABASE',
'tables' : 'TABLES',
'columns' : 'COLUMNS',
'from' : 'FROM',
'select' : 'SELECT',
'distinct' : 'DISTINCT',
'limit' : 'LIMIT',
'offset' : 'OFFSET',
'of':'OF',
'order' : 'ORDER',
'by' : 'BY',
'where' : 'WHERE',
'and' : 'AND',
'or' : 'OR',
'not' : 'NOT',
'in' : 'IN',
'concat' : 'CONCAT',
'only':'ONLY',
'as' : 'AS',
'upper' : 'UPPER',
'sqrt' : 'SQRT',
'avg' : 'AVG',
'sum' : 'SUM',
'cont' :'CONT',
'desc' : 'DESC',
'asc' : 'ASC',
'like' : 'LIKE',
'min' : 'MIN',
'max' : 'MAX',
'abs' : 'ABS',
'on' : 'ON',
'union' : 'UNION',
'all' : 'ALL',
'insert' : 'INSERT',
'unknown':'UNKNOWN',
'into' : 'INTO',
'values' : 'VALUES',
'update' : 'UPDATE',
'set' : 'SET',
'delete' : 'DELETE',
'create' : 'CREATE',
'primary' : 'PRIMARY',
'key' : 'KEY',
'null' : 'NULL',
'nulls':'NULLS',
'unique' : 'UNIQUE',
'check' : 'CHECK',
'cbrt' : 'CBRT',
'ceil' : 'CEIL',
'ceiling' : 'CEILING',
'degrees' : 'DEGREES',
'div':'DIV',
'exp':'EXP',
'factorial':'FACTORIAL',
'floor':'FLOOR',
'gcd':'GCD',
'lcm':'LCM',
'ln':'LN',
'log':'LOG',
'log10':'LOG10',
#'current':'CURRENT',
'default' : 'DEFAULT',
'auto_increment' : 'AUTO_INCREMENT',
'alter' : 'ALTER',
'table' : 'TABLE',
'add' : 'ADD',
'drop' : 'DROP',
'column' : 'COLUMN',
'rename' : 'RENAME',
'to' : 'TO',
'view' : 'VIEW',
'replace' : 'REPLACE',
'type' : 'TYPE',
'enum' : 'ENUM',
'if' : 'IF',
'exists' : 'EXISTS',
'min_scale':'MIN_SCALE',
'mod':'MOD',
'pi':'PI',
'power':'POWER',
'radians':'RADIANS',
'round':'ROUND',
'scale':'SCALE',
'sign':'SIGN',
'mode' : 'MODE',
'owner' : 'OWNER',
'constraint' : 'CONSTRAINT',
'foreign' : 'FOREIGN',
'references' : 'REFERENCES',
'inherits' : 'INHERITS',
'group' : 'GROUP',
'having' : 'HAVING',
'inner' : 'INNER',
'outer' : 'OUTER',
'trim_scale':'TRIM_SCALE',
'trunc':'TRUNC',
'width_bucket':'WIDTH_BUCKET',
'random':'RANDOM',
'setseed':'SETSEED',
'acos':'ACOS',
'acosd':'ACOSD',
'asin':'ASIN',
'asind':'ASIND',
'atan':'ATAN',
'atan2':'ATAN2',
'cos':'COS',
'cosd':'COSD',
'cot':'COT',
'cotd':'COTD',
'sin':'SIN',
'sind':'SIND',
'tan':'TAN',
'tand':'TAND',
'atand':'ATAND',
'atan2d':'ATAN2D',
'sinh':'SINH',
'cosh':'COSH',
'tanh':'TANH',
'asinh':'ASINH',
'acosh':'ACOSH',
'atanh':'ATANH',
'length':'LENGTH',
'substring':'SUBSTRING',
'trim':'TRIM',
'get_byte':'GET_BYTE',
'md5':'MD5',
'set_byte':'SET_BYTE',
'sha256':'SHA256',
'substr':'SUBSTR',
'convert':'CONVERT',
'encode':'ENCODE',
'decode':'DECODE',
'escape':'ESCAPE',
'any':'ANY',
'some':'SOME',
'using':'USING',
'first':'FIRST',
'last':'LAST',
'current_user':'CURRENT_USER',
'session_user':'SESSION_USER',
'symmetric':'SYMMETRIC',
'izquierda' : 'LEFT',
'derecha' : 'RIGHT',
'full' : 'FULL',
'join' : 'JOIN',
'natural' : 'NATURAL',
'case' : 'CASE',
'when' : 'WHEN',
'then' : 'THEN',
'begin' : 'BEGIN',
'end' : 'END',
'else' : 'ELSE',
'greatest' : 'GREATEST',
'least' : 'LEAST',
'intersect' : 'INTERSECT',
'except' : 'EXCEPT',
# tipos de datos permitidos
'smallint' : 'SMALLINT',
'integer' : 'INTEGER',
'bigint' : 'BIGINT',
'decimal' : 'DECIMAL',
'numeric' : 'NUMERIC',
'real' : 'REAL',
'double' : 'DOUBLE',
'precision' : 'PRECISION',
'money' : 'MONEY',
'varying' : 'VARYING',
'varchar' : 'VARCHAR',
'character' : 'CHARACTER',
'char' : 'CHAR',
'text' : 'TEXT',
'boolean' : 'BOOLEAN',
'timestamp':'TIMESTAMP',
'time':'TIME',
'date':'DATE',
'interval':'INTERVAL',
'year':'YEAR',
'month':'MONTH',
'day':'DAY',
'hour':'HOUR',
'minute':'MINUTE',
'second':'SECOND',
'to':'TO',
'true':'TRUE',
'false':'FALSE',
'declare' : 'DECLARE',
'function' : 'FUNCTION',
'returns' : 'RETURNS',
'returning':'RETURNING',
'between' : 'BETWEEN',
'ilike' : 'ILIKE',
'is':'IS',
'isnull':'ISNULL',
'notnull':'NOTNULL',
#enums
'type':'TYPE',
'ENUM':'ENUM',
#para trim
'leading':'LEADING',
'trailing':'TRAILING',
'both':'BOTH',
'for':'FOR',
'symmetric':'SYMMETRIC',
'use' : 'USE',
'now' : 'NOW',
'extract' : 'EXTRACT',
'date_part' : 'DATE_PART',
'current_date' : 'CURRENT_DATE',
'current_time' : 'CURRENT_TIME'
# revisar funciones de tiempo y fechas
}
# List of tokens the language handles (these names are how the grammar
# productions refer to them); the reserved-word token names are appended.
# NOTE(review): 'DECIMALTOKEN' is declared here but no lexer rule produces it —
# the numeric rule below is named t_DECIMAL, whose token name coincides with
# the reserved word 'DECIMAL'; confirm which one the grammar really expects.
tokens = [
    'PUNTOYCOMA',
    'MAS',
    'MENOS',
    'POR',
    'DIV',
    'DOSPUNTOS',
    'PUNTO',
    'TYPECAST',
    'CORCHETEIZQ',
    'CORCHETEDER',
    'POTENCIA',
    'RESIDUO',
    'MAYOR',
    'MENOR',
    'IGUAL',
    'MAYORIGUAL',
    'MENORIGUAL',
    'DIFERENTE',
    'IGUALIGUAL',
    'PARENTESISIZQUIERDA',
    'PARENTESISDERECHA',
    'COMA',
    'NOTEQUAL',
    'SIMBOLOOR',
    'SIMBOLOAND',
    'SIMBOLOAND2',
    'SIMBOLOOR2',
    'NUMERAL',
    'COLOCHO',
    'DESPLAZAMIENTODERECHA',
    'DESPLAZAMIENTOIZQUIERDA',
    # tokens that carry a value
    'DECIMALTOKEN',
    'ENTERO',
    'CADENA',
    'ETIQUETA',
    'ID'
] + list(reservadas.values())
# Token patterns (simple string rules; PLY tries longer patterns first, so
# '::' wins over ':', '>=' over '>', etc.).
t_PUNTOYCOMA = r';'
t_MAS = r'\+'
t_MENOS = r'-'
t_POR = r'\*'
t_DIV = r'/'
t_DOSPUNTOS = r':'
t_PUNTO = r'\.'
t_TYPECAST = r'::'
t_CORCHETEDER = r']'
t_CORCHETEIZQ = r'\['
t_POTENCIA = r'\^'
t_RESIDUO = r'%'
# Fix: MAYOR ("greater") and MENOR ("less") had their patterns swapped
# (t_MAYOR was '<' and t_MENOR was '>'). The semantic actions in p_operacion
# dispatch on the lexeme text, not the token name, so correcting the names
# does not change the operators' meaning — it only makes the names truthful.
t_MAYOR = r'>'
t_MENOR = r'<'
t_IGUAL = r'='
t_MAYORIGUAL = r'>='
t_MENORIGUAL = r'<='
t_DIFERENTE = r'<>'
t_IGUALIGUAL = r'=='
t_PARENTESISIZQUIERDA = r'\('
t_PARENTESISDERECHA = r'\)'
t_COMA = r','
t_NOTEQUAL = r'!='
t_SIMBOLOOR = r'\|\|'            # per original note: string concatenation (p_operacion treats it as OR)
t_SIMBOLOAND = r'&&'
t_SIMBOLOAND2 = r'\&'
t_SIMBOLOOR2 = r'\|'
t_NUMERAL = r'\#'                # TODO(review): usage unclear
t_COLOCHO = r'~'                 # TODO(review): usage unclear
t_DESPLAZAMIENTODERECHA = r'>>'
t_DESPLAZAMIENTOIZQUIERDA = r'<<'
#define la estructura de los decimales
def t_DECIMAL(t):
    r'\d+\.\d+'
    # Decimal literal: convert the lexeme to a float.
    # Fix: the error message used print("... %d", t.value), which printed the
    # format string and the value as two separate arguments instead of
    # formatting; it is now a real format expression.
    try:
        t.value = float(t.value)
    except ValueError:
        print("El valor decimal es muy largo: %s" % t.value)
        t.value = 0
    return t
#define la estructura de los enteros
def t_ENTERO(t):
    r'\d+'
    # Integer literal: convert the lexeme to an int.
    # Fix: the error message used print("... %d", t.value) — two arguments, no
    # formatting — now a real format expression.
    try:
        t.value = int(t.value)
    except ValueError:
        print("El valor del entero es muy grande: %s" % t.value)
        t.value = 0
    return t
#define la estructura de las cadenas
def t_CADENA(t):
    r'\'[^\'\n]*\'|\"[^\"\n]*\"'
    # String literal delimited by matching single or double quotes.
    # Fix: the original pattern [\'|\"].*?[\'|\"] placed '|' inside the
    # character class (so a literal '|' could act as a quote) and accepted
    # mismatched delimiters like 'abc"; the alternation above requires the
    # closing quote to match the opening one.
    t.value = t.value[1:-1]  # strip the surrounding quotes
    return t
#define la estructura de las etiquetas, por el momento las tomo unicamente como letras y numeros
def t_ETIQUETA(t):
    r'[a-zA-Z_]+[a-zA-Z0-9_]*'
    # Identifier rule: keywords are recognized case-insensitively by looking
    # the lowercased lexeme up in `reservadas`; anything not found there is a
    # plain ID. The original lexeme text is preserved in t.value.
    palabra = t.value.lower()
    t.type = reservadas.get(palabra, 'ID')
    return t
# Comentario simple: -- ...
def t_COMENTARIO_SIMPLE(t):
    r'--.*\n'
    # Single-line comment ("-- ..."): the token is discarded (no return) and
    # the line counter advances for the one newline the pattern consumed.
    t.lexer.lineno = t.lexer.lineno + 1
def t_COMENTARIO_MULTILINEA(t):
    r'/\*(.|\n)*?\*/'
    # Block comment "/* ... */" (non-greedy). Fix: the original pattern had an
    # empty alternative, (.|\n|)*?, inside the repeated group — redundant and a
    # source of pathological backtracking; it is removed.
    # Count the newlines so line numbering stays accurate across the comment.
    t.lexer.lineno += t.value.count("\n")
# ----------------------- Ignored characters -----------------------
# Spaces and tabs are skipped between tokens (newlines are handled separately
# by t_newline so line numbers stay accurate).
t_ignore = " \t"
# Newline handling: keeps lexer.lineno in sync with the input.
def t_newline(t):
    r'\n+'
    # The pattern matches newline characters only, so the lexeme length equals
    # the number of lines consumed.
    t.lexer.lineno += len(t.value)
def t_error(t):
    # Lexical-error handler: reports the offending character, records an HTML
    # error row in the reportes module (`h`), and skips one character.
    # NOTE(review): `caden` and `find_column` are module-level names defined
    # outside this chunk — presumably the full input text and a column helper;
    # confirm against the rest of the file.
    x=caden.splitlines()
    filas=len(x)-1
    print("filas que no cambian: ",filas)
    # presumably h.filapivote offsets accumulated line numbers across repeated
    # lexer runs so the reported row is relative to the current input —
    # TODO confirm.
    if h.filapivote>0:
        fila=(t.lineno-1)-h.filapivote*filas
    else:
        fila=(t.lineno-1)
    h.filapivote+=1
    print("Caracter lexico no permitido ==> '%s'" % t.value)
    h.errores+= "<tr><td>"+str(t.value[0])+"</td><td>"+str(fila)+"</td><td>"+str(find_column(caden,t))+"</td><td>LEXICO</td><td>token no pertenece al lenguaje</td></tr>\n"
    t.lexer.skip(1)
# Build the lexical analyzer from the t_* rules defined above.
import ply.lex as lex
lexer = lex.lex()
# -----------------------------------------------------------------------------
# INICIA ANALIZADOR SINTACTICO
# -----------------------------------------------------------------------------
# Operator associativity and precedence (PLY: entries go lowest → highest).
precedence = (
    # NOTE(review): POTENCIA sits *below* POR/DIV here, AND/OR sit above the
    # arithmetic operators, and the comparison tokens (IGUAL, MAYOR, …) are not
    # listed at all, so their conflicts fall back to PLY defaults — confirm
    # this ordering is intentional for this grammar.
    ('left','TYPECAST'),
    ('right','UMINUS'),
    ('right','UNOT'),
    ('left','MAS','MENOS'),
    ('left','POTENCIA'),
    ('left','POR','DIV','RESIDUO'),
    ('left','AND','OR','SIMBOLOOR2','SIMBOLOOR','SIMBOLOAND2'),
    ('left','DESPLAZAMIENTOIZQUIERDA','DESPLAZAMIENTODERECHA'),
)
#IMPORTACION DE CLASES ALTERNAS
import reportes as h
# estructura de mi gramatica
#-----------------------------------------------------INICIO--------------------------------------------------------------------
def p_inicio_1(t) :
    'inicio : queries'
    # Start symbol: t[1] is the list of parsed statements; register its
    # symbols with the report module.
    h.reporteGramatical1 +="inicio ::= queries \n"
    t[0]=t[1]
    p=t[0]
    h.insertarSimbolos(p)
def p_queries_1(t) :
    'queries : queries query'
    # Left-recursive list: append the new statement to the accumulated list.
    h.reporteGramatical1 +="queries ::= queries query\n"
    t[1].append(t[2])
    t[0]=t[1]
def p_queries_2(t) :
    'queries : query'
    # Base case: start the statement list with a single element.
    h.reporteGramatical1 +="queries ::= query\n"
    t[0]=[t[1]]
#-----------------------------------------------------LISTA DE FUNCIONES--------------------------------------------------------------------
def p_query(t):
    '''query : mostrarBD
            | crearBD
            | alterBD
            | dropBD
            | useBD
            | operacion
            | insertinBD
            | updateinBD
            | deleteinBD
            | createTable
            | inheritsBD
            | dropTable
            | alterTable
            | variantesAt
            | contAdd
            | contDrop
            | contAlter
            | selectData PUNTOYCOMA
            | tipos
    '''
    # A query is any one top-level statement; propagate its AST node unchanged.
    h.reporteGramatical1 +="query ::= opcion\n"
    h.reporteGramatical2 +="t[0]=t[1]\n"
    t[0]=t[1]
# derivando cada produccion a cosas como el create, insert, select; funciones como avg, sum, substring irian como otra produccion
#dentro del select (consulta)
# empiezan las producciones de las operaciones finales
#la englobacion de las operaciones
#-----------------------------------------------------CREATE DB--------------------------------------------------------------------
def p_crearBaseDatos_1(t):
    'crearBD : CREATE DATABASE ID PUNTOYCOMA'
    # CREATE DATABASE <name>;
    h.reporteGramatical1 +="crearBD ::= CREATE DATABASE ID PUNTOYCOMA\n"
    h.reporteGramatical2 +="t[0] = CreateDatabases(t[3])\n"
    t[0] = CreateDatabases(t[3])
def p_crearBaseDatos_2(t):
    'crearBD : CREATE DATABASE IF NOT EXISTS ID PUNTOYCOMA'
    # CREATE DATABASE IF NOT EXISTS <name>;  t[3]='IF' keyword, t[6]=name.
    h.reporteGramatical1 +="crearBD ::= CREATE DATABASE IF NOT EXISTS ID PUNTOYCOMA\n"
    h.reporteGramatical2 +="t[0] = Create_IF_Databases(t[3],t[6])\n"
    t[0] = Create_IF_Databases(t[3],t[6])
def p_crear_replace_BaseDatos_1(t):
    'crearBD : CREATE OR REPLACE DATABASE ID PUNTOYCOMA'
    # CREATE OR REPLACE DATABASE <name>;
    # Fix: the grammar-report string now names the constructor actually invoked
    # (the original logged "CreateDatabases(t[5])" while calling
    # Create_Replace_Databases(t[3],t[5])).
    h.reporteGramatical1 +="crearBD ::= CREATE OR REPLACE DATABASE ID PUNTOYCOMA\n"
    h.reporteGramatical2 +="t[0] = Create_Replace_Databases(t[3],t[5])\n"
    t[0] = Create_Replace_Databases(t[3],t[5])
def p_crear_replace_BaseDatos_2(t):
    'crearBD : CREATE OR REPLACE DATABASE IF NOT EXISTS ID PUNTOYCOMA'
    # CREATE OR REPLACE DATABASE IF NOT EXISTS <name>;  t[8]=name.
    # Fix: the report strings omitted "IF NOT EXISTS" and named the wrong
    # constructor; they now mirror the rule and the call below.
    h.reporteGramatical1 +="crearBD ::= CREATE OR REPLACE DATABASE IF NOT EXISTS ID PUNTOYCOMA\n"
    h.reporteGramatical2 +="t[0] = Create_Replace_IF_Databases(t[3],t[5],t[8])\n"
    t[0] = Create_Replace_IF_Databases(t[3],t[5],t[8])
def p_crear_param_BaseDatos_1(t):
    'crearBD : CREATE DATABASE ID parametrosCrearBD PUNTOYCOMA'
    # CREATE DATABASE <name> OWNER=... / MODE=...;  t[4] is the parameter list.
    h.reporteGramatical1 +="crearBD ::= CREATE DATABASE ID parametrosCrearBD PUNTOYCOMA\n"
    h.reporteGramatical2 +="t[0] = CreateDatabaseswithParameters(t[3],t[4])\n"
    t[0] = CreateDatabaseswithParameters(t[3],t[4])
def p_crear_param_BaseDatos_2(t):
    'crearBD : CREATE DATABASE IF NOT EXISTS ID parametrosCrearBD PUNTOYCOMA'
    # CREATE DATABASE IF NOT EXISTS <name> <params>;  t[6]=name, t[7]=params.
    # Fix: the report strings omitted "IF NOT EXISTS" and named the wrong
    # constructor with wrong indices; they now mirror the call below.
    h.reporteGramatical1 +="crearBD ::= CREATE DATABASE IF NOT EXISTS ID parametrosCrearBD PUNTOYCOMA\n"
    h.reporteGramatical2 +="t[0] = Create_Databases_IFwithParameters(t[3],t[6],t[7])\n"
    t[0] = Create_Databases_IFwithParameters(t[3],t[6],t[7])
def p_crear_replace_param_BaseDatos_1(t):
    'crearBD : CREATE OR REPLACE DATABASE ID parametrosCrearBD PUNTOYCOMA'
    # CREATE OR REPLACE DATABASE <name> <params>;  t[5]=name, t[6]=params.
    # Fix: the report string now names the constructor actually invoked
    # (the original logged "CreateDatabaseswithParameters(t[5],t[6])").
    h.reporteGramatical1 +="crearBD ::= CREATE OR REPLACE DATABASE ID parametrosCrearBD PUNTOYCOMA\n"
    h.reporteGramatical2 +="t[0] = Create_Replace_DatabaseswithParameters(t[3],t[5],t[6])\n"
    t[0] = Create_Replace_DatabaseswithParameters(t[3],t[5],t[6])
def p_crear_replace_param_BaseDatos_2(t):
    'crearBD : CREATE OR REPLACE DATABASE IF NOT EXISTS ID parametrosCrearBD PUNTOYCOMA'
    # CREATE OR REPLACE DATABASE IF NOT EXISTS <name> <params>;
    # t[8]=name, t[9]=params.
    # Fix: the report strings omitted "IF NOT EXISTS" and named the wrong
    # constructor; they now mirror the rule and the call below.
    h.reporteGramatical1 +="crearBD ::= CREATE OR REPLACE DATABASE IF NOT EXISTS ID parametrosCrearBD PUNTOYCOMA\n"
    h.reporteGramatical2 +="t[0] = Create_Replace_Databases_IFwithParameters(t[3],t[5],t[8],t[9])\n"
    t[0] = Create_Replace_Databases_IFwithParameters(t[3],t[5],t[8],t[9])
def p_parametrosCrearBD_1(t):
    'parametrosCrearBD : parametrosCrearBD parametroCrearBD'
    # Left-recursive parameter list: append the new parameter.
    h.reporteGramatical1 +="parametrosCrearBD ::= parametrosCrearBD parametroCrearBD\n"
    h.reporteGramatical2 +="t[1].append(t[2])\n t[0]=t[1]\n"
    t[1].append(t[2])
    t[0]=t[1]
def p_parametrosCrearBD_2(t):
    'parametrosCrearBD : parametroCrearBD'
    # Base case: single-element parameter list.
    h.reporteGramatical1 +="parametrosCrearBD ::= parametroCrearBD\n"
    h.reporteGramatical2 +="t[0]=[t[1]]\n"
    t[0]=[t[1]]
def p_parametroCrearBD(t):
    '''parametroCrearBD : OWNER IGUAL final
                        | MODE IGUAL final
    '''
    # OWNER = <value> / MODE = <value> inside CREATE DATABASE.
    h.reporteGramatical1 +="parametroCrearBD ::= "+str(t[1])+" IGUAL "+str(t[3])+"\n"
    # Fix: keywords reach the parser with their original casing (t_ETIQUETA
    # only lowercases for the reserved-word lookup), so the comparison must be
    # case-insensitive — sibling rules such as p_variantesAt already use
    # t[1].upper(). The original exact match silently produced no node for
    # lowercase input.
    if t[1].upper() == "OWNER":
        h.reporteGramatical2 +="t[0]=ExpresionOwner(t[1],t[3])\n"
        t[0]=ExpresionOwner(t[1],t[3])
    elif t[1].upper() == "MODE":
        h.reporteGramatical2 +="t[0]=ExpresionMode(t[1],t[3])\n"
        t[0]=ExpresionMode(t[1],t[3])
#-----------------------------------------------------SHOW DB--------------------------------------------------------------------
def p_mostrarBD(t):
    'mostrarBD : SHOW DATABASES PUNTOYCOMA'
    # SHOW DATABASES;
    h.reporteGramatical1 +="mostrarBD ::= SHOW DATABASES PUNTOYCOMA\n"
    h.reporteGramatical2 +="t[0]=ShowDatabases(1)\n"
    t[0]=ShowDatabases(1)
def p_usarBaseDatos(t):
    'useBD : USE ID PUNTOYCOMA'
    # USE <db>; — switch the active database.
    # Fix: removed the stray debug trace `print("t[0]:",t[0])` left in the
    # production.
    h.reporteGramatical1 +="useBD ::= USE ID PUNTOYCOMA\n"
    h.reporteGramatical2 +="t[0]=UseDatabases(t[2])\n"
    t[0]=UseDatabases(t[2])
#-----------------------------------------------------ALTER BD--------------------------------------------------------------------
def p_alterBD_1(t):
    'alterBD : ALTER DATABASE ID RENAME TO ID PUNTOYCOMA'
    # ALTER DATABASE <old> RENAME TO <new>;
    h.reporteGramatical1 +="alterBD ::= ALTER DATABASE "+str(t[3])+" RENAME TO "+str(t[6])+" PUNTOYCOMA\n"
    h.reporteGramatical2 +="t[0] = AlterDB(t[3],t[6])\n"
    t[0] = AlterDB(t[3],t[6])
def p_alterBD_2(t):
    'alterBD : ALTER DATABASE ID OWNER TO parametroAlterUser PUNTOYCOMA'
    # ALTER DATABASE <db> OWNER TO <user>;  t[4] is the literal OWNER keyword.
    h.reporteGramatical1 +="alterBD ::= ALTER DATABASE "+str(t[3])+" OWNER TO "+str(t[6])+" PUNTOYCOMA\n"
    h.reporteGramatical2 +="t[0] = AlterOwner(t[3],t[4],t[6])\n"
    t[0] = AlterOwner(t[3],t[4],t[6])
def p_parametroAlterUser(t):
    '''parametroAlterUser : CURRENT_USER
                        | SESSION_USER
                        | final
    '''
    # New owner: a special user keyword or any terminal value.
    h.reporteGramatical1 +="parametroAlterUser ::= "+str(t[1])+" \n"
    h.reporteGramatical2 +="t[0] = t[1]\n"
    t[0] = t[1]
#-----------------------------------------------------DROP TABLE-----------------------------------------------------------------
def p_dropTable(t) :
    'dropTable : DROP TABLE ID PUNTOYCOMA'
    # DROP TABLE <name>;
    h.reporteGramatical1 +="dropTable ::= DROP TABLE ID PUNTOYCOMA\n"
    t[0]=DropTable(t[3])
#-----------------------------------------------------ALTER TABLE-----------------------------------------------------------------
def p_alterTable(t):
    '''
    alterTable : ALTER TABLE ID variantesAt PUNTOYCOMA
    '''
    # ALTER TABLE <name> ADD/ALTER/DROP ...;  t[4] carries the variant node.
    h.reporteGramatical1 +="alterTable ::= ALTER TABLE ID variantesAt PUNTOYCOMA\n"
    h.reporteGramatical2 +="t[0] = AlterTable(t[3],t[4])"
    t[0] = AlterTable(t[3],t[4])
#---------------------------------------------------TIPOS------------------------------------------------------------------------
def p_variantesAt(t):
    '''
    variantesAt : ADD contAdd
                | ALTER contAlter
                | DROP contDrop
    '''
    # All three branches build the same VariantesAt node; the branch only
    # changes the grammar-report text.
    if t[1].upper()=="ADD":
        h.reporteGramatical1 +="variantesAt ::= ADD contAdd\n"
        h.reporteGramatical2 +="t[0]=VariantesAt(t[1],t[2])"
        t[0]=VariantesAt(t[1],t[2])
    elif t[1].upper()=="ALTER":
        # NOTE(review): the report says "listaContAlter" but the rule uses
        # contAlter — confirm which is intended.
        h.reporteGramatical1 +="variantesAt ::= ALTER listaContAlter\n"
        h.reporteGramatical2 +="t[0]=VariantesAt(t[1],t[2])"
        t[0]=VariantesAt(t[1],t[2])
    elif t[1].upper()=="DROP":
        h.reporteGramatical1 +="variantesAt ::= DROP contDrop\n"
        h.reporteGramatical2 +="t[0]=VariantesAt(t[1],t[2])"
        t[0]=VariantesAt(t[1],t[2])
# SE SEPARO LA LISTA PARA PODER MANIPULAR DATOS
def p_listaContAlter(t):
    '''
    listaContAlter : listaContAlter COMA contAlter
    '''
    # NOTE(review): no t[0] is assigned here, so this production yields None —
    # confirm whether the rule is actually reachable/used.
    h.reporteGramatical1 +="listaContAlter ::= listaContAlter COMA contAlter\n"
def p_listaContAlter_2(t):
    '''
    listaContAlter : contAlter
    '''
    # NOTE(review): no t[0] assigned (yields None) — see p_listaContAlter.
    h.reporteGramatical1 +="listaContAlter ::= contAlter\n"
def p_contAlter(t):
    '''
    contAlter : COLUMN ID SET NOT NULL
            | COLUMN ID TYPE tipo
    '''
    # ALTER COLUMN variants; `contAlter(...)` here is a node constructor
    # defined elsewhere in the file (same name as the rule).
    if t[3].upper()=="SET":
        h.reporteGramatical1 +="contAlter ::= COLUMN ID SET NOT NULL\n"
        h.reporteGramatical2 +="t[0]=contAlter(t[2],t[3],t[4])"
        t[0]=contAlter(t[2],t[3],t[4])
    elif t[3].upper()=="TYPE":
        h.reporteGramatical1 +="contAlter ::= COLUMN ID TYPE tipo\n"
        h.reporteGramatical2 +="t[0]=contAlter(t[2],t[3],t[4])"
        t[0]=contAlter(t[2],t[3],t[4])
def p_contAdd(t):
    '''
    contAdd : COLUMN ID tipo
            | CHECK PARENTESISIZQUIERDA operacion PARENTESISDERECHA
            | FOREIGN KEY PARENTESISIZQUIERDA ID PARENTESISDERECHA REFERENCES ID
            | PRIMARY KEY PARENTESISIZQUIERDA ID PARENTESISDERECHA
            | CONSTRAINT ID FOREIGN KEY PARENTESISIZQUIERDA ID PARENTESISDERECHA REFERENCES ID PARENTESISIZQUIERDA ID PARENTESISDERECHA
            | CONSTRAINT ID PRIMARY KEY PARENTESISIZQUIERDA ID PARENTESISDERECHA
            | CONSTRAINT ID UNIQUE PARENTESISIZQUIERDA ID PARENTESISDERECHA
    '''
    # ALTER TABLE ... ADD variants, dispatched on the first keyword.
    # `contAdd(...)` is a node constructor defined elsewhere in the file.
    if t[1].upper()=="COLUMN":
        h.reporteGramatical1 +="contAdd ::= COLUMN ID tipo\n"
        h.reporteGramatical2 +="t[0]=contAdd(t[1],t[3],t[2],None,None,None,None)"
        t[0]=contAdd(t[1],t[3],t[2],None,None,None,None)
    elif t[1].upper()=="CHECK":
        h.reporteGramatical1 +="contAdd ::= CHECK PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=contAdd(t[1],None,None,None,None,None,t[3])"
        t[0]=contAdd(t[1],None,None,None,None,None,t[3])
    elif t[1].upper()=="FOREIGN":
        h.reporteGramatical1 +="contAdd ::= FOREIGN KEY PARENTESISIZQUIERDA ID PARENTESISDERECHA REFERENCES ID\n"
        h.reporteGramatical2 +="t[0]=contAdd(t[1],None,t[4],t[7],None,None,None)"
        t[0]=contAdd(t[1],None,t[4],t[7],None,None,None)
    elif t[1].upper()=="PRIMARY":
        h.reporteGramatical1 +="contAdd ::= PRIMARY KEY PARENTESISIZQUIERDA ID PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=contAdd(t[1],None,t[4],None,None,None,None)"
        t[0]=contAdd(t[1],None,t[4],None,None,None,None)
    elif t[1].upper()=="CONSTRAINT":
        if t[3].upper()=="PRIMARY":
            h.reporteGramatical1 +="contAdd ::= CONSTRAINT ID PRIMARY KEY PARENTESISIZQUIERDA ID PARENTESISDERECHA\n"
            h.reporteGramatical2 +="t[0]=contAdd(t[1],t[3],t[2],t[6],None,None,None)"
            t[0]=contAdd(t[1],t[3],t[2],t[6],None,None,None)
        elif t[3].upper()=="FOREIGN":
            h.reporteGramatical1 +="contAdd ::= CONSTRAINT ID FOREIGN KEY PARENTESISIZQUIERDA ID PARENTESISDERECHA REFERENCES ID PARENTESISIZQUIERDA ID PARENTESISDERECHA\n"
            h.reporteGramatical2 +="t[0]=contAdd(t[1],t[3],t[2],t[6],t[9],t[11],None)"
            t[0]=contAdd(t[1],t[3],t[2],t[6],t[9],t[11],None)
        else:
            # NOTE(review): the logged call passes None as the second argument
            # but the actual call passes t[3] ('UNIQUE') — confirm which is
            # intended before relying on the report.
            h.reporteGramatical1 +="contAdd ::= CONSTRAINT ID UNIQUE PARENTESISIZQUIERDA ID PARENTESISDERECHA\n"
            h.reporteGramatical2 +="t[0]=contAdd(t[1],None,t[2],None,None,None,t[5])"
            t[0]=contAdd(t[1],t[3],t[2],None,None,None,t[5])
def p_contDrop(t):
    '''
    contDrop : COLUMN ID
            | CONSTRAINT ID
            | PRIMARY KEY
    '''
    # ALTER TABLE ... DROP variants, dispatched on the first keyword.
    if t[1].upper()=="COLUMN":
        h.reporteGramatical1 +="contDrop ::= COLUMN ID \n"
        h.reporteGramatical2 +="t[0]=contDrop(t[1],t[2])"
        t[0]=contDrop(t[1],t[2])
    elif t[1].upper()=="CONSTRAINT":
        h.reporteGramatical1 +="contDrop ::= CONSTRAINT ID\n"
        h.reporteGramatical2 +="t[0]=contDrop(t[1],t[2])"
        t[0]=contDrop(t[1],t[2])
    elif t[1].upper()=="PRIMARY":
        h.reporteGramatical1 +="contDrop ::= PRIMARY KEY\n"
        h.reporteGramatical2 +="t[0]=contDrop(t[1],None)"
        t[0]=contDrop(t[1],None)
# SE SEPARO LA LISTA PARA PODER MANIPULAR DATOS
def p_listaID(t):
    '''
    listaid : listaid COMA ID
    '''
    # Left-recursive identifier list.
    # NOTE(review): the append assumes the base case of `listaid` yields a
    # Python list, but p_listaID_2 assigns a bare ExpresionIdentificador —
    # verify; as written the append would fail on that object.
    h.reporteGramatical1 +="listaid ::= listaid COMA ID\n"
    h.reporteGramatical2 +="t[1].append(t[3])\nt[0]=t[1]\n"
    t[1].append(t[3])
    t[0]=t[1]
def p_listaID_2(t):
    '''
    listaid : ID
    '''
    # Base case of the identifier list.
    h.reporteGramatical1 +="listaid ::= ID\n"
    h.reporteGramatical2 +="t[0]=[t[1]]"
    # Fix: the recursive rule (p_listaID) does `t[1].append(t[3])`, so this
    # base case must produce a *list*; the original assigned the bare
    # ExpresionIdentificador node, which would break that append (the logged
    # action "t[0]=[t[1]]" already documented the list intent).
    t[0]=[ExpresionIdentificador(t[1])]
#-----------------------------------------------------DROP BD--------------------------------------------------------------------
def p_dropBD_1(t):
    'dropBD : DROP DATABASE ID PUNTOYCOMA'
    # DROP DATABASE <name>;
    h.reporteGramatical1 +="dropBD ::= DROP DATABASE "+str(t[3])+" PUNTOYCOMA\n"
    h.reporteGramatical2 +="t[0]= DropDB(t[3])\n"
    t[0]= DropDB(t[3])
def p_dropBD_2(t):
    'dropBD : DROP DATABASE IF EXISTS ID PUNTOYCOMA'
    # DROP DATABASE IF EXISTS <name>;
    # NOTE(review): t[3] is the literal 'IF' keyword and t[5] the name —
    # confirm DropDBIF really expects the keyword as its first argument.
    h.reporteGramatical1 +="dropBD ::= DROP DATABASE IF EXISTS "+str(t[5])+" PUNTOYCOMA\n"
    h.reporteGramatical2 +="t[0]= DropDBIF(t[3],t[5])\n"
    t[0]= DropDBIF(t[3],t[5])
#-----------------------------------------------------OPERACIONES Y EXPRESIONES--------------------------------------------------------------------
def p_operacion(t):
    '''operacion : operacion MAS operacion
                | operacion MENOS operacion
                | operacion POR operacion
                | operacion DIV operacion
                | operacion RESIDUO operacion
                | operacion POTENCIA operacion
                | operacion AND operacion
                | operacion OR operacion
                | operacion SIMBOLOOR2 operacion
                | operacion SIMBOLOOR operacion
                | operacion SIMBOLOAND2 operacion
                | operacion DESPLAZAMIENTOIZQUIERDA operacion
                | operacion DESPLAZAMIENTODERECHA operacion
                | operacion IGUAL operacion
                | operacion IGUALIGUAL operacion
                | operacion NOTEQUAL operacion
                | operacion MAYORIGUAL operacion
                | operacion MENORIGUAL operacion
                | operacion MAYOR operacion
                | operacion MENOR operacion
                | operacion DIFERENTE operacion
                | PARENTESISIZQUIERDA operacion PARENTESISDERECHA
    '''
    # Binary expression builder. The branch is chosen by the *lexeme text* of
    # the operator (t[2]), not the token name, so the token-name/token-text
    # pairing in the lexer does not affect which node is built here.
    # --------------------------------------------------------------------------------------------------------------
    if t[2]=='+':
        h.reporteGramatical1 +="operacion ::= operacion MAS operacion\n"
        h.reporteGramatical2 +="t[0]=ExpresionAritmetica(t[1],t[3],OPERACION_ARITMETICA.MAS)\n"
        t[0]=ExpresionAritmetica(t[1],t[3],OPERACION_ARITMETICA.MAS)
    # --------------------------------------------------------------------------------------------------------------
    elif t[2]=='-':
        h.reporteGramatical1 +="operacion ::= operacion MENOS operacion\n"
        h.reporteGramatical2 +="t[0]=ExpresionAritmetica(t[1],t[3],OPERACION_ARITMETICA.MENOS)\n"
        t[0]=ExpresionAritmetica(t[1],t[3],OPERACION_ARITMETICA.MENOS)
    # --------------------------------------------------------------------------------------------------------------
    elif t[2]=='*':
        h.reporteGramatical1 +="operacion ::= operacion POR operacion\n"
        h.reporteGramatical2 +="t[0]=ExpresionAritmetica(t[1],t[3],OPERACION_ARITMETICA.POR)\n"
        t[0]=ExpresionAritmetica(t[1],t[3],OPERACION_ARITMETICA.POR)
    # --------------------------------------------------------------------------------------------------------------
    elif t[2]=='/':
        h.reporteGramatical1 +="operacion ::= operacion DIV operacion\n"
        h.reporteGramatical2 +="t[0]=ExpresionAritmetica(t[1],t[3],OPERACION_ARITMETICA.DIVIDIDO)\n"
        t[0]=ExpresionAritmetica(t[1],t[3],OPERACION_ARITMETICA.DIVIDIDO)
    # --------------------------------------------------------------------------------------------------------------
    elif t[2]=='%':
        h.reporteGramatical1 +="operacion ::= operacion RESIDUO operacion\n"
        h.reporteGramatical2 +="t[0]=ExpresionAritmetica(t[1],t[3],OPERACION_ARITMETICA.MODULO)\n"
        t[0]=ExpresionAritmetica(t[1],t[3],OPERACION_ARITMETICA.MODULO)
    # --------------------------------------------------------------------------------------------------------------
    elif t[2]=='^':
        print("entra a la potencia")
        h.reporteGramatical1 +="operacion ::= operacion POTENCIA operacion\n"
        h.reporteGramatical2 +="t[0]=ExpresionAritmetica(t[1],t[3],OPERACION_ARITMETICA.POTENCIA)\n"
        t[0]=ExpresionAritmetica(t[1],t[3],OPERACION_ARITMETICA.POTENCIA)
    # --------------------------------------------------------------------------------------------------------------
    elif t[2]=="AND":
        h.reporteGramatical1 +="operacion ::= operacion AND operacion\n"
        h.reporteGramatical2 +="t[0]=ExpresionLogica(t[1],t[3],OPERACION_LOGICA.AND)\n"
        t[0]=ExpresionLogica(t[1],t[3],OPERACION_LOGICA.AND)
    # --------------------------------------------------------------------------------------------------------------
    elif t[2]=="OR":
        h.reporteGramatical1 +="operacion ::= operacion OR operacion\n"
        h.reporteGramatical2 +="t[0]=ExpresionLogica(t[1],t[3],OPERACION_LOGICA.OR)\n"
        t[0]=ExpresionLogica(t[1],t[3],OPERACION_LOGICA.OR)
    # --------------------------------------------------------------------------------------------------------------
    elif t[2]=='|':
        h.reporteGramatical1 +="operacion ::= operacion | operacion\n"
        h.reporteGramatical2 +="t[0]=ExpresionLogica(t[1],t[3],OPERACION_LOGICA.OR)\n"
        t[0]=ExpresionLogica(t[1],t[3],OPERACION_LOGICA.OR)
    # --------------------------------------------------------------------------------------------------------------
    elif t[2]=='||':
        # NOTE(review): the lexer comment calls '||' string concatenation, but
        # here it builds a logical OR node — confirm which is intended.
        h.reporteGramatical1 +="operacion ::= operacion || operacion\n"
        h.reporteGramatical2 +="t[0]=ExpresionLogica(t[1],t[3],OPERACION_LOGICA.OR)\n"
        t[0]=ExpresionLogica(t[1],t[3],OPERACION_LOGICA.OR)
    # --------------------------------------------------------------------------------------------------------------
    elif t[2]=='&':
        h.reporteGramatical1 +="operacion ::= operacion & operacion\n"
        h.reporteGramatical2 +="t[0]=ExpresionLogica(t[1],t[3],OPERACION_LOGICA.AND)\n"
        t[0]=ExpresionLogica(t[1],t[3],OPERACION_LOGICA.AND)
    # --------------------------------------------------------------------------------------------------------------
    elif t[2]=='<<':
        print(t[2])
        h.reporteGramatical1 +="operacion ::= operacion DESPLAZAMIENTOIZQUIERDA operacion\n"
        h.reporteGramatical2 +="t[0]=ExpresionBIT(t[1],t[3],OPERACION_BIT.DESPLAZAMIENTO_IZQUIERDA)\n"
        t[0]=ExpresionBIT(t[1],t[3],OPERACION_BIT.DESPLAZAMIENTO_IZQUIERDA)
    # --------------------------------------------------------------------------------------------------------------
    elif t[2]=='>>':
        h.reporteGramatical1 +="operacion ::= operacion DESPLAZAMIENTODERECHA operacion\n"
        h.reporteGramatical2 +="t[0]=ExpresionBIT(t[1],t[3],OPERACION_BIT.DESPLAZAMIENTO_DERECHA)\n"
        t[0]=ExpresionBIT(t[1],t[3],OPERACION_BIT.DESPLAZAMIENTO_DERECHA)
    # --------------------------------------------------------------------------------------------------------------
    elif t[2]=='=':
        # NOTE(review): '=' builds operacionDelete(t[1],t[3],t[1]) rather than
        # a relational node, and passes t[1] twice — confirm this is intended
        # (it appears tied to the DELETE/UPDATE condition handling).
        t[0]=operacionDelete(t[1],t[3],t[1])
        h.reporteGramatical1 +="operacion ::= operacion IGUAL operacion\n"
    # --------------------------------------------------------------------------------------------------------------
    elif t[2]=='==':
        h.reporteGramatical1 +="operacion ::= operacion IGUALIGUAL operacion\n"
        h.reporteGramatical2 +="t[0]=ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.IGUAL_IGUAL)\n"
        t[0]=ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.IGUAL_IGUAL)
    # --------------------------------------------------------------------------------------------------------------
    elif t[2]=='!=':
        h.reporteGramatical1 +="operacion ::= operacion NOTEQUAL operacion\n"
        h.reporteGramatical2 +="t[0]=ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.NO_IGUAL)\n"
        t[0]=ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.NO_IGUAL)
    # --------------------------------------------------------------------------------------------------------------
    elif t[2]=='>=':
        h.reporteGramatical1 +="operacion ::= operacion MAYORIGUAL operacion\n"
        h.reporteGramatical2 +="t[0]=ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.MAYOR_IGUAL)\n"
        t[0]=ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.MAYOR_IGUAL)
    # --------------------------------------------------------------------------------------------------------------
    elif t[2]=='<=':
        h.reporteGramatical1 +="operacion ::= operacion MENORIGUAL operacion\n"
        h.reporteGramatical2 +="t[0]=ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.MENOR_IGUAL)\n"
        t[0]=ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.MENOR_IGUAL)
    # --------------------------------------------------------------------------------------------------------------
    elif t[2]=='>':
        h.reporteGramatical1 +="operacion ::= operacion MAYOR operacion\n"
        h.reporteGramatical2 +="t[0]=ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.MAYOR)\n"
        t[0]=ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.MAYOR)
    # --------------------------------------------------------------------------------------------------------------
    elif t[2]=='<':
        h.reporteGramatical1 +="operacion ::= operacion MENOR operacion\n"
        h.reporteGramatical2 +="t[0]=ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.MENOR)\n"
        t[0]=ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.MENOR)
    # --------------------------------------------------------------------------------------------------------------
    elif t[2]=='<>':
        h.reporteGramatical1 +="operacion ::= operacion DIFERENTE operacion\n"
        h.reporteGramatical2 +="t[0]=ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.DIFERENTE)\n"
        t[0]=ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.DIFERENTE)
    # --------------------------------------------------------------------------------------------------------------
    else:
        # Parenthesized expression: unwrap.
        h.reporteGramatical1 +="operacion ::= PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
        t[0]=t[2]
    # --------------------------------------------------------------------------------------------------------------
# --------------------------------------------------------------------------------------------------------------
def p_operacion_menos_unario(t):
    '''operacion : MENOS ENTERO %prec UMINUS
                | MENOS DECIMAL %prec UMINUS
    '''
    # Unary minus, restricted to numeric literals.
    # NOTE(review): the report string says "MENOS operacion" but the rule only
    # accepts ENTERO/DECIMAL — confirm which is intended.
    h.reporteGramatical1 +="operacion ::= MENOS operacion %prec UMINUS\n"
    h.reporteGramatical2 +="t[0]=ExpresionNegativo(t[2])\n"
    t[0]=ExpresionNegativo(t[2])
# --------------------------------------------------------------------------------------------------------------
def p_operacion_not_unario(t):
    'operacion : NOT operacion %prec UNOT'
    # Logical negation with UNOT precedence.
    h.reporteGramatical1 +="operacion ::= NOT operacion %prec UNOT\n"
    h.reporteGramatical2 +="t[0]=ExpresionNOT(t[2])\n"
    t[0]=ExpresionNOT(t[2])
# --------------------------------------------------------------------------------------------------------------
def p_operacion_funcion(t):
    'operacion : funcionBasica'
    # A built-in function call is itself an expression.
    h.reporteGramatical1 +="operacion ::= funcionBasica\n"
    h.reporteGramatical2 +="t[0]=t[1]\n"
    t[0]=t[1]
# --------------------------------------------------------------------------------------------------------------
def p_operacion_final(t):
    'operacion : final'
    # Pass-through: a terminal value is already a complete expression node.
    # Fix: the original assigned t[0]=t[1] twice (once before the report lines
    # and once after); the redundant first assignment is removed.
    h.reporteGramatical1 +="operacion ::= final\n"
    h.reporteGramatical2 +="t[0]=t[1]\n"
    t[0]=t[1]
#-----------------------------------------------------FUNCIONES MATEMATICAS--------------------------------------------------------------------
def p_funcion_basica(t):
    '''funcionBasica : ABS PARENTESISIZQUIERDA operacion PARENTESISDERECHA
                    | CBRT PARENTESISIZQUIERDA operacion PARENTESISDERECHA
                    | CEIL PARENTESISIZQUIERDA operacion PARENTESISDERECHA
                    | CEILING PARENTESISIZQUIERDA operacion PARENTESISDERECHA
                    | DEGREES PARENTESISIZQUIERDA operacion PARENTESISDERECHA
                    | DIV PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA
                    | EXP PARENTESISIZQUIERDA operacion PARENTESISDERECHA
                    | FACTORIAL PARENTESISIZQUIERDA operacion PARENTESISDERECHA
                    | FLOOR PARENTESISIZQUIERDA operacion PARENTESISDERECHA
                    | GCD PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA
                    | LCM PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA
                    | LN PARENTESISIZQUIERDA operacion PARENTESISDERECHA
                    | LOG PARENTESISIZQUIERDA operacion PARENTESISDERECHA
                    | MOD PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA
                    | PI PARENTESISIZQUIERDA PARENTESISDERECHA
                    | POWER PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA
                    | RADIANS PARENTESISIZQUIERDA operacion PARENTESISDERECHA
                    | ROUND PARENTESISIZQUIERDA operacion PARENTESISDERECHA
                    | SIGN PARENTESISIZQUIERDA operacion PARENTESISDERECHA
                    | SQRT PARENTESISIZQUIERDA operacion PARENTESISDERECHA
                    | TRIM_SCALE PARENTESISIZQUIERDA operacion PARENTESISDERECHA
                    | TRUNC PARENTESISIZQUIERDA operacion PARENTESISDERECHA
                    | WIDTH_BUCKET PARENTESISIZQUIERDA operacion COMA operacion COMA operacion COMA operacion PARENTESISDERECHA
                    | RANDOM PARENTESISIZQUIERDA PARENTESISDERECHA
                    | ACOS PARENTESISIZQUIERDA operacion PARENTESISDERECHA
                    | ACOSD PARENTESISIZQUIERDA operacion PARENTESISDERECHA
                    | ASIN PARENTESISIZQUIERDA operacion PARENTESISDERECHA
                    | ASIND PARENTESISIZQUIERDA operacion PARENTESISDERECHA
                    | ATAN PARENTESISIZQUIERDA operacion PARENTESISDERECHA
                    | ATAND PARENTESISIZQUIERDA operacion PARENTESISDERECHA
                    | ATAN2 PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA
                    | ATAN2D PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA
                    | COS PARENTESISIZQUIERDA operacion PARENTESISDERECHA
                    | COSD PARENTESISIZQUIERDA operacion PARENTESISDERECHA
                    | COT PARENTESISIZQUIERDA operacion PARENTESISDERECHA
                    | COTD PARENTESISIZQUIERDA operacion PARENTESISDERECHA
                    | SIN PARENTESISIZQUIERDA operacion PARENTESISDERECHA
                    | SIND PARENTESISIZQUIERDA operacion PARENTESISDERECHA
                    | TAN PARENTESISIZQUIERDA operacion PARENTESISDERECHA
                    | TAND PARENTESISIZQUIERDA operacion PARENTESISDERECHA
                    | SINH PARENTESISIZQUIERDA operacion PARENTESISDERECHA
                    | GREATEST PARENTESISIZQUIERDA select_list PARENTESISDERECHA
                    | LEAST PARENTESISIZQUIERDA select_list PARENTESISDERECHA
                    | NOW PARENTESISIZQUIERDA PARENTESISDERECHA
                    | COSH PARENTESISIZQUIERDA operacion PARENTESISDERECHA
                    | TANH PARENTESISIZQUIERDA operacion PARENTESISDERECHA
                    | ASINH PARENTESISIZQUIERDA operacion PARENTESISDERECHA
                    | ACOSH PARENTESISIZQUIERDA operacion PARENTESISDERECHA
                    | ATANH PARENTESISIZQUIERDA operacion PARENTESISDERECHA
                    | LENGTH PARENTESISIZQUIERDA operacion PARENTESISDERECHA
                    | TRIM PARENTESISIZQUIERDA opcionTrim operacion FROM operacion PARENTESISDERECHA
                    | GET_BYTE PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA
                    | MD5 PARENTESISIZQUIERDA operacion PARENTESISDERECHA
                    | SET_BYTE PARENTESISIZQUIERDA operacion COMA operacion COMA operacion PARENTESISDERECHA
                    | SHA256 PARENTESISIZQUIERDA operacion PARENTESISDERECHA
                    | SUBSTR PARENTESISIZQUIERDA operacion COMA operacion COMA operacion PARENTESISDERECHA
                    | CONVERT PARENTESISIZQUIERDA operacion COMA operacion COMA operacion PARENTESISDERECHA
                    | ENCODE PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA
                    | DECODE PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA
                    | AVG PARENTESISIZQUIERDA operacion PARENTESISDERECHA
                    | SUM PARENTESISIZQUIERDA operacion PARENTESISDERECHA
                    | ID PARENTESISIZQUIERDA opcionTiempo FROM TIMESTAMP operacion PARENTESISDERECHA
                    | ID PARENTESISIZQUIERDA operacion COMA INTERVAL operacion PARENTESISDERECHA
                    | ID PARENTESISIZQUIERDA operacion PARENTESISDERECHA
    '''
    # Dispatches on the lexeme of the first token (case-insensitive) and builds
    # the matching AST node. EXTRACT, DATE_PART, CURRENT_DATE and CURRENT_TIME
    # are reached through the generic ID productions at the end of the rule.
    # Fixes applied: removed a leftover debug print in the DIV branch; corrected
    # report typos ("[0]=", "tt[0]=") and several copy-pasted report productions
    # (DEGREES, DIV, ATAND, ATAN2, ATAN2D) that named the wrong rule.
    if t[1].upper()=="ABS":
        h.reporteGramatical1 +="funcionBasica ::= ABS PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionABS(t[3])\n"
        t[0]=ExpresionABS(t[3])
    elif t[1].upper()=="CBRT":
        h.reporteGramatical1 +="funcionBasica ::= CBRT PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionCBRT(t[3])\n"
        t[0]=ExpresionCBRT(t[3])
    elif t[1].upper()=="CEIL":
        h.reporteGramatical1 +="funcionBasica ::= CEIL PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionCEIL(t[3])\n"
        t[0]=ExpresionCEIL(t[3])
    elif t[1].upper()=="CEILING":
        h.reporteGramatical1 +="funcionBasica ::= CEILING PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionCEILING(t[3])\n"
        t[0]=ExpresionCEILING(t[3])
    elif t[1].upper()=="DEGREES":
        t[0]=ExpresionDEGREES(t[3])
        h.reporteGramatical1 +="funcionBasica ::= DEGREES PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionDEGREES(t[3])\n"
    elif t[1].upper()=="DIV":
        t[0]=ExpresionDIV(t[3],t[5])
        h.reporteGramatical1 +="funcionBasica ::= DIV PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionDIV(t[3],t[5])\n"
    elif t[1].upper()=="EXP":
        t[0]=ExpresionEXP(t[3])
        h.reporteGramatical1 +="funcionBasica ::= EXP PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionEXP(t[3])\n"
    elif t[1].upper()=="FACTORIAL":
        t[0]=ExpresionFACTORIAL(t[3])
        h.reporteGramatical1 +="funcionBasica ::= FACTORIAL PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionFACTORIAL(t[3])\n"
    elif t[1].upper()=="FLOOR":
        t[0]=ExpresionFLOOR(t[3])
        h.reporteGramatical1 +="funcionBasica ::= FLOOR PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionFLOOR(t[3])\n"
    elif t[1].upper()=="GCD":
        t[0]=ExpresionGCD(t[3],t[5])
        h.reporteGramatical1 +="funcionBasica ::= GCD PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionGCD(t[3],t[5])\n"
    # NOTE(review): the grammar also accepts LCM and TRIM_SCALE, but there is
    # no branch for them here, so they fall through to the final else and
    # t[0] stays None — confirm whether handlers are missing upstream.
    elif t[1].upper()=="LN":
        t[0]=ExpresionLN(t[3])
        h.reporteGramatical1 +="funcionBasica ::= LN PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionLN(t[3])\n"
    elif t[1].upper()=="LOG":
        t[0]=ExpresionLOG(t[3])
        h.reporteGramatical1 +="funcionBasica ::= LOG PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionLOG(t[3])\n"
    elif t[1].upper()=="MOD":
        t[0]=ExpresionMOD(t[3],t[5])
        h.reporteGramatical1 +="funcionBasica ::= MOD PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionMOD(t[3],t[5])\n"
    elif t[1].upper()=="PI":
        t[0]=ExpresionPI(1)
        h.reporteGramatical1 +="funcionBasica ::= PI PARENTESISIZQUIERDA PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionPI(1)\n"
    elif t[1].upper()=="POWER":
        t[0]=ExpresionPOWER(t[3],t[5])
        h.reporteGramatical1 +="funcionBasica ::= POWER PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionPOWER(t[3],t[5])\n"
    elif t[1].upper()=="RADIANS":
        t[0]=ExpresionRADIANS(t[3])
        h.reporteGramatical1 +="funcionBasica ::= RADIANS PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionRADIANS(t[3])\n"
    elif t[1].upper()=="ROUND":
        t[0]=ExpresionROUND(t[3])
        h.reporteGramatical1 +="funcionBasica ::= ROUND PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionROUND(t[3])\n"
    elif t[1].upper()=="SIGN":
        t[0]=ExpresionSIGN(t[3])
        h.reporteGramatical1 +="funcionBasica ::= SIGN PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionSIGN(t[3])\n"
    elif t[1].upper()=="SQRT":
        t[0]=ExpresionSQRT(t[3])
        h.reporteGramatical1 +="funcionBasica ::= SQRT PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionSQRT(t[3])\n"
    elif t[1].upper()=="TRUNC":
        t[0]=ExpresionTRUNC(t[3])
        h.reporteGramatical1 +="funcionBasica ::= TRUNC PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionTRUNC(t[3])\n"
    elif t[1].upper()=="WIDTH_BUCKET":
        t[0]=ExpresionWIDTHBUCKET(t[3],t[5],t[7],t[9])
        h.reporteGramatical1 +="funcionBasica ::= WIDTH_BUCKET PARENTESISIZQUIERDA operacion COMA operacion COMA operacion COMA operacion PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionWIDTHBUCKET(t[3],t[5],t[7],t[9])\n"
    elif t[1].upper()=="RANDOM":
        t[0]=ExpresionRANDOM(1)
        h.reporteGramatical1 +="funcionBasica ::= RANDOM PARENTESISIZQUIERDA PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionRANDOM(1)\n"
    elif t[1].upper()=="ACOS":
        t[0]=ExpresionACOS(t[3])
        h.reporteGramatical1 +="funcionBasica ::= ACOS PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionACOS(t[3])\n"
    elif t[1].upper()=="ACOSD":
        t[0]=ExpresionACOSD(t[3])
        h.reporteGramatical1 +="funcionBasica ::= ACOSD PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionACOSD(t[3])\n"
    elif t[1].upper()=="ASIN":
        t[0]=ExpresionASIN(t[3])
        h.reporteGramatical1 +="funcionBasica ::= ASIN PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionASIN(t[3])\n"
    elif t[1].upper()=="ASIND":
        t[0]=ExpresionASIND(t[3])
        h.reporteGramatical1 +="funcionBasica ::= ASIND PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionASIND(t[3])\n"
    elif t[1].upper()=="ATAN":
        t[0]=ExpresionATAN(t[3])
        h.reporteGramatical1 +="funcionBasica ::= ATAN PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionATAN(t[3])\n"
    elif t[1].upper()=="ATAND":
        t[0]=ExpresionATAND(t[3])
        h.reporteGramatical1 +="funcionBasica ::= ATAND PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionATAND(t[3])\n"
    elif t[1].upper()=="ATAN2":
        t[0]=ExpresionATAN2(t[3],t[5])
        h.reporteGramatical1 +="funcionBasica ::= ATAN2 PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionATAN2(t[3],t[5])\n"
    elif t[1].upper()=="ATAN2D":
        t[0]=ExpresionATAN2D(t[3],t[5])
        h.reporteGramatical1 +="funcionBasica ::= ATAN2D PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionATAN2D(t[3],t[5])\n"
    elif t[1].upper()=="COS":
        t[0]=ExpresionCOS(t[3])
        h.reporteGramatical1 +="funcionBasica ::= COS PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionCOS(t[3])\n"
    elif t[1].upper()=="COSD":
        t[0]=ExpresionCOSD(t[3])
        h.reporteGramatical1 +="funcionBasica ::= COSD PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionCOSD(t[3])\n"
    elif t[1].upper()=="COT":
        t[0]=ExpresionCOT(t[3])
        h.reporteGramatical1 +="funcionBasica ::= COT PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionCOT(t[3])\n"
    elif t[1].upper()=="COTD":
        t[0]=ExpresionCOTD(t[3])
        h.reporteGramatical1 +="funcionBasica ::= COTD PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionCOTD(t[3])\n"
    elif t[1].upper()=="SIN":
        t[0]=ExpresionSIN(t[3])
        h.reporteGramatical1 +="funcionBasica ::= SIN PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionSIN(t[3])\n"
    elif t[1].upper()=="SIND":
        t[0]=ExpresionSIND(t[3])
        h.reporteGramatical1 +="funcionBasica ::= SIND PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionSIND(t[3])\n"
    elif t[1].upper()=="TAN":
        t[0]=ExpresionTAN(t[3])
        h.reporteGramatical1 +="funcionBasica ::= TAN PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionTAN(t[3])\n"
    elif t[1].upper()=="TAND":
        t[0]=ExpresionTAND(t[3])
        h.reporteGramatical1 +="funcionBasica ::= TAND PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionTAND(t[3])\n"
    elif t[1].upper()=="SINH":
        t[0]=ExpresionSINH(t[3])
        h.reporteGramatical1 +="funcionBasica ::= SINH PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionSINH(t[3])\n"
    elif t[1].upper()=="COSH":
        t[0]=ExpresionCOSH(t[3])
        h.reporteGramatical1 +="funcionBasica ::= COSH PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionCOSH(t[3])\n"
    elif t[1].upper()=="TANH":
        t[0]=ExpresionTANH(t[3])
        h.reporteGramatical1 +="funcionBasica ::= TANH PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionTANH(t[3])\n"
    elif t[1].upper()=="ASINH":
        t[0]=ExpresionASINH(t[3])
        h.reporteGramatical1 +="funcionBasica ::= ASINH PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionASINH(t[3])\n"
    elif t[1].upper()=="ACOSH":
        t[0]=ExpresionACOSH(t[3])
        h.reporteGramatical1 +="funcionBasica ::= ACOSH PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionACOSH(t[3])\n"
    elif t[1].upper()=="ATANH":
        t[0]=ExpresionATANH(t[3])
        h.reporteGramatical1 +="funcionBasica ::= ATANH PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionATANH(t[3])\n"
    elif t[1].upper()=="GREATEST":
        t[0]=ExpresionGREATEST(t[3])
        h.reporteGramatical1 +="funcionBasica ::= GREATEST PARENTESISIZQUIERDA select_list PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionGREATEST(t[3])\n"
    elif t[1].upper()=="LEAST":
        t[0]=ExpresionLEAST(t[3])
        h.reporteGramatical1 +="funcionBasica ::= LEAST PARENTESISIZQUIERDA select_list PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionLEAST(t[3])\n"
    elif t[1].upper()=="NOW":
        t[0]=ExpresionNOW(1)
        h.reporteGramatical1 +="funcionBasica ::= NOW PARENTESISIZQUIERDA PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionNOW(1)\n"
    elif t[1].upper()=="LENGTH":
        h.reporteGramatical1 +="funcionBasica ::= LENGTH PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionLENGTH(t[3])\n"
        t[0]=ExpresionLENGTH(t[3])
    elif t[1].upper()=="TRIM":
        h.reporteGramatical1 +="funcionBasica ::= TRIM PARENTESISIZQUIERDA opcionTrim operacion FROM operacion PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionTRIM(t[3],t[4],t[6])\n"
        t[0]=ExpresionTRIM(t[3],t[4],t[6])
    # NOTE(review): the following branches only log the production and build no
    # AST node (t[0] stays None): GET_BYTE, SET_BYTE, CONVERT, ENCODE, DECODE,
    # AVG, SUM, DATE_PART — confirm whether this is intentional.
    elif t[1].upper()=="GET_BYTE":
        h.reporteGramatical1 +="funcionBasica ::= GET_BYTE PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA\n"
    elif t[1].upper()=="MD5":
        h.reporteGramatical1 +="funcionBasica ::= MD5 PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionMD5(t[3])\n"
        t[0]=ExpresionMD5(t[3])
    elif t[1].upper()=="SET_BYTE":
        h.reporteGramatical1 +="funcionBasica ::= SET_BYTE PARENTESISIZQUIERDA operacion COMA operacion COMA operacion PARENTESISDERECHA\n"
    elif t[1].upper()=="SHA256":
        h.reporteGramatical1 +="funcionBasica ::= SHA256 PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionSHA256(t[3])\n"
        t[0]=ExpresionSHA256(t[3])
    elif t[1].upper()=="SUBSTR":
        h.reporteGramatical1 +="funcionBasica ::= SUBSTR PARENTESISIZQUIERDA operacion COMA operacion COMA operacion PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionSUBSTR(t[3],t[5],t[7])\n"
        t[0]=ExpresionSUBSTR(t[3],t[5],t[7])
    elif t[1].upper()=="CONVERT":
        h.reporteGramatical1 +="funcionBasica ::= CONVERT PARENTESISIZQUIERDA operacion COMA operacion COMA operacion PARENTESISDERECHA\n"
    elif t[1].upper()=="ENCODE":
        h.reporteGramatical1 +="funcionBasica ::= ENCODE PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA\n"
    elif t[1].upper()=="DECODE":
        h.reporteGramatical1 +="funcionBasica ::= DECODE PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA\n"
    elif t[1].upper()=="AVG":
        h.reporteGramatical1 +="funcionBasica ::= AVG PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
    elif t[1].upper()=="SUM":
        h.reporteGramatical1 +="funcionBasica ::= SUM PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
    elif t[1].upper()=="EXTRACT":
        h.reporteGramatical1 +="funcionBasica ::= EXTRACT PARENTESISIZQUIERDA opcionTiempo FROM TIMESTAMP operacion PARENTESISDERECHA\n"
        h.reporteGramatical2 +="t[0]=ExpresionEXTRACT(t[3],t[6])\n"
        t[0]=ExpresionEXTRACT(t[3],t[6])
    elif t[1].upper()=="DATE_PART":
        h.reporteGramatical1 +="funcionBasica ::= DATE_PART PARENTESISIZQUIERDA operacion COMA INTERVAL operacion PARENTESISDERECHA\n"
    elif t[1].upper()=="CURRENT_DATE":
        h.reporteGramatical1 +="funcionBasica ::= CURRENT_DATE \n"
        h.reporteGramatical2 +="t[0]=ExpresionCurrentDate(1)\n"
        t[0]=ExpresionCurrentDate(1)
    elif t[1].upper()=="CURRENT_TIME":
        h.reporteGramatical1 +="funcionBasica ::= CURRENT_TIME\n"
        h.reporteGramatical2 +="t[0]=ExpresionCurrentTime(1)\n"
        t[0]=ExpresionCurrentTime(1)
    else:
        print("no entra a ninguna en funcionBasica")
def p_funcion_basica_1(t):
    'funcionBasica : SUBSTRING PARENTESISIZQUIERDA operacion FROM operacion FOR operacion PARENTESISDERECHA'
    # SUBSTRING(str FROM start FOR count): three-argument substring node.
    h.reporteGramatical1 += "funcionBasica ::= SUBSTRING PARENTESISIZQUIERDA operacion FROM operacion FOR operacion PARENTESISDERECHA\n"
    h.reporteGramatical2 += "t[0]=ExpresionSUBSTRINGA(t[3],t[5],t[7])\n"
    cadena, inicio, cantidad = t[3], t[5], t[7]
    t[0] = ExpresionSUBSTRINGA(cadena, inicio, cantidad)
def p_funcion_basica_2(t):
    'funcionBasica : SUBSTRING PARENTESISIZQUIERDA operacion FROM operacion PARENTESISDERECHA'
    # SUBSTRING(str FROM start): substring from the given position onward.
    h.reporteGramatical1 += "funcionBasica ::= SUBSTRING PARENTESISIZQUIERDA operacion FROM operacion PARENTESISDERECHA\n"
    h.reporteGramatical2 += "t[0]=ExpresionSUBSTRINGB(t[3],t[5])\n"
    cadena, inicio = t[3], t[5]
    t[0] = ExpresionSUBSTRINGB(cadena, inicio)
def p_funcion_basica_3(t):
    'funcionBasica : SUBSTRING PARENTESISIZQUIERDA operacion FOR operacion PARENTESISDERECHA'
    # SUBSTRING(str FOR count): prefix of the given length.
    h.reporteGramatical1 += "funcionBasica ::= SUBSTRING PARENTESISIZQUIERDA operacion FOR operacion PARENTESISDERECHA\n"
    h.reporteGramatical2 += "t[0]=ExpresionSUBSTRINGC(t[3],t[5])\n"
    cadena, cantidad = t[3], t[5]
    t[0] = ExpresionSUBSTRINGC(cadena, cantidad)
def p_opcionTrim(t):
    ''' opcionTrim : LEADING
                | TRAILING
                | BOTH
    '''
    # TRIM mode flag, encoded as a string code ("1"=LEADING, "2"=TRAILING,
    # "3"=BOTH), mirroring p_opcionTiempo's encoding.
    # Fixes: removed a duplicated generic report line emitted before the
    # per-branch report, and corrected the garbled nonterminal name
    # ("funcioopcionTrimnBasica") in the LEADING branch.
    if t[1].upper()=="LEADING":
        h.reporteGramatical1 +="opcionTrim ::= LEADING\n"
        h.reporteGramatical2 +="t[0]=ExpresionCadenas(1)\n"
        t[0]=ExpresionCadenas("1")
    elif t[1].upper()=="TRAILING":
        h.reporteGramatical1 +="opcionTrim ::= TRAILING\n"
        h.reporteGramatical2 +="t[0]=ExpresionCadenas(2)\n"
        t[0]=ExpresionCadenas("2")
    elif t[1].upper()=="BOTH":
        h.reporteGramatical1 +="opcionTrim ::= BOTH\n"
        h.reporteGramatical2 +="t[0]=ExpresionCadenas(3)\n"
        t[0]=ExpresionCadenas("3")
def p_opcionTiempo(t):
    '''opcionTiempo : YEAR
                | MONTH
                | DAY
                | HOUR
                | MINUTE
                | SECOND
    '''
    # Time-unit keyword for EXTRACT-style calls, mapped to its internal
    # numeric code (kept as a string, matching the original encoding).
    codigos = {
        "YEAR": "1",
        "MONTH": "2",
        "DAY": "3",
        "HOUR": "4",
        "MINUTE": "5",
        "SECOND": "6",
    }
    palabra = t[1].upper()
    codigo = codigos.get(palabra)
    if codigo is not None:
        h.reporteGramatical1 += "opcionTiempo ::= " + palabra + "\n"
        h.reporteGramatical2 += "t[0]=ExpresionCadenas(" + codigo + ")\n"
        t[0] = ExpresionCadenas(codigo)
#-----------------------------------------------------PRODUCCIONES TERMINALES--------------------------------------------------------------------
def p_final(t):
    '''final : DECIMAL
            | ENTERO'''
    # Numeric literal leaf node.
    valor = t[1]
    h.reporteGramatical1 += "final ::= Numero(" + str(valor) + ")\n"
    h.reporteGramatical2 += "t[0]=ExpresionNumero(t[1])\n"
    t[0] = ExpresionNumero(valor)
def p_final_id(t):
    'final : ID'
    # Identifier leaf node.
    # Fix: the original first set t[0] to the raw lexeme and then immediately
    # overwrote it with the AST node; the dead assignment is removed.
    h.reporteGramatical1 += "final ::= ID(" + str(t[1]) + ")\n"
    h.reporteGramatical2 += "t[0]=ExpresionIdentificador(t[1])\n"
    t[0] = ExpresionIdentificador(t[1])
def p_final_invocacion(t):
    'final : ID PUNTO ID'
    # Qualified reference (table.column).
    # Fix: the report named ExpresionInvocacion while the code actually builds
    # ExpresionLlamame; the report now matches the code.
    h.reporteGramatical1 += "final ::= ID(" + str(t[1]) + ") . ID(" + str(t[3]) + ")\n"
    h.reporteGramatical2 += "t[0] = ExpresionLlamame(t[1],t[3])\n"
    t[0] = ExpresionLlamame(t[1], t[3])
def p_final_invocacion_2(t):
    'final : ID PUNTO POR'
    # Qualified wildcard reference (table.*).
    # Fix: the report named ExpresionInvocacion while the code actually builds
    # ExpresionLlamame; the report now matches the code.
    h.reporteGramatical1 += "final ::= ID(" + str(t[1]) + ") . ID(" + str(t[3]) + ")\n"
    h.reporteGramatical2 += "t[0] = ExpresionLlamame(t[1],t[3])\n"
    t[0] = ExpresionLlamame(t[1], t[3])
def p_final_cadena(t):
    'final : CADENA'
    # String literal leaf node.
    # Fix: the original assigned t[0] the raw lexeme and then immediately
    # overwrote it with ExpresionCadenas; the dead assignment is removed.
    h.reporteGramatical1 += "final ::= CADENA (" + t[1] + ")\n"
    h.reporteGramatical2 += "t[0]=ExpresionCadenas(t[1])\n"
    t[0] = ExpresionCadenas(t[1])
#-----------------------------------------------------INSERT BD--------------------------------------------------------------------
def p_insertBD_1(t):
    'insertinBD : INSERT INTO ID VALUES PARENTESISIZQUIERDA listaParam PARENTESISDERECHA PUNTOYCOMA'
    # INSERT without an explicit column list: column argument is None.
    t[0] = InsertinDataBases(t[3], None, t[6])
    h.reporteGramatical1 += "insertinBD ::= INSERT INTO ID VALUES PARENTESISIZQUIERDA listaParam PARENTESISDERECHA PUNTOYCOMA\n"
    # Fix: report previously misspelled the constructor ("InsertinDabaBases")
    # and omitted the None columns argument.
    h.reporteGramatical2 += "t[0] = InsertinDataBases(t[3],None,t[6])\n"
def p_insertBD_2(t):
    'insertinBD : INSERT INTO ID PARENTESISIZQUIERDA listaParam PARENTESISDERECHA VALUES PARENTESISIZQUIERDA listaParam PARENTESISDERECHA PUNTOYCOMA'
    # INSERT with an explicit column list: table, columns, values.
    # Fix: removed a leftover debug print(t[9]).
    t[0] = InsertinDataBases(t[3], t[5], t[9])
    h.reporteGramatical1 += "insertinBD ::= INSERT INTO ID PARENTESISIZQUIERDA listaParam PARENTESISDERECHA VALUES PARENTESISIZQUIERDA listaParam PARENTESISDERECHA PUNTOYCOMA\n"
    h.reporteGramatical2 += "t[0] = InsertinDataBases(t[3],t[5],t[9])\n"
# SE SEPARO LA LISTA EN 2 METODOS PARA MANEJAR DATOS
def p_listaParam(t):
    '''listaParam : listaParam COMA operacion
    '''
    # Left-recursive parameter list: append the new element in place.
    # Fix: the report named "insertinBD" for this listaParam production.
    t[1].append(t[3])
    t[0] = t[1]
    h.reporteGramatical1 += "listaParam ::= listaParam COMA operacion\n"
    h.reporteGramatical2 += "t[0]=t[1]\n"
def p_listaParam_2(t):
    '''listaParam : operacion
    '''
    # Base case: start the parameter list with a single element.
    h.reporteGramatical1 += "listaParam ::= operacion\n"
    h.reporteGramatical2 += "t[0]=[t[1]]\n"
    t[0] = [t[1]]
#-----------------------------------------------------UPDATE BD--------------------------------------------------------------------
def p_updateBD(t):
    'updateinBD : UPDATE ID SET asignaciones WHERE asignaciones PUNTOYCOMA'
    # UPDATE table SET assignments WHERE condition-assignments.
    # Fixes: the second report line was appended to reporteGramatical1 instead
    # of reporteGramatical2 and misspelled the call ("UpdateinDabaBase(t[2].t[4]...").
    t[0] = UpdateinDataBase(t[2], t[4], t[6])
    h.reporteGramatical1 += "updateinBD ::= UPDATE ID SET asignaciones WHERE asignaciones PUNTOYCOMA\n"
    h.reporteGramatical2 += "t[0]=UpdateinDataBase(t[2],t[4],t[6])\n"
# SE SEPARO LA LISTA EN 2 METODOS PARA MANEJAR DATOS
def p_asignaciones(t):
    '''asignaciones : asignaciones COMA asigna
    '''
    # Left-recursive assignment list: grow the accumulated list in place.
    h.reporteGramatical1 += "asignaciones ::= asignaciones COMA asigna\n"
    h.reporteGramatical2 += "t[0]=t[1]\n"
    acumulado = t[1]
    acumulado.append(t[3])
    t[0] = acumulado
def p_asignaciones_2(t):
    '''asignaciones : asigna
    '''
    # Base case: a one-element assignment list.
    h.reporteGramatical1 += "asignaciones ::= asigna\n"
    h.reporteGramatical2 += "t[0]=[t[1]]\n"
    t[0] = [t[1]]
def p_asigna(t):
    'asigna : ID IGUAL operacion'
    # Single column = value assignment node.
    columna, valor = t[1], t[3]
    h.reporteGramatical1 += "asigna ::= ID IGUAL operacion\n"
    h.reporteGramatical2 += "t[0]=AsignacioninTable(t[1],t[3])\n"
    t[0] = AsignacioninTable(columna, valor)
#-----------------------------------------------------DELETE IN BD--------------------------------------------------------------------
def p_deleteinBD_1(t):
    'deleteinBD : DELETE FROM ID PUNTOYCOMA'
    # DELETE without WHERE: only the table name is propagated upward.
    nombre_tabla = t[3]
    h.reporteGramatical1 += "deleteinBD ::= DELETE FROM ID PUNTOYCOMA\n"
    h.reporteGramatical2 += "t[0]=t[3]\n"
    t[0] = nombre_tabla
def p_deleteinBD_2(t):
    'deleteinBD : DELETE FROM ID WHERE operacion PUNTOYCOMA'
    # DELETE with a WHERE condition.
    # Fix: the report said "WHERE asignaciones" but the rule matches
    # "WHERE operacion".
    t[0] = DeleteinDataBases(t[3], t[5])
    h.reporteGramatical1 += "deleteinBD ::= DELETE FROM ID WHERE operacion PUNTOYCOMA\n"
    h.reporteGramatical2 += "t[0]=DeleteinDataBases(t[3],t[5])\n"
#-----------------------------------------------------CREATE TABLE CON INHERITS-------------------------------------------------------
def p_inheritsBD(t):
    'inheritsBD : CREATE TABLE ID PARENTESISIZQUIERDA creaColumnas PARENTESISDERECHA INHERITS PARENTESISIZQUIERDA ID PARENTESISDERECHA PUNTOYCOMA'
    # CREATE TABLE ... INHERITS (parent): child table, parent table, columns.
    hija, padre, columnas = t[3], t[9], t[5]
    h.reporteGramatical1 += "inheritsBD ::= CREATE TABLE ID PARENTESISIZQUIERDA creaColumnas PARENTESISDERECHA INHERITS PARENTESISIZQUIERDA ID PARENTESISDERECHA PUNTOYCOMA\n"
    h.reporteGramatical2 += "t[0]=InheritsBD(t[3],t[9],t[5])\n"
    t[0] = InheritsBD(hija, padre, columnas)
#-----------------------------------------------------CREATE TABLE--------------------------------------------------------------------
def p_createTable(t):
    'createTable : CREATE TABLE ID PARENTESISIZQUIERDA creaColumnas PARENTESISDERECHA PUNTOYCOMA'
    # Plain CREATE TABLE: table name plus its column definitions.
    nombre, columnas = t[3], t[5]
    h.reporteGramatical1 += "createTable ::= CREATE TABLE ID PARENTESISIZQUIERDA creaColumnas PARENTESISDERECHA PUNTOYCOMA\n"
    h.reporteGramatical2 += "t[0]= CreateTable(t[3],t[5])\n"
    t[0] = CreateTable(nombre, columnas)
# --------------------------------------------------------------------------------------------------------------
# SE SEPARO LA LISTA EN 2 METODOS PARA MANEJAR DATOS
def p_creaColumna(t):
    '''creaColumnas : creaColumnas COMA Columna
    '''
    # Left-recursive column list: append the new column definition.
    # Fix: the second report line was truncated to "t[1]"; it now shows the
    # actual assignment like the sibling productions.
    t[1].append(t[3])
    t[0] = t[1]
    h.reporteGramatical1 += "creaColumnas ::= creaColumnas COMA Columna\n"
    h.reporteGramatical2 += "t[0]=t[1]\n"
def p_creaColumna_2(t):
    '''creaColumnas : Columna
    '''
    # Base case: a one-element column list.
    # Fixes: report named the wrong nonterminal ("createTable") and the
    # second report line was truncated to "[t[1]]".
    t[0] = [t[1]]
    h.reporteGramatical1 += "creaColumnas ::= Columna\n"
    h.reporteGramatical2 += "t[0]=[t[1]]\n"
# --------------------------------------------------------------------------------------------------------------
#INICIA LAS PRODUCCIONES DE COLUMNAS
def p_columna_1(t):
    'Columna : ID tipo'
    # Column with a type and no restrictions.
    # Fix: second report line was missing its trailing newline.
    t[0] = TipoAtributoTable(ColumnasTable(t[1], t[2], None), OPERACION_RESTRICCION_COLUMNA.COLUMNASINRESTRICCION)
    h.reporteGramatical1 += "Columna ::= ID tipo\n"
    h.reporteGramatical2 += "t[0]=TipoAtributoTable(ColumnasTable(t[1],t[2],None),OPERACION_RESTRICCION_COLUMNA.COLUMNASINRESTRICCION)\n"
def p_columna_2(t):
    'Columna : ID tipo paramOpcional'
    # Column with a type and a list of optional restrictions.
    # Fix: first report line was missing its trailing newline.
    t[0] = TipoAtributoTable(ColumnasTable(t[1], t[2], t[3]), OPERACION_RESTRICCION_COLUMNA.COLUMNACONRESTRICCION)
    h.reporteGramatical1 += "Columna ::= ID tipo paramOpcional\n"
    h.reporteGramatical2 += "t[0]=TipoAtributoTable(ColumnasTable(t[1],t[2],t[3]),OPERACION_RESTRICCION_COLUMNA.COLUMNACONRESTRICCION)\n"
def p_columna_3(t):
    'Columna : UNIQUE PARENTESISIZQUIERDA listaParam PARENTESISDERECHA'
    # Table-level UNIQUE constraint over a column list.
    # Fix: report used ":" instead of the "::=" convention used elsewhere.
    t[0] = TipoAtributoTable(RestriccionUnique(t[3]), OPERACION_RESTRICCION_COLUMNA.UNIQUE_ATRIBUTO)
    h.reporteGramatical1 += "Columna ::= UNIQUE PARENTESISIZQUIERDA listaParam PARENTESISDERECHA\n"
    h.reporteGramatical2 += "t[0]=TipoAtributoTable(RestriccionUnique(t[3]),OPERACION_RESTRICCION_COLUMNA.UNIQUE_ATRIBUTO)\n"
def p_columna_4(t):
    '''Columna : constraintcheck
    '''
    # Named CHECK constraint declared at table level.
    nodo = TipoAtributoTable(t[1], OPERACION_RESTRICCION_COLUMNA.CHECK_CONSTRAINT)
    h.reporteGramatical1 += "Columna ::= constraintcheck\n"
    h.reporteGramatical2 += "t[0]=TipoAtributoTable(t[1],OPERACION_RESTRICCION_COLUMNA.CHECK_CONSTRAINT)\n"
    t[0] = nodo
def p_columna_5(t):
    'Columna : checkinColumn'
    # Anonymous CHECK declared at table level.
    nodo = TipoAtributoTable(t[1], OPERACION_RESTRICCION_COLUMNA.CHECK_SIMPLE)
    h.reporteGramatical1 += "Columna ::= checkinColumn\n"
    h.reporteGramatical2 += "t[0]=TipoAtributoTable(t[1],OPERACION_RESTRICCION_COLUMNA.CHECK_SIMPLE)\n"
    t[0] = nodo
def p_columna_6(t):
    'Columna : primaryKey'
    # Table-level PRIMARY KEY declaration.
    nodo = TipoAtributoTable(t[1], OPERACION_RESTRICCION_COLUMNA.PRIMARY_KEY)
    h.reporteGramatical1 += "Columna ::= primaryKey\n"
    h.reporteGramatical2 += "t[0]=TipoAtributoTable(t[1],OPERACION_RESTRICCION_COLUMNA.PRIMARY_KEY)\n"
    t[0] = nodo
def p_columna_7(t):
    'Columna : foreignKey'
    # Table-level FOREIGN KEY declaration.
    # Fix: report misspelled the nonterminal as "foreingKey".
    t[0] = TipoAtributoTable(t[1], OPERACION_RESTRICCION_COLUMNA.FOREIGN_KEY)
    h.reporteGramatical1 += "Columna ::= foreignKey\n"
    h.reporteGramatical2 += "t[0]=TipoAtributoTable(t[1],OPERACION_RESTRICCION_COLUMNA.FOREIGN_KEY)\n"
# --------------------------------------------------------------------------------------------------------------
#INICIA LA LISTA DE RESTRICCIONES OPCIONALES EN LAS COLUMNAS
def p_paramOpcional(t):
    '''paramOpcional : paramOpcional paramopc
    '''
    # Left-recursive accumulation of optional column restrictions.
    h.reporteGramatical1 += "paramOpcional ::= paramOpcional paramopc\n"
    h.reporteGramatical2 += "t[0]=t[1]\n"
    restricciones = t[1]
    restricciones.append(t[2])
    t[0] = restricciones
def p_paramOpcional_1(t):
    '''paramOpcional : paramopc
    '''
    # Base case: start a fresh one-element restriction list.
    h.reporteGramatical1 += "paramOpcional ::= paramopc\n"
    h.reporteGramatical2 += "t[0]=[t[1]]\n"
    t[0] = [t[1]]
# --------------------------------------------------------------------------------------------------------------
#INICIA LAS RESTRICCIONES EN LAS COLUMNAS
def p_paramopc_1(t):
    '''paramopc : DEFAULT final
                | NULL
                | NOT NULL
                | UNIQUE
                | PRIMARY KEY
    '''
    # Column-level restriction keywords, dispatched on the first token.
    # Fixes: report lines now consistently show the "t[0] = ..." assignment,
    # and the opaque debug print("FFFFF") in the fallthrough was replaced by
    # an informative message.
    if t[1].upper() == "DEFAULT":
        t[0] = TipoRestriccion(RestriccionDefaul(t[2]),OPERACION_RESTRICCION_COLUMNA.DEFAULT)
        h.reporteGramatical1 +="paramopc ::= DEFAULT final\n"
        h.reporteGramatical2 +="t[0] = TipoRestriccion(RestriccionDefaul(t[2]),OPERACION_RESTRICCION_COLUMNA.DEFAULT)\n"
    elif t[1].upper() == "NULL":
        t[0] = TipoRestriccion(RestriccionNull(1),OPERACION_RESTRICCION_COLUMNA.NULL)
        h.reporteGramatical1 +="paramopc ::= NULL\n"
        h.reporteGramatical2 +="t[0] = TipoRestriccion(RestriccionNull(1),OPERACION_RESTRICCION_COLUMNA.NULL)\n"
    elif t[1].upper() == "NOT":
        t[0] = TipoRestriccion(RestriccionNotNull(1),OPERACION_RESTRICCION_COLUMNA.NOT_NULL)
        h.reporteGramatical1 +="paramopc ::= NOT NULL\n"
        h.reporteGramatical2 +="t[0] = TipoRestriccion(RestriccionNotNull(1),OPERACION_RESTRICCION_COLUMNA.NOT_NULL)\n"
    elif t[1].upper() == "UNIQUE":
        t[0] = TipoRestriccion(RestriccionUniqueSimple(1),OPERACION_RESTRICCION_COLUMNA.UNIQUE_COLUMNA)
        h.reporteGramatical1 +="paramopc ::= UNIQUE\n"
        h.reporteGramatical2 +="t[0] = TipoRestriccion(RestriccionUniqueSimple(1),OPERACION_RESTRICCION_COLUMNA.UNIQUE_COLUMNA)\n"
    elif t[1].upper() == "PRIMARY" and t[2].upper()=="KEY":
        t[0] = TipoRestriccion(RestriccionPrimaryKeyColumn(1),OPERACION_RESTRICCION_COLUMNA.PRIMARY_KEY)
        h.reporteGramatical1 +="paramopc ::= PRIMARY KEY\n"
        h.reporteGramatical2 +="t[0] = TipoRestriccion(RestriccionPrimaryKeyColumn(1),OPERACION_RESTRICCION_COLUMNA.PRIMARY_KEY)\n"
    else:
        print("paramopc: restriccion no reconocida:", t[1])
# --------------------------------------------------------------------------------------------------------------
#LLAMADA A LAS RESTRICCION CHECK
def p_paramopc_2(t):
    'paramopc : constraintcheck'
    # Named CHECK used as a column-level restriction.
    h.reporteGramatical1 += "paramopc ::= constraintcheck\n"
    h.reporteGramatical2 += "t[0] = TipoRestriccion(t[1],OPERACION_RESTRICCION_COLUMNA.CHECK_CONSTRAINT)\n"
    t[0] = TipoRestriccion(t[1], OPERACION_RESTRICCION_COLUMNA.CHECK_CONSTRAINT)
def p_paramopc_3(t):
    'paramopc : checkinColumn'
    # Anonymous CHECK used as a column-level restriction.
    h.reporteGramatical1 += "paramopc ::= checkinColumn\n"
    h.reporteGramatical2 += "t[0]=TipoRestriccion(t[1],OPERACION_RESTRICCION_COLUMNA.CHECK_SIMPLE)\n"
    t[0] = TipoRestriccion(t[1], OPERACION_RESTRICCION_COLUMNA.CHECK_SIMPLE)
# --------------------------------------------------------------------------------------------------------------
#RESTRICCION UNIQUE
def p_paramopc_4(t):
    'paramopc : CONSTRAINT ID UNIQUE'
    # CONSTRAINT <name> UNIQUE at column level.
    nombre = t[2]
    h.reporteGramatical1 += "paramopc ::= CONSTRAINT ID UNIQUE\n"
    h.reporteGramatical2 += "t[0] = TipoRestriccion(RestriccionConstraintUnique(t[2]),OPERACION_RESTRICCION_COLUMNA.UNIQUE_CONSTAINT)\n"
    t[0] = TipoRestriccion(RestriccionConstraintUnique(nombre), OPERACION_RESTRICCION_COLUMNA.UNIQUE_CONSTAINT)
# --------------------------------------------------------------------------------------------------------------
#RESTRICION CHECK
def p_checkcolumna(t):
    'checkinColumn : CHECK PARENTESISIZQUIERDA operacion PARENTESISDERECHA'
    # Anonymous CHECK(condition) restriction.
    condicion = t[3]
    h.reporteGramatical1 += "checkinColumn ::= CHECK PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
    h.reporteGramatical2 += "t[0]=RestriccionCheck(t[3])\n"
    t[0] = RestriccionCheck(condicion)
def p_constraintcheck(t):
    'constraintcheck : CONSTRAINT ID CHECK PARENTESISIZQUIERDA operacion PARENTESISDERECHA'
    # Named CHECK: CONSTRAINT <name> CHECK (condition).
    # Fix: report used ":" instead of the "::=" convention used elsewhere.
    t[0] = RestriccionConstraintCheck(t[2], t[5])
    h.reporteGramatical1 += "constraintcheck ::= CONSTRAINT ID CHECK PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
    h.reporteGramatical2 += "t[0]=RestriccionConstraintCheck(t[2],t[5])\n"
def p_primaryKey(t):
    'primaryKey : PRIMARY KEY PARENTESISIZQUIERDA listaParam PARENTESISDERECHA'
    # PRIMARY KEY over a list of columns.
    columnas = t[4]
    h.reporteGramatical1 += "primaryKey ::= PRIMARY KEY PARENTESISIZQUIERDA listaParam PARENTESISDERECHA\n"
    h.reporteGramatical2 += "t[0]=RestriccionPrimaryKey(t[4])\n"
    t[0] = RestriccionPrimaryKey(columnas)
def p_foreingkey(t):
'foreignKey : FOREIGN KEY PARENTESISIZQUIERDA listaParam PARENTESISDERECHA REFERENCES ID PARENTESISIZQUIERDA listaParam PARENTESISDERECHA'
t[0]=RestriccionForeingkey(t[7],t[4],t[9])
h.reporteGramatical1 +="foreignKey ::= FOREIGN KEY PARENTESISIZQUIERDA listaParam PARENTESISDERECHA REFERENCES ID PARENTESISIZQUIERDA listaParam PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=RestriccionForeingkey(t[7],t[4],t[9])\n"
#-----------------------------------------------------TIPOS DE DATOS--------------------------------------------------------------------
def p_tipo(t):
    '''tipo : SMALLINT
            | INTEGER
            | BIGINT
            | DECIMAL
            | NUMERIC
            | REAL
            | DOUBLE PRECISION
            | MONEY
            | VARCHAR PARENTESISIZQUIERDA ENTERO PARENTESISDERECHA
            | CHARACTER VARYING PARENTESISIZQUIERDA ENTERO PARENTESISDERECHA
            | CHARACTER PARENTESISIZQUIERDA ENTERO PARENTESISDERECHA
            | CHAR PARENTESISIZQUIERDA ENTERO PARENTESISDERECHA
            | TEXT
            | BOOLEAN
            | TIMESTAMP
            | TIME
            | INTERVAL
            | DATE
            | YEAR
            | MONTH
            | DAY
            | HOUR
            | MINUTE
            | SECOND
    '''
    # Build a TipoDatoColumna node for the parsed SQL data type and append
    # the grammar-report lines.
    #
    # Bug fixes vs. the previous version:
    #   * the branch compared against "MONT"  -- the MONTH  token never matched
    #   * the branch compared against "MINUT" -- the MINUTE token never matched
    #   * DAY had no branch at all
    # In all three cases t[0] stayed None.  All unparameterized type keywords
    # produce the identical node and report strings, so they now share one
    # fall-through branch.
    tipo = t[1].upper()
    # Sized character types first: they carry the length from the ENTERO
    # inside the parentheses.
    if tipo == "CHARACTER" and t[2].upper() == "VARYING":
        # CHARACTER VARYING(n): keep the original behavior of storing the
        # second keyword ("VARYING") as the type name (t[2], length t[4]).
        t[0] = TipoDatoColumna(t[2], t[4])
        h.reporteGramatical1 += "tipo ::= CHARACTER VARYING PARENTESISIZQUIERDA ENTERO PARENTESISDERECHA\n"
        h.reporteGramatical2 += "t[0]=TipoDatoColumna(t[1],t[4])\n"
    elif tipo == "VARCHAR":
        t[0] = TipoDatoColumna(t[1], t[3])
        h.reporteGramatical1 += "tipo ::= VARCHAR PARENTESISIZQUIERDA ENTERO PARENTESISDERECHA\n"
        h.reporteGramatical2 += "t[0]=TipoDatoColumna(t[1],t[3])\n"
    elif tipo == "CHARACTER":
        t[0] = TipoDatoColumna(t[1], t[3])
        h.reporteGramatical1 += "tipo ::= CHARACTER PARENTESISIZQUIERDA ENTERO PARENTESISDERECHA\n"
        h.reporteGramatical2 += "t[0]=TipoDatoColumna(t[1],t[3])\n"
    elif tipo == "CHAR":
        t[0] = TipoDatoColumna(t[1], t[3])
        h.reporteGramatical1 += "tipo ::= CHAR PARENTESISIZQUIERDA ENTERO PARENTESISDERECHA\n"
        h.reporteGramatical2 += "t[0]=TipoDatoColumna(t[1],t[3])\n"
    else:
        # Every remaining alternative (SMALLINT, INTEGER, BIGINT, DECIMAL,
        # NUMERIC, REAL, DOUBLE PRECISION, MONEY, TEXT, BOOLEAN, TIMESTAMP,
        # TIME, INTERVAL, DATE, YEAR, MONTH, DAY, HOUR, MINUTE, SECOND) is an
        # unparameterized keyword: same node, no length.
        t[0] = TipoDatoColumna(t[1], None)
        h.reporteGramatical1 += "tipo ::= " + str(t[1]) + "\n"
        h.reporteGramatical2 += "t[0]=TipoDatoColumna(t[1],None)\n"
#--------------------------------------------------- SENTENCIA SELECT --------------------------------------------------------------
def p_select(t):
    '''selectData : SELECT select_list FROM select_list WHERE search_condition opcionesSelect
                | SELECT POR FROM select_list WHERE search_condition opcionesSelect
    '''
    # Full SELECT with WHERE plus trailing options (LIMIT/GROUP/HAVING/ORDER).
    # Both branches build the same Select5 node; only the logged grammar line
    # differs.  t[2]=='*' distinguishes the POR ('*') alternative.
    if t[2]=='*':
        h.reporteGramatical1 +="selectData ::= SELECT POR FROM select_list WHERE search_condition opcionesSelect \n"
        print("/////////////////// SELECT CON ASTERISCO ////////////////////////")
        print("Columnas: ",t[2])
        print("Tablas: ",t[4])
        print("Where: ",QueryWhere(t[6]))
        print("Extras: ",t[7])
        t[0]=Select5(t[2],t[4],QueryWhere(t[6]),t[7])
    else:
        h.reporteGramatical1 +="selectData ::= SELECT select_list FROM select_list WHERE search_condition opcionesSelect \n"
        print("/////////////////// SELECT SIN ASTERISCO ////////////////////////")
        print("Columnas: ",t[2])
        print("Tablas: ",t[4])
        print("Where: ",QueryWhere(t[6]))
        print("Extras: ",t[7])
        t[0]=Select5(t[2],t[4],QueryWhere(t[6]),t[7])
def p_select_1(t):
    '''selectData : SELECT select_list FROM select_list WHERE search_condition
                | SELECT POR FROM select_list WHERE search_condition
    '''
    # SELECT with WHERE but no trailing options.  '*' builds a Select3
    # (tables + where only); an explicit column list builds a Select4.
    if t[2]=='*':
        h.reporteGramatical1 +="selectData ::= SELECT POR FROM select_list WHERE search_condition \n"
        h.reporteGramatical2 +="t[0]=Select3(t[4],QueryWhere(t[6]))\n"
        print("entra al select con where y asterisco/////////////////")
        t[0]=Select3(t[4],QueryWhere(t[6]))
        print("el objeto que sube")
        print(t[0])
    else:
        h.reporteGramatical1 +="selectData ::= SELECT select_list FROM select_list WHERE search_condition \n"
        h.reporteGramatical2 +=" t[0]=Select4(t[2],t[4],QueryWhere(t[6]))\n"
        print("entra al select con where y campos /////////////////")
        print(t[2])
        print(t[4])
        print(t[6])
        t[0]=Select4(t[2],t[4],QueryWhere(t[6]))
        print(t[0])
# esta full
def p_select_2(t):
    '''selectData : SELECT select_list FROM select_list
                | SELECT POR FROM select_list
    '''
    # SELECT without WHERE.  The second positional argument encodes the
    # column style: 2 = '*' (all columns), otherwise an explicit list.
    if t[2]=='*':
        h.reporteGramatical1 +="selectData ::= SELECT POR FROM select_list \n"
        # NOTE(review): the report string below predates the extra "2"
        # argument actually passed to Select() -- it is stale.
        h.reporteGramatical2 +=" t[0]=Select(1,t[4])\n"
        print("entra a select_2 A")
        #se le agrega como segundo parametro el 2 que significa que venia asterirsco o todas las tablas
        t[0]=Select(1,2,t[4])
    else:
        # select tipo 4
        h.reporteGramatical1 +="selectData ::= SELECT select_list FROM select_list \n"
        h.reporteGramatical2 +=" t[0]=Select2(2,t[2],t[4])\n"
        print("entra a select_2 B")
        print(t[2])
        print(t[4])
        t[0]=Select2(2,t[2],t[4])
# esta full
def p_select_3(t):
    '''selectData : SELECT select_list
    '''
    # SELECT with expressions only (no FROM clause).
    h.reporteGramatical1 +="selectData ::= SELECT select_list \n"
    h.reporteGramatical2 +=" t[0]=Select(1,t[2])\n"
    #se le agrega el 2do 1 si solo vienen datos y no tablas
    t[0]=Select(1,1,t[2])
def p_opcionesSelect_1(t):
    '''opcionesSelect : opcionesSelect opcionSelect
    '''
    # Left-recursive accumulation: append the new option to the running list.
    h.reporteGramatical1 +="opcionesSelect ::= opcionesSelect opcionSelect\n"
    print(t[1])
    t[1].append(t[2])
    t[0]=t[1]
def p_opcionesSelect_2(t):
    '''opcionesSelect : opcionSelect
    '''
    # Base case: start the options list with the single parsed option.
    h.reporteGramatical1 +="opcionesSelect ::= opcionSelect\n"
    print(t[1])
    t[0]=[t[1]]
def p_opcionesSelect_3(t):
    '''opcionSelect : LIMIT operacion
                | GROUP BY select_list
                | HAVING select_list
                | ORDER BY select_list
    '''
    # Dispatch on the first keyword.  ORDER BY without an explicit direction
    # defaults to ascending ('ASC').
    if t[1].upper()=="LIMIT":
        h.reporteGramatical1 +="opcionSelect ::= LIMIT operacion\n"
        h.reporteGramatical2 +="t[0]=ExpresionLimit(t[2])\n"
        t[0]=ExpresionLimit(t[2])
    elif t[1].upper()=="GROUP":
        h.reporteGramatical1 +="opcionSelect ::= GROUP BY select_list\n"
        h.reporteGramatical2 +="t[0]=ExpresionGroup(t[3])\n"
        t[0]=ExpresionGroup(t[3])
    elif t[1].upper()=="HAVING":
        h.reporteGramatical1 +="opcionSelect ::= HAVING select_list\n"
        h.reporteGramatical2 +="t[0]=ExpresionHaving(t[2])\n"
        t[0]=ExpresionHaving(t[2])
    elif t[1].upper()=="ORDER":
        h.reporteGramatical1 +="opcionSelect ::= ORDER BY select_list\n"
        h.reporteGramatical2 +="t[0]=ExpresionOrder(t[3],'ASC')\n"
        t[0]=ExpresionOrder(t[3],'ASC')
def p_opcionesSelect_4(t):
    '''opcionSelect : LIMIT operacion OFFSET operacion
                | ORDER BY select_list ordenamiento
    '''
    # Extended forms: LIMIT with OFFSET, or ORDER BY with an explicit
    # ASC/DESC direction (t[4]).
    if t[1].upper()=="LIMIT":
        h.reporteGramatical1 +="opcionSelect ::= LIMIT operacion OFFSET operacion\n"
        h.reporteGramatical2 +="t[0]=ExpresionLimitOffset(t[2],t[4])\n"
        t[0]=ExpresionLimitOffset(t[2],t[4])
    elif t[1].upper()=="ORDER":
        h.reporteGramatical1 +="opcionSelect ::= ORDER BY select_list ordenamiento\n"
        h.reporteGramatical2 +="t[0]=ExpresionOrder(t[3],t[4])\n"
        t[0]=ExpresionOrder(t[3],t[4])
def p_ordenamiento(t):
    '''ordenamiento : ASC
                | DESC '''
    # Pass the direction keyword up as a plain string.
    h.reporteGramatical1 +="ordenamiento ::= "+str(t[1])+"\n"
    h.reporteGramatical2 +=" t[0]=str(t[1])\n"
    t[0]=str(t[1])
def p_search_condition_2(t):
    'search_condition : final NOT IN PARENTESISIZQUIERDA selectData PARENTESISDERECHA'
    # NOT IN with a subquery: t[1] is the tested value, t[5] the sub-select.
    # NOTE(review): the report string below does not match this rule -- stale.
    h.reporteGramatical1 +="search_condition ::= NOT search_condition\n"
    print("esta condicion es del not con operacion******************")
    print(t[1])
    print(t[5])
    t[0]=ExpresionNotIn(t[1],t[5])
# PARA ABAJO YA ESTA
def p_search_condition_3(t):
    'search_condition : operacion'
    # A bare expression is a valid search condition; pass it through.
    h.reporteGramatical1 +="search_condition ::= operacion\n"
    h.reporteGramatical2 +=" t[0]=t[1]\n"
    print("entra a la operacion del seach_condition++++++++++++++++++++++++++++++++++++++++")
    print(t[1])
    t[0]=t[1]
def p_search_condition_4(t):
    'search_condition : PARENTESISIZQUIERDA search_condition PARENTESISDERECHA'
    # Parenthesized condition: unwrap and pass the inner condition through.
    h.reporteGramatical1 +="search_condition ::= PARENTESISIZQUIERDA search_condition PARENTESISDERECHA\n"
    h.reporteGramatical2 +=" t[0]=t[2]\n"
    print("entra a la condicion con el parentesis")
    print(t[2])
    t[0]=t[2]
def p_select_list_1(t):
    ' select_list : select_list COMA operacion'
    # Left-recursive list building: append the new expression in place.
    h.reporteGramatical1 +="select_list ::= select_list COMA operacion\n"
    h.reporteGramatical2 +=" t[1].append(t[3])\nt[0]=t[1]\n"
    print("Entra a select list COMA operacion****************************************")
    t[1].append(t[3])
    print(t[1])
    t[0]=t[1]
def p_select_list_6(t):
    ' select_list : select_list COMA asignacion'
    # Same accumulation but the new element is an aliased expression.
    # NOTE(review): the report string below describes a different action -- stale.
    h.reporteGramatical1 +="select_list ::= select_list COMA asignacion\n"
    h.reporteGramatical2 +=" t[0]=Asignacion(t[1],t[3])\n"
    print(" entra al select_list COMA operacion-------------")
    t[1].append(t[3])
    t[0]=t[1]
    print(t[0])
def p_select_list_7(t):
    ' select_list : asignacion'
    # Base case: a single aliased expression starts the list.
    h.reporteGramatical1 +="select_list ::= asignacion\n"
    h.reporteGramatical2 +=" t[0]=t[1]\n"
    print(" entra al select_list: asignacion-------------")
    print(t[1])
    t[0]=[t[1]]
def p_select_list_2(t):
    'select_list : operacion'
    # Base case: a single expression, wrapped as a basic-function node.
    h.reporteGramatical1 +="select_list ::= operacion\n"
    h.reporteGramatical2 +=" t[0]=[ExpresionFuncionBasica(t[1])]\n"
    print("select_list+++++++++++++++++++++++++")
    print(t[1])
    t[0]=[ExpresionFuncionBasica(t[1])]
def p_asignacion_1(t):
    ' asignacion : operacion AS operacion'
    # Explicit alias: <expr> AS <alias>.
    h.reporteGramatical1 +="select_list ::= select_list AS operacion\n"
    h.reporteGramatical2 +=" t[0]=[Asignacion(t[1],t[3])]\n"
    print("entra a asignacion: operacion AS operacion")
    t[0]=Asignacion(t[1],t[3])
def p_asignacion_2(t):
    ' asignacion : final final'
    # Implicit alias: two adjacent values, e.g. "col alias".
    h.reporteGramatical1 +="select_list ::= final final\n"
    h.reporteGramatical2 +=" t[0]=[Asignacion(t[1],t[2])]\n"
    print(" entra al select_list de 2 finales-------------")
    t[0]=Asignacion(t[1],t[2])
    print(t[0])
def p_funcion_basica_4(t):
    'funcionBasica : operacion BETWEEN operacion '
    # BETWEEN: t[1] is the tested value, t[3] the range expression.
    # NOTE(review): the report strings mention "AND operacion" which is not
    # part of this rule -- stale.
    h.reporteGramatical1 +="funcionBasica ::= operacion BETWEEN operacion AND operacion\n"
    h.reporteGramatical2 +="t[0]=ExpresionBetween(t[1],t[3])\n"
    print("entra al between con sus operaciones")
    print(t[1])
    print(t[3])
    t[0]=ExpresionBetween(t[1],t[3])
def p_funcion_basica_7(t):
    'funcionBasica : operacion NOT BETWEEN operacion'
    # NOT BETWEEN: t[1] tested value, t[4] range (t[3] is the keyword itself,
    # which is what the debug print below shows).
    h.reporteGramatical1 +="funcionBasica ::= operacion NOT BETWEEN operacion AND operacion\n"
    h.reporteGramatical2 +="t[0]=ExpresionNotBetween(t[1],t[4])\n"
    print("entra al NOT between con sus operaciones")
    print(t[1])
    print(t[3])
    t[0]=ExpresionNotBetween(t[1],t[4])
def p_funcion_basica_8(t):
    'funcionBasica : operacion BETWEEN SYMMETRIC operacion '
    # BETWEEN SYMMETRIC: bounds may be given in either order.
    h.reporteGramatical1 +="funcionBasica ::= operacion BETWEEN SYMMETRIC operacion AND operacion\n"
    h.reporteGramatical2 +="t[0]=ExpresionBetweenSymmetric(t[1],t[4])\n"
    t[0]=ExpresionBetweenSymmetric(t[1],t[4])
def p_funcion_basica_9(t):
    'funcionBasica : operacion NOT BETWEEN SYMMETRIC operacion '
    # Negated symmetric range test.
    h.reporteGramatical1 +="funcionBasica ::= operacion NOT BETWEEN SYMMETRIC operacion AND operacion\n"
    h.reporteGramatical2 +="t[0]=ExpresionNotBetweenSymmetric(t[1],t[5])\n"
    t[0]=ExpresionNotBetweenSymmetric(t[1],t[5])
def p_funcion_basica_10(t):
    '''funcionBasica : operacion IS DISTINCT FROM operacion
    '''
    # IS DISTINCT FROM: null-safe inequality between t[1] and t[5].
    h.reporteGramatical1 +="funcionBasica ::= operacion IS DISTINCT FROM operacion\n"
    h.reporteGramatical2 +="t[0]=ExpresionIsDistinct(t[1],t[5])\n"
    print("entra al IS DISTINCT ++++++++++++++++++")
    t[0]=ExpresionIsDistinct(t[1],t[5])
def p_funcion_basica_11(t):
    '''funcionBasica : operacion IS NOT DISTINCT FROM operacion'''
    # IS NOT DISTINCT FROM: null-safe equality between t[1] and t[6].
    h.reporteGramatical1 +="funcionBasica ::= operacion IS NOT DISTINCT FROM operacion\n"
    h.reporteGramatical2 +="t[0]=ExpresionIsNotDistinct(t[1],t[6])\n"
    print("entra al IS NOT DISTINCT ++++++++++++++++++")
    t[0]=ExpresionIsNotDistinct(t[1],t[6])
def p_tipos(t):
    '''tipos : CREATE TYPE final AS ENUM PARENTESISIZQUIERDA select_list PARENTESISDERECHA PUNTOYCOMA'''
    # CREATE TYPE <name> AS ENUM (<values>): t[3] is the type name,
    # t[7] the list of enum values.
    print("entra al enum++++++++++++++++++++++++++++++++")
    h.reporteGramatical1 +="tipos ::= CREATE TYPE final AS ENUM PARENTESISIZQUIERDA select_list PARENTESISDERECHA PUNTOYCOMA\n"
    h.reporteGramatical2 +="t[0]=Tipo(t[3],t[7])\n"
    print(t[3])
    print(t[7])
    t[0]=Tipo(t[3],t[7])
#para manejar los errores sintacticos
#def p_error(t): #en modo panico :v
# print("token error: ",t)
# print("Error sintáctico en '%s'" % t.value[0])
# print("Error sintáctico en '%s'" % t.value[1])
#def p_error(t): #en modo panico :v
# while True:
# tok=parser.token()
# if not tok or tok.type==';':break
# parser.errok()
# return tok
def find_column(input, token):
    """Return the 0-based column of *token* within *input*.

    The 1-based column is also printed, matching the original behavior
    (p_error prints positions for the user while the HTML report stores
    the returned 0-based value).
    """
    last_newline = input.rfind('\n', 0, token.lexpos)
    start_of_line = last_newline + 1
    column = token.lexpos - start_of_line
    print(column + 1)
    return column
def p_error(t):
    # Syntax-error handler: log the offending token, compute its (fila, col)
    # position within the module-global source `caden`, append an HTML row to
    # h.errores, then resynchronize by discarding tokens up to ';'.
    print("token: '%s'" %t)
    print("Error sintáctico en '%s' " % t.value)
    #h.filapivote+=1
    x=caden.splitlines()
    filas=len(x)-1
    print("filas que no cambian: ",filas)
    # filapivote counts previous error recoveries; it offsets the lexer's
    # cumulative line number back into the current input's coordinates.
    if h.filapivote>0:
        fila=(t.lineno-1)-h.filapivote*filas
    else:
        fila=(t.lineno-1)
    h.filapivote+=1
    h.errores+= "<tr><td>"+str(t.value)+"</td><td>"+str(fila)+"</td><td>"+str(find_column(caden,t))+"</td><td>SINTACTICO</td><td>el token no va aqui</td></tr>\n"
    print("Error sintáctico fila '%s'" % fila)
    print("Error sintáctico col '%s'" % find_column(caden,t))
    # NOTE(review): dead guard -- if t were None (PLY passes None at EOF),
    # `t.value` above would already have raised before reaching this check.
    if not t:
        print("End of File!")
        return
    # Panic mode: read ahead, discarding tokens until the next ';'
    # (PUNTOYCOMA), then restart the parser state.
    while True:
        tok = parser.token() # Get the next token
        if not tok or tok.type == 'PUNTOYCOMA':
            break
    parser.restart()
import ply.yacc as yacc
# Build the LALR parser tables from all p_* rule functions in this module.
parser = yacc.yacc()
def parse(input) :
    # Entry point: stash the raw source in the module-global `caden` (used by
    # p_error/find_column for error positions), then run the parser.
    global caden
    caden=""
    caden=input
    return parser.parse(input)
aceae5e7e7c2bf0540d4626b2439defd4329abed | 821 | py | Python | var/spack/repos/builtin/packages/virtualgl/package.py | adrianjhpc/spack | 0a9e4fcee57911f2db586aa50c8873d9cca8de92 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2 | 2020-10-15T01:08:42.000Z | 2021-10-18T01:28:18.000Z | var/spack/repos/builtin/packages/virtualgl/package.py | adrianjhpc/spack | 0a9e4fcee57911f2db586aa50c8873d9cca8de92 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2 | 2019-07-30T10:12:28.000Z | 2019-12-17T09:02:27.000Z | var/spack/repos/builtin/packages/virtualgl/package.py | adrianjhpc/spack | 0a9e4fcee57911f2db586aa50c8873d9cca8de92 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 5 | 2019-07-30T09:42:14.000Z | 2021-01-25T05:39:20.000Z | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
#
from spack import *
class Virtualgl(CMakePackage):
    """VirtualGL redirects 3D commands from a Unix/Linux OpenGL application
    onto a server-side GPU and converts the rendered 3D images into a video
    stream with which remote clients can interact to view and control the
    3D application in real time."""
    # Project landing page and release-tarball URL template used by Spack.
    homepage = "http://www.virtualgl.org/Main/HomePage"
    url = "http://downloads.sourceforge.net/project/virtualgl/2.5.2/VirtualGL-2.5.2.tar.gz"
    # Published checksum for the 2.5.2 release tarball.
    version('2.5.2', sha256='4f43387678b289a24139c5b7c3699740ca555a9f10011c979e51aa4df2b93238')
    # Build-time/link-time dependencies resolved by Spack.
    depends_on("jpeg")
    depends_on("glu")
aceae5f871e9a01900d6cf5229f6ed78d82368dd | 3,554 | py | Python | src/models/gbm1_indiv_pair_h2h_tuned.py | bushal01/league-ml2 | 6a3abf522faefcead017efa3fc6f9e2e8cafcd7f | [
"FTL"
] | 1 | 2019-11-14T22:46:08.000Z | 2019-11-14T22:46:08.000Z | src/models/gbm1_indiv_pair_h2h_tuned.py | bushal01/league-ml2 | 6a3abf522faefcead017efa3fc6f9e2e8cafcd7f | [
"FTL"
] | 25 | 2020-02-24T17:57:47.000Z | 2022-03-11T09:17:10.000Z | src/models/gbm1_indiv_pair_h2h_tuned.py | bushal01/league-ml2 | 6a3abf522faefcead017efa3fc6f9e2e8cafcd7f | [
"FTL"
] | 1 | 2018-08-04T02:50:10.000Z | 2018-08-04T02:50:10.000Z | import pandas as pd
import sklearn.ensemble
import sys
import os
import dotenv
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
import get_modeling_data
import model_evaluation.model_performance_functions as mpf
import time
# Resolve the project root both when run as a script and when imported
# (interactive use), then load environment variables from its .env file.
if __name__ == '__main__':
    project_dir = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)
else:
    project_dir = os.path.join(os.getcwd(), os.pardir)
dotenv_path = os.path.join(project_dir, '.env')
dotenv.load_dotenv(dotenv_path)
# Load train/validation frames and zero-fill missing values.
train = get_modeling_data.get_train()
validation = get_modeling_data.get_validation()
train = train.fillna(0)
validation = validation.fillna(0)
# Split into features (drop bookkeeping columns + label) and target.
non_modeling_cols = get_modeling_data.get_non_modeling_cols()
X_train = train.drop(non_modeling_cols + ['team_100_win'], axis=1)
Y_train = train['team_100_win']
X_validation = validation.drop(non_modeling_cols + ['team_100_win'], axis=1)
Y_validation = validation['team_100_win']
# Hyperparameter grid.  NOTE(review): `n_estimators = [500]` is never
# iterated -- the loop below hard-codes n_estimators=1500 and relies on
# early stopping via gbm_best_iter; confirm the list is intentional dead code.
learning_rates = [0.02]
n_estimators = [500]
min_samples_splits = [100, 200, 500]
max_depths = [2, 3, 5, 7]
for lr in learning_rates:
    for mss in min_samples_splits:
        for md in max_depths:
            gbm_params = {'learning_rate': lr,
                          'n_estimators': 1500,
                          'min_samples_split': mss,
                          'min_samples_leaf': 50,
                          'max_depth': md,
                          'random_state': 414}
            print(gbm_params)
            start_time = time.time()
            model = sklearn.ensemble.GradientBoostingClassifier(**gbm_params)
            model_fit = model.fit(X_train, Y_train)
            # Pick the boosting iteration with the best validation performance.
            n_est_performance = mpf.gbm_best_iter(model_fit, X_validation, Y_validation)
            # Get training and validation predictions using best iteration:
            # staged_predict yields predictions after each boosting stage, so
            # we replay until the chosen iteration and keep that snapshot.
            ctr = 1
            for prediction in model_fit.staged_predict(X_train):
                if ctr == n_est_performance['best_iter']:
                    train_pred = prediction
                ctr = ctr + 1
            ctr = 1
            for prediction in model_fit.staged_predict(X_validation):
                if ctr == n_est_performance['best_iter']:
                    validation_pred = prediction
                ctr = ctr + 1
            train_time = time.time() - start_time
            # Compute KS/Gini, accuracy and MSE on both splits, then append a
            # row to the shared model-performance CSV.
            ks_gini_train = mpf.ks_gini(Y_train, train_pred)
            ks_gini_validation = mpf.ks_gini(Y_validation, validation_pred)
            correct_pred_train = mpf.correct_prediction_rate(Y_train, train_pred)
            correct_pred_validation = mpf.correct_prediction_rate(Y_validation, validation_pred)
            model_performance = mpf.record_gbm_performance(description='GBM,Indiv,paired,h2h,230k,best_iteration',
                **gbm_params, best_iter=n_est_performance['best_iter'], num_vars=X_train.shape[1],
                train_rows=X_train.shape[0], valid_rows=X_validation.shape[0],
                correct_pred_train=correct_pred_train, correct_pred_validation=correct_pred_validation,
                ks_train=ks_gini_train['ks'], ks_valid=ks_gini_validation['ks'],
                gini_train=ks_gini_train['gini'], gini_valid=ks_gini_validation['gini'],
                mse_train=mpf.mse(Y_train, train_pred), mse_valid=mpf.mse(Y_validation, validation_pred),
                train_time=train_time, file=os.getenv('DATA_DIR') + 'model_performance/gbm_eval.csv')
            print(model_performance)
aceae62bd7b8a95334499129f44663436b6dd81a | 3,152 | py | Python | 2_helpers/getters.py | olibchr/factChecker | 6affa8636884e1198eff8a9e360ad376f511e37d | [
"MIT"
] | 1 | 2018-06-23T20:40:41.000Z | 2018-06-23T20:40:41.000Z | 2_helpers/getters.py | olibchr/factChecker | 6affa8636884e1198eff8a9e360ad376f511e37d | [
"MIT"
] | 1 | 2021-12-13T19:47:10.000Z | 2021-12-13T19:47:10.000Z | 2_helpers/getters.py | olibchr/factChecker | 6affa8636884e1198eff8a9e360ad376f511e37d | [
"MIT"
] | null | null | null | import glob, json, datetime, os
import pandas as pd
import numpy as np
from nltk.corpus import stopwords
from nltk.corpus import wordnet as wn
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import RegexpTokenizer
from string import digits
from decoder import decoder
STAND_DIR = os.path.dirname(__file__) + '/../../3_Data/'
NLTK_STOPWORDS = set(stopwords.words('english'))
WNL = WordNetLemmatizer()
def datetime_converter(o):
    """``json.dumps`` default hook: stringify datetimes, leave the rest.

    Returns the ISO-like ``str()`` form for datetime instances and None
    (implicitly "not handled") for anything else.
    """
    return str(o) if isinstance(o, datetime.datetime) else None
def get_tokenize_text(text, only_retweets=False):
    # Tokenize on word characters and lemmatize each lowercased token.
    tokenizer = RegexpTokenizer(r'\w+')
    if only_retweets:
        # Retweet mode: keep only the "@user ... :" span that follows "rt",
        # and additionally drop English stopwords.
        text = text.lower()
        if 'rt' not in text: return []
        text = text[text.find('rt'):]
        text = text[text.find('@'):text.find(':')]
        return [WNL.lemmatize(i.lower()) for i in tokenizer.tokenize(text) if
                i.lower() not in NLTK_STOPWORDS]
    # NOTE(review): the default path does NOT remove stopwords, unlike the
    # retweet path -- confirm the asymmetry is intended.
    return [WNL.lemmatize(i.lower()) for i in tokenizer.tokenize(text)]
def get_data(DIR = STAND_DIR):
    # Load facts and fact-transactions JSON (decoded via the project decoder
    # hook) and return them as plain Python objects.
    fact_file = glob.glob(DIR + 'facts.json')[0]
    transactions_file = glob.glob(DIR + 'factTransaction.json')[0]
    facts = json.load(open(fact_file), object_hook=decoder)
    transactions = json.load(open(transactions_file), object_hook=decoder)
    return facts, transactions
def get_transactions(DIR = STAND_DIR):
    # Return the fact transactions as a pandas DataFrame.
    transactions_file = glob.glob(DIR + 'factTransaction.json')[0]
    return pd.read_json(transactions_file)
def get_data_df(DIR = STAND_DIR):
    # Same files as get_data(), but loaded as pandas DataFrames.
    fact_file = glob.glob(DIR + 'facts.json')[0]
    transactions_file = glob.glob(DIR + 'factTransaction.json')[0]
    facts = pd.read_json(open(fact_file))
    transactions = pd.read_json(open(transactions_file))
    return facts, transactions
def get_users(DIR = STAND_DIR):
    """Load one user object per ``user_*.json`` file under DIR/user_tweets.

    Each file is expected to hold a single JSON object on its first line,
    decoded through the project ``decoder`` hook.

    Returns:
        list: decoded user objects, one per file.
    """
    user_files = glob.glob(DIR + 'user_tweets/' + 'user_*.json')
    print('{} users'.format(len(user_files)))
    if len(user_files) < 10: print('WRONG DIR?')
    users = []
    for user_file in user_files:
        # Use a context manager so each handle is closed promptly; the
        # previous version leaked one open file per user.
        with open(user_file) as fh:
            users.append(json.loads(fh.readline(), object_hook=decoder))
    return users
def get_corpus():
    # Load the bag-of-words corpus (single JSON object on the first line).
    corpus_file = glob.glob('model_data/bow_corpus.json')[0]
    bow_corpus = json.loads(open(corpus_file).readline())
    return bow_corpus
def get_fact_topics(DIR = STAND_DIR):
    # Build a facts DataFrame enriched with tokenized text, entity surface
    # forms and a combined term list per fact.
    fact_file = glob.glob(DIR + 'facts.json')[0]
    facts_df = pd.read_json(fact_file)
    # Strip digits before tokenizing/lemmatizing the fact text.
    remove_digits = str.maketrans('', '', digits)
    facts_df['text_parsed'] = facts_df['text'].map(lambda t: get_tokenize_text(t.translate(remove_digits)))
    # Flatten the lowercase surface-form words of entities whose similarity
    # score is at least 0.6.
    facts_df['entities_parsed'] = facts_df['entities'].map(lambda ents:
                                                           [item for sublist in
                                                            [e['surfaceForm'].lower().split() for e in ents if
                                                             e['similarityScore'] >= 0.6]
                                                            for item in sublist])
    # Wrap the topic in a list so it concatenates with the other term lists.
    facts_df['topic'] = facts_df['topic'].map(lambda t: [t])
    facts_df['fact_terms'] = facts_df['text_parsed'] + facts_df['entities_parsed'] + facts_df['topic']
    return facts_df
| 37.975904 | 110 | 0.642132 |
aceae6402b9c901249e7afc4b562c50354b4e155 | 4,259 | py | Python | bokeh/application/handlers/function.py | jeisch/bokeh | 6be4d5ebbec04117f2bb0693fe64dc664f8f1bb1 | [
"BSD-3-Clause"
] | 1 | 2020-03-21T04:11:51.000Z | 2020-03-21T04:11:51.000Z | bokeh/application/handlers/function.py | jeisch/bokeh | 6be4d5ebbec04117f2bb0693fe64dc664f8f1bb1 | [
"BSD-3-Clause"
] | 2 | 2021-05-08T11:43:21.000Z | 2021-05-10T19:16:43.000Z | bokeh/application/handlers/function.py | jeisch/bokeh | 6be4d5ebbec04117f2bb0693fe64dc664f8f1bb1 | [
"BSD-3-Clause"
] | null | null | null | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide a Bokeh Application Handler to build up documents by running
a specified Python function.
This Handler is not used by the Bokeh server command line tool, but is often
useful if users wish to embed the Bokeh server programmatically:
.. code-block:: python
def make_doc(doc):
# do work to modify the document, add plots, widgets, etc.
return doc
app = Application(FunctionHandler(make_doc))
server = Server({'/bkapp': app}, io_loop=IOLoop.current())
server.start()
For complete examples of this technique, see
:bokeh-tree:`examples/howto/server_embed`
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
from ...util.callback_manager import _check_callback
from .handler import Handler
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'FunctionHandler',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class FunctionHandler(Handler):
    ''' A Handler that accepts a plain python function to use for modifying
    Bokeh Documents.
    For example, the following code configures a handler with a function that
    adds an empty plot to a Document:
    .. code-block:: python
        def add_empty_plot(doc):
            p = figure(x_range=(0,10), y_range=(0, 10))
            doc.add_root(p)
            return doc
        handler = FunctionHandler(add_empty_plot)
    This handler could be configured on an Application, and the Application
    would run this function every time a new session is created.
    '''
    def __init__(self, func):
        '''
        Args:
            func (callable) : a function to modify and return a Bokeh Document.
                The function should have the form:
                .. code-block:: python
                    def func(doc):
                        # modify doc
                        return doc
                and it should return the passed-in document after making any
                modifications in-place.
        '''
        super().__init__()
        # Reject callables that do not accept exactly one 'doc' argument.
        _check_callback(func, ('doc',))
        self._func = func
        # Remains True until the first document is modified (see below).
        self._safe_to_fork = True
    # Properties --------------------------------------------------------------
    @property
    def safe_to_fork(self) -> bool:
        ''' Whether it is still safe for the Bokeh server to fork new workers.
        ``False`` if ``modify_doc`` has already been called.
        '''
        return self._safe_to_fork
    # Public methods ----------------------------------------------------------
    def modify_document(self, doc):
        ''' Execute the configured ``func`` to modify the document.
        After this method is first executed, ``safe_to_fork`` will return
        ``False``.
        '''
        self._func(doc)
        # Session state now exists; forking after this point could duplicate it.
        self._safe_to_fork = False
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| 30.205674 | 79 | 0.429913 |
aceae692a0c8d794df9647ddcba14b3f7d05f9c1 | 2,021 | py | Python | lucia/worker/tasks/find_nearest_resource.py | LuxAI-Gamers/LucIA | ac0dd691780f4af6a5cfb778ae1882c256020b04 | [
"MIT"
] | 1 | 2021-11-21T22:24:46.000Z | 2021-11-21T22:24:46.000Z | lucia/worker/tasks/find_nearest_resource.py | LuxAI-Gamers/LucIA | ac0dd691780f4af6a5cfb778ae1882c256020b04 | [
"MIT"
] | null | null | null | lucia/worker/tasks/find_nearest_resource.py | LuxAI-Gamers/LucIA | ac0dd691780f4af6a5cfb778ae1882c256020b04 | [
"MIT"
] | null | null | null | import math
from lux.constants import Constants
from ...bh_trees import Task
class FindNearestResource(Task):
    """Behaviour-tree task that locates the closest unassigned, minable
    resource tile to the blackboard's current object and publishes its
    position on the blackboard."""

    def __init__(self):
        super(FindNearestResource, self).__init__()

    def run(self):
        """Scan the map for resource tiles and pick the closest usable one.

        Returns:
            bool: True when a suitable tile was found (its position is
            written to the blackboard), False otherwise.
        """
        map = self._blackboard.get_value('map')
        object = self._blackboard.get_value('object')
        width = self._blackboard.get_value('width')
        height = self._blackboard.get_value('height')
        player = self._blackboard.get_value('player')

        # Bug fix: the previous version passed (map, width, height)
        # positionally into a (map, height, width) signature, swapping the
        # dimensions on non-square maps.  Keyword arguments make the
        # pairing explicit.  (The old `tiles_resource = ...` assignment was
        # dropped: find_all_resources returns None and the name was unused.)
        self.find_all_resources(map, height=height, width=width)
        close_resource = self.find_closest_resources(object, player)

        if close_resource:
            self._blackboard.set_values(position=close_resource.pos)
            return True
        return False

    def find_all_resources(self, map, height, width):
        """Collect every map cell holding a resource into
        ``self._resource_tiles`` (consumed by find_closest_resources)."""
        resource_tiles = []
        for y in range(height):
            for x in range(width):
                cell = map.get_cell(x, y)
                if cell.has_resource():
                    resource_tiles.append(cell)
        self._resource_tiles = resource_tiles

    def find_closest_resources(self, object, player):
        """Return the nearest resource tile the player can actually mine.

        Coal/uranium tiles are skipped until the matching research is
        complete, and tiles already claimed (blackboard 'tiles_assigned')
        are ignored.  Returns None when no tile qualifies.
        """
        tiles_assigned = self._blackboard.get_value('tiles_assigned')
        closest_dist = math.inf
        closest_resource_tile = None
        for resource_tile in self._resource_tiles:
            if (resource_tile.resource.type == Constants.RESOURCE_TYPES.COAL
                    and not player.researched_coal()):
                continue
            if (resource_tile.resource.type == Constants.RESOURCE_TYPES.URANIUM
                    and not player.researched_uranium()):
                continue
            if resource_tile.pos in tiles_assigned:
                continue
            dist = resource_tile.pos.distance_to(object.pos)
            if dist < closest_dist:
                closest_dist = dist
                closest_resource_tile = resource_tile
        return closest_resource_tile
aceae6b7eab2a606b06b5756a6646326171e875d | 3,035 | py | Python | ECUSimulation/config/timing_reg_formulas.py | arturmrowca/IVNS | 8915142d16debe4af780a9eb6859e44dea2ca7e6 | [
"MIT"
] | 8 | 2016-03-02T11:47:16.000Z | 2021-11-05T07:49:25.000Z | ECUSimulation/config/timing_reg_formulas.py | arturmrowca/IVNS | 8915142d16debe4af780a9eb6859e44dea2ca7e6 | [
"MIT"
] | 1 | 2019-03-19T14:23:52.000Z | 2019-03-25T14:24:45.000Z | ECUSimulation/config/timing_reg_formulas.py | PhilippMundhenk/IVNS | b2e395611e9b5111aeda7ab128f3486354bbbf0d | [
"MIT"
] | 5 | 2016-02-11T10:12:27.000Z | 2020-10-09T02:27:41.000Z | '''
This module makes it possible to calculate timings depending
on input data
'''
import importlib
def call(func_or_var, *args):
    """Resolve *func_or_var* and return its value.

    Three accepted forms:
      * ``["dotted.path.ClassName", "method_name"]`` -- import the module,
        instantiate the class, and invoke the named method with *args*;
      * a callable -- invoked directly with *args*;
      * anything else -- returned unchanged (treated as a plain variable).

    If the list form fails to resolve, the argument falls through to the
    callable/plain-value handling below (mirroring the original fallback).
    """
    if isinstance(func_or_var, list):
        try:
            dotted = func_or_var[0].split('.')
            module_path = func_or_var[0][:-len(dotted[-1]) - 1]
            module = importlib.import_module(module_path)
            obj = module.__dict__[dotted[-1]]()
            # getattr gives the same dynamic dispatch as the previous
            # eval("obj.<name>(*args)") without executing built strings.
            return getattr(obj, func_or_var[1])(*args)
        except Exception:
            # Resolution failed -- fall through and treat the argument
            # like any other value (previously a bare `except: pass`).
            pass
    if hasattr(func_or_var, '__call__'):
        return func_or_var(*args)
    else:
        return func_or_var
#===============================================================================
# StdCANBus
#===============================================================================
def calc_prop_delay(ecu_distance):
    """Signal propagation delay over *ecu_distance* metres of bus wire.

    Uses a propagation speed of 1e8 m/s; the factor 1.25 accounts for the
    frame being extended by bit stuffing.
    """
    propagation_speed = 1 * 100000000
    return 1.25 * (ecu_distance / propagation_speed)
def calc_sending_time(msg_length_in_bit, effective_datarate):
    """Time to transmit *msg_length_in_bit* bits at *effective_datarate*.

    The 1.25 factor models the bit-stuffing overhead of a frame; the time
    covers send and receive together (deliberately x1, not x2).
    """
    raw_transmission_time = msg_length_in_bit / effective_datarate
    return 1.25 * raw_transmission_time
#===============================================================================
# StdDatalinkLayer
#===============================================================================
def calc_collis_backoff(bittime):
    """Back-off delay after a bus collision: wait three bit times, then
    continue."""
    return 3 * bittime
#===============================================================================
# SegmentTransportLayer
#===============================================================================
def segtl_send_time(msg_length, len_datafield):
    """Time to segment an outgoing message of ``msg_length`` bytes when at
    most ``len_datafield`` bytes fit into a single frame.

    Segmentation is modeled as instantaneous in this timing model."""
    return 0
def segtl_receive_time(msg_length, len_datafield):
    """Time to reassemble a received message of ``msg_length`` bytes from
    segments of at most ``len_datafield`` bytes each (i.e. to stick all
    segments back together).

    Reassembly is modeled as instantaneous in this timing model."""
    return 0
#===============================================================================
# FakeSegmentTransportLayer
#===============================================================================
def fake_segtl_send_time(msg_length, len_datafield):
    """Segmentation time for the fake transport layer: a message of
    ``msg_length`` bytes is split into frames of at most ``len_datafield``
    bytes.

    Modeled as instantaneous (always 0)."""
    return 0
def fake_segtl_receive_time(msg_length, len_datafield):
    """Reassembly time for the fake transport layer: a ``msg_length``-byte
    message received as ``len_datafield``-byte segments is stuck back
    together.

    Modeled as instantaneous (always 0)."""
    return 0
| 34.488636 | 115 | 0.504778 |
aceae7f8b11f63894cdb91e26130e4bb6852a3ad | 1,063 | bzl | Python | src/main/starlark/builtins_bzl/bazel/exports.bzl | fmeum/bazel | 20269a6b298c0db7dc155ccb1b3e76ddfe717e00 | [
"Apache-2.0"
] | 1 | 2022-03-22T11:55:06.000Z | 2022-03-22T11:55:06.000Z | src/main/starlark/builtins_bzl/bazel/exports.bzl | fmeum/bazel | 20269a6b298c0db7dc155ccb1b3e76ddfe717e00 | [
"Apache-2.0"
] | null | null | null | src/main/starlark/builtins_bzl/bazel/exports.bzl | fmeum/bazel | 20269a6b298c0db7dc155ccb1b3e76ddfe717e00 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exported builtins symbols that are specific to OSS Bazel."""
load("@_builtins//:common/java/java_library.bzl", "java_library")
load("@_builtins//:common/java/java_plugin.bzl", "java_plugin")
load("@_builtins//:common/java/proto/java_proto_library.bzl", "java_proto_library")
exported_toplevels = {}
exported_rules = {
"java_library": java_library,
"java_plugin": java_plugin,
"-java_proto_library": java_proto_library,
}
exported_to_java = {}
| 37.964286 | 83 | 0.75635 |
aceae859a462f1b94024a8a43398607d7bc2394e | 3,939 | py | Python | python/tests/test_tasks.py | eventbrite/invoke-release | d2ba31ceea211575f56ca8981011c886c48fac49 | [
"Apache-2.0"
] | 21 | 2018-05-02T13:52:25.000Z | 2021-06-18T19:45:57.000Z | python/tests/test_tasks.py | eventbrite/invoke-release | d2ba31ceea211575f56ca8981011c886c48fac49 | [
"Apache-2.0"
] | 17 | 2019-01-02T22:17:39.000Z | 2021-10-02T17:33:41.000Z | python/tests/test_tasks.py | eventbrite/invoke-release | d2ba31ceea211575f56ca8981011c886c48fac49 | [
"Apache-2.0"
] | 8 | 2018-11-06T20:02:24.000Z | 2021-09-08T11:24:14.000Z | from __future__ import absolute_import, unicode_literals
from unittest import TestCase
from invoke_release import tasks
class TestTasks(TestCase):
    """Unit tests for a few pure helpers in ``invoke_release.tasks``.

    Most of this project is exercised manually; these tests cover the
    side-effect-free helpers: case-sensitive file checks, changelog tag
    scanning, and semantic-version suggestion.
    """
    def test_case_sensitive_regular_file_exists(self):
        # The check must be case-sensitive even on case-insensitive filesystems.
        assert tasks._case_sensitive_regular_file_exists(__file__) is True
        assert tasks._case_sensitive_regular_file_exists(__file__.upper()) is False
        assert tasks._case_sensitive_regular_file_exists(__file__ + '.bogus') is False
    def test_get_version_element_to_bump_if_any_chooses_major_version_if_a_major_commit_is_present(self):
        # [MAJOR] wins over any [MINOR]/[PATCH] tags also present.
        changelog_message = [
            '- [PATCH] A patch-commit message.\n',
            '- [MINOR] A minor-commit message.\n',
            '- [MAJOR] A major-commit message.\n',
        ]
        version_element_to_bump = tasks._get_version_element_to_bump_if_any(changelog_message)
        assert version_element_to_bump == tasks.MAJOR_VERSION_PREFIX
    def test_get_version_element_to_bump_if_any_chooses_minor_if_only_a_minor_commit_is_present(self):
        changelog_message = [
            '- [MINOR] A minor-commit message.\n',
        ]
        version_element_to_bump = tasks._get_version_element_to_bump_if_any(changelog_message)
        assert version_element_to_bump == tasks.MINOR_VERSION_PREFIX
    def test_get_version_element_to_bump_if_any_returns_none_if_a_commit_doesnt_have_tag_and_there_is_no_major(self):
        # A tag that is not at the start of the line does not count as a tag.
        changelog_message = [
            '- [MINOR] A minor-commit message.\n',
            'A commit message [PATCH] with a tag in between.\n',
        ]
        version_element_to_bump = tasks._get_version_element_to_bump_if_any(changelog_message)
        assert version_element_to_bump is None
    def test_get_version_element_to_bump_if_any_returns_major_if_commit_does_not_have_tag_but_there_is_a_major(self):
        # An untagged commit is tolerated as long as a [MAJOR] commit exists.
        changelog_message = [
            'A commit message with no tag.\n',
            '- [MAJOR] A minor-commit message.\n',
        ]
        version_element_to_bump = tasks._get_version_element_to_bump_if_any(changelog_message)
        assert version_element_to_bump == tasks.MAJOR_VERSION_PREFIX
    def test_suggest_version_suggests_a_patch_bump_for_patch_tag(self):
        current_version = '1.2.3'
        suggested_version = tasks._suggest_version(current_version, tasks.PATCH_VERSION_PREFIX)
        assert suggested_version == '1.2.4'
    def test_suggest_version_suggests_a_minor_bump_successfully_if_metadata_is_present_for_minor_tag(self):
        # Build metadata (+...) must be stripped from the suggestion.
        current_version = '1.2.3+meta.data'
        suggested_version = tasks._suggest_version(current_version, tasks.MINOR_VERSION_PREFIX)
        assert suggested_version == '1.3.0'
    def test_suggest_version_suggests_a_major_bump_if_metadata_and_prerelease_info_is_present_for_major_Tag(self):
        # Pre-release (-...) and metadata (+...) must both be stripped.
        current_version = '1.2.3-pre.release+meta.data'
        suggested_version = tasks._suggest_version(current_version, tasks.MAJOR_VERSION_PREFIX)
        assert suggested_version == '2.0.0'
    def test_suggest_version_suggests_minor_bump_for_major_version_zero_and_major_tag(self):
        # Pre-1.0, a "major" change only bumps the minor element.
        current_version = '0.50.1'
        suggested_version = tasks._suggest_version(current_version, tasks.MAJOR_VERSION_PREFIX)
        assert suggested_version == '0.51.0'
    def test_suggest_version_suggests_patch_bump_for_major_version_zero_and_patch_bump(self):
        current_version = '0.50.1'
        suggested_version = tasks._suggest_version(current_version, tasks.PATCH_VERSION_PREFIX)
        assert suggested_version == '0.50.2'
    def test_suggest_version_returns_none_if_no_version_to_bump_is_provided(self):
        current_version = '2.50.1'
        suggested_version = tasks._suggest_version(current_version, None)
        assert suggested_version is None
| 35.809091 | 117 | 0.746636 |
aceae9458b5c2df6e52ae0219c0320c5fd653769 | 5,513 | py | Python | license_identifier/scores_test.py | codeauroraforum/lid | 76cd5d44f5e8787d3974732ae4108a0503483cdc | [
"BSD-3-Clause"
] | 10 | 2017-10-04T17:56:18.000Z | 2020-06-17T18:47:56.000Z | license_identifier/scores_test.py | craigez/lid | 76cd5d44f5e8787d3974732ae4108a0503483cdc | [
"BSD-3-Clause"
] | 5 | 2019-03-06T05:08:53.000Z | 2019-06-11T20:47:12.000Z | license_identifier/scores_test.py | codeauroraforum/lid | 76cd5d44f5e8787d3974732ae4108a0503483cdc | [
"BSD-3-Clause"
] | 3 | 2020-06-23T22:21:46.000Z | 2021-02-20T01:34:39.000Z | # Copyright (c) 2017, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of The Linux Foundation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
from . import n_grams as ng
from . import prep
from . import scores
def test_ngram_similarity():
    """NgramSimilarity scores against a tiny four-token universe.

    Expected values are exact fractions derived from the weighted n-gram
    overlap formula (3-, 2- and 1-gram matches over a fixed denominator).
    """
    universe_ng = ng.NGrams("a b c d")
    scorer = scores.NgramSimilarity(universe_n_grams=universe_ng)
    # Identical text scores a perfect 1.0.
    assert scorer.score(mklic(["a b c d"]), mksrc(["a b c d"])) == 1.0
    assert scorer.score(mklic(["a b c d"]), mksrc(["a b c"])) == \
        (3/4. + 2/3. * 6.0 + 1/2. * 8.0) / 15.0
    assert scorer.score(mklic(["a b c d"]), mksrc(["a"])) == 1/60.
    # Tokens outside the universe ("x", "y") contribute nothing.
    assert scorer.score(mklic(["a b c d"]), mksrc(["a x y"])) == 1/60.
    assert scorer.score(mklic(["a b c d"]), mksrc(["a b c x y"])) == \
        (3/4. + 2/3. * 6.0 + 1/2. * 8.0) / 15.0
def test_edit_weighted_similarity():
    """EditWeightedSimilarity scoring with asymmetric penalties.

    Source-only tokens cost 2.0, license-only tokens cost 3.0, and
    punctuation tokens are weighted at 0.5 of a word token; the expected
    fractions below follow from those weights.
    """
    scorer = scores.EditWeightedSimilarity(penalty_only_source=2.0,
                                           penalty_only_license=3.0,
                                           punct_weight=0.5)
    assert scorer.penalty_only_source == 2.0
    assert scorer.penalty_only_license == 3.0
    # Degenerate and trivial inputs.
    assert scorer.score(mklic([""]), mksrc([""])) == 0.0
    assert scorer.score(mklic(["a"]), mksrc(["a"])) == 1.0
    assert scorer.score(mklic(["."]), mksrc(["."])) == 1.0
    assert scorer.score(mklic(["a"]), mksrc([""])) == 0.0
    assert scorer.score(mklic([""]), mksrc(["a"])) == 0.0
    # Mixed matches/mismatches, including punctuation-weighted tokens.
    assert scorer.score(mklic(["a"]), mksrc(["a b"])) == 1/3.
    assert scorer.score(mklic(["a c"]), mksrc(["a b"])) == 1/6.
    assert scorer.score(mklic([". c"]), mksrc([". b"])) == 1/11.
    assert scorer.score(mklic(["a ."]), mksrc(["a b"])) == 2/9.
    assert scorer.score(mklic(["a c"]), mksrc(["a ."])) == 1/5.
    assert scorer.score(mklic(["a c"]), mksrc(["a"])) == 1/4.
    # With extras=False the rationale omits the diff breakdown.
    result = scorer.score_and_rationale(mklic(["a c"]), mksrc(["a"]),
                                        extras=False)
    assert "score" in result.keys()
    assert "diff_chunks" not in result.keys()
def test_edit_weighted_similarity_rationale():
    """With extras=True the rationale exposes the full token-level diff.

    The license/source pair below produces six diff chunks covering all
    opcodes (equal / insert / replace / delete), with the whitespace that
    was stripped around each token preserved in ignored_src / ignored_lic.
    """
    scorer = scores.EditWeightedSimilarity(
        penalty_only_source=2.0,
        penalty_only_license=3.0,
        punct_weight=0.5)
    result = scorer.score_and_rationale(
        lic=mklic(["a x\t", "b c d e f g"]),
        src=mksrc([" a ", "b c m", "n g x x"]),
        extras=True)
    assert set(["score", "diff_chunks"]).issubset(set(result.keys()))
    # Leading whitespace before the first token is reported separately.
    assert result["init_ignored_src"] == " "
    assert result["init_ignored_lic"] == ""
    chunks = result["diff_chunks"]
    assert len(chunks) == 6
    # Chunk 0: "a" matches on both sides.
    assert chunks[0]["op"] == "equal"
    assert chunks[0]["tokens_src"] == ["a"]
    assert chunks[0]["tokens_lic"] == ["a"]
    assert chunks[0]["ignored_src"] == [" \n"]
    assert chunks[0]["ignored_lic"] == [" "]
    # Chunk 1: "x" only appears in the license.
    assert chunks[1]["op"] == "insert"
    assert chunks[1]["tokens_src"] == []
    assert chunks[1]["tokens_lic"] == ["x"]
    assert chunks[1]["ignored_src"] == []
    assert chunks[1]["ignored_lic"] == ["\t\n"]
    # Chunk 2: "b c" matches on both sides.
    assert chunks[2]["op"] == "equal"
    assert chunks[2]["tokens_src"] == ["b", "c"]
    assert chunks[2]["tokens_lic"] == ["b", "c"]
    assert chunks[2]["ignored_src"] == [" ", " "]
    assert chunks[2]["ignored_lic"] == [" ", " "]
    # Chunk 3: source "m n" replaces license "d e f".
    assert chunks[3]["op"] == "replace"
    assert chunks[3]["tokens_src"] == ["m", "n"]
    assert chunks[3]["tokens_lic"] == ["d", "e", "f"]
    assert chunks[3]["ignored_src"] == ["\n", " "]
    assert chunks[3]["ignored_lic"] == [" ", " ", " "]
    # Chunk 4: "g" matches on both sides.
    assert chunks[4]["op"] == "equal"
    assert chunks[4]["tokens_src"] == ["g"]
    assert chunks[4]["tokens_lic"] == ["g"]
    assert chunks[4]["ignored_src"] == [" "]
    assert chunks[4]["ignored_lic"] == ["\n"]
    # Chunk 5: trailing "x x" only appears in the source.
    assert chunks[5]["op"] == "delete"
    assert chunks[5]["tokens_src"] == ["x", "x"]
    assert chunks[5]["tokens_lic"] == []
    assert chunks[5]["ignored_src"] == [" ", "\n"]
    assert chunks[5]["ignored_lic"] == []
def mklic(lines):
    """Build a License fixture from a list of text lines."""
    return prep.License.from_lines(lines)
def mksrc(lines):
    """Build a Source fixture from a list of text lines."""
    return prep.Source.from_lines(lines)
| 40.837037 | 72 | 0.611464 |
aceaea52652a0fc813b0947aec76b77559e0443b | 3,301 | py | Python | pipeline/transform.py | choldener/Vector-Engineering-Project | be494dcbd7ab7c6685eabf6047b50ab64f34d6f2 | [
"MIT"
] | null | null | null | pipeline/transform.py | choldener/Vector-Engineering-Project | be494dcbd7ab7c6685eabf6047b50ab64f34d6f2 | [
"MIT"
] | null | null | null | pipeline/transform.py | choldener/Vector-Engineering-Project | be494dcbd7ab7c6685eabf6047b50ab64f34d6f2 | [
"MIT"
] | null | null | null | import pandas as pd
def transform_cases(cases):
"""Transforms cases dataframe, adds iso3, lat, long, population, and continent"""
country = pd.read_csv('https://storage.googleapis.com/ve-public/country_iso.csv')
continent = pd.read_csv('https://pkgstore.datahub.io/JohnSnowLabs/country-and-continent-codes-list/country-and-continent-codes-list-csv_csv/data/b7876b7f496677669644f3d1069d3121/country-and-continent-codes-list-csv_csv.csv')
cases_group = cases.groupby(['Country'], as_index=False).sum()
for c, i in enumerate(cases_group['Country']): #Get data from country csv
try: cases_group.loc[c, 'iso3'] = country.loc[country['country'] == i]['iso3'].item()
except: cases_group.loc[c,'iso3'] = 'NaN'
try: cases_group.loc[c, 'Lat'] = country.loc[country['country'] == i]['Lat'].item()
except: cases_group.loc[c,'Lat'] = 'NaN'
try: cases_group.loc[c, 'Long_'] = country.loc[country['country'] == i]['Long_'].item()
except: cases_group.loc[c,'Long_'] = 'NaN'
try: cases_group.loc[c, 'Population'] = country.loc[country['country'] == i]['Population'].item()
except: cases_group.loc[c,'Population'] = 'NaN'
for c, i in enumerate(cases_group['iso3']): #Get data from continent csv
try: cases_group.loc[c, 'continent'] = continent.loc[continent['Three_Letter_Country_Code'] == i]['Continent_Name'].item()
except: cases_group.loc[c,'continent'] = 'NaN'
cases_transformed = cases_group.melt(id_vars = ['Country','iso3','Lat','Long_','Population','continent' ], var_name = 'Date', value_name = 'Cases')
cases_transformed['Date'] = pd.to_datetime(cases_transformed['Date'], format = '%m/%d/%y')
return cases_transformed
def transform_sequence(sequence):
    """Parse the ``date`` column into datetimes and normalise column names.

    Note: mutates the ``date`` column of the input frame in place before
    returning the renamed copy.
    """
    sequence['date'] = pd.to_datetime(sequence['date'], format='%Y/%m/%d')
    renamed_columns = {'date': 'Date', 'country': 'Country'}
    return sequence.rename(columns=renamed_columns)
def transform_merge(sequence_trasnform, cases_transformed):
    """Left-join per-day sequencing counts onto per-day case counts.

    Adds three derived columns:
      * ``total_sequence``       -- running per-country sum of new_sequences
      * ``total_sequence_scale`` -- total_sequence rescaled over 219 steps
      * ``case_sequence_ratio``  -- sequenced fraction of cumulative cases
    """
    full_data = cases_transformed.merge(
        sequence_trasnform, on=['Country', 'Date'], how='left')
    # Days with no reported data count as zero rather than NaN.
    full_data['new_sequences'].fillna(0, inplace=True)
    full_data['Cases'].fillna(0, inplace=True)
    full_data['total_sequence'] = (
        full_data.groupby('Country')['new_sequences'].cumsum())
    # Rescale the running total so its range spans 219 steps.
    seq_min = full_data["total_sequence"].min()
    step = (full_data["total_sequence"].max() - seq_min) / 219
    full_data["total_sequence_scale"] = (full_data["total_sequence"] - seq_min) / step
    full_data['case_sequence_ratio'] = full_data['total_sequence'] / full_data['Cases']
    return full_data
| 66.02 | 228 | 0.685247 |
aceaeac9be6fd2606d146e8716c63818b9d79161 | 1,781 | py | Python | supervisoragent/eventmonitor.py | silverfernsys/supervisoragent | dda727d161866e190ce36de22a23381e65534ff8 | [
"BSD-4-Clause"
] | null | null | null | supervisoragent/eventmonitor.py | silverfernsys/supervisoragent | dda727d161866e190ce36de22a23381e65534ff8 | [
"BSD-4-Clause"
] | null | null | null | supervisoragent/eventmonitor.py | silverfernsys/supervisoragent | dda727d161866e190ce36de22a23381e65534ff8 | [
"BSD-4-Clause"
] | null | null | null | import json
import os
import socket
from logging import getLogger
from threading import Thread
class EventMonitor(object):
    """Listens on a Unix domain socket for JSON-encoded process events and
    forwards them to a process monitor.

    Wire protocol (per message): one header line of whitespace-separated
    ``KEY:VALUE`` pairs containing at least ``LENGTH``, followed by exactly
    LENGTH bytes of JSON whose keys become keyword arguments to
    ``process_monitor.update``.
    """

    def __init__(self, process_monitor):
        self.process_monitor = process_monitor

    def start(self):
        """Run the socket server loop in a daemonized background thread."""
        thread = Thread(target=self.monitor, args=())
        thread.daemon = True
        thread.start()

    def monitor(self):
        """Accept connections forever, parsing and dispatching messages.

        Each client connection is read until parsing fails (typically when
        the client disconnects and ``readline`` returns ''), after which the
        connection is closed and the server waits for the next client.
        """
        logger = getLogger('Event Server')
        # Create a UDS socket
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        server_address = '/run/supervisoragent.sock'
        # Remove a stale socket file left over from a previous run; only
        # re-raise if it still exists (i.e. unlink failed for another reason).
        try:
            os.unlink(server_address)
        except OSError:
            if os.path.exists(server_address):
                raise
        logger.info('Starting server at %s' % server_address)
        sock.bind(server_address)
        # Listen for incoming connections
        sock.listen(1)
        while True:
            logger.info('Waiting for a connection...')
            connection, client_address = sock.accept()
            logger.info('Connected to client.')
            try:
                handle = connection.makefile()
                while True:
                    line = handle.readline()
                    headers = dict([x.split(':') for x in line.split()])
                    data = handle.read(int(headers['LENGTH']))
                    json_data = json.loads(data)
                    self.process_monitor.update(**json_data)
            except Exception:
                # Client disconnects land here (empty header -> KeyError).
                # Log through the logger instead of the previous bare print().
                logger.exception('Connection handler terminated')
            finally:
                # Guarantee the connection is released before re-accepting.
                logger.info('Closing connection...')
                connection.close()
                logger.info('Connection closed.')
aceaeba792d859013acc0a2319e484a16b2cbaaf | 92 | py | Python | config.py | daktari01/my_flask_project | 4f9ab540bff483aa3471a3137ca1a5179e8e0f34 | [
"MIT"
] | null | null | null | config.py | daktari01/my_flask_project | 4f9ab540bff483aa3471a3137ca1a5179e8e0f34 | [
"MIT"
] | null | null | null | config.py | daktari01/my_flask_project | 4f9ab540bff483aa3471a3137ca1a5179e8e0f34 | [
"MIT"
] | null | null | null | # config.py
# Enable Flask's debugging feature. Should be False in production
DEBUG = True | 18.4 | 65 | 0.76087 |
aceaec3ffa90f4f287b5276fec7f303eddd0bcbc | 7,378 | py | Python | enemy.py | jeremycryan/ScoreSpace8 | bc3418d5e3e132a7b4a177b2ebce4fc156a24f20 | [
"MIT"
] | 1 | 2020-05-05T07:38:03.000Z | 2020-05-05T07:38:03.000Z | enemy.py | jeremycryan/ScoreSpace8 | bc3418d5e3e132a7b4a177b2ebce4fc156a24f20 | [
"MIT"
] | null | null | null | enemy.py | jeremycryan/ScoreSpace8 | bc3418d5e3e132a7b4a177b2ebce4fc156a24f20 | [
"MIT"
] | null | null | null | import constants as c
import pygame
import math
from particle import Particle, Chunk, Fadeout
import os
import random
import time
lantern_surf = pygame.image.load(os.path.join(c.ASSETS_PATH, "lantern.png"))
lantern_touched_surf = pygame.image.load(os.path.join(c.ASSETS_PATH, "lantern_touched.png"))
big_lantern_surf = pygame.image.load(os.path.join(c.ASSETS_PATH, "big_lantern.png"))
big_lantern_touched_surf = pygame.image.load(os.path.join(c.ASSETS_PATH, "big_lantern_touched.png"))
perfect_surf = pygame.image.load(os.path.join(c.ASSETS_PATH, "perfect.png"))
perfect_surf_large = pygame.transform.scale(perfect_surf, (perfect_surf.get_width()*2, perfect_surf.get_height()*2))
good_surf = pygame.image.load(os.path.join(c.ASSETS_PATH, "good.png"))
okay_surf = pygame.image.load(os.path.join(c.ASSETS_PATH, "okay.png"))
nope_surf = pygame.image.load(os.path.join(c.ASSETS_PATH, "nope.png"))
class Enemy:
    """A floating lantern the player can slice.

    Owns its sprite (rotated by a random angle at spawn), a pulsing glow
    surface, and the particle-spawning logic that runs when it is cut.
    Positions are in game coordinates; ``game.y_offset`` scrolls the view.
    """
    def __init__(self, game, radius = 30, x=c.WINDOW_WIDTH//2, y=c.WINDOW_HEIGHT//2):
        self.game = game
        self.radius = radius
        self.x = x
        self.y = y
        # Random tilt between 15 and 75 degrees.
        self.angle = random.random() * 60 + 15
        self.surf = lantern_surf
        self.draw_surf = pygame.transform.rotate(self.surf, self.angle)
        self.touched_surf = lantern_touched_surf
        self.touched_surf = pygame.transform.rotate(self.touched_surf, self.angle)
        # self.draw_surf.set_colorkey(c.BLACK)
        # self.touched_surf.set_colorkey(c.BLACK)
        self.touched = False
        # Multiplier applied to the player's launch speed off this lantern.
        self.launch_factor=1.0
        self.glow = self.generate_glow()
        # Random phase offset so lanterns don't all pulse in sync.
        self.age = random.random()
    def generate_glow(self, radius=1.7):
        """Build the translucent halo surface; *radius* is a scale factor
        relative to self.radius, not a pixel size."""
        glow_radius = int(radius * self.radius)
        self.glow = pygame.Surface((glow_radius*2, glow_radius*2))
        pygame.draw.circle(self.glow, c.WHITE, (glow_radius, glow_radius), glow_radius)
        self.glow.set_alpha(20)
        self.glow.set_colorkey(c.BLACK)
        return self.glow
    def update(self, dt, events):
        """Per-frame update: despawn when scrolled off, pulse the glow."""
        # Remove once the lantern is safely above the visible region.
        if self.y < self.game.y_offset - self.radius*3:
            self.remove()
        self.age += dt
        # Sinusoidal pulse of the glow size around the base 1.7 factor.
        radius = 1.7 + 0.07*math.sin(self.age*25)
        # Only rebuild the glow surface while near the visible region.
        if self.y < self.game.y_offset + 1.5*c.WINDOW_HEIGHT:
            self.glow = self.generate_glow(radius)
    def draw(self, surface):
        """Blit glow plus the normal or touched sprite, centered on (x, y)."""
        # Skip drawing when far below the visible region.
        if self.y > self.game.y_offset + c.WINDOW_HEIGHT*2:
            return
        x, y = self.game.game_position_to_screen_position((self.x, self.y))
        surface.blit(self.glow, (int(x - self.glow.get_width()//2), int(y - self.glow.get_height()//2)))
        if not self.touched:
            surface.blit(self.draw_surf,
                (int(x - self.draw_surf.get_width()/2), int(y - self.draw_surf.get_height()/2)))
        else:
            surface.blit(self.touched_surf,
                (int(x - self.draw_surf.get_width()/2), int(y - self.draw_surf.get_height()/2)))
    def touch(self):
        """Mark the lantern as touched (switches to the touched sprite)."""
        self.touched = True
    def remove(self):
        """Deregister this lantern from the game's enemy list."""
        self.game.enemies.remove(self)
    def destroy(self, cut_prop=0.5):
        """Slice the lantern; *cut_prop* is where the cut landed (0..1,
        0.5 = dead center).

        Spawns the two sliced halves as particles along the player's cut
        angle, plus debris chunks and a rating popup whose tier depends on
        how close the cut was to center.
        """
        self.remove()
        angle = self.game.player.get_angle()
        # Pixel height of the top slice; offsets place each half along the
        # cut line, rotated by the player's angle.
        cutoff = int(cut_prop*self.radius*2)
        top_offset = self.radius - cutoff//2
        bottom_offset = -cutoff//2
        angle_rad = -angle/180 * math.pi
        top_offset = (top_offset * math.sin(angle_rad), top_offset * math.cos(angle_rad))
        bottom_offset = (bottom_offset * math.sin(angle_rad), bottom_offset * math.cos(angle_rad))
        # Top half of the sprite flies up and away.
        particle_surf = pygame.Surface((self.radius*2, cutoff))
        particle_surf.blit(self.surf, (0, 0))
        top_half = Particle(self.game,
                            particle_surf,
                            (self.x + top_offset[0], self.y + top_offset[1]),
                            rotation=120,
                            velocity=(-30, 500),
                            angle=angle)
        self.game.particles.append(top_half)
        # Bottom half (remainder of the sprite) tumbles the other way.
        particle_surf = pygame.Surface((self.radius*2, self.radius*2 - cutoff))
        particle_surf.blit(self.surf, (0, -cutoff))
        bottom_half = Particle(self.game,
                            particle_surf,
                            (self.x + bottom_offset[0], self.y + bottom_offset[1]),
                            rotation=-40,
                            velocity=(60, 150),
                            angle=angle)
        self.game.particles.append(bottom_half)
        self.game.particles.append(Fadeout(self.game, self.glow, (self.x, self.y)))
        for i in range(30):
            self.game.particles.append(Chunk(self.game, (self.x, self.y)))
        # Rating tiers by distance from a perfectly centered cut.
        if abs(cut_prop - 0.5) < 0.02:
            # Perfect: flare, extra big popup, and the clean tear sound.
            self.glow.set_alpha(100)
            surf = perfect_surf.copy().convert()
            surf2 = perfect_surf_large.copy().convert()
            surf2.set_colorkey((255, 0, 255))
            surf2.set_alpha(90)
            self.game.text_particles.append(Fadeout(self.game, surf2, (self.x, self.y), rate=200))
            self.game.flare_up(60)
            self.game.tear_sound()
        elif abs(cut_prop - 0.5) < 0.25:
            surf = good_surf.copy().convert()
            self.game.bad_tear_sound()
        else:
            surf = okay_surf.copy().convert()
            self.game.bad_tear_sound()
        # Common popup for whichever rating was chosen above.
        surf.set_colorkey((255, 0, 255))
        surf.set_alpha(255)
        self.game.text_particles.append(Fadeout(self.game, surf, (self.x, self.y), rate=400))
class BigEnemy(Enemy):
    """A larger lantern variant with a stronger launch boost."""

    def __init__(self, game, x=c.WINDOW_WIDTH//2, y=c.WINDOW_HEIGHT//2):
        self.game = game
        self.radius = 40
        self.x = x
        self.y = y
        # Tilt anywhere between -30 and +30 degrees.
        self.angle = random.random() * 60 - 30
        self.surf = big_lantern_surf
        self.draw_surf = pygame.transform.rotate(self.surf, self.angle)
        self.touched_surf = pygame.transform.rotate(big_lantern_touched_surf, self.angle)
        self.touched = False
        self.launch_factor = 1.3
        self.age = 0
        self.glow = self.generate_glow()
class TutorialEnemy(BigEnemy):
    """Tutorial lantern: only a near-centered cut actually destroys it."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def draw(self, surface):
        super().draw(surface)

    def destroy(self, cut_prop=0.5):
        # A cut within 2% of center counts as perfect and destroys normally.
        if abs(cut_prop - 0.5) < 0.02:
            super().destroy(cut_prop=cut_prop)
            return
        # Otherwise refuse the cut with audio/visual feedback instead.
        self.game.nope.play()
        self.game.shake_effect(15)
        feedback = nope_surf.copy().convert()
        feedback.set_colorkey((255, 0, 255))
        feedback.set_alpha(255)
        self.game.text_particles.append(
            Fadeout(self.game, feedback, (self.x, self.y), rate=400))
        self.since_hit = 0
class SmallEnemy(Enemy):
    """A smaller lantern variant; loads its own sprites at construction."""

    def __init__(self, game, x=c.WINDOW_WIDTH//2, y=c.WINDOW_HEIGHT//2):
        self.game = game
        self.radius = 35
        self.x = x
        self.y = y
        # Tilt between 15 and 75 degrees.
        self.angle = random.random() * 60 + 15
        base = pygame.image.load(os.path.join(c.ASSETS_PATH, "small_lantern.png"))
        touched = pygame.image.load(os.path.join(c.ASSETS_PATH, "small_lantern_touched.png"))
        self.surf = base
        self.draw_surf = pygame.transform.rotate(base, self.angle)
        self.touched_surf = pygame.transform.rotate(touched, self.angle)
        self.touched = False
        self.launch_factor = 1.15
        self.age = 0
        self.glow = self.generate_glow()
aceaec6c1b8e5c8152799c4d307cc3deea19e47f | 1,989 | py | Python | lium_diarization_editor/diarization.py | maxhollmann/lium-diarization-editor | c9fe124e684ec0967bfd1f071b07a6454c1a9b83 | [
"MIT"
] | 3 | 2018-12-24T01:57:34.000Z | 2020-09-06T19:11:15.000Z | lium_diarization_editor/diarization.py | maxhollmann/lium-diarization-editor | c9fe124e684ec0967bfd1f071b07a6454c1a9b83 | [
"MIT"
] | 3 | 2019-01-30T11:04:39.000Z | 2020-02-04T14:33:24.000Z | lium_diarization_editor/diarization.py | maxhollmann/lium-diarization-editor | c9fe124e684ec0967bfd1f071b07a6454c1a9b83 | [
"MIT"
] | 1 | 2019-06-18T10:15:58.000Z | 2019-06-18T10:15:58.000Z | import os
import warnings
import numpy as np
import pandas as pd
class Diarization:
    """A speaker diarization: a table of speech segments.

    ``segments`` is a DataFrame with at least 'start', 'length' and
    'speaker' columns, where start/length are expressed in frames and
    ``frame_size`` converts frames to time units via frame_to_time().
    """

    def __init__(self, segments, frame_size = 10):
        self.segs = segments
        self.frame_size = frame_size
        # Calculate some useful things
        self.num_segments = self.segs.shape[0]
        last = self.segs.iloc[self.num_segments - 1]
        self.estimated_total_frames = last.start + last.length
        self.speakers = np.unique(self.segs.speaker)
        self.num_speakers = self.speakers.shape[0]

    def iter_segments(self):
        """Iterate over (index, segment-row) pairs."""
        return self.segs.iterrows()

    def segment_at(self, i):
        """Return the i-th segment row."""
        return self.segs.iloc[i]

    def change_speaker(self, speaker, new_name):
        """Return a new Diarization with every `speaker` segment relabelled."""
        new_segs = self.segs.copy()
        new_segs.loc[self.segs.speaker == speaker, "speaker"] = new_name
        return self.update_segs(new_segs)

    def merge_equal_neighbors(self):
        """Merge consecutive segments that share a speaker.

        Returns a new Diarization; the merged segment keeps the first
        segment's start and spans to the end of the last merged one.
        """
        # Look up the column position instead of hard-coding it (formerly
        # IDX_LENGTH = 3), so the frame's column order no longer matters.
        idx_length = self.segs.columns.get_loc('length')
        merged = self.segs.copy()
        # NOTE(review): current_start assumes the first segment starts at
        # frame 0 -- confirm that diarizations always begin at 0.
        current_start = 0
        j = 0
        seg = self.segs.iloc[0]
        for i in range(1, self.num_segments):
            seg = self.segs.iloc[i]
            last = self.segs.iloc[i - 1]
            if seg.speaker == last.speaker:
                # Extend the open merged segment to the end of this one.
                merged.iat[j, idx_length] = seg.start + seg.length - current_start
            else:
                # Start a new merged segment.
                j += 1
                merged.iloc[j] = seg
                current_start = seg.start
        merged = merged.iloc[:(j + 1)]
        merged.sort_values('start', inplace = True)
        return self.update_segs(merged)

    def update_segs(self, new_segs):
        """Build a new Diarization around `new_segs`, keeping frame_size."""
        return Diarization(segments = new_segs,
                           frame_size = self.frame_size)

    def frame_to_time(self, frame):
        """Convert a frame index to time units."""
        return frame * self.frame_size

    def save(self, filename):
        """Pickle the segment table to `filename`."""
        self.segs.to_pickle(filename)

    @classmethod
    def load(cls, filename):
        """Load a Diarization whose segments were saved with save().

        Note: frame_size is not persisted; the default is used.
        """
        segs = pd.read_pickle(filename)
        return Diarization(segments = segs)
| 25.831169 | 82 | 0.598291 |
aceaecdc5a9e16ed1ce54b005167109fce001053 | 1,069 | py | Python | src/bioregistry/external/cli.py | egonw/bioregistry | 5070e0310a4e9f695d9089b302e0a2421c155d02 | [
"MIT"
] | 2 | 2020-12-23T15:23:51.000Z | 2021-02-13T17:50:34.000Z | src/bioregistry/external/cli.py | egonw/bioregistry | 5070e0310a4e9f695d9089b302e0a2421c155d02 | [
"MIT"
] | 19 | 2020-12-27T14:12:38.000Z | 2021-03-29T20:20:42.000Z | src/bioregistry/external/cli.py | egonw/bioregistry | 5070e0310a4e9f695d9089b302e0a2421c155d02 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Command line interface for the bioregistry."""
import click
from .bioportal import get_bioportal
from .go import get_go
from .miriam import get_miriam
from .n2t import get_n2t
from .ncbi import get_ncbi
from .obofoundry import get_obofoundry
from .ols import get_ols
from .wikidata import get_wikidata
from ..utils import secho
__all__ = [
"download",
]
@click.command()
def download():
    """Download/update the external entries in the Bioregistry."""
    # (status message, downloader) pairs, run in the original order.
    steps = [
        ("Downloading MIRIAM", get_miriam),
        ("Download N2T", get_n2t),
        ("Downloading NCBI", get_ncbi),
        ("Downloading OBO Foundry", get_obofoundry),
        ("Downloading OLS", get_ols),
        ("Download Wikidata", get_wikidata),
        ("Download GO", get_go),
        ("Download BioPortal", get_bioportal),
    ]
    for label, getter in steps:
        secho(label)
        getter(force_download=True)


if __name__ == "__main__":
    download()
| 20.557692 | 66 | 0.724041 |
aceaeceed6fec9102d0025cce17778e8684cee1f | 10,787 | py | Python | metrics/linear_separability.py | adamFinastra/stylegan2-ada | d46f89060fd1eab40a9cb9f293c58db63360d4e0 | [
"BSD-Source-Code"
] | 3 | 2021-03-08T12:05:19.000Z | 2022-02-10T11:04:38.000Z | metrics/linear_separability.py | adamFinastra/stylegan2-ada | d46f89060fd1eab40a9cb9f293c58db63360d4e0 | [
"BSD-Source-Code"
] | null | null | null | metrics/linear_separability.py | adamFinastra/stylegan2-ada | d46f89060fd1eab40a9cb9f293c58db63360d4e0 | [
"BSD-Source-Code"
] | 2 | 2021-07-01T19:29:32.000Z | 2021-07-11T21:35:44.000Z | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Linear Separability (LS) from the paper
"A Style-Based Generator Architecture for Generative Adversarial Networks"."""
import pickle
from collections import defaultdict
import numpy as np
import sklearn.svm
import tensorflow.compat.v1 as tensorflow
tf = tensorflow
tf.disable_v2_behavior()
import dnnlib
import dnnlib.tflib as tflib
from metrics import metric_base
#----------------------------------------------------------------------------
classifier_urls = [
'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/metrics/celebahq-classifier-00-male.pkl',
'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/metrics/celebahq-classifier-01-smiling.pkl',
'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/metrics/celebahq-classifier-02-attractive.pkl',
'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/metrics/celebahq-classifier-03-wavy-hair.pkl',
'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/metrics/celebahq-classifier-04-young.pkl',
'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/metrics/celebahq-classifier-05-5-o-clock-shadow.pkl',
'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/metrics/celebahq-classifier-06-arched-eyebrows.pkl',
'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/metrics/celebahq-classifier-07-bags-under-eyes.pkl',
'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/metrics/celebahq-classifier-08-bald.pkl',
'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/metrics/celebahq-classifier-09-bangs.pkl',
'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/metrics/celebahq-classifier-10-big-lips.pkl',
'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/metrics/celebahq-classifier-11-big-nose.pkl',
'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/metrics/celebahq-classifier-12-black-hair.pkl',
'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/metrics/celebahq-classifier-13-blond-hair.pkl',
'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/metrics/celebahq-classifier-14-blurry.pkl',
'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/metrics/celebahq-classifier-15-brown-hair.pkl',
'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/metrics/celebahq-classifier-16-bushy-eyebrows.pkl',
'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/metrics/celebahq-classifier-17-chubby.pkl',
'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/metrics/celebahq-classifier-18-double-chin.pkl',
'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/metrics/celebahq-classifier-19-eyeglasses.pkl',
'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/metrics/celebahq-classifier-20-goatee.pkl',
'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/metrics/celebahq-classifier-21-gray-hair.pkl',
'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/metrics/celebahq-classifier-22-heavy-makeup.pkl',
'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/metrics/celebahq-classifier-23-high-cheekbones.pkl',
'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/metrics/celebahq-classifier-24-mouth-slightly-open.pkl',
'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/metrics/celebahq-classifier-25-mustache.pkl',
'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/metrics/celebahq-classifier-26-narrow-eyes.pkl',
'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/metrics/celebahq-classifier-27-no-beard.pkl',
'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/metrics/celebahq-classifier-28-oval-face.pkl',
'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/metrics/celebahq-classifier-29-pale-skin.pkl',
'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/metrics/celebahq-classifier-30-pointy-nose.pkl',
'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/metrics/celebahq-classifier-31-receding-hairline.pkl',
'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/metrics/celebahq-classifier-32-rosy-cheeks.pkl',
'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/metrics/celebahq-classifier-33-sideburns.pkl',
'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/metrics/celebahq-classifier-34-straight-hair.pkl',
'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/metrics/celebahq-classifier-35-wearing-earrings.pkl',
'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/metrics/celebahq-classifier-36-wearing-hat.pkl',
'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/metrics/celebahq-classifier-37-wearing-lipstick.pkl',
'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/metrics/celebahq-classifier-38-wearing-necklace.pkl',
'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/metrics/celebahq-classifier-39-wearing-necktie.pkl',
]
#----------------------------------------------------------------------------
def prob_normalize(p):
    """Normalize a 2D table of non-negative weights into a joint probability table.

    Args:
        p: Array-like of shape (num_x, num_y); converted to float32.

    Returns:
        A float32 ndarray of the same shape whose elements sum to 1.

    Raises:
        ValueError: If the input is not two-dimensional.
    """
    p = np.asarray(p).astype(np.float32)
    # Was a bare `assert`, which silently disappears under `python -O`.
    if p.ndim != 2:
        raise ValueError("prob_normalize expects a 2D array, got %d dimension(s)" % p.ndim)
    return p / np.sum(p)
def mutual_information(p):
    """Mutual information I(X;Y) in bits, where X is axis 0 and Y is axis 1."""
    p = prob_normalize(p)
    marginal_x = np.sum(p, axis=1)
    marginal_y = np.sum(p, axis=0)
    total = 0.0
    for x, p_x in enumerate(marginal_x):
        for y, p_y in enumerate(marginal_y):
            joint = p[x][y]
            # Zero-probability cells contribute nothing (0 * log 0 -> 0).
            if joint > 0.0:
                total += joint * np.log2(joint / (p_x * p_y))  # get bits as output
    return total
def entropy(p):
    """Shannon entropy H(P) in bits of a 2D joint probability table."""
    p = prob_normalize(p)
    total = 0.0
    for row in p:
        for cell in row:
            # Skip zero cells; by convention 0 * log 0 == 0.
            if cell > 0.0:
                total -= cell * np.log2(cell)
    return total
def conditional_entropy(p):
    """Conditional entropy H(Y|X) in bits, where X is axis 0 and Y is axis 1.

    I.e. how many additional bits are needed to pin down Y when X is known.
    """
    p = prob_normalize(p)
    marginal_y = np.sum(p, axis=0, keepdims=True)  # marginalize to calculate H(Y)
    # H(Y|X) = H(Y) - I(X;Y); clamp tiny negatives caused by FP inaccuracies.
    return max(0.0, entropy(marginal_y) - mutual_information(p))
#----------------------------------------------------------------------------
class LS(metric_base.MetricBase):
    """Linear Separability (LS) metric.

    Samples latents, labels the generated images with pretrained CelebA-HQ
    attribute classifiers, fits a linear SVM per attribute in both Z and W
    space, and reports 2**sum(H(attribute | SVM prediction)) per space.
    Lower scores mean the attributes are more linearly separable.
    """

    def __init__(self, num_samples, num_keep, attrib_indices, minibatch_per_gpu, **kwargs):
        """
        Args:
            num_samples:        Total number of latent samples to draw.
            num_keep:           Number of most confidently classified samples kept for the SVM fit.
            attrib_indices:     Indices into `classifier_urls` selecting the attributes to evaluate.
            minibatch_per_gpu:  Minibatch size per GPU.
        """
        assert num_keep <= num_samples
        super().__init__(**kwargs)
        self.num_samples = num_samples
        self.num_keep = num_keep
        self.attrib_indices = attrib_indices
        self.minibatch_per_gpu = minibatch_per_gpu

    def _evaluate(self, Gs, G_kwargs, num_gpus, **_kwargs): # pylint: disable=arguments-differ
        minibatch_size = num_gpus * self.minibatch_per_gpu

        # Construct TensorFlow graph for each GPU.
        result_expr = []
        for gpu_idx in range(num_gpus):
            with tf.device(f'/gpu:{gpu_idx}'):
                Gs_clone = Gs.clone()

                # Generate images.
                latents = tf.random_normal([self.minibatch_per_gpu] + Gs_clone.input_shape[1:])
                labels = self._get_random_labels_tf(self.minibatch_per_gpu)
                dlatents = Gs_clone.components.mapping.get_output_for(latents, labels, **G_kwargs)
                images = Gs_clone.get_output_for(latents, None, **G_kwargs)
                if images.shape[1] == 1: images = tf.tile(images, [1, 3, 1, 1])  # grayscale -> 3 channels

                # Downsample to 256x256. The attribute classifiers were built for 256x256.
                if images.shape[2] > 256:
                    factor = images.shape[2] // 256
                    images = tf.reshape(images, [-1, images.shape[1], images.shape[2] // factor, factor, images.shape[3] // factor, factor])
                    images = tf.reduce_mean(images, axis=[3, 5])  # average over factor x factor cells

                # Run classifier for each attribute.
                # dlatents[:, -1] takes the last broadcast copy of W per sample.
                result_dict = dict(latents=latents, dlatents=dlatents[:,-1])
                for attrib_idx in self.attrib_indices:
                    with dnnlib.util.open_url(classifier_urls[attrib_idx]) as f:
                        classifier = pickle.load(f)
                    logits = classifier.get_output_for(images, None)
                    # Two-way softmax over (logit, -logit) yields per-class probabilities.
                    predictions = tf.nn.softmax(tf.concat([logits, -logits], axis=1))
                    result_dict[attrib_idx] = predictions
                result_expr.append(result_dict)

        # Sampling loop: run the graph until num_samples latents are collected.
        results = []
        for begin in range(0, self.num_samples, minibatch_size):
            self._report_progress(begin, self.num_samples)
            results += tflib.run(result_expr)
        results = {key: np.concatenate([value[key] for value in results], axis=0) for key in results[0].keys()}

        # Calculate conditional entropy for each attribute.
        conditional_entropies = defaultdict(list)
        for attrib_idx in self.attrib_indices:
            # Prune the least confident samples (keep the num_keep most confident).
            pruned_indices = list(range(self.num_samples))
            pruned_indices = sorted(pruned_indices, key=lambda i: -np.max(results[attrib_idx][i]))
            pruned_indices = pruned_indices[:self.num_keep]

            # Fit SVM to the remaining samples.
            svm_targets = np.argmax(results[attrib_idx][pruned_indices], axis=1)
            for space in ['latents', 'dlatents']:
                svm_inputs = results[space][pruned_indices]
                try:
                    svm = sklearn.svm.LinearSVC()
                    svm.fit(svm_inputs, svm_targets)
                    svm_outputs = svm.predict(svm_inputs)
                except Exception:
                    # Was a bare `except:`, which also swallowed KeyboardInterrupt /
                    # SystemExit. LinearSVC.fit raises e.g. when all targets belong
                    # to a single class; in that case assume perfect prediction.
                    svm_outputs = svm_targets

                # Calculate conditional entropy from the 2x2 confusion table
                # p[svm_output][target].
                p = [[np.mean([case == (row, col) for case in zip(svm_outputs, svm_targets)]) for col in (0, 1)] for row in (0, 1)]
                conditional_entropies[space].append(conditional_entropy(p))

        # Calculate separability scores (one per latent space).
        scores = {key: 2**np.sum(values) for key, values in conditional_entropies.items()}
        self._report_result(scores['latents'], suffix='_z')
        self._report_result(scores['dlatents'], suffix='_w')
#----------------------------------------------------------------------------
| 57.684492 | 140 | 0.669602 |
aceaed024e580fac6c0d610aebf6b02087d771a6 | 304 | py | Python | examples/futures/account/adl_quantile.py | leozaragoza/binance-connector-python | 3311d102c9e788e3d71047f0af103c00d1ae2162 | [
"MIT"
] | 3 | 2021-08-16T15:29:09.000Z | 2021-09-14T16:25:03.000Z | examples/futures/account/adl_quantile.py | leozaragoza/binance-connector-python | 3311d102c9e788e3d71047f0af103c00d1ae2162 | [
"MIT"
] | null | null | null | examples/futures/account/adl_quantile.py | leozaragoza/binance-connector-python | 3311d102c9e788e3d71047f0af103c00d1ae2162 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import logging
from binance.futures import Futures as Client
from binance.lib.utils import config_logging
config_logging(logging, logging.DEBUG)
key = ""
secret = ""
client = Client(key, secret, base_url="https://testnet.binancefuture.com")
logging.info(client.adl_quantile())
| 20.266667 | 74 | 0.769737 |
aceaed2b79f305346c783fa6802f8b5c452e47cd | 322 | py | Python | src/budget/admin.py | dominikhofman/Budget | fdd5e51d1c5613f5b56bfc7298496728100c8992 | [
"MIT"
] | null | null | null | src/budget/admin.py | dominikhofman/Budget | fdd5e51d1c5613f5b56bfc7298496728100c8992 | [
"MIT"
] | null | null | null | src/budget/admin.py | dominikhofman/Budget | fdd5e51d1c5613f5b56bfc7298496728100c8992 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Budget, BudgetEntryCategory, BudgetEntry
class BudgetAdmin(admin.ModelAdmin):
readonly_fields = ('created', 'updated')
admin.site.register(Budget, BudgetAdmin)
admin.site.register(BudgetEntryCategory, BudgetAdmin)
admin.site.register(BudgetEntry, BudgetAdmin)
| 24.769231 | 60 | 0.807453 |
aceaed33fdc890835afe7c674bd85fa48205ed32 | 12,646 | py | Python | src/container/kube-manager/kube_manager/vnc/vnc_network.py | EWERK-DIGITAL/tf-controller | 311ea863b03d425a67d04d27c1f1b9cf1e20c926 | [
"Apache-2.0"
] | 37 | 2020-09-21T10:42:26.000Z | 2022-01-09T10:16:40.000Z | src/container/kube-manager/kube_manager/vnc/vnc_network.py | EWERK-DIGITAL/tf-controller | 311ea863b03d425a67d04d27c1f1b9cf1e20c926 | [
"Apache-2.0"
] | null | null | null | src/container/kube-manager/kube_manager/vnc/vnc_network.py | EWERK-DIGITAL/tf-controller | 311ea863b03d425a67d04d27c1f1b9cf1e20c926 | [
"Apache-2.0"
] | 21 | 2020-08-25T12:48:42.000Z | 2022-03-22T04:32:18.000Z | #
# Copyright (c) 2018 Juniper Networks, Inc. All rights reserved.
#
"""
VNC network management for kubernetes.
"""
from __future__ import print_function
from builtins import str
from netaddr import IPNetwork, IPAddress
from six import string_types
import uuid
from cfgm_common.exceptions import RefsExistError, NoIdError
from vnc_api.gen.resource_client import (
NetworkIpam, VirtualNetwork
)
from vnc_api.gen.resource_xsd import (
IpamSubnets, IpamSubnetType, SubnetType,
VirtualNetworkType, VnSubnetsType
)
from kube_manager.vnc.config_db import (
NetworkIpamKM, VirtualNetworkKM
)
from kube_manager.vnc.vnc_kubernetes_config import VncKubernetesConfig as vnc_kube_config
from kube_manager.vnc.vnc_common import VncCommon
from kube_manager.common.kube_config_db import NetworkKM
class VncNetwork(VncCommon):
def __init__(self):
self._k8s_event_type = 'Network'
super(VncNetwork, self).__init__(self._k8s_event_type)
self._name = type(self).__name__
self._vnc_lib = vnc_kube_config.vnc_lib()
self._args = vnc_kube_config.args()
self._logger = vnc_kube_config.logger()
self._queue = vnc_kube_config.queue()
def process(self, event):
event_type = event['type']
kind = event['object'].get('kind')
name = event['object']['metadata'].get('name')
ns_id = event['object']['metadata'].get('uid')
namespace = event['object']['metadata'].get('namespace')
annotations = event['object']['metadata'].get('annotations')
print(
"%s - Got %s %s %s:%s"
% (self._name, event_type, kind, name, ns_id))
self._logger.debug(
"%s - Got %s %s %s:%s Namespace: %s"
% (self._name, event_type, kind, name, ns_id, namespace))
if event['type'] == 'ADDED' or event['type'] == 'MODIFIED':
# If CIDR is provided in the network YAML, we need to Create
# the virtual network and IPAM objects in contrail, else need to do
# nothing here as network already exists
if 'opencontrail.org/cidr' in annotations:
subnets = annotations.get('opencontrail.org/cidr', None)
ann_val = annotations.get(
'opencontrail.org/ip_fabric_forwarding', 'false')
if ann_val.lower() == 'true':
ip_fabric_forwarding = True
elif ann_val.lower() == 'false':
ip_fabric_forwarding = False
else:
ip_fabric_forwarding = None
ann_val = annotations.get(
'opencontrail.org/ip_fabric_snat', 'false')
if ann_val.lower() == 'true':
ip_fabric_snat = True
elif ann_val.lower() == 'false':
ip_fabric_snat = False
else:
ip_fabric_snat = None
# Get Newtork object associated with the namespace
# and network name
nw = NetworkKM.get_network_fq_name(name, namespace)
if not nw:
self._logger.error(
"%s -Error in retrieving Network"
" object for VN_Name %s and Namespace %s "
% (self._name, name, namespace))
return
nw.annotated_vn_fq_name = self._make_vn_fq_name(
ns_name=namespace, vn_name=name)
# Get project and IPAM for given subnet if it does not exist
proj_obj = self._get_project(namespace)
if proj_obj is None:
self._logger.error(
"%s -Error in retrieving Project"
" for namespace %s " % (self._name, namespace))
return
self._logger.debug("%s - Create IPAM- Subnets %s"
% (self._name, subnets))
subnet_data = self._create_subnet_data(subnets)
ipam_name = self._get_network_pod_ipam_name(name)
vn_name = self._get_network_pod_vn_name(name)
pod_ipam_update, pod_ipam_obj, pod_ipam_subnets = \
self._create_ipam(ipam_name=ipam_name, proj_obj=proj_obj)
provider = None
if ip_fabric_forwarding:
ip_fabric_fq_name = vnc_kube_config.\
cluster_ip_fabric_network_fq_name()
provider = self._vnc_lib.\
virtual_network_read(fq_name=ip_fabric_fq_name)
# Create virtual network if it does not exist
self._create_virtual_network(
vn_name=vn_name, proj_obj=proj_obj, provider=provider,
ipam_obj=pod_ipam_obj, ipam_update=pod_ipam_update,
subnets=subnet_data, type='user-defined-subnet-only',
ip_fabric_snat=ip_fabric_snat)
elif event['type'] == 'DELETED':
vn_name = self._get_network_pod_vn_name(name)
self._delete_virtual_network(ns_name=namespace, vn_name=vn_name)
else:
self._logger.warning(
'Unknown event type: "{}" in Network creation. \
Ignoring'.format(event['type']))
def _get_network_pod_ipam_name(self, nw_name):
return vnc_kube_config.cluster_name() + '-' + nw_name + '-pod-ipam'
def _get_network_pod_vn_name(self, nw_name):
return vnc_kube_config.cluster_name() + '-' + nw_name + "-pod-network"
def _make_vn_fq_name(self, ns_name, vn_name):
vn_fq_name = []
domain_name = vnc_kube_config.cluster_domain()
vn_fq_name.append(domain_name)
project_name = vnc_kube_config.cluster_project_name(ns_name)
vn_fq_name.append(project_name)
virtual_net_name = vnc_kube_config.get_pod_network_name(vn_name)
vn_fq_name.append(virtual_net_name)
return vn_fq_name
def _create_subnet_data(self, vn_subnet):
subnets = [vn_subnet] if isinstance(vn_subnet, string_types) else vn_subnet
subnet_infos = []
for subnet in subnets:
cidr = IPNetwork(subnet)
subnet_infos.append(
IpamSubnetType(
subnet=SubnetType(
str(cidr.network),
int(cidr.prefixlen),
),
default_gateway=str(IPAddress(cidr.last - 1)),
subnet_uuid=str(uuid.uuid4()),
)
)
subnet_data = VnSubnetsType(subnet_infos)
return subnet_data
def _get_project(self, service_namespace):
proj_fq_name =\
vnc_kube_config.cluster_project_fq_name(service_namespace)
try:
proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
return proj_obj
except NoIdError:
return None
def _create_ipam(self, ipam_name, proj_obj, subnets=[], type=None):
ipam_obj = NetworkIpam(name=ipam_name, parent_obj=proj_obj)
ipam_subnets = []
for subnet in subnets:
pfx, pfx_len = subnet.split('/')
ipam_subnet = IpamSubnetType(subnet=SubnetType(pfx, int(pfx_len)))
ipam_subnets.append(ipam_subnet)
if not len(ipam_subnets):
self._logger.debug(
"%s - %s subnet is empty for %s"
% (self._name, ipam_name, subnets))
if type == 'flat-subnet':
ipam_obj.set_ipam_subnet_method('flat-subnet')
ipam_obj.set_ipam_subnets(IpamSubnets(ipam_subnets))
ipam_update = False
try:
ipam_uuid = self._vnc_lib.network_ipam_create(ipam_obj)
ipam_update = True
except RefsExistError:
curr_ipam_obj = self._vnc_lib.network_ipam_read(
fq_name=ipam_obj.get_fq_name())
ipam_uuid = curr_ipam_obj.get_uuid()
if type == 'flat-subnet' and not curr_ipam_obj.get_ipam_subnets():
self._vnc_lib.network_ipam_update(ipam_obj)
ipam_update = True
# Cache ipam info.
NetworkIpamKM.locate(ipam_uuid)
return ipam_update, ipam_obj, ipam_subnets
def _is_ipam_exists(self, vn_obj, ipam_fq_name, subnet=None):
curr_ipam_refs = vn_obj.get_network_ipam_refs()
if curr_ipam_refs:
for ipam_ref in curr_ipam_refs:
if ipam_fq_name == ipam_ref['to']:
if subnet:
# Subnet is specified.
# Validate that we are able to match subnect as well.
if len(ipam_ref['attr'].ipam_subnets) and \
subnet == ipam_ref['attr'].ipam_subnets[0].subnet:
return True
else:
# Subnet is not specified.
# So ipam-fq-name match will suffice.
return True
return False
def _create_virtual_network(self, vn_name, proj_obj, ipam_obj,
ipam_update, provider=None, subnets=None,
type='flat-subnet-only', ip_fabric_snat=None):
vn_exists = False
vn = VirtualNetwork(
name=vn_name, parent_obj=proj_obj,
address_allocation_mode=type)
try:
vn_obj = self._vnc_lib.virtual_network_read(
fq_name=vn.get_fq_name())
vn_exists = True
except NoIdError:
# VN does not exist. Create one.
vn_obj = vn
if vn_exists:
return vn_obj
# Attach IPAM to virtual network.
#
# For flat-subnets, the subnets are specified on the IPAM and
# not on the virtual-network to IPAM link. So pass an empty
# list of VnSubnetsType.
# For user-defined-subnets, use the provided subnets
if ipam_update or \
not self._is_ipam_exists(vn_obj, ipam_obj.get_fq_name()):
if subnets and type == 'user-defined-subnet-only':
vn_obj.add_network_ipam(ipam_obj, subnets)
else:
vn_obj.add_network_ipam(ipam_obj, VnSubnetsType([]))
vn_obj.set_virtual_network_properties(
VirtualNetworkType(forwarding_mode='l2_l3'))
if not vn_exists:
if provider:
# enable ip_fabric_forwarding
vn_obj.add_virtual_network(provider)
elif ip_fabric_snat:
# enable fabric_snat
vn_obj.set_fabric_snat(True)
else:
# disable fabric_snat
vn_obj.set_fabric_snat(False)
# Create VN.
self._vnc_lib.virtual_network_create(vn_obj)
else:
# TODO: Handle Network update
pass
vn_obj = self._vnc_lib.virtual_network_read(
fq_name=vn_obj.get_fq_name())
VirtualNetworkKM.locate(vn_obj.uuid)
return vn_obj
def _delete_virtual_network(self, ns_name, vn_name):
"""
Delete the virtual network associated with this namespace.
"""
# First lookup the cache for the entry.
vn = VirtualNetworkKM.find_by_name_or_uuid(vn_name)
if not vn:
return
proj_fq_name = vnc_kube_config.cluster_project_fq_name(ns_name)
try:
vn_obj = self._vnc_lib.virtual_network_read(fq_name=vn.fq_name)
# Delete/cleanup ipams allocated for this network.
ipam_refs = vn_obj.get_network_ipam_refs()
if ipam_refs:
proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
for ipam in ipam_refs:
ipam_obj = NetworkIpam(
name=ipam['to'][-1], parent_obj=proj_obj)
vn_obj.del_network_ipam(ipam_obj)
self._vnc_lib.virtual_network_update(vn_obj)
except RefsExistError as e:
# Delete of custom network when it is still in use is not
# supported yet. Log deletion attempt and return without deleting VN
self._logger.error("%s: Cannot delete Network %s . %s"
% (self._name, vn_name, str(e)))
return
except NoIdError:
pass
# Delete the network.
self._vnc_lib.virtual_network_delete(id=vn.uuid)
# Delete the network from cache.
VirtualNetworkKM.delete(vn.uuid)
| 39.892744 | 89 | 0.581607 |
aceaed399a640c9e8c5974605fb43e8ce697c052 | 684 | py | Python | postfix_mailq_count/postfix_mailq_count.py | sasank1/plugins | f12dcb207dcd53819f2f23eeaab3b60a40885d4b | [
"BSD-2-Clause"
] | null | null | null | postfix_mailq_count/postfix_mailq_count.py | sasank1/plugins | f12dcb207dcd53819f2f23eeaab3b60a40885d4b | [
"BSD-2-Clause"
] | null | null | null | postfix_mailq_count/postfix_mailq_count.py | sasank1/plugins | f12dcb207dcd53819f2f23eeaab3b60a40885d4b | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/python
import subprocess
import json
#Setting this to true will alert you when there is a communication problem while posting plugin data to server
HEARTBEAT = "true"
#if any impacting changes to this plugin kindly increment the plugin version here.
PLUGIN_VERSION = "1"
if __name__ == '__main__':
    # Count queued messages: mailq prints one header line per queued message
    # whose queue ID starts with a hex digit, so `grep -c` yields the count.
    cmd = 'mailq | grep -c "^[A-F0-9]"'
    data = {}
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
    # communicate() already reaps the child, so no extra wait() is needed.
    (output, err) = p.communicate()
    # On Python 3 stdout is bytes and int(bytes) raises TypeError; decode
    # first (also harmless on Python 2, where output is already str).
    data['mailq_count'] = int(output.decode('utf-8', 'replace').strip())
    data['heartbeat_required'] = HEARTBEAT
    data['plugin_version'] = PLUGIN_VERSION
    print(json.dumps(data, indent=2, sort_keys=False))
| 28.5 | 110 | 0.690058 |
aceaedf1c53c50eabcaa842d65dccd234ea3ca15 | 1,658 | py | Python | venv/Lib/site-packages/fbs/upload.py | Acuf5928/check- | 4b993e0bcee33434506565dab11ece3dfa9c5cab | [
"MIT"
] | 1 | 2020-03-30T00:08:41.000Z | 2020-03-30T00:08:41.000Z | venv/Lib/site-packages/fbs/upload.py | Acuf5928/check- | 4b993e0bcee33434506565dab11ece3dfa9c5cab | [
"MIT"
] | null | null | null | venv/Lib/site-packages/fbs/upload.py | Acuf5928/check- | 4b993e0bcee33434506565dab11ece3dfa9c5cab | [
"MIT"
] | 2 | 2018-12-29T07:49:59.000Z | 2020-03-18T02:44:31.000Z | from fbs import _server, SETTINGS, path
from fbs._aws import upload_file, upload_folder_contents
from fbs_runtime import FbsError
from fbs_runtime.platform import is_linux
from os.path import basename
import json
def _upload_repo(username, password):
    """Upload the built installer (and, on Linux, the update repository plus
    its GPG public key) to the fbs server's AWS bucket for this account.

    Three-step protocol:
      1. POST ``start_upload`` to obtain temporary bucket credentials.
      2. Upload the artifacts directly to the bucket.
      3. POST ``complete_upload`` with the list of uploaded file paths.

    Raises FbsError on any unexpected server response.
    """
    status, response = _server.post_json('start_upload', {
        'username': username,
        'password': password
    })
    # NB: this closure reads `status`/`response` from the enclosing scope at
    # call time, so after they are reassigned below it reports the latest pair.
    unexpected_response = lambda: FbsError(
        'Received unexpected server response %d:\n%s' % (status, response)
    )
    # status // 2 == 100 accepts exactly the 200 and 201 success codes.
    if status // 2 != 100:
        raise unexpected_response()
    try:
        data = json.loads(response)
    except ValueError:
        raise unexpected_response()
    try:
        # Temporary credentials issued by the server for this upload session.
        credentials = data['bucket'], data['key'], data['secret']
    except KeyError:
        raise unexpected_response()
    # All artifacts are placed under <username>/<app_name>/ in the bucket.
    dest_path = lambda p: username + '/' + SETTINGS['app_name'] + '/' + p
    installer = path('target/${installer}')
    installer_dest = dest_path(basename(installer))
    upload_file(installer, installer_dest, *credentials)
    uploaded = [installer_dest]
    if is_linux():
        # On Linux, also publish the contents of target/repo (under the
        # configured repo_subdir) and the GPG key used to verify it.
        repo_dest = dest_path(SETTINGS['repo_subdir'])
        uploaded.extend(
            upload_folder_contents(path('target/repo'), repo_dest, *credentials)
        )
        pubkey_dest = dest_path('public-key.gpg')
        upload_file(
            path('src/sign/linux/public-key.gpg'), pubkey_dest, *credentials
        )
        uploaded.append(pubkey_dest)
    status, response = _server.post_json('complete_upload', {
        'username': username,
        'password': password,
        'files': uploaded
    })
    if status != 201:
        raise unexpected_response()
aceaee189a8ec55088a152ef8f5840a176939022 | 6,574 | py | Python | mlrun/feature_store/retrieval/local_merger.py | katyakats/mlrun | e0cacad73113f616eeb5c0297628b87244957ab0 | [
"Apache-2.0"
] | null | null | null | mlrun/feature_store/retrieval/local_merger.py | katyakats/mlrun | e0cacad73113f616eeb5c0297628b87244957ab0 | [
"Apache-2.0"
] | null | null | null | mlrun/feature_store/retrieval/local_merger.py | katyakats/mlrun | e0cacad73113f616eeb5c0297628b87244957ab0 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
import pandas as pd
import mlrun
import mlrun.errors
from ...utils import logger
from ..feature_vector import OfflineVectorResponse
class LocalFeatureMerger:
    """Join the dataframes of multiple feature sets into a single offline
    feature-vector dataframe using plain pandas (no remote engine)."""

    def __init__(self, vector):
        """:param vector: feature-vector object describing the features to merge"""
        self._result_df = None  # final merged dataframe, populated by start()
        self.vector = vector

    def start(
        self,
        entity_rows=None,
        entity_timestamp_column=None,
        target=None,
        drop_columns=None,
        start_time=None,
        end_time=None,
    ):
        """Read all referenced feature sets, merge them, and optionally persist.

        :param entity_rows:             optional seed dataframe to join the features onto
        :param entity_timestamp_column: timestamp column name used for as-of joins
        :param target:                  optional offline target to write the result to
        :param drop_columns:            extra columns to drop from the final frame
        :param start_time:              earliest event time to read from the sources
        :param end_time:                latest event time to read from the sources
        :returns: OfflineVectorResponse wrapping this merger
        """
        index_columns = []
        # Index/entity columns are dropped from the result unless the vector
        # explicitly asked to keep them (spec.with_indexes).
        drop_indexes = False if self.vector.spec.with_indexes else True

        def append_index(key):
            # Remember each entity/timestamp column once for later dropping.
            if drop_indexes and key and key not in index_columns:
                index_columns.append(key)

        if entity_timestamp_column:
            index_columns.append(entity_timestamp_column)
        feature_set_objects, feature_set_fields = self.vector.parse_features()
        self.vector.save()

        # Load one dataframe per feature set, restricted to the requested columns.
        feature_sets = []
        dfs = []
        df_module = None  # for use of dask or other non pandas df module
        for name, columns in feature_set_fields.items():
            feature_set = feature_set_objects[name]
            feature_sets.append(feature_set)
            column_names = [name for name, alias in columns]
            df = feature_set.to_dataframe(
                columns=column_names,
                df_module=df_module,
                start_time=start_time,
                end_time=end_time,
                time_column=entity_timestamp_column,
            )
            # rename columns with aliases
            df.rename(
                columns={name: alias for name, alias in columns if alias}, inplace=True
            )
            dfs.append(df)

            append_index(feature_set.spec.timestamp_key)
            for key in feature_set.spec.entities.keys():
                append_index(key)

        self.merge(entity_rows, entity_timestamp_column, feature_sets, dfs)

        # Drop index/entity columns plus any user-requested columns.
        if drop_columns or index_columns:
            for field in drop_columns or []:
                if field not in index_columns:
                    index_columns.append(field)
            self._result_df.drop(columns=index_columns, inplace=True, errors="ignore")
        if self.vector.status.label_column:
            # Rows without a label value are dropped from the result.
            self._result_df = self._result_df.dropna(
                subset=[self.vector.status.label_column]
            )

        if target:
            is_persistent_vector = self.vector.metadata.name is not None
            if not target.path and not is_persistent_vector:
                raise mlrun.errors.MLRunInvalidArgumentError(
                    "target path was not specified"
                )
            target.set_resource(self.vector)
            size = target.write_dataframe(self._result_df)
            if is_persistent_vector:
                target_status = target.update_resource_status("ready", size=size)
                logger.info(f"wrote target: {target_status}")
                self.vector.save()
        return OfflineVectorResponse(self)

    def merge(
        self,
        entity_df,
        entity_timestamp_column: str,
        featuresets: list,
        featureset_dfs: List[pd.DataFrame],
    ):
        """Sequentially join the feature-set dataframes onto the entity frame.

        If no entity frame is given, the first feature-set dataframe seeds the
        merge. Feature sets that declare a timestamp key are joined as-of
        (point-in-time); the rest get a plain join on their entity keys.
        """
        merged_df = entity_df
        if entity_df is None:
            merged_df = featureset_dfs.pop(0)
            featureset = featuresets.pop(0)
            entity_timestamp_column = (
                entity_timestamp_column or featureset.spec.timestamp_key
            )

        for featureset, featureset_df in zip(featuresets, featureset_dfs):
            if featureset.spec.timestamp_key:
                merge_func = self._asof_join
            else:
                merge_func = self._join
            merged_df = merge_func(
                merged_df, entity_timestamp_column, featureset, featureset_df,
            )

        self._result_df = merged_df

    def _asof_join(
        self,
        entity_df,
        entity_timestamp_column: str,
        featureset,
        featureset_df: pd.DataFrame,
    ):
        """Point-in-time join: for each entity row take the nearest earlier
        feature row, matched per entity key, via pandas merge_asof."""
        indexes = list(featureset.spec.entities.keys())
        index_col_not_in_entity = "index" not in entity_df.columns
        index_col_not_in_featureset = "index" not in featureset_df.columns

        # merge_asof requires RangeIndex-style frames sorted by the join keys.
        if type(entity_df.index) != pd.RangeIndex:
            entity_df = entity_df.reset_index()
        if type(featureset_df.index) != pd.RangeIndex:
            featureset_df = featureset_df.reset_index()
        entity_df[entity_timestamp_column] = pd.to_datetime(
            entity_df[entity_timestamp_column]
        )
        featureset_df[featureset.spec.timestamp_key] = pd.to_datetime(
            featureset_df[featureset.spec.timestamp_key]
        )
        entity_df = entity_df.sort_values(by=entity_timestamp_column)
        # Fix: the right frame must be sorted by its own timestamp key (the
        # merge_asof `right_on` column), which may differ from the entity
        # timestamp column; sorting by the entity column made merge_asof fail
        # with unsorted right keys whenever the names differed.
        featureset_df = featureset_df.sort_values(by=featureset.spec.timestamp_key)

        merged_df = pd.merge_asof(
            entity_df,
            featureset_df,
            left_on=entity_timestamp_column,
            right_on=featureset.spec.timestamp_key,
            by=indexes,
        )

        # Undo indexing tricks for asof merge
        # to return the correct indexes and not
        # overload `index` columns
        if (
            "index" not in indexes
            and index_col_not_in_entity
            and index_col_not_in_featureset
            and "index" in merged_df.columns
        ):
            merged_df = merged_df.drop(columns="index")
        return merged_df

    def _join(
        self,
        entity_df,
        entity_timestamp_column: str,
        featureset,
        featureset_df: pd.DataFrame,
    ):
        """Plain join on the feature set's entity (key) columns."""
        indexes = list(featureset.spec.entities.keys())
        merged_df = pd.merge(entity_df, featureset_df, on=indexes)
        return merged_df

    def get_status(self):
        """Return 'completed' once a result exists (the local merge is synchronous)."""
        if self._result_df is None:
            raise RuntimeError("unexpected status, no result df")
        return "completed"

    def get_df(self):
        """Return the merged result dataframe."""
        return self._result_df
| 34.239583 | 87 | 0.623061 |
aceaee2971e2f54e727966232b6a271fe9209185 | 835 | py | Python | pygame/mouse/keep-mouse-in-window-2.py | whitmans-max/python-examples | 881a8f23f0eebc76816a0078e19951893f0daaaa | [
"MIT"
] | 140 | 2017-02-21T22:49:04.000Z | 2022-03-22T17:51:58.000Z | pygame/mouse/keep-mouse-in-window-2.py | whitmans-max/python-examples | 881a8f23f0eebc76816a0078e19951893f0daaaa | [
"MIT"
] | 5 | 2017-12-02T19:55:00.000Z | 2021-09-22T23:18:39.000Z | pygame/mouse/keep-mouse-in-window-2.py | whitmans-max/python-examples | 881a8f23f0eebc76816a0078e19951893f0daaaa | [
"MIT"
] | 79 | 2017-01-25T10:53:33.000Z | 2022-03-11T16:13:57.000Z | #!/usr/bin/env python
import pygame
'''
it keeps mouse in window and it can move object with mouse
'''
import pygame
# Initialise pygame, open the window, and centre the draggable square.
pygame.init()
screen = pygame.display.set_mode((800, 600))

rect = pygame.Rect(0, 0, 100, 100)
rect.center = (400, 300)

clock = pygame.time.Clock()
running = True

# Grab the mouse so it cannot leave the window, park the cursor in the
# centre, and drop any events generated by the warp itself.
pygame.event.set_grab(True)
pygame.mouse.set_pos(400, 300)
pygame.event.clear()

while running:
    for event in pygame.event.get():
        if event.type in (pygame.QUIT, pygame.KEYDOWN):
            running = False
        elif event.type == pygame.MOUSEMOTION:
            print(event.rel)
            # Shift the square by the negated relative mouse motion.
            rect.x -= event.rel[0]
            rect.y -= event.rel[1]

    screen.fill((0, 0, 0))
    pygame.draw.rect(screen, (255, 0, 0), rect)
    pygame.display.flip()
    clock.tick(30)

pygame.quit()
| 19.418605 | 58 | 0.610778 |
aceaee6916a523860d7db5f74d7f90d11200e7d3 | 3,946 | py | Python | backend/tests/functional_tests/api/scans/test_fetching_scans.py | kolszewska/MedTagger | c691c822dd23a9fb402d1314e7fe2e6bde898e9c | [
"Apache-2.0"
] | 71 | 2019-01-31T19:50:31.000Z | 2022-02-20T07:36:49.000Z | backend/tests/functional_tests/api/scans/test_fetching_scans.py | kolszewska/MedTagger | c691c822dd23a9fb402d1314e7fe2e6bde898e9c | [
"Apache-2.0"
] | 379 | 2019-02-16T19:12:01.000Z | 2022-03-11T23:12:24.000Z | backend/tests/functional_tests/api/scans/test_fetching_scans.py | kolszewska/MedTagger | c691c822dd23a9fb402d1314e7fe2e6bde898e9c | [
"Apache-2.0"
] | 16 | 2019-01-31T16:44:39.000Z | 2022-02-14T15:23:29.000Z | """Tests for REST API for fetching metadata about Scans."""
import json
from typing import Any
from medtagger.repositories import (
datasets as DatasetsRepository,
scans as ScansRepository,
)
from tests.functional_tests import get_api_client, get_headers
from tests.functional_tests.conftest import get_token_for_logged_in_user
def test_get_paginated_scans(prepare_environment: Any) -> None:
    """Test for fetching Scans in the paginated way.

    Checks the default pagination (page 1, 25 per page) and that the
    ``page`` and ``per_page`` query parameters are honoured.
    """
    api_client = get_api_client()
    user_token = get_token_for_logged_in_user('admin')
    # Step 1. Prepare a structure for the test
    dataset = DatasetsRepository.add_new_dataset('KIDNEYS', 'Kidneys')
    # Step 2. Add example Scans to the system
    for _ in range(50):
        ScansRepository.add_new_scan(dataset, number_of_slices=3)
    # Step 3. Fetch them with MedTagger REST API
    response = api_client.get('/api/v1/scans?dataset_key=KIDNEYS', headers=get_headers(token=user_token))
    assert response.status_code == 200
    json_response = json.loads(response.data)
    # Default page size is 25, so the first request returns half of the 50 Scans.
    assert json_response['pagination']['page'] == 1
    assert json_response['pagination']['per_page'] == 25
    assert json_response['pagination']['total'] == 50
    assert len(json_response['scans']) == 25
    # Step 4. Fetch the next page with different size
    response = api_client.get('/api/v1/scans?dataset_key=KIDNEYS&page=2&per_page=10',
                              headers=get_headers(token=user_token))
    assert response.status_code == 200
    json_response = json.loads(response.data)
    assert json_response['pagination']['page'] == 2
    assert json_response['pagination']['per_page'] == 10
    assert json_response['pagination']['total'] == 50
    assert len(json_response['scans']) == 10
def test_get_paginated_scans_by_volunteer(prepare_environment: Any) -> None:
    """Test for fetching Scans in the paginated way by volunteers.

    The listing endpoint requires elevated roles, so a volunteer token
    must be rejected with HTTP 403.
    """
    api_client = get_api_client()
    user_token = get_token_for_logged_in_user('volunteer')
    # Step 1. Prepare a structure for the test
    dataset = DatasetsRepository.add_new_dataset('KIDNEYS', 'Kidneys')
    # Step 2. Add example Scans to the system
    for _ in range(50):
        ScansRepository.add_new_scan(dataset, number_of_slices=3)
    # Step 3. Fetch them with MedTagger REST API
    response = api_client.get('/api/v1/scans?dataset_key=KIDNEYS', headers=get_headers(token=user_token))
    assert response.status_code == 403
    json_response = json.loads(response.data)
    assert json_response['message'] == 'Access forbidden'
    assert json_response['details'] == 'You don\'t have required roles to access this method.'
def test_get_paginated_scans_with_invalid_arguments(prepare_environment: Any) -> None:
    """Test for fetching Scans in the paginated way with invalid arguments.

    Covers two validation failures: a negative page number and a
    per-page size above the 100-entry cap; both must yield HTTP 400.
    """
    api_client = get_api_client()
    user_token = get_token_for_logged_in_user('admin')
    # Step 1. Prepare a structure for the test
    dataset = DatasetsRepository.add_new_dataset('KIDNEYS', 'Kidneys')
    # Step 2. Add example Scans to the system
    for _ in range(50):
        ScansRepository.add_new_scan(dataset, number_of_slices=3)
    # Step 3. Fetch them with MedTagger REST API in the wrong way
    response = api_client.get('/api/v1/scans?dataset_key=KIDNEYS&page=-1', headers=get_headers(token=user_token))
    assert response.status_code == 400
    json_response = json.loads(response.data)
    assert json_response['message'] == 'Invalid arguments.'
    assert json_response['details'] == 'Page cannot be smaller than 1.'
    # Step 4. Make a mistake again
    response = api_client.get('/api/v1/scans?dataset_key=KIDNEYS&per_page=5000', headers=get_headers(token=user_token))
    assert response.status_code == 400
    json_response = json.loads(response.data)
    assert json_response['message'] == 'Invalid arguments.'
    assert json_response['details'] == 'Cannot fetch more than 100 entries at once.'
| 43.362637 | 119 | 0.727319 |
aceaef7af874fe107bb6085e8c5af63c32f867bb | 205 | py | Python | archer/__init__.py | moonshadow/archer | cc0fd94ba8bc5047c22bd75a198b786a9e500357 | [
"MIT"
] | null | null | null | archer/__init__.py | moonshadow/archer | cc0fd94ba8bc5047c22bd75a198b786a9e500357 | [
"MIT"
] | null | null | null | archer/__init__.py | moonshadow/archer | cc0fd94ba8bc5047c22bd75a198b786a9e500357 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
__version__ = '0.3'
from .app import Archer
from .event import before_api_call, tear_down_api_call, after_api_call
from .test import TestClient
from .helper import make_client
| 17.083333 | 70 | 0.756098 |
aceaef86edba34239549a2829cb466ef4917088e | 2,729 | py | Python | sequencing_np/nn/rnn_cells/rnn.py | SwordYork/sequencing | bcbc2006bf17315411ac3d629f7014f790b70418 | [
"MIT"
] | 45 | 2017-08-06T15:02:12.000Z | 2021-01-24T19:12:13.000Z | sequencing_np/nn/rnn_cells/rnn.py | SwordYork/sequencing | bcbc2006bf17315411ac3d629f7014f790b70418 | [
"MIT"
] | null | null | null | sequencing_np/nn/rnn_cells/rnn.py | SwordYork/sequencing | bcbc2006bf17315411ac3d629f7014f790b70418 | [
"MIT"
] | 14 | 2017-08-07T04:56:55.000Z | 2019-01-07T09:43:24.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Sword York
# GitHub: https://github.com/SwordYork/sequencing
# No rights reserved.
#
from abc import ABCMeta, abstractmethod
from ..base import Layer
from ... import np, TIME_MAJOR
class RNN(Layer, metaclass=ABCMeta):
    # Abstract inference-only RNN cell; concrete cells implement step().
    def __init__(self, init_state, param_keys, activation=None,
                 base_name=None, name=None, *args, **kwargs):
        """
        numpy rnn cell.
        It only used for inferring, not training, thus we don't need initialization
        in this implementation.
        The weights and other things are passed by params.
        :param init_state: initial states of RNN, [B, H] or tuple([B, H], ...)
        :param param_keys: name of params, such as kernel and bias
        :param activation: activation function
        :param base_name: name of parent Layer
        :param name: name of this Layer
        """
        super(RNN, self).__init__(param_keys, base_name, name, **kwargs)
        # get state size
        # A non-ndarray init_state is treated as an iterable of per-part
        # states (e.g. LSTM's (h, c)); hidden size is read from the first
        # part's second axis, so all parts presumably share [B, H] shape.
        if type(init_state) != type(np.empty([])):
            self.init_state = tuple(init_state)
            self.hidden_units = tuple(init_state)[0].shape[1]
        else:
            self.init_state = init_state
            self.hidden_units = init_state.shape[1]
        self.time_major = TIME_MAJOR
        self.activation = activation or np.tanh
    def encode(self, inputs, sequence_length=None, reverse=False):
        """
        Encode multi-step inputs.
        :param inputs: if time_major [T, B, ...] else [B, T, ...]
        :param sequence_length: length of the sequence [B]
        :param reverse: used in bidirectional RNN
        :return: lstm outputs
        """
        # Normalize to time-major [T, B, ...] for the step loop.
        if not self.time_major:
            inputs = np.transpose(inputs, (1, 0, 2))
        steps = inputs.shape[0]
        outputs = np.zeros(inputs.shape[:-1] + (self.hidden_units,),
                           inputs.dtype)
        state = self.init_state
        # reverse=True walks the sequence backwards (backward half of a BiRNN).
        iter_range = reversed(range(steps)) if reverse else range(steps)
        for idx in iter_range:
            # rnn step
            curr_input = inputs[idx, :, :]
            # Boolean mask [B]: True for batch entries still inside their
            # sequence; None when no lengths were provided.
            mask = idx < sequence_length if sequence_length is not None else None
            outputs[idx, :, :], state = self.step(state, curr_input, mask)
        # Transpose back to the caller's layout when batch-major.
        if not self.time_major:
            outputs = np.transpose(outputs, (1, 0, 2))
        return outputs, state
    @abstractmethod
    def step(self, prev_states, input_, mask=None):
        """
        run rnn for one step
        :param prev_states: [B, ...]
        :param input_: [B, ...]
        :param mask: mask the terminated sequence in the batch
        :return: output, state
        """
        raise NotImplementedError
| 34.1125 | 83 | 0.596189 |
aceaef9dae2c10251969ee23acda1722b6a90cdf | 2,279 | py | Python | cvxpy/reductions/utilities.py | jasondark/cvxpy | 56aaa01b0e9d98ae5a91a923708129a7b37a6f18 | [
"ECL-2.0",
"Apache-2.0"
] | 7 | 2015-06-03T01:33:46.000Z | 2021-11-15T01:48:49.000Z | cvxpy/reductions/utilities.py | Toby-Gao/cvxpy | bd6f5142effa8cf883d1a0d7fd46c0d906b2fb93 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2022-03-09T13:13:44.000Z | 2022-03-09T13:13:44.000Z | cvxpy/reductions/utilities.py | Toby-Gao/cvxpy | bd6f5142effa8cf883d1a0d7fd46c0d906b2fb93 | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2020-10-22T01:35:58.000Z | 2022-01-19T10:48:51.000Z | """
Copyright 2018 Akshay Agrawal
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cvxpy.atoms.affine.reshape import reshape
from cvxpy.atoms.affine.vec import vec
from cvxpy.constraints.zero import Zero
from cvxpy.constraints.nonpos import NonPos, NonNeg
from collections import defaultdict
import numpy as np
import scipy.sparse as sp
def lower_ineq_to_nonpos(inequality):
    """Rewrite ``lhs <= rhs`` as the cone constraint ``NonPos(lhs - rhs)``."""
    expr = inequality.args[0] - inequality.args[1]
    return NonPos(expr, constr_id=inequality.constr_id)
def lower_ineq_to_nonneg(inequality):
    """Rewrite ``lhs <= rhs`` as the cone constraint ``NonNeg(rhs - lhs)``."""
    expr = inequality.args[1] - inequality.args[0]
    return NonNeg(expr, constr_id=inequality.constr_id)
def lower_equality(equality):
    """Rewrite ``lhs == rhs`` as the cone constraint ``Zero(lhs - rhs)``."""
    expr = equality.args[0] - equality.args[1]
    return Zero(expr, constr_id=equality.constr_id)
def special_index_canon(expr, args):
    # Lower a special-index expression to a sparse row-selection applied
    # to the column-major vectorization of its argument.
    select_mat = expr._select_mat
    final_shape = expr._select_mat.shape
    # Flatten in Fortran (column-major) order to match vec()'s ordering.
    select_vec = np.reshape(select_mat, select_mat.size, order='F')
    # Select the chosen entries from expr.
    arg = args[0]
    # Rows of the identity pick entries of vec(arg); select_mat presumably
    # holds linear indices into vec(arg) -- confirm against the caller.
    identity = sp.eye(arg.size).tocsc()
    lowered = reshape(identity[select_vec] @ vec(arg), final_shape)
    return lowered, []
def are_args_affine(constraints):
    """Return True when every argument of every given constraint is affine."""
    for constraint in constraints:
        if not all(argument.is_affine() for argument in constraint.args):
            return False
    return True
def group_constraints(constraints):
    """Bucket constraints by their concrete class.

    Parameters
    ----------
    constraints : list of constraints

    Returns
    -------
    dict
        A ``defaultdict`` mapping each constraint type to the list of
        exactly those constraints of that type, in input order.
    """
    grouped = defaultdict(list)
    for constraint in constraints:
        grouped[type(constraint)].append(constraint)
    return grouped
| 29.597403 | 77 | 0.728828 |
aceaf2adec2db19a0f882e248d77cbb5f9840c1a | 644 | py | Python | pynars/NARS/InferenceEngine/GeneralEngine/Rules/NAL9.py | AIxer/PyNARS | 443b6a5e1c9779a1b861df1ca51ce5a190998d2e | [
"MIT"
] | null | null | null | pynars/NARS/InferenceEngine/GeneralEngine/Rules/NAL9.py | AIxer/PyNARS | 443b6a5e1c9779a1b861df1ca51ce5a190998d2e | [
"MIT"
] | null | null | null | pynars/NARS/InferenceEngine/GeneralEngine/Rules/NAL9.py | AIxer/PyNARS | 443b6a5e1c9779a1b861df1ca51ce5a190998d2e | [
"MIT"
] | null | null | null | from collections import OrderedDict
from pynars.NARS.DataStructures import LinkType, TaskLink, TermLink
from pynars.utils.SparseLUT import SparseLUT
from pynars import Global
from ....RuleMap.add_rule import *
from pynars.NARS.MentalOperation import *
def add_rules__NAL9(sparse_lut: SparseLUT=None, structure: OrderedDict=None):
    """Register the executors for the NAL-9 mental operations.

    ``sparse_lut`` and ``structure`` are accepted but unused here --
    presumably kept for signature parity with the other rule-adding
    helpers; confirm against the call sites.
    """
    register(Believe, execute__believe)
    register(Doubt, execute__doubt)
    register(Evaluate, execute__evaluate)
    register(Hesitate, execute__hesitate)
    register(Want, execute__want)
    register(Wonder, execute__wonder)
    # register(Anticipate, execute__anticipate)
aceaf380b538e219838aab54002e81b73674ddd9 | 1,528 | py | Python | cradlepy/components/helper.py | cblanquera/cradlepy | 1634fe38a0cc58f92dbfc2b0c84ace9d16821c3c | [
"MIT"
] | null | null | null | cradlepy/components/helper.py | cblanquera/cradlepy | 1634fe38a0cc58f92dbfc2b0c84ace9d16821c3c | [
"MIT"
] | null | null | null | cradlepy/components/helper.py | cblanquera/cradlepy | 1634fe38a0cc58f92dbfc2b0c84ace9d16821c3c | [
"MIT"
] | null | null | null | from inspect import signature
class ConditionalTrait:
    'Adds a generic `when()` method used during chainable calls'
    def when(self, conditional, callback = None):
        'Invokes Callback if conditional callback is true'
        # if the callback is not callable
        if not callable(callback):
            # if the conditional is callable
            # as in: @object.when
            if callable(conditional):
                def wrapper(callback):
                    callback(self, conditional(self))
                # NOTE(review): the wrapper is only attached here, never
                # invoked -- presumably something else calls
                # ``conditional.callback`` later; confirm.
                conditional.callback = wrapper
                #return the condition to have global access to it
                return conditional
            # otherwise the conditional is not callable
            # as in: @object.when(True)
            # Returns a decorator that immediately calls the decorated
            # function with (self, conditional).
            def wrapper(callback):
                callback(self, conditional)
            return wrapper
        # otherwise the callback is callable
        # if the conditional is callable
        if callable(conditional):
            #flatten out the conditional
            conditional = conditional(self)
        #go ahead and call the callback
        # The callback runs unconditionally; ``conditional`` is passed to it
        # as the (possibly flattened) condition value.
        callback(self, conditional)
        return self
class LoopTrait:
    'Adds a generic `loop()` method used during chainable calls'
    def loop(self, callback = None):
        # if callback is callable
        # as in: @object.loop
        # Repeatedly invokes callback(self, index) with an incrementing
        # index until the callback returns False (any other value,
        # including None, continues the loop). A non-callable callback
        # is a no-op; the method implicitly returns None in both cases.
        if callable(callback):
            index = 0
            #please return false :)
            while(callback(self, index) != False):
                index += 1
| 29.960784 | 65 | 0.581806 |
aceaf389d1ce55cc1617c191d598ce58a57be5f6 | 937 | py | Python | 12/12.py | stevenpclark/aoc2020 | f264dae2b2bc2753a952dc3908ecea7f871a61ad | [
"MIT"
] | null | null | null | 12/12.py | stevenpclark/aoc2020 | f264dae2b2bc2753a952dc3908ecea7f871a61ad | [
"MIT"
] | null | null | null | 12/12.py | stevenpclark/aoc2020 | f264dae2b2bc2753a952dc3908ecea7f871a61ad | [
"MIT"
] | null | null | null | DP_MAP = {'N':(0,1), 'S':(0,-1), 'E':(1,0), 'W':(-1,0)}
FLIP_TURN = {'L':'R', 'R':'L'}
def turn(dp, c, v):
    """Rotate direction/waypoint vector *dp* by *v* degrees (90/180/270)
    turning *c* ('L' for counter-clockwise, 'R' for clockwise)."""
    dx, dy = dp
    if v == 180:
        return (-dx, -dy)
    if v == 270:
        # A 270-degree turn one way equals a 90-degree turn the other way.
        v = 90
        c = {'L': 'R', 'R': 'L'}[c]
    # Single 90-degree rotation.
    return (dy, -dx) if c == 'R' else (-dy, dx)
def main(dp=(1,0), use_waypoint=False):
    """Simulate the AoC 2020 day 12 ship and print the Manhattan distance.

    dp is the initial heading (part 1) or waypoint offset (part 2);
    use_waypoint makes N/S/E/W commands move *dp* instead of the ship.
    """
    fn = 'input.txt'
    with open(fn, 'r') as f:
        # Each line is a single-letter command followed by an integer value.
        cmds = [(s[0], int(s[1:])) for s in f.readlines()]
    x, y = 0, 0
    for c, v in cmds:
        if c in ['L', 'R']:
            # Rotate the heading/waypoint in place.
            dp = turn(dp, c, v)
            continue
        if c == 'F':
            # Move the ship v steps along the heading/waypoint vector.
            x += v*dp[0]
            y += v*dp[1]
            continue
        dx, dy = DP_MAP[c]
        if not use_waypoint:
            x += v*dx
            y += v*dy
        else:
            dp = (dp[0]+v*dx, dp[1]+v*dy)
    print(abs(x)+abs(y))
if __name__ == '__main__':
    main()  # part 1: heading starts East (1, 0)
    main(dp=(10,1), use_waypoint=True)  # part 2: waypoint starts at (10, 1)
| 21.295455 | 58 | 0.391676 |
aceaf405395f1b6bb08894294927df0906167754 | 1,416 | py | Python | courses/machine_learning/deepdive2/production_ml/labs/samples/core/dns_config/dns_config.py | memeyankm/training-data-analyst | 33bdca94cb063036f7313a2eaf6e515126d31344 | [
"Apache-2.0"
] | 6,140 | 2016-05-23T16:09:35.000Z | 2022-03-30T19:00:46.000Z | courses/machine_learning/deepdive2/production_ml/labs/samples/core/dns_config/dns_config.py | memeyankm/training-data-analyst | 33bdca94cb063036f7313a2eaf6e515126d31344 | [
"Apache-2.0"
] | 1,720 | 2021-01-25T09:32:00.000Z | 2022-03-31T08:09:51.000Z | courses/machine_learning/deepdive2/production_ml/labs/samples/core/dns_config/dns_config.py | memeyankm/training-data-analyst | 33bdca94cb063036f7313a2eaf6e515126d31344 | [
"Apache-2.0"
] | 5,110 | 2016-05-27T13:45:18.000Z | 2022-03-31T18:40:42.000Z | #!/usr/bin/env python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import kfp
from kfp import dsl
from kubernetes.client.models import V1PodDNSConfig, V1PodDNSConfigOption
def echo_op():
    """Return a ContainerOp that echoes "hello world" in a bash container."""
    return dsl.ContainerOp(
        name='echo',
        image='library/bash:4.4.23',
        command=['sh', '-c'],
        arguments=['echo "hello world"']
    )
@dsl.pipeline(
    name='dnsConfig setting',
    description='Passes dnsConfig setting to workflow.'
)
def dns_config_pipeline():
    """Single-step pipeline used to exercise the pod dnsConfig setting."""
    echo_task = echo_op()  # the handle is intentionally unused
if __name__ == '__main__':
    pipeline_conf = kfp.dsl.PipelineConf()
    # Every pod in the run resolves DNS via 1.2.3.4 with ndots=2.
    pipeline_conf.set_dns_config(dns_config=V1PodDNSConfig(
        nameservers=["1.2.3.4"],
        options=[V1PodDNSConfigOption(name="ndots", value="2")]
    ))
    # Compile the pipeline to '<this file>.yaml' next to this script.
    kfp.compiler.Compiler().compile(
        dns_config_pipeline,
        __file__ + '.yaml',
        pipeline_conf=pipeline_conf
    )
| 27.764706 | 74 | 0.69774 |
aceaf4e0b85af625ffd6bac49dda8f9c3c53b9b4 | 1,712 | py | Python | config/wsgi.py | giussepi/wsp-math-exam | b1c83029291635dc8387dc6692fb186ae302f5ee | [
"MIT"
] | null | null | null | config/wsgi.py | giussepi/wsp-math-exam | b1c83029291635dc8387dc6692fb186ae302f5ee | [
"MIT"
] | null | null | null | config/wsgi.py | giussepi/wsp-math-exam | b1c83029291635dc8387dc6692fb186ae302f5ee | [
"MIT"
] | null | null | null | """
WSGI config for Math class exams project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
from django.core.wsgi import get_wsgi_application
# This allows easy placement of apps within the interior
# math_class_exams directory.
# app_path is the project root (the parent of this config/ package); the
# 'math_class_exams' directory inside it is put on sys.path so the apps it
# contains can be imported without the project prefix.
app_path = os.path.abspath(os.path.join(
    os.path.dirname(os.path.abspath(__file__)), os.pardir))
sys.path.append(os.path.join(app_path, 'math_class_exams'))
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| 41.756098 | 79 | 0.798481 |
aceaf51b41edc8c25cb43fb9ea7310d25839a14d | 500 | py | Python | apps/users/migrations/0005_auto_20170908_1837.py | Palameng/test_mxOnline | 1be8f9caff5166dfdc49c1facd95af4445d30f9c | [
"Apache-2.0"
] | 1 | 2017-08-07T07:28:23.000Z | 2017-08-07T07:28:23.000Z | apps/users/migrations/0005_auto_20170908_1837.py | Palameng/test_mxOnline | 1be8f9caff5166dfdc49c1facd95af4445d30f9c | [
"Apache-2.0"
] | null | null | null | apps/users/migrations/0005_auto_20170908_1837.py | Palameng/test_mxOnline | 1be8f9caff5166dfdc49c1facd95af4445d30f9c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-09-08 18:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter ``UserProfile.image``: upload to ``courses/%Y/%m`` with a
    100-character path limit."""

    dependencies = [
        ('users', '0004_auto_20170723_1733'),
    ]

    operations = [
        migrations.AlterField(
            model_name='userprofile',
            name='image',
            # max_length must be a positive int (Django's field checks
            # reject strings); the generated value was the string '100'.
            field=models.ImageField(max_length=100, upload_to='courses/%Y/%m', verbose_name='头像'),
        ),
    ]
| 23.809524 | 100 | 0.624 |
aceaf5327566dca04e665833b78e98e6ebfe56d6 | 2,470 | py | Python | netdisc/base/threaded.py | zohassadar/netdisc | 9ce4d5c2b0f30d36e71118ffbe6b7ffd93e0dfc8 | [
"MIT"
] | null | null | null | netdisc/base/threaded.py | zohassadar/netdisc | 9ce4d5c2b0f30d36e71118ffbe6b7ffd93e0dfc8 | [
"MIT"
] | null | null | null | netdisc/base/threaded.py | zohassadar/netdisc | 9ce4d5c2b0f30d36e71118ffbe6b7ffd93e0dfc8 | [
"MIT"
] | null | null | null | import logging
import queue
import threading
import time
import typing
import logging
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
class WorkerThread(threading.Thread):
    """Queue-driven worker: pulls items from ``input``, applies ``worker``,
    and pushes each result to ``output`` until ``stop`` is set to True.

    ``timeout`` bounds each blocking ``Queue.get`` so the ``stop`` flag is
    re-checked at least every ``timeout`` seconds.
    """

    def __init__(
        self,
        worker: typing.Callable,
        timeout: int,
        index: int,
        input: queue.Queue,
        output: queue.Queue,
    ):
        super().__init__(name=f"{worker.__name__}_Thread{index:02d}")
        self.worker = worker
        self.timeout = timeout
        self.index = index
        self.input = input
        self.output = output
        self.stop = False  # set True (e.g. by WorkerPoolThread) to shut down
        logger.debug("Instantiated")

    def run(self):
        """Process queue items until ``self.stop`` becomes True."""
        logger.debug("Running")
        while True:
            if self.stop:
                # Use the module-level logger (was ``logging.info``) so the
                # message honours this module's handler/level configuration.
                logger.info("Thread %s: Finished", self.index)
                break
            try:
                work = self.input.get(timeout=self.timeout)
            except queue.Empty:
                continue
            # NOTE(review): an exception from worker() kills this thread and
            # leaves the item un-acknowledged (no task_done); confirm the
            # callers accept that failure mode.
            result = self.worker(work)
            self.output.put(result)
            self.input.task_done()
class WorkerPoolThread(threading.Thread):
    # Context-managed pool of WorkerThreads sharing one input and one
    # output queue; entering starts the workers, exiting flags them to stop.
    def __init__(
        self,
        worker: typing.Callable,
        timeout: int,
        max_workers: int,
    ):
        super().__init__(name=type(self).__name__)
        self.worker = worker
        self.timeout = timeout
        self.max_workers = max_workers
        self.input = queue.Queue()
        self.output = queue.Queue()
        self.stop = False
        self._workers: list[WorkerThread] = []
        # NOTE(review): this loop rebinds the ``worker`` parameter to each
        # WorkerThread instance; harmless because self.worker was captured
        # above, but worth renaming.
        for index in range(self.max_workers):
            worker = WorkerThread(
                worker=self.worker,
                timeout=self.timeout,
                index=index,
                input=self.input,
                output=self.output,
            )
            self._workers.append(worker)
        logger.debug("Instantiated")
    def __enter__(self):
        # Start every worker thread plus this supervisor thread.
        logger.debug("Starting Workers")
        for worker in self._workers:
            worker.start()
        self.start()
        return self
    def __exit__(self, *exc):
        # Signal shutdown; threads exit on their next stop-flag check.
        # Exceptions are not suppressed (implicitly returns None).
        for worker in self._workers:
            worker.stop = True
        self.stop = True
        logger.debug("Stopping Workers")
    def run(self):
        # The supervisor only idles until told to stop; the real work
        # happens in the WorkerThreads.
        logger.debug("Starting")
        while True:
            if self.stop:
                break
            time.sleep(self.timeout)
| 27.142857 | 70 | 0.533603 |
aceaf53dc6da6ed1126ffd14db852810cc71eb0d | 6,951 | py | Python | data/p3BR/R2/benchmark/startQiskit_noisy304.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p3BR/R2/benchmark/startQiskit_noisy304.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p3BR/R2/benchmark/startQiskit_noisy304.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=3
# total number=61
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
    """XOR two equal-length bit strings position by position.

    The joined result is reversed, so the bit computed from s[0]/t[0]
    ends up last in the returned string.
    """
    xored = ''.join(str(int(s[i]) ^ int(t[i])) for i in range(len(s)))
    return xored[::-1]
def bitwise_dot(s: str, t: str) -> str:
    """Dot product of two bit strings modulo 2, returned as '0' or '1'."""
    total = sum(int(s[i]) * int(t[i]) for i in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
    # implement the oracle O_f
    # NOTE: use multi_control_toffoli_gate ('noancilla' mode)
    # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
    # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
    # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
    # Builds a circuit on n control qubits plus one target that flips the
    # target exactly for those basis states x with f(x) == "1".
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            # Surround the multi-controlled X with X gates on the 0-bits so
            # the Toffoli fires only for this particular bit pattern.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            oracle.mct(controls, target[0], None, mode='noancilla')
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    # oracle.draw('mpl', filename=(kernel + '-oracle.png'))
    return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
    # implement the Bernstein-Vazirani circuit
    zero = np.binary_repr(0, n)
    b = f(zero)
    # initial n + 1 bits
    input_qubit = QuantumRegister(n+1, "qc")
    classicals = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classicals)
    # inverse last one (can be omitted if using O_f^\pm)
    prog.x(input_qubit[n])
    # circuit begin
    # The fixed gate sequence below (with '# number=N' tags) is presumably
    # auto-generated perturbation of the base circuit -- do not reorder.
    prog.h(input_qubit[1]) # number=1
    prog.h(input_qubit[2]) # number=38
    prog.cz(input_qubit[0],input_qubit[2]) # number=39
    prog.h(input_qubit[2]) # number=40
    prog.cx(input_qubit[0],input_qubit[2]) # number=31
    prog.h(input_qubit[2]) # number=42
    prog.cz(input_qubit[0],input_qubit[2]) # number=43
    prog.h(input_qubit[2]) # number=44
    prog.h(input_qubit[2]) # number=48
    prog.cz(input_qubit[0],input_qubit[2]) # number=49
    prog.h(input_qubit[2]) # number=50
    prog.cx(input_qubit[0],input_qubit[2]) # number=54
    prog.x(input_qubit[2]) # number=55
    prog.cx(input_qubit[0],input_qubit[2]) # number=56
    prog.cx(input_qubit[0],input_qubit[2]) # number=47
    prog.h(input_qubit[2]) # number=58
    prog.cz(input_qubit[0],input_qubit[2]) # number=59
    prog.h(input_qubit[2]) # number=60
    prog.h(input_qubit[2]) # number=51
    prog.cz(input_qubit[0],input_qubit[2]) # number=52
    prog.h(input_qubit[2]) # number=53
    prog.h(input_qubit[2]) # number=25
    prog.cz(input_qubit[0],input_qubit[2]) # number=26
    prog.h(input_qubit[2]) # number=27
    prog.h(input_qubit[1]) # number=7
    prog.cz(input_qubit[2],input_qubit[1]) # number=8
    prog.rx(0.17592918860102857,input_qubit[2]) # number=34
    prog.rx(-0.3989822670059037,input_qubit[1]) # number=30
    prog.h(input_qubit[1]) # number=9
    prog.h(input_qubit[1]) # number=18
    prog.cz(input_qubit[2],input_qubit[1]) # number=19
    prog.h(input_qubit[1]) # number=20
    prog.y(input_qubit[1]) # number=14
    prog.h(input_qubit[1]) # number=22
    prog.cz(input_qubit[2],input_qubit[1]) # number=23
    prog.rx(-0.9173450548482197,input_qubit[1]) # number=57
    prog.h(input_qubit[1]) # number=24
    prog.z(input_qubit[2]) # number=3
    prog.z(input_qubit[1]) # number=41
    prog.x(input_qubit[1]) # number=17
    prog.y(input_qubit[2]) # number=5
    prog.x(input_qubit[2]) # number=21
    # apply H to get superposition
    for i in range(n):
        prog.h(input_qubit[i])
    prog.h(input_qubit[n])
    prog.barrier()
    # apply oracle O_f
    oracle = build_oracle(n, f)
    prog.append(
        oracle.to_gate(),
        [input_qubit[i] for i in range(n)] + [input_qubit[n]])
    # apply H back (QFT on Z_2^n)
    for i in range(n):
        prog.h(input_qubit[i])
    prog.barrier()
    # measure
    return prog
def get_statevector(prog: QuantumCircuit) -> Any:
    # Simulate the circuit exactly and return the state as a dict mapping
    # basis-state labels like "|01>" to their complex amplitudes.
    state_backend = Aer.get_backend('statevector_simulator')
    statevec = execute(prog, state_backend).result()
    quantum_state = statevec.get_statevector()
    qubits = round(log2(len(quantum_state)))
    quantum_state = {
        "|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
        for i in range(2 ** qubits)
    }
    return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
    # Run the circuit on the named Aer backend and package the exact
    # statevector, measurement counts, and the recovered hidden string.
    # Q: which backend should we use?
    # get state vector
    quantum_state = get_statevector(prog)
    # get simulate results
    # provider = IBMQ.load_account()
    # backend = provider.get_backend(backend_str)
    # qobj = compile(prog, backend, shots)
    # job = backend.run(qobj)
    # job.result()
    backend = Aer.get_backend(backend_str)
    # transpile/schedule -> assemble -> backend.run
    results = execute(prog, backend, shots=shots).result()
    counts = results.get_counts()
    # Most frequent bitstring, reversed to undo Qiskit's qubit ordering.
    a = Counter(counts).most_common(1)[0][0][::-1]
    return {
        "measurements": counts,
        # "state": statevec,
        "quantum_state": quantum_state,
        "a": a,
        "b": b
    }
def bernstein_test_1(rep: str):
    """011 . x + 1

    Hidden-string oracle: f(x) = (011 . x + 1) mod 2.
    """
    a = "011"
    b = "1"
    return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_2(rep: str):
    """000 . x + 0

    Degenerate oracle: always returns "0".
    """
    a = "000"
    b = "0"
    return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_3(rep: str):
    """111 . x + 1

    Hidden-string oracle: f(x) = (111 . x + 1) mod 2.
    """
    a = "111"
    b = "1"
    return bitwise_xor(bitwise_dot(a, rep), b)
if __name__ == "__main__":
n = 2
a = "11"
b = "1"
f = lambda rep: \
bitwise_xor(bitwise_dot(a, rep), b)
prog = build_circuit(n, f)
sample_shot =4000
writefile = open("../data/startQiskit_noisy304.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = FakeYorktown()
circuit1 = transpile(prog, FakeYorktown())
circuit1.h(qubit=2)
circuit1.x(qubit=3)
circuit1.measure_all()
info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
| 31.595455 | 140 | 0.637175 |
aceaf604bbc5743ddbf961ff28fbd0a4e4c25815 | 16,031 | py | Python | markdown/__init__.py | tylerbutler/Python-Markdown | caef55d59153baf149353657740446d762cb4305 | [
"BSD-3-Clause"
] | null | null | null | markdown/__init__.py | tylerbutler/Python-Markdown | caef55d59153baf149353657740446d762cb4305 | [
"BSD-3-Clause"
] | null | null | null | markdown/__init__.py | tylerbutler/Python-Markdown | caef55d59153baf149353657740446d762cb4305 | [
"BSD-3-Clause"
] | null | null | null | """
Python Markdown
===============
Python Markdown converts Markdown to HTML and can be used as a library or
called from the command line.
## Basic usage as a module:
import markdown
html = markdown.markdown(your_text_string)
See <http://packages.python.org/Markdown/> for more
information and instructions on how to extend the functionality of
Python Markdown. Read that before you try modifying this file.
## Authors and License
Started by [Manfred Stienstra](http://www.dwerg.net/). Continued and
maintained by [Yuri Takhteyev](http://www.freewisdom.org), [Waylan
Limberg](http://achinghead.com/) and [Artem Yunusov](http://blog.splyer.com).
Contact: markdown@freewisdom.org
Copyright 2007-2012 The Python Markdown Project (v. 1.7 and later)
Copyright 200? Django Software Foundation (OrderedDict implementation)
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
Copyright 2004 Manfred Stienstra (the original version)
License: BSD (see LICENSE for details).
"""
from __version__ import version, version_info
import re
import codecs
import sys
import logging
import util
from preprocessors import build_preprocessors
from blockprocessors import build_block_parser
from treeprocessors import build_treeprocessors
from inlinepatterns import build_inlinepatterns
from postprocessors import build_postprocessors
from extensions import Extension
from serializers import to_html_string, to_xhtml_string
__all__ = ['Markdown', 'markdown', 'markdownFromFile']
logger = logging.getLogger('MARKDOWN')
class Markdown:
"""Convert Markdown to HTML."""
doc_tag = "div" # Element used to wrap document - later removed
option_defaults = {
'html_replacement_text' : '[HTML_REMOVED]',
'tab_length' : 4,
'enable_attributes' : True,
'smart_emphasis' : True,
'lazy_ol' : True,
}
output_formats = {
'html' : to_html_string,
'html4' : to_html_string,
'html5' : to_html_string,
'xhtml' : to_xhtml_string,
'xhtml1': to_xhtml_string,
'xhtml5': to_xhtml_string,
}
ESCAPED_CHARS = ['\\', '`', '*', '_', '{', '}', '[', ']',
'(', ')', '>', '#', '+', '-', '.', '!']
    def __init__(self, *args, **kwargs):
        """
        Creates a new Markdown instance.

        Keyword arguments:

        * extensions: A list of extensions.
           If they are of type string, the module mdx_name.py will be loaded.
           If they are a subclass of markdown.Extension, they will be used
           as-is.
        * extension_configs: Configuration settings for extensions.
        * output_format: Format of output. Supported formats are:
            * "xhtml1": Outputs XHTML 1.x. Default.
            * "xhtml5": Outputs XHTML style tags of HTML 5
            * "xhtml": Outputs latest supported version of XHTML (currently XHTML 1.1).
            * "html4": Outputs HTML 4
            * "html5": Outputs HTML style tags of HTML 5
            * "html": Outputs latest supported version of HTML (currently HTML 4).
            Note that it is suggested that the more specific formats ("xhtml1"
            and "html4") be used as "xhtml" or "html" may change in the future
            if it makes sense at that time.
        * safe_mode: Disallow raw html. One of "remove", "replace" or "escape".
        * html_replacement_text: Text used when safe_mode is set to "replace".
        * tab_length: Length of tabs in the source. Default: 4
        * enable_attributes: Enable the conversion of attributes. Default: True
        * smart_emphasis: Treat `_connected_words_` intelligently Default: True
        * lazy_ol: Ignore number of first item of ordered lists. Default: True

        """
        # For backward compatibility, loop through old positional args
        pos = ['extensions', 'extension_configs', 'safe_mode', 'output_format']
        c = 0
        for arg in args:
            # dict.has_key is Python 2 only; keyword args win over positional.
            if not kwargs.has_key(pos[c]):
                kwargs[pos[c]] = arg
            c += 1
            if c == len(pos):
                # ignore any additional args
                break
        # Loop through kwargs and assign defaults
        # Each key in option_defaults becomes an instance attribute.
        for option, default in self.option_defaults.items():
            setattr(self, option, kwargs.get(option, default))
        self.safeMode = kwargs.get('safe_mode', False)
        if self.safeMode and not kwargs.has_key('enable_attributes'):
            # Disable attributes in safeMode when not explicitly set
            self.enable_attributes = False
        self.registeredExtensions = []
        self.docType = ""
        self.stripTopLevelTags = True
        self.build_parser()
        self.references = {}
        self.htmlStash = util.HtmlStash()
        self.set_output_format(kwargs.get('output_format', 'xhtml1'))
        # Extensions are registered last so they can hook into the parser
        # pieces built above.
        self.registerExtensions(extensions=kwargs.get('extensions', []),
                                configs=kwargs.get('extension_configs', {}))
        self.reset()
def build_parser(self):
""" Build the parser from the various parts. """
self.preprocessors = build_preprocessors(self)
self.parser = build_block_parser(self)
self.inlinePatterns = build_inlinepatterns(self)
self.treeprocessors = build_treeprocessors(self)
self.postprocessors = build_postprocessors(self)
return self
def registerExtensions(self, extensions, configs):
"""
Register extensions with this instance of Markdown.
Keyword arguments:
* extensions: A list of extensions, which can either
be strings or objects. See the docstring on Markdown.
* configs: A dictionary mapping module names to config options.
"""
for ext in extensions:
if isinstance(ext, basestring):
ext = self.build_extension(ext, configs.get(ext, []))
if isinstance(ext, Extension):
ext.extendMarkdown(self, globals())
elif ext is not None:
raise TypeError(
'Extension "%s.%s" must be of type: "markdown.Extension"'
% (ext.__class__.__module__, ext.__class__.__name__))
return self
    def build_extension(self, ext_name, configs = []):
        """Build extension by name, then return the module.

        The extension name may contain arguments as part of the string in the
        following format: ``"extname(key1=value1,key2=value2)"``.
        Inline arguments override entries already present in *configs*.
        Raises ImportError (with an augmented message) when the extension
        cannot be found, and AttributeError when the loaded module does not
        define ``makeExtension()``.
        """
        # Parse extensions config params (ignore the order)
        configs = dict(configs)
        pos = ext_name.find("(") # find the first "("
        if pos > 0:
            # Split "extname(k1=v1,k2=v2)" into the bare name and its
            # inline key=value pairs.
            ext_args = ext_name[pos+1:-1]
            ext_name = ext_name[:pos]
            pairs = [x.split("=") for x in ext_args.split(",")]
            configs.update([(x.strip(), y.strip()) for (x, y) in pairs])
        # Setup the module name
        module_name = ext_name
        if '.' not in ext_name:
            # Bare names are looked up inside the bundled extensions package.
            module_name = '.'.join(['markdown.extensions', ext_name])
        # Try loading the extension first from one place, then another
        try: # New style (markdown.extensons.<extension>)
            module = __import__(module_name, {}, {}, [module_name.rpartition('.')[0]])
        except ImportError:
            # Fall back to the legacy "mdx_<name>" top-level module layout.
            module_name_old_style = '_'.join(['mdx', ext_name])
            try: # Old style (mdx_<extension>)
                module = __import__(module_name_old_style)
            except ImportError, e:
                # Re-raise with a clearer message while keeping the
                # original traceback (args are mutated in place).
                message = "Failed loading extension '%s' from '%s' or '%s'" \
                    % (ext_name, module_name, module_name_old_style)
                e.args = (message,) + e.args[1:]
                raise
        # If the module is loaded successfully, we expect it to define a
        # function called makeExtension()
        try:
            return module.makeExtension(configs.items())
        except AttributeError, e:
            message = e.args[0]
            message = "Failed to initiate extension " \
                      "'%s': %s" % (ext_name, message)
            e.args = (message,) + e.args[1:]
            raise
    def registerExtension(self, extension):
        """Record *extension* so reset() can later call its reset() hook.

        Extensions call this from extendMarkdown() when they keep
        per-document state.
        """
        self.registeredExtensions.append(extension)
        return self
def reset(self):
"""
Resets all state variables so that we can start with a new text.
"""
self.htmlStash.reset()
self.references.clear()
for extension in self.registeredExtensions:
if hasattr(extension, 'reset'):
extension.reset()
return self
    def set_output_format(self, format):
        """ Set the output format for the class instance.

        *format* is matched case-insensitively against the keys of
        self.output_formats; the matching serializer is installed on
        self.serializer. Re-raises KeyError with a message listing the
        valid choices when the format is unknown.
        """
        # Normalise so lookups are case-insensitive.
        self.output_format = format.lower()
        try:
            self.serializer = self.output_formats[self.output_format]
        except KeyError, e:
            # Rewrite the error to enumerate the valid choices while
            # preserving the original traceback.
            valid_formats = self.output_formats.keys()
            valid_formats.sort()
            message = 'Invalid Output Format: "%s". Use one of %s.' \
                       % (self.output_format,
                          '"' + '", "'.join(valid_formats) + '"')
            e.args = (message,) + e.args[1:]
            raise
        return self
    def convert(self, source):
        """
        Convert markdown to serialized XHTML or HTML.

        Keyword arguments:

        * source: Source text as a Unicode string.

        Markdown processing takes place in five steps:

        1. A bunch of "preprocessors" munge the input text.
        2. BlockParser() parses the high-level structural elements of the
           pre-processed text into an ElementTree.
        3. A bunch of "treeprocessors" are run against the ElementTree. One
           such treeprocessor runs InlinePatterns against the ElementTree,
           detecting inline markup.
        4. Some post-processors are run against the text after the ElementTree
           has been serialized into text.
        5. The output is written to a string.

        Raises UnicodeDecodeError when *source* cannot be coerced to
        unicode, and ValueError when top-level tag stripping fails.
        """
        # Fixup the source text
        if not source.strip():
            return u"" # a blank unicode string
        try:
            source = unicode(source)
        except UnicodeDecodeError, e:
            # Customise error message while maintaining original trackback
            e.reason += '. -- Note: Markdown only accepts unicode input!'
            raise
        # Split into lines and run the line preprocessors.
        self.lines = source.split("\n")
        for prep in self.preprocessors.values():
            self.lines = prep.run(self.lines)
        # Parse the high-level elements.
        root = self.parser.parseDocument(self.lines).getroot()
        # Run the tree-processors
        for treeprocessor in self.treeprocessors.values():
            # A treeprocessor may return a replacement root or None
            # (None means it modified the tree in place).
            newRoot = treeprocessor.run(root)
            if newRoot:
                root = newRoot
        # Serialize _properly_. Strip top-level tags.
        output = self.serializer(root)
        if self.stripTopLevelTags:
            try:
                # Drop the synthetic wrapper element added during parsing;
                # +2 accounts for the surrounding angle brackets.
                start = output.index('<%s>'%self.doc_tag)+len(self.doc_tag)+2
                end = output.rindex('</%s>'%self.doc_tag)
                output = output[start:end].strip()
            except ValueError:
                if output.strip().endswith('<%s />'%self.doc_tag):
                    # We have an empty document
                    output = ''
                else:
                    # We have a serious problem
                    raise ValueError('Markdown failed to strip top-level tags. Document=%r' % output.strip())
        # Run the text post-processors
        for pp in self.postprocessors.values():
            output = pp.run(output)
        return output.strip()
    def convertFile(self, input=None, output=None, encoding=None):
        """Converts a markdown file and returns the HTML as a unicode string.

        Decodes the file using the provided encoding (defaults to utf-8),
        passes the file content to markdown, and outputs the html to either
        the provided stream or the file with provided name, using the same
        encoding as the source file. The 'xmlcharrefreplace' error handler is
        used when encoding the output.

        **Note:** This is the only place that decoding and encoding of unicode
        takes place in Python-Markdown. (All other code is unicode-in /
        unicode-out.)

        Keyword arguments:

        * input: File object or path. Reads from stdin if `None`.
        * output: File object or path. Writes to stdout if `None`.
        * encoding: Encoding of input and output files. Defaults to utf-8.
        """
        encoding = encoding or "utf-8"
        # Read the source
        if input:
            if isinstance(input, str):
                # A path was given: open it with the requested encoding.
                input_file = codecs.open(input, mode="r", encoding=encoding)
            else:
                # A byte stream was given: wrap it in a decoding reader.
                input_file = codecs.getreader(encoding)(input)
            text = input_file.read()
            input_file.close()
        else:
            text = sys.stdin.read()
            if not isinstance(text, unicode):
                text = text.decode(encoding)
        text = text.lstrip('\ufeff') # remove the byte-order mark
        # Convert
        html = self.convert(text)
        # Write to file or stdout
        if output:
            if isinstance(output, str):
                output_file = codecs.open(output, "w",
                                          encoding=encoding,
                                          errors="xmlcharrefreplace")
                output_file.write(html)
                output_file.close()
            else:
                writer = codecs.getwriter(encoding)
                output_file = writer(output, errors="xmlcharrefreplace")
                output_file.write(html)
                # Don't close here. User may want to write more.
        else:
            # Encode manually and write bytes to stdout.
            html = html.encode(encoding, "xmlcharrefreplace")
            try:
                # Write bytes directly to buffer (Python 3).
                sys.stdout.buffer.write(html)
            except AttributeError:
                # Probably Python 2, which works with bytes by default.
                sys.stdout.write(html)
        return self
"""
EXPORTED FUNCTIONS
=============================================================================
Those are the two functions we really mean to export: markdown() and
markdownFromFile().
"""
def markdown(text, *args, **kwargs):
    """Convert a markdown string to HTML and return HTML as a unicode string.

    Shortcut covering the most basic use case: build a Markdown instance
    with the given options (loading any requested extensions) and run the
    parser once over *text*.

    Keyword arguments:

    * text: Markdown formatted text as Unicode or ASCII string.
    * Any arguments accepted by the Markdown class.

    Returns: An HTML document as a string.
    """
    return Markdown(*args, **kwargs).convert(text)
def markdownFromFile(*args, **kwargs):
    """Read markdown code from a file and write it to a file or a stream.

    Shortcut which builds a Markdown instance and calls convertFile()
    rather than convert().

    Keyword arguments:

    * input: a file name or readable object.
    * output: a file name or writable object.
    * encoding: Encoding of input and output.
    * Any arguments accepted by the Markdown class.
    """
    # For backward compatibility, map positional args onto their historic
    # keyword names; zip() ignores any extra positional args, and explicit
    # keyword args always win over positional ones.
    for name, value in zip(['input', 'output', 'extensions', 'encoding'], args):
        if not kwargs.has_key(name):
            kwargs[name] = value
    md = Markdown(**kwargs)
    md.convertFile(kwargs.get('input', None),
                   kwargs.get('output', None),
                   kwargs.get('encoding', None))
| 36.269231 | 109 | 0.599089 |
aceaf73248199ac39dbb886cb9544265790adf7d | 1,354 | py | Python | nuitka/tools/quality/restlint/__main__.py | juanfra684/Nuitka | 0e276895fadabefb598232f2ccf8cc7736c9a85b | [
"Apache-2.0"
] | 1 | 2019-09-09T19:27:43.000Z | 2019-09-09T19:27:43.000Z | nuitka/tools/quality/restlint/__main__.py | juanfra684/Nuitka | 0e276895fadabefb598232f2ccf8cc7736c9a85b | [
"Apache-2.0"
] | 1 | 2020-07-11T17:53:56.000Z | 2020-07-11T17:53:56.000Z | nuitka/tools/quality/restlint/__main__.py | juanfra684/Nuitka | 0e276895fadabefb598232f2ccf8cc7736c9a85b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2020, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Main program for PyLint checker tool.
"""
from __future__ import print_function
import os
import sys
# Unchanged, running from checkout, use the parent directory, the nuitka
# package ought be there.
sys.path.insert(
0, os.path.normpath(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
)
from nuitka.tools.Basics import goHome # isort:skip
from nuitka.tools.release.Documentation import checkReleaseDocumentation # isort:skip
def main():
    """Run the release-documentation checks.

    goHome() presumably changes to the checkout root first (see
    nuitka.tools.Basics) -- confirm if relying on the working directory.
    """
    goHome()
    checkReleaseDocumentation()
if __name__ == "__main__":
    main()
| 28.808511 | 86 | 0.71418 |
aceaf73ede9896f0ddcbfb0f5ae3b97f2ed32d98 | 502 | py | Python | env/lib/python3.8/site-packages/plotly/validators/layout/ternary/aaxis/tickformatstop/_value.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 76 | 2020-07-06T14:44:05.000Z | 2022-02-14T15:30:21.000Z | env/lib/python3.8/site-packages/plotly/validators/layout/ternary/aaxis/tickformatstop/_value.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 11 | 2020-08-09T02:30:14.000Z | 2022-03-12T00:50:14.000Z | env/lib/python3.8/site-packages/plotly/validators/layout/ternary/aaxis/tickformatstop/_value.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 11 | 2020-07-12T16:18:07.000Z | 2022-02-05T16:48:35.000Z | import _plotly_utils.basevalidators
class ValueValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name="value",
parent_name="layout.ternary.aaxis.tickformatstop",
**kwargs
):
super(ValueValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
role=kwargs.pop("role", "style"),
**kwargs
)
| 27.888889 | 67 | 0.613546 |
aceaf76301b372008a80f5e521da6d8c426826b2 | 749 | py | Python | app/config.py | uoguelph-ri/coco-annotator | ce70c62e5f7856fff999ffd7cd6537b65f5fdd5b | [
"MIT"
] | 4 | 2018-11-19T20:35:04.000Z | 2021-07-07T01:55:27.000Z | app/config.py | uoguelph-ri/coco-annotator | ce70c62e5f7856fff999ffd7cd6537b65f5fdd5b | [
"MIT"
] | null | null | null | app/config.py | uoguelph-ri/coco-annotator | ce70c62e5f7856fff999ffd7cd6537b65f5fdd5b | [
"MIT"
] | null | null | null | import os
from .util.version_util import get_tag
class Config:
NAME = "COCO Annotator"
VERSION = get_tag()
# Flask instance
SWAGGER_UI_JSONEDITOR = True
MAX_CONTENT_LENGTH = 1 * 1024 * 1024 * 1024 # 1GB
MONGODB_HOST = os.getenv("MONGODB_HOST", "mongodb://database/flask")
SECRET_KEY = os.getenv('SECRET_KEY', '<--- YOUR_SECRET_FORM_KEY --->')
TESTING = os.getenv("TESTING", False)
# Dataset Options
DATASET_DIRECTORY = os.getenv("DATASET_DIRECTORY", "/datasets/")
INITIALIZE_FROM_FILE = os.getenv("INITIALIZE_FROM_FILE")
LOAD_IMAGES_ON_START = os.getenv("LOAD_IMAGES_ON_START", False)
# User Options
LOGIN_DISABLED = os.getenv('LOGIN_DISABLED', False)
ALLOW_REGISTRATION = True
| 25.827586 | 74 | 0.695594 |
aceaf79e5cbea8081fd2d29c5318768b4116104b | 4,231 | py | Python | bin/cmssw_handle_pileup.py | cms-sw/cmssw-wm-tools | aa1626fb2d2fdbde6b3259e4b44828220883a809 | [
"Apache-2.0"
] | 1 | 2021-05-04T07:09:58.000Z | 2021-05-04T07:09:58.000Z | bin/cmssw_handle_pileup.py | cms-sw/cmssw-wm-tools | aa1626fb2d2fdbde6b3259e4b44828220883a809 | [
"Apache-2.0"
] | 4 | 2020-10-12T11:57:12.000Z | 2021-04-09T20:45:14.000Z | bin/cmssw_handle_pileup.py | cms-sw/cmssw-wm-tools | aa1626fb2d2fdbde6b3259e4b44828220883a809 | [
"Apache-2.0"
] | 2 | 2020-10-12T11:52:08.000Z | 2020-12-16T18:54:47.000Z | #!/bin/sh
""":"
python_cmd="python"
python3 -c "from FWCore.PythonFramework.CmsRun import CmsRun" 2>/dev/null && python_cmd="python3"
exec ${python_cmd} $0 ${1+"$@"}
"""
import sys, os
sys.path.insert(0, os.path.join( os.path.dirname(os.path.abspath(__file__)), '..', 'python'))
import FWCore.ParameterSet.Config as cms
import pickle
try:
import argparse
except ImportError: #get it from this package instead
import archived_argparse as argparse
import re, random
from tweak_program_helpers import make_parser, do_loop, get_cmssw_version
from tweak_program_helpers import isCMSSWSupported, adjust_source_guid
def getPileupMixingModules(process):
"""
Method returns two lists:
1) list of mixing modules ("MixingModule")
2) list of data mixing modules ("DataMixingModules")
The first gets added only pileup files of type "mc", the
second pileup files of type "data".
"""
mixModules, dataMixModules = [], []
prodsAndFilters = {}
prodsAndFilters.update(process.producers)
prodsAndFilters.update(process.filters)
for key, value in prodsAndFilters.items():
if value.type_() in ["MixingModule", "DataMixingModule", "PreMixingModule"]:
mixModules.append(value)
if value.type_() == "DataMixingModule":
dataMixModules.append(value)
return mixModules, dataMixModules
def read_json(f):
import json
try:
with open(f) as json_file:
json_data = json.load(json_file)
except Exception as e:
print("Error opening file "+f)
sys.exit(1)
if not isinstance(json_data,dict):
print("Error loading dictionary "+f)
sys.exit(1)
return json_data
def process_pileup_mixing_modules(process, args, modules, requestedPileupType):
# this needs to be read from json
pileupDict = read_json(args.pileup_dict)
for m in modules:
print("Processing " + m.type_() + " " + m.label_() + " type " + requestedPileupType)
for pileupType in pileupDict.keys():
# there should be either "input" or "secsource" attributes
# and both "MixingModule", "DataMixingModule" can have both
inputTypeAttrib = getattr(m, "input", None) or getattr(m, "secsource", None)
if not inputTypeAttrib:
continue
inputTypeAttrib.fileNames = cms.untracked.vstring()
if pileupType == requestedPileupType:
eventsAvailable = pileupDict[pileupType]["eventsAvailable"]
print(" Found "+str(eventsAvailable)+" events")
for fileLFN in pileupDict[pileupType]["FileList"]:
# vstring does not support unicode
inputTypeAttrib.fileNames.append(str(fileLFN))
print(" Added %4d files"%len(pileupDict[pileupType]["FileList"]))
if requestedPileupType == 'data':
if args.skip_pileup_events:
# For deterministic pileup, we want to shuffle the list the
# same for every job in the task and skip events
random.seed(int(args.random_seed))
inputTypeAttrib.skipEvents = cms.untracked.uint32(
int(args.skip_pileup_events) % eventsAvailable)
inputTypeAttrib.sequential = cms.untracked.bool(True)
# Shuffle according to the seed above or randomly
random.shuffle(inputTypeAttrib.fileNames)
# Handle enforceGUIDInFileName for pileup
#DL: need to figure this one out..
adjust_source_guid(inputTypeAttrib)
return process
def handle_pileup(process, args):
mixModules, dataMixModules = getPileupMixingModules(process)
process = process_pileup_mixing_modules(process, args, dataMixModules, "data")
process = process_pileup_mixing_modules(process, args, mixModules, "mc")
return process
def init_argparse():
parser = make_parser("Handle random number seeding")
parser.add_argument('--skip_pileup_events', required=False)
parser.add_argument('--random_seed', required=False)
parser.add_argument('--pileup_dict', required=True)
return parser
def main():
parser = init_argparse()
args = parser.parse_args()
do_loop(args, handle_pileup)
main()
| 37.114035 | 97 | 0.673127 |
aceaf84ade5ac9a565b86dba9d200611071325fb | 1,272 | py | Python | flow_plugin.py | stillinsecure/acl_audit | 07cdeba10ffb4cf6595b602c973de9e0f278bb13 | [
"MIT"
] | null | null | null | flow_plugin.py | stillinsecure/acl_audit | 07cdeba10ffb4cf6595b602c973de9e0f278bb13 | [
"MIT"
] | null | null | null | flow_plugin.py | stillinsecure/acl_audit | 07cdeba10ffb4cf6595b602c973de9e0f278bb13 | [
"MIT"
] | null | null | null | import re
from netlib.odict import ODict
from plugin import Plugin
class FlowResult(object):
def __init__(self,
flow_id,
report_col_name, # This is a generated name that is used by the report to group results
path,
status_code,
response,
content_type):
self.flow_id = flow_id
self.path = path
self.status_code = status_code
self.response = response
self.content_type = content_type
self.report_col_name = report_col_name
class FlowPlugin(Plugin):
    """Base plugin that inspects proxied flows and formats audit results."""

    def __init__(self):
        super(FlowPlugin, self).__init__()
        # Id of the flow currently being processed.
        self.current_flow_id = 0

    def request(self, flow):
        """Hook called for each request; subclasses may override."""
        pass

    def response(self, flow):
        """Hook called for each response; subclasses may override."""
        return True

    def format_result(self, flow_result, group):
        """Return a one-line summary of *flow_result* for the report."""
        # Collapse the response onto one line by removing CR/LF/TAB.
        # BUG FIX: the original pattern '[\r,\n,\t]' also matched literal
        # commas, silently stripping them from response bodies; commas are
        # now preserved.
        response = re.sub('[\r\n\t]', '', flow_result.response)
        return 'Status Code:{0:4} {1}'.format(flow_result.status_code, response)

    def remove_session(self, flow):
        """Strip cookies from *flow* so the request replays without a session."""
        flow.request.cookies = ODict()
        flow.request.headers.pop('cookie')

    def format_request(self, request):
        """Return the string used to identify this request in results."""
        return request.path

    def start_session(self):
        """Hook to establish a session before auditing; subclasses override."""
        pass

    def end_session(self):
        """Hook to tear down a session after auditing; subclasses override."""
        pass
| 24.941176 | 107 | 0.603774 |
aceaf9fd2fa831fdead025c76d5fdf1b0cbf060a | 1,933 | py | Python | resqs/packages/boto/ec2/ec2object.py | UMass-Rescue/moto | 3aa52aca28c622be9708da5fd31a8c8b92801634 | [
"Apache-2.0"
] | null | null | null | resqs/packages/boto/ec2/ec2object.py | UMass-Rescue/moto | 3aa52aca28c622be9708da5fd31a8c8b92801634 | [
"Apache-2.0"
] | null | null | null | resqs/packages/boto/ec2/ec2object.py | UMass-Rescue/moto | 3aa52aca28c622be9708da5fd31a8c8b92801634 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents an EC2 Object
"""
from resqs.packages.boto.ec2.tag import TagSet
class EC2Object(object):
    """Base class for objects returned by the EC2 API."""

    def __init__(self, connection=None):
        # Connection used to issue further requests; region is filled in
        # later by the response parser when known.
        self.connection = connection
        self.region = None
class TaggedEC2Object(EC2Object):
    """
    Any EC2 resource that can be tagged should be represented
    by a Python object that subclasses this class. This class
    has the mechanism in place to handle the tagSet element in
    the Describe* responses. If tags are found, it will create
    a TagSet object and allow it to parse and collect the tags
    into a dict that is stored in the "tags" attribute of the
    object.
    """
    def __init__(self, connection=None):
        super(TaggedEC2Object, self).__init__(connection)
        # Parsed tags are collected here (per the class docstring, TagSet
        # acts as a dict of tag name -> value).
        self.tags = TagSet()
| 39.44898 | 74 | 0.740817 |
aceafae6150c02010849d9e6e9cb7de3fd751523 | 733 | py | Python | Dataset/Leetcode/valid/78/302.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | Dataset/Leetcode/valid/78/302.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | Dataset/Leetcode/valid/78/302.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | class Solution:
def XXX(self, nums: List[int]) -> List[List[int]]:
final = list()
# ----------------------------------------------------
if len(nums)==1:
return [[],nums]
if len(nums)==0:
return []
# ------------------------------------------------------
def pop(cut):
if not cut:
return
else:
for i in range(len(cut)):
tmp = copy.deepcopy(cut)
tmp.pop(i)
if tmp not in final:
final.append(tmp)
pop(tmp)
pop(nums)
if nums:
final.append(nums)
return final
| 29.32 | 64 | 0.317872 |
aceafc33949257934136566e554e631b6f110548 | 1,272 | py | Python | twisted-benchmarks/names.py | nikan1996/gevent_reactor | 99e9ed7a5d7ebb8a8cbd1307009749bb08907c10 | [
"Apache-2.0"
] | 2 | 2019-03-01T10:19:46.000Z | 2019-08-05T05:34:54.000Z | twisted-benchmarks/names.py | nikan1996/gevent_reactor | 99e9ed7a5d7ebb8a8cbd1307009749bb08907c10 | [
"Apache-2.0"
] | 1 | 2019-08-05T07:47:01.000Z | 2019-08-07T05:10:52.000Z | twisted-benchmarks/names.py | nikan1996/gevent_reactor | 99e9ed7a5d7ebb8a8cbd1307009749bb08907c10 | [
"Apache-2.0"
] | 1 | 2019-11-26T23:47:35.000Z | 2019-11-26T23:47:35.000Z |
from twisted.names.dns import DNSDatagramProtocol
from twisted.names.server import DNSServerFactory
from twisted.names import hosts, client
from benchlib import Client, driver
class Client(Client):
    """Benchmark client issuing DNS address lookups against a local server."""

    def __init__(self, reactor, portNumber, timeout):
        # Resolver pointed at the local benchmark server; the timeout is
        # forwarded to every lookup.
        self._resolver = client.Resolver(servers=[('127.0.0.1', portNumber)])
        self._timeout = timeout
        super(Client, self).__init__(reactor)

    def _request(self):
        """Issue one lookup and wire the continue/stop handlers onto it."""
        query = self._resolver.lookupAddress(
            'localhost', timeout=(self._timeout,))
        query.addCallback(self._continue)
        query.addErrback(self._stop)
def main(reactor, duration):
    """Run the DNS benchmark for *duration* and return a Deferred result."""
    concurrency = 10

    factory = DNSServerFactory([hosts.Resolver()])
    port = reactor.listenUDP(0, DNSDatagramProtocol(factory))

    # Have queries time out no sooner than the duration of this benchmark so
    # we don't have to deal with retries or timeout errors.
    bench = Client(reactor, port.getHost().port, duration)
    finished = bench.run(concurrency, duration)

    def cleanup(passthrough):
        # Stop listening, then forward the benchmark result unchanged.
        stopped = port.stopListening()
        stopped.addCallback(lambda ignored: passthrough)
        return stopped

    finished.addBoth(cleanup)
    return finished
if __name__ == '__main__':
    import sys
    # Import this module under its own name so driver() is handed
    # names.main (the module attribute) rather than the local main.
    import names
    driver(names.main, sys.argv)
| 27.652174 | 77 | 0.688679 |
aceafc6657c114f8fd10f4c6a60d114db4ebdb47 | 888 | py | Python | pythonlibs/mantis/fanbei/smarthome/errors.py | adoggie/Tibet.6 | 3c53060edafd80b9c4dafa10699a68d86a410c66 | [
"MIT"
] | 22 | 2019-10-28T07:28:12.000Z | 2022-03-19T15:36:41.000Z | pythonlibs/mantis/fanbei/smarthome/errors.py | adoggie/Tibet.6 | 3c53060edafd80b9c4dafa10699a68d86a410c66 | [
"MIT"
] | 1 | 2019-11-07T04:54:14.000Z | 2019-11-07T07:12:48.000Z | pythonlibs/mantis/fanbei/smarthome/errors.py | adoggie/Tibet.6 | 3c53060edafd80b9c4dafa10699a68d86a410c66 | [
"MIT"
] | 13 | 2019-10-28T07:29:07.000Z | 2021-11-03T06:53:12.000Z | #coding: utf-8
from mantis.fundamental.errors import ErrorEntry
class ErrorDefs(object):
NoError = ErrorEntry(0,u'no error')
UnknownError = ErrorEntry(1,u'未定义的错误')
SystemFault = ErrorEntry(1001, u'系统故障')
TokenInvalid = ErrorEntry(1002,u'令牌错误')
AccessDenied = ErrorEntry(1003,u'访问受限')
PermissionDenied = ErrorEntry(1004,u'权限受限')
ParameterInvalid = ErrorEntry(105,u'参数无效')
PasswordError = ErrorEntry(1006,u'密码错误')
UserNotExist = ErrorEntry(1007,u'用户不存在')
ObjectHasExist = ErrorEntry(1008,u'对象已存在')
ObjectNotExist = ErrorEntry(1009,u'对象不存在')
ResExpired = ErrorEntry(1010,u'资源过期')
ReachLimit = ErrorEntry(1011,u'达到上限')
DeviceServerNotFound = ErrorEntry(2001,u'接入服务器未配置')
DeviceNotOnline = ErrorEntry(2002,u'设备未在线')
| 29.6 | 62 | 0.628378 |
aceafdb177e4784e41692ff76f5d76b8037457b1 | 9,258 | py | Python | hw4/spectral_clustering/constructW.py | Cjkkkk/data_mining_homework | 649dc3144444611d7c8ccaf6109050f53b5c611c | [
"MIT"
] | 2 | 2019-08-31T09:15:21.000Z | 2020-02-03T09:01:04.000Z | hw4/hw4_code/spectral_clustering/constructW.py | Junlin-Yin/Data-Mining-Homework | 043f836e3dd30f32b5b06f40af61ae55b9287fbc | [
"MIT"
] | null | null | null | hw4/hw4_code/spectral_clustering/constructW.py | Junlin-Yin/Data-Mining-Homework | 043f836e3dd30f32b5b06f40af61ae55b9287fbc | [
"MIT"
] | null | null | null | import numpy as np
import scipy.sparse
import scipy.stats
def Eudist2(x, y):
    """Pairwise squared Euclidean distances between the rows of x and y.

    Accepts dense ndarrays or scipy sparse matrices; always returns a
    dense (x.shape[0], y.shape[0]) array computed as
    ||a||^2 + ||b||^2 - 2*a.b for every row pair.
    """
    def _row_sq_sums(m):
        # Element-wise square works differently for sparse matrices
        # (where `*` would be matrix multiplication).
        sq = m * m if isinstance(m, np.ndarray) else m.multiply(m)
        return np.sum(sq, axis=1)

    cross = -2 * (x @ y.T)
    if not isinstance(cross, np.ndarray):
        cross = cross.toarray()
    cross += _row_sq_sums(x).reshape((x.shape[0], 1))
    cross += _row_sq_sums(y).reshape((1, y.shape[0]))
    return cross
def NormalizeFea(fea):
    """Scale every row of *fea* to unit L2 norm (dense or scipy-sparse input).

    Fixes two defects in the original:

    * ``np.max(np.finfo(fea_norm.dtype), fea_norm)`` passed a ``finfo``
      object where ``np.max`` expects an array (its second argument is an
      *axis*), so the call always raised. ``np.maximum`` against ``eps``
      is what was intended, guarding rows with (near-)zero norm against
      division by zero.
    * the rows were *multiplied* by their norm instead of divided.
    """
    if scipy.sparse.issparse(fea):
        sq = fea.multiply(fea)
    else:
        sq = np.multiply(fea, fea)
    # Row-wise squared norms as a flat float array (sparse sums return a
    # matrix, so flatten via asarray).
    row_sq = np.asarray(sq.sum(axis=1), dtype=np.float64).reshape(fea.shape[0])
    row_norm = np.maximum(row_sq, np.finfo(np.float64).eps) ** 0.5
    inv = (1.0 / row_norm).reshape((fea.shape[0], 1))
    if scipy.sparse.issparse(fea):
        return fea.multiply(inv)
    return fea * inv
def constructW(fea, NeighborMode='KNN', WeightMode='HeatKernel', **kwargs):
    """
    Build the (sparse) affinity matrix W for spectral methods.

    Usage:
    W = constructW(fea, options)
    fea: Rows of vectors of data points. Each row is x_i
    options: Struct value in Matlab. The fields in options that can be set:
        NeighborMode -  Indicates how to construct the graph. Choices
                        are: [Default 'KNN']
            'KNN'        -  k = 0
                                Complete graph
                            k > 0
                                Put an edge between two nodes if and
                                only if they are among the k nearst
                                neighbors of each other. You are
                                required to provide the parameter k in
                                the options. Default k=5.
            'Supervised' -  k = 0
                                Put an edge between two nodes if and
                                only if they belong to same class.
                            k > 0
                                Put an edge between two nodes if
                                they belong to same class and they
                                are among the k nearst neighbors of
                                each other.
                            Default: k=0
                            You are required to provide the label
                            information gnd in the options.
        WeightMode   -  Indicates how to assign weights for each edge
                        in the graph. Choices are:
            'Binary'     - 0-1 weighting. Every edge receiveds weight
                           of 1.
            'HeatKernel' - If nodes i and j are connected, put weight
                           W_ij = exp(-norm(x_i - x_j)/2t^2). You are
                           required to provide the parameter t. [Default One]
            'Cosine'     - If nodes i and j are connected, put weight
                           cosine(x_i,x_j).
        k           -   The parameter needed under 'KNN' NeighborMode.
                        Default will be 5.
        gnd         -   The parameter needed under 'Supervised'
                        NeighborMode. Colunm vector of the label
                        information for each data point.
        bLDA        -   0 or 1. Only effective under 'Supervised'
                        NeighborMode. If 1, the graph will be constructed
                        to make LPP exactly same as LDA. Default will be
                        0.
        t           -   The parameter needed under 'HeatKernel'
                        WeightMode. Default will be 1
        bNormalized -   0 or 1. Only effective under 'Cosine' WeightMode.
                        Indicates whether the fea are already be
                        normalized to 1. Default will be 0
        bSelfConnected - 0 or 1. Indicates whether W(i,i) == 1. Default 0
                         if 'Supervised' NeighborMode & bLDA == 1,
                         bSelfConnected will always be 1. Default 0.
        bTrueKNN    -   0 or 1. If 1, will construct a truly kNN graph
                        (Not symmetric!). Default will be 0. Only valid
                        for 'KNN' NeighborMode

    Examples:

        fea = rand(50,15);
        options = [];
        options.NeighborMode = 'KNN';
        options.k = 5;
        options.WeightMode = 'HeatKernel';
        options.t = 1;
        W = constructW(fea,options);

        fea = rand(50,15);
        gnd = [ones(10,1);ones(15,1)*2;ones(10,1)*3;ones(15,1)*4];
        options = [];
        options.NeighborMode = 'Supervised';
        options.gnd = gnd;
        options.WeightMode = 'HeatKernel';
        options.t = 1;
        W = constructW(fea,options);

        fea = rand(50,15);
        gnd = [ones(10,1);ones(15,1)*2;ones(10,1)*3;ones(15,1)*4];
        options = [];
        options.NeighborMode = 'Supervised';
        options.gnd = gnd;
        options.bLDA = 1;
        W = constructW(fea,options);

    For more details about the different ways to construct the W, please
    refer:
        Deng Cai, Xiaofei He and Jiawei Han, "Document Clustering Using
        Locality Preserving Indexing" IEEE TKDE, Dec. 2005.

    Written by Deng Cai (dengcai2 AT cs.uiuc.edu), April/2004, Feb/2006,
    May/2007
    """
    # deal with options
    def set_default(key, value):
        # Only fill in options the caller did not supply.
        if key not in kwargs:
            kwargs[key] = value
    if NeighborMode.lower() == 'KNN'.lower():
        set_default('k', 5)
    elif NeighborMode.lower() == 'Supervised'.lower():
        set_default('bLDA', 0)
        if kwargs['bLDA']:
            # LDA-equivalent graphs always keep self-connections.
            set_default('bSelfConnected', 1)
        set_default('k', 0)
        if 'gnd' not in kwargs:
            raise Exception('Label(gnd) should be provided under \'Supervised\' NeighborMode!')
        if len(kwargs['gnd']) != len(fea):
            raise Exception('gnd doesn\'t match with fea!')
    else:
        raise Exception('NeighborMode does not exist!')
    bBinary = 0
    bCosine = 0
    if WeightMode.lower() == 'Binary'.lower():
        bBinary = 1
    elif WeightMode.lower() == 'HeatKernel'.lower():
        if 't' not in kwargs:
            # Default kernel width: mean pairwise distance over (a sample
            # of at most 3000 of) the data points.
            fea_sample = fea
            if fea.shape[0] > 3000:
                fea_sample = fea[np.random.permutation(np.arange(1, fea.shape[0]))[:3000]]
            d = Eudist2(fea_sample, fea_sample)
            kwargs['t'] = np.mean(d)
    elif WeightMode.lower() == 'Cosine'.lower():
        kwargs['bNormalized'] = kwargs.get('bNormalized', 0)
        bCosine = 1
    else:
        raise Exception('WeightMode does not exist!')
    set_default('bSelfConnected', 0)
    if 'gnd' in kwargs:
        nSmp = len(kwargs['gnd'])
    else:
        nSmp = fea.shape[0]
    maxM = 62500000 # 500M
    # Process the distance matrix in row blocks to bound peak memory.
    BlockSize = max(1, maxM // (nSmp * 3))
    if NeighborMode.lower() == 'Supervised'.lower():
        raise NotImplementedError("Supervised is not implimented and it is not needed in this Homework")
    Normfea = fea
    if bCosine and not kwargs['bNormalized']:
        Normfea = NormalizeFea(fea)
    # Always Use Euclidean distance to get K-nearest-neighbor
    # But if feature is normalized, we **could** (but actually TA do not) use inner-produce to sort
    if NeighborMode.lower() == 'KNN'.lower():
        k = kwargs['k']
        # G holds (row index, column index, weight) triplets for the
        # k+1 nearest neighbours (including self) of every sample.
        G = np.zeros((3, nSmp * (k + 1)))
        for block in range(0, nSmp, BlockSize):
            block_end = min(nSmp, block + BlockSize)
            dist = Eudist2(fea[block: block_end, :], fea)
            # build KNN
            arg_p = np.argpartition(dist, k + 1)[:, :k + 1]  # indices of the minimum k+1
            dist = dist[np.arange(dist.shape[0])[:, None], arg_p]  # distances of the minimum k+1
            arg_s = np.argsort(dist)  # sort those k+1 per row
            idx = arg_p[np.arange(dist.shape[0])[:, None], arg_s]  # neighbour indices, sorted
            dump = dist[np.arange(dist.shape[0])[:, None], arg_s]  # neighbour distances, sorted
            if not bBinary:
                if bCosine:
                    # Inner product of L2-normalised rows is the cosine.
                    dist = Normfea[block:block_end, :] @ Normfea.T
                    dump = dist[np.arange(dist.shape[0])[:, None], idx]
                else:
                    # Heat-kernel weight from the squared distance.
                    dump = np.exp(-dump / (2 * kwargs['t'] * kwargs['t']))
            G[0, block * (k + 1):block_end * (k + 1)] = \
                np.repeat(np.arange(block, block_end), k + 1)
            G[1, block * (k + 1):block_end * (k + 1)] = idx.flatten()
            if not bBinary:
                G[2, block * (k + 1):block_end * (k + 1)] = dump.flatten()
            else:
                G[2, block * (k + 1):block_end * (k + 1)] = 1
        W = scipy.sparse.coo_matrix((G[2], (G[0], G[1])), shape=(nSmp, nSmp))
        if not kwargs['bSelfConnected']:
            W.setdiag(0)
        if not kwargs.get('bTrueKNN', False):
            # Symmetrise: keep an edge if either endpoint chose the other.
            W = W.maximum(W.T)
        return W
| 42.273973 | 114 | 0.482826 |
aceaff05b3bcf5d93b3ca158c501d55fc999243e | 1,131 | py | Python | commercialoperator/migrations/0125_auto_20210902_1231.py | GraemeMuller/commercialoperator | 9218fb0a8844bc7f41cc371f4bd9488538df5fda | [
"Apache-2.0"
] | null | null | null | commercialoperator/migrations/0125_auto_20210902_1231.py | GraemeMuller/commercialoperator | 9218fb0a8844bc7f41cc371f4bd9488538df5fda | [
"Apache-2.0"
] | 12 | 2020-02-12T06:26:55.000Z | 2022-02-13T05:52:54.000Z | commercialoperator/migrations/0125_auto_20210902_1231.py | GraemeMuller/commercialoperator | 9218fb0a8844bc7f41cc371f4bd9488538df5fda | [
"Apache-2.0"
] | 8 | 2020-02-24T05:11:18.000Z | 2021-02-26T07:54:24.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-09-02 04:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django (1.11.29) migration.

    Re-declares the ``choices`` list of ``GlobalSettings.key``.
    NOTE(review): presumably this adds/renames entries relative to the
    previous migration state (e.g. 'dwer_application_form') — confirm
    against the preceding migrations in this app.
    """
    dependencies = [
        # Must be applied after the BookingInvoice property_cache migration.
        ('commercialoperator', '0124_bookinginvoice_property_cache'),
    ]
    operations = [
        migrations.AlterField(
            model_name='globalsettings',
            name='key',
            field=models.CharField(choices=[('credit_facility_link', 'Credit Facility Link'), ('deed_poll', 'Deed poll'), ('deed_poll_filming', 'Deed poll Filming'), ('deed_poll_event', 'Deed poll Event'), ('online_training_document', 'Online Training Document'), ('park_finder_link', 'Park Finder Link'), ('fees_and_charges', 'Fees and charges link'), ('event_fees_and_charges', 'Event Fees and charges link'), ('commercial_filming_handbook', 'Commercial Filming Handbook link'), ('park_stay_link', 'Park Stay Link'), ('event_traffic_code_of_practice', 'Event traffic code of practice'), ('trail_section_map', 'Trail section map'), ('dwer_application_form', 'DWER Application Form')], max_length=255),
        ),
    ]
| 53.857143 | 702 | 0.702034 |
aceaff60734f2ad20078168e888d63fe9db7ecfb | 930 | py | Python | pontoon/batch/tests/test_utils.py | foss4/pontoon | 0503cc78f00e1e9d23c1ca885fe74a627563fc82 | [
"BSD-3-Clause"
] | 1,145 | 2015-05-15T01:08:16.000Z | 2022-03-31T14:23:45.000Z | pontoon/batch/tests/test_utils.py | foss4/pontoon | 0503cc78f00e1e9d23c1ca885fe74a627563fc82 | [
"BSD-3-Clause"
] | 1,365 | 2015-05-04T21:54:18.000Z | 2022-03-30T16:53:49.000Z | pontoon/batch/tests/test_utils.py | foss4/pontoon | 0503cc78f00e1e9d23c1ca885fe74a627563fc82 | [
"BSD-3-Clause"
] | 667 | 2015-05-04T21:33:45.000Z | 2022-03-30T10:25:33.000Z | from fluent.syntax import FluentParser, FluentSerializer
from pontoon.batch.utils import ftl_find_and_replace
parser = FluentParser()
serializer = FluentSerializer()
def normalize(string):
    """Round-trip a Fluent entry through parse + serialize.

    Serializing with the default Fluent serializer rules canonicalizes the
    formatting, so two entries that differ only in whitespace/style compare
    equal after normalization.
    """
    entry = parser.parse_entry(string)
    return serializer.serialize_entry(entry)
def test_ftl_find_and_replace():
    """ftl_find_and_replace rewrites text values, not message identifiers."""
    # Simple case: a plain message value is rewritten.
    source = normalize("key = find")
    expected = normalize("key = replace")
    assert ftl_find_and_replace(source, "find", "replace") == expected

    # Complex case: only text values are touched — the message id "find"
    # must stay untouched while the attribute value is replaced.
    source = normalize(
        """find =
    .placeholder = find
    """
    )
    expected = normalize(
        """find =
    .placeholder = replace
    """
    )
    assert ftl_find_and_replace(source, "find", "replace") == expected
| 26.571429 | 86 | 0.710753 |
aceb008023eb6c123bb2fbe50777b892bbbe764c | 2,658 | py | Python | tests/local/warehouse/scans/test_scan_result.py | fbertsch/soda-sql | 112fd2efad64795b03dc55143fbf6a8385c93c8c | [
"Apache-2.0"
] | null | null | null | tests/local/warehouse/scans/test_scan_result.py | fbertsch/soda-sql | 112fd2efad64795b03dc55143fbf6a8385c93c8c | [
"Apache-2.0"
] | null | null | null | tests/local/warehouse/scans/test_scan_result.py | fbertsch/soda-sql | 112fd2efad64795b03dc55143fbf6a8385c93c8c | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Soda
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sodasql.exceptions.exceptions import ERROR_CODE_TEST_FAILED
from sodasql.scan.scan_yml_parser import KEY_METRICS, KEY_TESTS
from tests.common.sql_test_case import SqlTestCase
class TestScanResult(SqlTestCase):
    """Checks that scan results surface test-evaluation errors correctly."""

    def setUp(self) -> None:
        super().setUp()
        # A small varchar table (including an empty-ish value and a NULL)
        # to give the row_count scan something to run against.
        self.sql_recreate_table(
            [f"name {self.dialect.data_type_varchar_255}"],
            ["('one')",
             "('two')",
             "('three') ",
             "('no value')",
             "(null)"])

    def _scan_with_tests(self, tests):
        """Run a row_count scan configured with the given tests mapping."""
        return self.scan({
            KEY_METRICS: [
                'row_count'
            ],
            KEY_TESTS: tests,
        })

    def _assert_test_failure(self, scan_result, expected_message):
        """Assert the scan failed with a test error carrying expected_message."""
        self.assertTrue(scan_result.has_failures())
        self.assertIsNotNone(scan_result.error)
        self.assertIsNotNone(scan_result.error["code"])
        self.assertEqual(scan_result.error["code"], ERROR_CODE_TEST_FAILED)
        self.assertIsNotNone(scan_result.error["message"])
        self.assertEqual(scan_result.error["message"], expected_message)

    def test_scan_result_with_test_error(self):
        # A single test referencing the undefined name 'error' must fail.
        scan_result = self._scan_with_tests({
            'my_test': '10 < error < 20',
        })
        self._assert_test_failure(
            scan_result,
            "Soda-sql test failed with error: name 'error' is not defined")

    def test_scan_result_with_test_errors(self):
        # Two failing tests get aggregated into one combined error message.
        scan_result = self._scan_with_tests({
            'my_test': '10 < error < 20',
            'my_second_test': '10 < error < 20',
        })
        self._assert_test_failure(
            scan_result,
            "2 soda-sql tests failed with errors: name 'error' is not defined, name 'error' is not defined")
| 39.671642 | 151 | 0.637321 |
aceb00b31db936ff180484c5df26afb06d996625 | 3,833 | py | Python | rplugin/python3/nvim_pfix.py | mreinhardt/nvim-pfix | b35a4abe1a429ac05654c4d2df5f562345fb2a38 | [
"MIT"
] | 1 | 2018-03-01T14:02:20.000Z | 2018-03-01T14:02:20.000Z | rplugin/python3/nvim_pfix.py | mreinhardt/nvim-pfix | b35a4abe1a429ac05654c4d2df5f562345fb2a38 | [
"MIT"
] | null | null | null | rplugin/python3/nvim_pfix.py | mreinhardt/nvim-pfix | b35a4abe1a429ac05654c4d2df5f562345fb2a38 | [
"MIT"
] | null | null | null | """nvim-pfix neovim plugin."""
from collections import defaultdict
import subprocess
import neovim
from neovim.api.nvim import NvimError
@neovim.plugin
class Main(object):
    """Plugin entry point for nvim-pfix.

    Provides a project-wide find (:Pfind) that dumps grep-style matches into
    a scratch buffer, and an interactive replace (:Pfix) that applies edits
    made in that buffer back to the matched files.
    """
    # Name of the scratch buffer holding the search results.
    PFIX_BUFFER = '_-_PFIX_-_'
    def __init__(self, vim):
        # `vim` is the pynvim session handle; `commands` tracks the state of
        # an interactive :Pfix run (initialized by project_find).
        self.vim = vim
        self.commands = None
    @neovim.command('Pfind', range='', nargs='*', sync=True)
    def project_find(self, args, range):
        """Find all instances of search query in project files and load into
        buffer self.PFIX_BUFFER.

        The search program and its options come from the vim variables
        g:nvim_pfix_pfind and g:nvim_pfix_pfind_opts (defaults: grep
        --recursive --line-number); results are shown in a nofile scratch
        buffer sized by g:nvim_pfix_buffer_height.
        """
        self.commands = defaultdict(bool)
        # No query given on the command line: prompt for one interactively.
        if not args:
            try:
                args.append(self._ask('Enter search query: '))
            except NvimError:
                return
        cmd = self.vim.vars.get('nvim_pfix_pfind', 'grep')
        cmd_opts = self.vim.vars.get(
            'nvim_pfix_pfind_opts', '--recursive --line-number').split()
        cmd_opts.insert(0, cmd)
        cmd_opts.append(' '.join(args))
        cmd_opts.append('.')
        # Runs e.g. `grep --recursive --line-number <query> .` in the cwd.
        s = subprocess.run(cmd_opts, stdout=subprocess.PIPE)
        out = s.stdout.decode('utf-8').split('\n')
        buffer_height = int(self.vim.vars.get('nvim_pfix_buffer_height', 10))
        self.vim.command('split {0}'.format(self.PFIX_BUFFER))
        self.vim.command('buffer {0}'.format(self.PFIX_BUFFER))
        self.vim.command('resize {0}'.format(buffer_height))
        # nofile: scratch buffer, never written to disk.
        self.vim.command('set buftype=nofile')
        self.vim.current.buffer[:] = out
    @neovim.command('Pfix', range='', nargs='*', sync=True)
    def project_replace(self, args, range):
        """Replace lines in project files with edits from self.PFIX_BUFFER.

        Each scratch-buffer line is expected in grep's `file:line:contents`
        format. For every line whose contents differ from the file's current
        line, the user confirms the replacement: y = replace, n = skip,
        a = replace this and all remaining, anything else = quit.
        """
        if self.vim.funcs.bufloaded(self.PFIX_BUFFER):
            self.vim.command('buffer {0}'.format(self.PFIX_BUFFER))
        else:
            # :Pfind has not populated the scratch buffer yet.
            self.vim.command('echohl WarningMsg')
            self.vim.command('echo ":Pfind must be run before :Pfix"')
            self.vim.command('echohl None')
            return
        # NOTE(review): assumes project_find already initialized
        # self.commands in this session — confirm for stale buffers.
        for line in self.vim.current.buffer:
            choice = ''
            try:
                # grep output format: filename:lineno:contents
                filename, lineno, contents = line.split(':', 2)
            except ValueError:
                continue
            try:
                # Prefer jumping to an already-loaded buffer; fall back to
                # opening the file if it is not loaded yet.
                self.vim.command('buffer +{0} {1}'.format(lineno, filename))
            except neovim.api.nvim.NvimError:
                self.vim.command('edit +{0} {1}'.format(lineno, filename))
            self.vim.command('redraw')
            if contents == self.vim.current.line:
                # Line was not edited in the scratch buffer; nothing to do.
                continue
            if self.commands['doall']:
                # User previously answered 'a': apply without asking again.
                self.vim.current.line = contents
                continue
            choice = self._ask(
                "Replace with `{0}`? [y/n/a/Q] ".format(contents))
            choice = choice.lower()
            if choice in ('y', 'a'):
                self.vim.current.line = contents
                self.commands['doall'] = choice == 'a'
                self.commands['changes'] = True
            elif choice == 'n':
                continue
            else: # q or any unknown input
                break
        try:
            choice = ''
            self.vim.command('redraw')
            if self.commands['changes']:
                choice = self._ask("Write all buffers? [y/N] ")
                if choice.lower() == 'y':
                    self.vim.command('wall')
            # Clean up: return to and delete the scratch results buffer.
            self.vim.command('buffer {0}'.format(self.PFIX_BUFFER))
            self.vim.command('bdelete {0}'.format(self.PFIX_BUFFER))
        except NvimError:
            pass
    def _ask(self, question):
        """Prompt user for input with `question`; return '' if interrupted."""
        self.vim.command('echohl Question')
        try:
            choice = self.vim.funcs.input(question)
        except NvimError:
            # E.g. the user aborted the prompt with Ctrl-C.
            choice = ''
        finally:
            self.vim.command('echohl None')
        return choice
| 34.845455 | 78 | 0.549439 |
aceb01853a908a5402072a4afd81d9eca0b0423a | 2,144 | py | Python | pgAdmin/pgadmin4/web/pgadmin/browser/server_groups/servers/databases/schemas/functions/tests/test_procedure_put.py | WeilerWebServices/PostgreSQL | ae594ed077bebbad1be3c1d95c38b7c2c2683e8c | [
"PostgreSQL"
] | null | null | null | pgAdmin/pgadmin4/web/pgadmin/browser/server_groups/servers/databases/schemas/functions/tests/test_procedure_put.py | WeilerWebServices/PostgreSQL | ae594ed077bebbad1be3c1d95c38b7c2c2683e8c | [
"PostgreSQL"
] | null | null | null | pgAdmin/pgadmin4/web/pgadmin/browser/server_groups/servers/databases/schemas/functions/tests/test_procedure_put.py | WeilerWebServices/PostgreSQL | ae594ed077bebbad1be3c1d95c38b7c2c2683e8c | [
"PostgreSQL"
] | null | null | null | ##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2020, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import json
import uuid
from pgadmin.browser.server_groups.servers.databases.tests import utils as \
database_utils
from pgadmin.utils.route import BaseTestGenerator
from regression.python_test_utils import test_utils as utils
from . import utils as funcs_utils
class ProcedurePutTestCase(BaseTestGenerator):
    """ This class will update new procedure under schema node. """
    skip_on_database = ['gpdb']
    scenarios = [
        # Fetching default URL for procedure node.
        ('Fetch Procedure Node URL',
         dict(url='/browser/procedure/obj/'))
    ]

    def runTest(self):
        """ This function will update procedure under database node. """
        super(ProcedurePutTestCase, self).setUp()
        self = funcs_utils.set_up(self)

        # Procedures only exist in PostgreSQL 11 and later.
        if (self.server_type == "pg"
                and self.server_version < 110000):
            self.skipTest("Procedures are not supported by PG < 110000.")

        # Create a throw-away procedure whose comment we will update.
        func_name = "test_procedure_put_%s" % str(uuid.uuid4())[1:8]
        proc_id = funcs_utils.create_procedure(
            self.server, self.db_name, self.schema_name, func_name,
            self.server_type, self.server_version)[0]

        # PUT the updated description against the procedure endpoint.
        endpoint = self.url + '/'.join([
            str(utils.SERVER_GROUP), str(self.server_id), str(self.db_id),
            str(self.schema_id), str(proc_id)])
        payload = {
            "description": "This is procedure update comment",
            "id": proc_id
        }
        put_response = self.tester.put(
            endpoint,
            data=json.dumps(payload),
            follow_redirects=True)
        self.assertEqual(put_response.status_code, 200)

        # Disconnect the database
        database_utils.disconnect_database(self, self.server_id, self.db_id)

    def tearDown(self):
        pass
| 34.031746 | 76 | 0.59375 |
aceb0271542a3a631c76af66f0de503d87812005 | 27,219 | py | Python | ai_economist/foundation/components/covid19_components.py | alan-man/ai-economist | fbd30e19aed7901e2b342cc7f4c2c7c94be58c90 | [
"BSD-3-Clause"
] | 795 | 2020-08-06T04:58:00.000Z | 2022-03-30T05:14:59.000Z | ai_economist/foundation/components/covid19_components.py | alan-man/ai-economist | fbd30e19aed7901e2b342cc7f4c2c7c94be58c90 | [
"BSD-3-Clause"
] | 30 | 2020-08-06T20:24:05.000Z | 2022-03-04T00:59:30.000Z | ai_economist/foundation/components/covid19_components.py | alan-man/ai-economist | fbd30e19aed7901e2b342cc7f4c2c7c94be58c90 | [
"BSD-3-Clause"
] | 150 | 2020-08-06T20:34:25.000Z | 2022-03-30T13:28:20.000Z | # Copyright (c) 2021, salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root
# or https://opensource.org/licenses/BSD-3-Clause
from datetime import datetime
import GPUtil
import numpy as np
from ai_economist.foundation.base.base_component import (
BaseComponent,
component_registry,
)
try:
num_gpus_available = len(GPUtil.getAvailable())
print(f"Inside covid19_components.py: {num_gpus_available} GPUs are available.")
if num_gpus_available == 0:
print("No GPUs found! Running the simulation on a CPU.")
else:
from warp_drive.utils.constants import Constants
from warp_drive.utils.data_feed import DataFeed
_OBSERVATIONS = Constants.OBSERVATIONS
_ACTIONS = Constants.ACTIONS
except ModuleNotFoundError:
print(
"Warning: The 'WarpDrive' package is not found and cannot be used! "
"If you wish to use WarpDrive, please run "
"'pip install rl-warp-drive' first."
)
except ValueError:
print("No GPUs found! Running the simulation on a CPU.")
@component_registry.add
class ControlUSStateOpenCloseStatus(BaseComponent):
    """
    Sets the open/close stringency levels for states.
    Args:
        n_stringency_levels (int): number of stringency levels the states can chose
            from. (Must match the number in the model constants dictionary referenced by
            the parent scenario.)
        action_cooldown_period (int): action cooldown period in days.
            Once a stringency level is set, the state(s) cannot switch to another level
            for a certain number of days (referred to as the "action_cooldown_period")
    """
    name = "ControlUSStateOpenCloseStatus"
    required_entities = []
    agent_subclasses = ["BasicMobileAgent"]
    def __init__(
        self,
        *base_component_args,
        n_stringency_levels=10,
        action_cooldown_period=28,
        **base_component_kwargs,
    ):
        self.action_cooldown_period = action_cooldown_period
        super().__init__(*base_component_args, **base_component_kwargs)
        self.np_int_dtype = np.int32
        self.n_stringency_levels = int(n_stringency_levels)
        assert self.n_stringency_levels >= 2
        # Validated lazily against the world on the first CPU component_step.
        self._checked_n_stringency_levels = False
        self.masks = dict()
        # Per-action masks over the n_stringency_levels non-NO-OP actions:
        # 1 = allowed, 0 = masked out.
        self.default_agent_action_mask = [1 for _ in range(self.n_stringency_levels)]
        self.no_op_agent_action_mask = [0 for _ in range(self.n_stringency_levels)]
        # Mask matrix of shape (n_stringency_levels, n_agents).
        self.masks["a"] = np.repeat(
            np.array(self.no_op_agent_action_mask)[:, np.newaxis],
            self.n_agents,
            axis=-1,
        )
        # (This will be overwritten during reset; see below)
        self.action_in_cooldown_until = None
    def get_additional_state_fields(self, agent_cls_name):
        # This component adds no extra per-agent state fields.
        return {}
    def additional_reset_steps(self):
        # Store the times when the next set of actions can be taken.
        self.action_in_cooldown_until = np.array(
            [self.world.timestep for _ in range(self.n_agents)]
        )
    def get_n_actions(self, agent_cls_name):
        # Each state agent chooses one of the stringency levels
        # (action 0 is the implicit NO-OP handled by the environment).
        if agent_cls_name == "BasicMobileAgent":
            return self.n_stringency_levels
        return None
    def generate_masks(self, completions=0):
        """Unmask actions only for agents whose cooldown period has ended."""
        for agent in self.world.agents:
            if self.world.use_real_world_policies:
                self.masks["a"][:, agent.idx] = self.default_agent_action_mask
            else:
                if self.world.timestep < self.action_in_cooldown_until[agent.idx]:
                    # Keep masking the actions
                    self.masks["a"][:, agent.idx] = self.no_op_agent_action_mask
                else: # self.world.timestep == self.action_in_cooldown_until[agent.idx]
                    # Cooldown period has ended; unmask the "subsequent" action
                    self.masks["a"][:, agent.idx] = self.default_agent_action_mask
        return self.masks
    def get_data_dictionary(self):
        """
        Create a dictionary of data to push to the GPU (device).
        """
        data_dict = DataFeed()
        data_dict.add_data(
            name="action_cooldown_period",
            data=self.action_cooldown_period,
        )
        data_dict.add_data(
            name="action_in_cooldown_until",
            data=self.action_in_cooldown_until,
            save_copy_and_apply_at_reset=True,
        )
        data_dict.add_data(
            name="num_stringency_levels",
            data=self.n_stringency_levels,
        )
        # The leading [1] is the mask entry for the NO-OP action.
        data_dict.add_data(
            name="default_agent_action_mask",
            data=[1] + self.default_agent_action_mask,
        )
        data_dict.add_data(
            name="no_op_agent_action_mask",
            data=[1] + self.no_op_agent_action_mask,
        )
        return data_dict
    def get_tensor_dictionary(self):
        """
        Create a dictionary of (Pytorch-accessible) data to push to the GPU (device).
        """
        tensor_dict = DataFeed()
        return tensor_dict
    def component_step(self):
        """Apply the chosen stringency actions (CUDA kernel or CPU python)."""
        if self.world.use_cuda:
            # Argument order must match the CUDA kernel's signature.
            self.world.cuda_component_step[self.name](
                self.world.cuda_data_manager.device_data("stringency_level"),
                self.world.cuda_data_manager.device_data("action_cooldown_period"),
                self.world.cuda_data_manager.device_data("action_in_cooldown_until"),
                self.world.cuda_data_manager.device_data("default_agent_action_mask"),
                self.world.cuda_data_manager.device_data("no_op_agent_action_mask"),
                self.world.cuda_data_manager.device_data("num_stringency_levels"),
                self.world.cuda_data_manager.device_data(f"{_ACTIONS}_a"),
                self.world.cuda_data_manager.device_data(
                    f"{_OBSERVATIONS}_a_{self.name}-agent_policy_indicators"
                ),
                self.world.cuda_data_manager.device_data(
                    f"{_OBSERVATIONS}_a_action_mask"
                ),
                self.world.cuda_data_manager.device_data(
                    f"{_OBSERVATIONS}_p_{self.name}-agent_policy_indicators"
                ),
                self.world.cuda_data_manager.device_data("_timestep_"),
                self.world.cuda_data_manager.meta_info("n_agents"),
                self.world.cuda_data_manager.meta_info("episode_length"),
                block=self.world.cuda_function_manager.block,
                grid=self.world.cuda_function_manager.grid,
            )
        else:
            if not self._checked_n_stringency_levels:
                if self.n_stringency_levels != self.world.n_stringency_levels:
                    raise ValueError(
                        "The environment was not configured correctly. For the given "
                        "model fit, you need to set the number of stringency levels to "
                        "be {}".format(self.world.n_stringency_levels)
                    )
                self._checked_n_stringency_levels = True
            for agent in self.world.agents:
                if self.world.use_real_world_policies:
                    # Use the action taken in the previous timestep
                    action = self.world.real_world_stringency_policy[
                        self.world.timestep - 1, agent.idx
                    ]
                else:
                    action = agent.get_component_action(self.name)
                assert 0 <= action <= self.n_stringency_levels
                # We only update the stringency level if the action is not a NO-OP.
                # (action == 0 carries forward the previous level; otherwise the
                # multiplicative term vanishes and the new level is `action`.)
                self.world.global_state["Stringency Level"][
                    self.world.timestep, agent.idx
                ] = (
                    self.world.global_state["Stringency Level"][
                        self.world.timestep - 1, agent.idx
                    ]
                    * (action == 0)
                    + action
                )
                agent.state[
                    "Current Open Close Stringency Level"
                ] = self.world.global_state["Stringency Level"][
                    self.world.timestep, agent.idx
                ]
                # Check if the action cooldown period has ended, and set the next
                # time until action cooldown. If current action is a no-op
                # (i.e., no new action was taken), the agent can take an action
                # in the very next step, otherwise it needs to wait for
                # self.action_cooldown_period steps. When in the action cooldown
                # period, whatever actions the agents take are masked out,
                # so it's always a NO-OP (see generate_masks() above)
                # The logic below influences the action masks.
                if self.world.timestep == self.action_in_cooldown_until[agent.idx] + 1:
                    if action == 0: # NO-OP
                        self.action_in_cooldown_until[agent.idx] += 1
                    else:
                        self.action_in_cooldown_until[
                            agent.idx
                        ] += self.action_cooldown_period
    def generate_observations(self):
        """Expose the current stringency levels (scaled to [0, 1]) to all."""
        # Normalized observations
        obs_dict = dict()
        agent_policy_indicators = self.world.global_state["Stringency Level"][
            self.world.timestep
        ]
        obs_dict["a"] = {
            "agent_policy_indicators": agent_policy_indicators
            / self.n_stringency_levels
        }
        obs_dict[self.world.planner.idx] = {
            "agent_policy_indicators": agent_policy_indicators
            / self.n_stringency_levels
        }
        return obs_dict
@component_registry.add
class FederalGovernmentSubsidy(BaseComponent):
    """
    Lets the planner agent roll out federal subsidies to the states.
    Args:
        subsidy_interval (int): The number of days over which the total subsidy amount
            is evenly rolled out.
            Note: shortening the subsidy interval increases the total amount of money
            that the planner could possibly spend. For instance, if the subsidy
            interval is 30, the planner can create a subsidy every 30 days.
        num_subsidy_levels (int): The number of subsidy levels.
            Note: with max_annual_subsidy_per_person=10000, one round of subsidies at
            the maximum subsidy level equals an expenditure of roughly $3.3 trillion
            (given the US population of 330 million).
            If the planner chooses the maximum subsidy amount, the $3.3 trillion
            is rolled out gradually over the subsidy interval.
        max_annual_subsidy_per_person (float): The maximum annual subsidy that may be
            allocated per person.
    """
    name = "FederalGovernmentSubsidy"
    required_entities = []
    agent_subclasses = ["BasicPlanner"]
    def __init__(
        self,
        *base_component_args,
        subsidy_interval=90,
        num_subsidy_levels=20,
        max_annual_subsidy_per_person=20000,
        **base_component_kwargs,
    ):
        self.subsidy_interval = int(subsidy_interval)
        assert self.subsidy_interval >= 1
        self.num_subsidy_levels = int(num_subsidy_levels)
        assert self.num_subsidy_levels >= 1
        self.max_annual_subsidy_per_person = float(max_annual_subsidy_per_person)
        assert self.max_annual_subsidy_per_person >= 0
        self.np_int_dtype = np.int32
        # (This will be overwritten during component_step; see below)
        self._subsidy_amount_per_level = None
        self._subsidy_level_array = None
        super().__init__(*base_component_args, **base_component_kwargs)
        # Planner action masks over the num_subsidy_levels non-NO-OP actions:
        # 1 = allowed, 0 = masked out.
        self.default_planner_action_mask = [1 for _ in range(self.num_subsidy_levels)]
        self.no_op_planner_action_mask = [0 for _ in range(self.num_subsidy_levels)]
        # (This will be overwritten during reset; see below)
        # Placeholder only — the real per-state values are computed in
        # additional_reset_steps() from the state populations.
        self.max_daily_subsidy_per_state = np.array(
            self.n_agents, dtype=self.np_int_dtype
        )
    def get_additional_state_fields(self, agent_cls_name):
        # The planner tracks total money spent and its current subsidy level.
        if agent_cls_name == "BasicPlanner":
            return {"Total Subsidy": 0, "Current Subsidy Level": 0}
        return {}
    def additional_reset_steps(self):
        # Pre-compute maximum state-specific subsidy levels
        self.max_daily_subsidy_per_state = (
            self.world.us_state_population * self.max_annual_subsidy_per_person / 365
        )
    def get_n_actions(self, agent_cls_name):
        if agent_cls_name == "BasicPlanner":
            # Number of non-zero subsidy levels
            # (the action 0 pertains to the no-subsidy case)
            return self.num_subsidy_levels
        return None
    def generate_masks(self, completions=0):
        """Allow the planner to act only at the start of a subsidy interval."""
        masks = {}
        if self.world.use_real_world_policies:
            masks[self.world.planner.idx] = self.default_planner_action_mask
        else:
            if self.world.timestep % self.subsidy_interval == 0:
                masks[self.world.planner.idx] = self.default_planner_action_mask
            else:
                masks[self.world.planner.idx] = self.no_op_planner_action_mask
        return masks
    def get_data_dictionary(self):
        """
        Create a dictionary of data to push to the device
        """
        data_dict = DataFeed()
        data_dict.add_data(
            name="subsidy_interval",
            data=self.subsidy_interval,
        )
        data_dict.add_data(
            name="num_subsidy_levels",
            data=self.num_subsidy_levels,
        )
        data_dict.add_data(
            name="max_daily_subsidy_per_state",
            data=self.max_daily_subsidy_per_state,
        )
        # The leading [1] is the mask entry for the NO-OP action.
        data_dict.add_data(
            name="default_planner_action_mask",
            data=[1] + self.default_planner_action_mask,
        )
        data_dict.add_data(
            name="no_op_planner_action_mask",
            data=[1] + self.no_op_planner_action_mask,
        )
        return data_dict
    def get_tensor_dictionary(self):
        """
        Create a dictionary of (Pytorch-accessible) data to push to the device
        """
        tensor_dict = DataFeed()
        return tensor_dict
    def component_step(self):
        """Apply the planner's subsidy action (CUDA kernel or CPU python)."""
        if self.world.use_cuda:
            # Argument order must match the CUDA kernel's signature.
            self.world.cuda_component_step[self.name](
                self.world.cuda_data_manager.device_data("subsidy_level"),
                self.world.cuda_data_manager.device_data("subsidy"),
                self.world.cuda_data_manager.device_data("subsidy_interval"),
                self.world.cuda_data_manager.device_data("num_subsidy_levels"),
                self.world.cuda_data_manager.device_data("max_daily_subsidy_per_state"),
                self.world.cuda_data_manager.device_data("default_planner_action_mask"),
                self.world.cuda_data_manager.device_data("no_op_planner_action_mask"),
                self.world.cuda_data_manager.device_data(f"{_ACTIONS}_p"),
                self.world.cuda_data_manager.device_data(
                    f"{_OBSERVATIONS}_a_{self.name}-t_until_next_subsidy"
                ),
                self.world.cuda_data_manager.device_data(
                    f"{_OBSERVATIONS}_a_{self.name}-current_subsidy_level"
                ),
                self.world.cuda_data_manager.device_data(
                    f"{_OBSERVATIONS}_p_{self.name}-t_until_next_subsidy"
                ),
                self.world.cuda_data_manager.device_data(
                    f"{_OBSERVATIONS}_p_{self.name}-current_subsidy_level"
                ),
                self.world.cuda_data_manager.device_data(
                    f"{_OBSERVATIONS}_p_action_mask"
                ),
                self.world.cuda_data_manager.device_data("_timestep_"),
                self.world.cuda_data_manager.meta_info("n_agents"),
                self.world.cuda_data_manager.meta_info("episode_length"),
                block=self.world.cuda_function_manager.block,
                grid=self.world.cuda_function_manager.grid,
            )
        else:
            if self.world.use_real_world_policies:
                # Replay the historical subsidy amounts: convert the dollar
                # amount into a level and spread it over the interval.
                if self._subsidy_amount_per_level is None:
                    self._subsidy_amount_per_level = (
                        self.world.us_population
                        * self.max_annual_subsidy_per_person
                        / self.num_subsidy_levels
                        * self.subsidy_interval
                        / 365
                    )
                    self._subsidy_level_array = np.zeros((self._episode_length + 1))
                # Use the action taken in the previous timestep
                current_subsidy_amount = self.world.real_world_subsidy[
                    self.world.timestep - 1
                ]
                if current_subsidy_amount > 0:
                    _subsidy_level = np.round(
                        (current_subsidy_amount / self._subsidy_amount_per_level)
                    )
                    for t_idx in range(
                        self.world.timestep - 1,
                        min(
                            len(self._subsidy_level_array),
                            self.world.timestep - 1 + self.subsidy_interval,
                        ),
                    ):
                        self._subsidy_level_array[t_idx] += _subsidy_level
                subsidy_level = self._subsidy_level_array[self.world.timestep - 1]
            else:
                # Update the subsidy level only every self.subsidy_interval, since the
                # other actions are masked out.
                if (self.world.timestep - 1) % self.subsidy_interval == 0:
                    subsidy_level = self.world.planner.get_component_action(self.name)
                else:
                    subsidy_level = self.world.planner.state["Current Subsidy Level"]
            assert 0 <= subsidy_level <= self.num_subsidy_levels
            self.world.planner.state["Current Subsidy Level"] = np.array(
                subsidy_level
            ).astype(self.np_int_dtype)
            # Update subsidy level
            subsidy_level_frac = subsidy_level / self.num_subsidy_levels
            daily_statewise_subsidy = (
                subsidy_level_frac * self.max_daily_subsidy_per_state
            )
            self.world.global_state["Subsidy"][
                self.world.timestep
            ] = daily_statewise_subsidy
            self.world.planner.state["Total Subsidy"] += np.sum(daily_statewise_subsidy)
    def generate_observations(self):
        """Return normalized subsidy-timing and subsidy-level observations."""
        # Allow the agents/planner to know when the next subsidy might come.
        # Obs should = 0 when the next timestep could include a subsidy
        t_since_last_subsidy = self.world.timestep % self.subsidy_interval
        # (this is normalized to 0<-->1)
        t_until_next_subsidy = self.subsidy_interval - t_since_last_subsidy
        t_vec = t_until_next_subsidy * np.ones(self.n_agents)
        current_subsidy_level = self.world.planner.state["Current Subsidy Level"]
        sl_vec = current_subsidy_level * np.ones(self.n_agents)
        # Normalized observations
        obs_dict = dict()
        obs_dict["a"] = {
            "t_until_next_subsidy": t_vec / self.subsidy_interval,
            "current_subsidy_level": sl_vec / self.num_subsidy_levels,
        }
        obs_dict[self.world.planner.idx] = {
            "t_until_next_subsidy": t_until_next_subsidy / self.subsidy_interval,
            "current_subsidy_level": current_subsidy_level / self.num_subsidy_levels,
        }
        return obs_dict
@component_registry.add
class VaccinationCampaign(BaseComponent):
    """
    Implements a (passive) component for delivering vaccines to agents once a certain
    amount of time has elapsed.
    Args:
        daily_vaccines_per_million_people (int): The number of vaccines available per
            million people everyday.
        delivery_interval (int): The number of days between vaccine deliveries.
        vaccine_delivery_start_date (string): The date (YYYY-MM-DD) when the
            vaccination begins.
        observe_rate (bool): Whether to also expose the upcoming vaccination
            rate in the observations.
    """
    name = "VaccinationCampaign"
    required_entities = []
    agent_subclasses = ["BasicMobileAgent"]
    def __init__(
        self,
        *base_component_args,
        daily_vaccines_per_million_people=4500,
        delivery_interval=1,
        vaccine_delivery_start_date="2020-12-22",
        observe_rate=False,
        **base_component_kwargs,
    ):
        self.daily_vaccines_per_million_people = int(daily_vaccines_per_million_people)
        assert 0 <= self.daily_vaccines_per_million_people <= 1e6
        self.delivery_interval = int(delivery_interval)
        assert 1 <= self.delivery_interval <= 5000
        try:
            self.vaccine_delivery_start_date = datetime.strptime(
                vaccine_delivery_start_date, "%Y-%m-%d"
            )
        except ValueError:
            # Fail fast on a malformed date. Previously the exception was
            # swallowed after the print, which left
            # `vaccine_delivery_start_date` unset and caused an opaque
            # AttributeError later, on first use.
            print("Incorrect data format, should be YYYY-MM-DD")
            raise
        # (Lazily computed by the `time_when_vaccine_delivery_begins` property.)
        self._time_when_vaccine_delivery_begins = None
        self.np_int_dtype = np.int32
        self.observe_rate = bool(observe_rate)
        super().__init__(*base_component_args, **base_component_kwargs)
        # (Lazily computed by the `num_vaccines_per_delivery` property.)
        self._num_vaccines_per_delivery = None
        # Convenience for obs (see usage below):
        self._t_first_delivery = None
    @property
    def num_vaccines_per_delivery(self):
        """Per-state vaccine counts shipped each delivery (computed once)."""
        if self._num_vaccines_per_delivery is None:
            # Pre-compute dispersal numbers
            millions_of_residents = self.world.us_state_population / 1e6
            daily_vaccines = (
                millions_of_residents * self.daily_vaccines_per_million_people
            )
            num_vaccines_per_delivery = np.floor(
                self.delivery_interval * daily_vaccines
            )
            self._num_vaccines_per_delivery = np.array(
                num_vaccines_per_delivery, dtype=self.np_int_dtype
            )
        return self._num_vaccines_per_delivery
    @property
    def time_when_vaccine_delivery_begins(self):
        """Timestep (day index) at which deliveries start (computed once)."""
        if self._time_when_vaccine_delivery_begins is None:
            self._time_when_vaccine_delivery_begins = (
                self.vaccine_delivery_start_date - self.world.start_date
            ).days
        return self._time_when_vaccine_delivery_begins
    def get_additional_state_fields(self, agent_cls_name):
        # Track vaccination progress per (state) agent.
        if agent_cls_name == "BasicMobileAgent":
            return {"Total Vaccinated": 0, "Vaccines Available": 0}
        return {}
    def additional_reset_steps(self):
        # NOTE(review): the cached values above are not invalidated here;
        # this is fine as long as the world's start date and populations do
        # not change between resets — confirm if that assumption changes.
        pass
    def get_n_actions(self, agent_cls_name):
        return # Passive component
    def generate_masks(self, completions=0):
        return {} # Passive component
    def get_data_dictionary(self):
        """
        Create a dictionary of data to push to the device
        """
        data_dict = DataFeed()
        data_dict.add_data(
            name="num_vaccines_per_delivery",
            data=self.num_vaccines_per_delivery,
        )
        data_dict.add_data(
            name="delivery_interval",
            data=self.delivery_interval,
        )
        data_dict.add_data(
            name="time_when_vaccine_delivery_begins",
            data=self.time_when_vaccine_delivery_begins,
        )
        data_dict.add_data(
            name="num_vaccines_available_t",
            data=np.zeros(self.n_agents),
            save_copy_and_apply_at_reset=True,
        )
        return data_dict
    def get_tensor_dictionary(self):
        """
        Create a dictionary of (Pytorch-accessible) data to push to the device
        """
        tensor_dict = DataFeed()
        return tensor_dict
    def component_step(self):
        """Deliver vaccines to the states (CUDA kernel or CPU python)."""
        if self.world.use_cuda:
            # Argument order must match the CUDA kernel's signature.
            self.world.cuda_component_step[self.name](
                self.world.cuda_data_manager.device_data("vaccinated"),
                self.world.cuda_data_manager.device_data("num_vaccines_per_delivery"),
                self.world.cuda_data_manager.device_data("num_vaccines_available_t"),
                self.world.cuda_data_manager.device_data("delivery_interval"),
                self.world.cuda_data_manager.device_data(
                    "time_when_vaccine_delivery_begins"
                ),
                self.world.cuda_data_manager.device_data(
                    f"{_OBSERVATIONS}_a_{self.name}-t_until_next_vaccines"
                ),
                self.world.cuda_data_manager.device_data(
                    f"{_OBSERVATIONS}_p_{self.name}-t_until_next_vaccines"
                ),
                self.world.cuda_data_manager.device_data("_timestep_"),
                self.world.cuda_data_manager.meta_info("n_agents"),
                self.world.cuda_data_manager.meta_info("episode_length"),
                block=self.world.cuda_function_manager.block,
                grid=self.world.cuda_function_manager.grid,
            )
        else:
            # Do nothing if vaccines are not available yet
            if self.world.timestep < self.time_when_vaccine_delivery_begins:
                return
            # Do nothing if this is not the start of a delivery interval.
            # Vaccines are delivered at the start of each interval.
            if (self.world.timestep % self.delivery_interval) != 0:
                return
            # Deliver vaccines to each state
            for aidx, vaccines in enumerate(self.num_vaccines_per_delivery):
                self.world.agents[aidx].state["Vaccines Available"] += vaccines
    def generate_observations(self):
        """Return normalized time-until-next-delivery (and optionally rate) obs."""
        # Allow the agents/planner to know when the next vaccines might come.
        # Obs should = 0 when the next timestep will deliver vaccines
        # (this is normalized to 0<-->1)
        if self._t_first_delivery is None:
            # First delivery happens at the first multiple of the delivery
            # interval at or after the campaign start day.
            self._t_first_delivery = int(self.time_when_vaccine_delivery_begins)
            while (self._t_first_delivery % self.delivery_interval) != 0:
                self._t_first_delivery += 1
        next_t = self.world.timestep + 1
        if next_t <= self._t_first_delivery:
            t_until_next_vac = np.minimum(
                1, (self._t_first_delivery - next_t) / self.delivery_interval
            )
            next_vax_rate = 0.0
        else:
            t_since_last_vac = next_t % self.delivery_interval
            t_until_next_vac = self.delivery_interval - t_since_last_vac
            next_vax_rate = self.daily_vaccines_per_million_people / 1e6
        t_vec = t_until_next_vac * np.ones(self.n_agents)
        r_vec = next_vax_rate * np.ones(self.n_agents)
        # Normalized observations
        obs_dict = dict()
        obs_dict["a"] = {
            "t_until_next_vaccines": t_vec / self.delivery_interval,
        }
        obs_dict[self.world.planner.idx] = {
            "t_until_next_vaccines": t_until_next_vac / self.delivery_interval,
        }
        if self.observe_rate:
            obs_dict["a"]["next_vaccination_rate"] = r_vec
            # Use the planner's actual index key, consistent with the entry
            # created above and with the sibling components. The previous
            # hard-coded "p" key would KeyError whenever planner.idx != "p".
            obs_dict[self.world.planner.idx]["next_vaccination_rate"] = float(
                next_vax_rate
            )
        return obs_dict
| 40.869369 | 88 | 0.616738 |
aceb0281139a5f493a26a33cc2a97677d4dcd494 | 3,097 | py | Python | src/pymake3/template/c/cl.py | shlomimatichin/Pymake3 | 1d7cd99c639dff208edd0a9873292e0d2d6e152e | [
"MIT"
] | 2 | 2021-04-22T17:14:51.000Z | 2022-02-26T10:52:06.000Z | src/pymake3/template/c/cl.py | shlomimatichin/Pymake3 | 1d7cd99c639dff208edd0a9873292e0d2d6e152e | [
"MIT"
] | null | null | null | src/pymake3/template/c/cl.py | shlomimatichin/Pymake3 | 1d7cd99c639dff208edd0a9873292e0d2d6e152e | [
"MIT"
] | null | null | null | #!/usr/bin/python3
"""
Template make script for Microsoft's C compiler cl.
"""
#---------------------------------------
# IMPORTS
#---------------------------------------
import os
from pymake3 import *
#---------------------------------------
# CONSTANTS
#---------------------------------------
# Path to the csc compiler to use.
CL = r'C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\bin\amd64\cl.exe'
#---------------------------------------
# GLOBALS
#---------------------------------------
# Default configuration settings.
conf = makeconf.from_dict({
    # Name of the executable produced in 'bindir'.
    'name' : 'main.exe',
    # Compiler flags: release defines plus full optimization and warnings.
    'cflags': [ '/DNDEBUG', '/DUNICODE', '/O2', '/Wall' ],
    # Linker flags: build a 64-bit (AMD64) image.
    'lflags': [ '/MACHINE:AMD64' ],
    # NOTE(review): the VS 14.0 / Windows Kits 10.0.10586.0 paths below are
    # machine-specific; update them to match the installed toolchain.
    'includepaths': [ r'C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\include',
                      r'C:\Program Files (x86)\Windows Kits\10\Include\10.0.10586.0\shared',
                      r'C:\Program Files (x86)\Windows Kits\10\Include\10.0.10586.0\ucrt',
                      r'C:\Program Files (x86)\Windows Kits\10\Include\10.0.10586.0\um' ],
    'libpaths': [ r'C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\lib\amd64',
                  r'C:\Program Files (x86)\Windows Kits\10\Lib\10.0.10586.0\ucrt\x64',
                  r'C:\Program Files (x86)\Windows Kits\10\Lib\10.0.10586.0\um\x64' ],
    # Win32 import libraries to link against.
    'libs': [ 'kernel32.lib', 'user32.lib' ],
    # Output and input directories, relative to the build script.
    'bindir': 'bin',
    'objdir': 'obj',
    'srcdir': 'src'
})
#---------------------------------------
# FUNCTIONS
#---------------------------------------
@target(conf=conf)
def clean(conf):
    """
    Removes all build artifacts by deleting the output directories.
    """
    # Remove the binary directory first, then the object directory.
    for directory in (conf.bindir, conf.objdir):
        delete_dir(directory)
@target(conf=conf)
def compile(conf):
    """
    Compiles the executable program from its sources in the source directory.
    """
    # Incremental-build check: skip recompiling when the existing executable
    # is newer than every .c/.h file under the source directory.
    exe_file = os.path.join(conf.bindir, conf.name)
    if os.path.isfile(exe_file):
        mtime = os.path.getmtime(exe_file)
        skip = True
        for s in find_files(conf.srcdir, [ '*.c', '*.h' ]):
            if os.path.getmtime(s) > mtime:
                skip = False
                break
        if skip:
            # No source files have changed since the last compile, so we don't
            # need to recompile.
            return
    create_dir(conf.bindir)
    create_dir(conf.objdir)
    # cl.exe options: /Fe sets the executable output path, /Fo the object-file
    # directory (trailing backslash required), /I adds an include path, and
    # everything after /link is passed to the linker.
    options = (
        [ '/nologo' ] +
        conf.cflags +
        [ '/Fe' + os.path.join(conf.bindir, conf.name) ] +
        [ '/Fo' + conf.objdir + '\\' ] +
        [ '/I' + s for s in conf.includepaths] +
        find_files(conf.srcdir, '*.c') +
        conf.libs +
        [ '/link' ] +
        conf.lflags +
        [ '/LIBPATH:' + s for s in conf.libpaths ]
    )
    run_program(CL, options)
@target(conf=conf)
def run(conf):
    """
    Runs the target executable. This target has no dependencies, so the program
    needs to be built first.
    """
    executable = conf.name
    # Launch from inside the binary directory.
    os.chdir(conf.bindir)
    run_program(executable)
#---------------------------------------
# SCRIPT
#---------------------------------------
if __name__ == '__main__':
pymake3()
| 27.40708 | 92 | 0.499193 |
aceb02f1653d0634eb5de9823f91db4befa0a0d7 | 213 | py | Python | wp_app/config/desktop.py | avtserver/wp_app | 4a0e75b8362bf6908e73a5ba58f064dd1c9c8c78 | [
"MIT"
] | null | null | null | wp_app/config/desktop.py | avtserver/wp_app | 4a0e75b8362bf6908e73a5ba58f064dd1c9c8c78 | [
"MIT"
] | null | null | null | wp_app/config/desktop.py | avtserver/wp_app | 4a0e75b8362bf6908e73a5ba58f064dd1c9c8c78 | [
"MIT"
] | null | null | null | from frappe import _
def get_data():
    """Return the desktop module card(s) shown for this app."""
    module_card = {
        "module_name": "WinnersPie App",
        "color": "grey",
        "icon": "octicon octicon-file-directory",
        "type": "module",
        "label": _("WinnersPie App"),
    }
    return [module_card]
| 16.384615 | 44 | 0.596244 |
aceb02f55a426b86c54ef62012793ab262e86af0 | 6,002 | py | Python | test/functional/interface_zmq.py | aurarad/Auroracoin | a1425f148a7b78670b2ac12017dcea2e89eba618 | [
"MIT"
] | 35 | 2016-03-14T02:24:46.000Z | 2021-08-12T16:19:19.000Z | test/functional/interface_zmq.py | Izham5354/Auroracoin | a1425f148a7b78670b2ac12017dcea2e89eba618 | [
"MIT"
] | 40 | 2016-03-11T20:21:12.000Z | 2022-03-29T11:07:54.000Z | test/functional/interface_zmq.py | Izham5354/Auroracoin | a1425f148a7b78670b2ac12017dcea2e89eba618 | [
"MIT"
] | 34 | 2016-03-10T13:31:43.000Z | 2022-01-01T06:05:56.000Z | #!/usr/bin/env python3
# Copyright (c) 2009-2019 The Bitcoin Core developers
# Copyright (c) 2014-2019 The DigiByte Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the ZMQ notification interface."""
import struct
from test_framework.address import ADDRESS_BCRT1_UNSPENDABLE
from test_framework.test_framework import DigiByteTestFramework
from test_framework.messages import CTransaction, hash256
from test_framework.util import assert_equal, connect_nodes
from io import BytesIO
from time import sleep
def hash256_reversed(byte_str):
    """Double-SHA256 the input and return the digest in reversed byte order."""
    digest = hash256(byte_str)
    return digest[::-1]
class ZMQSubscriber:
    """Wraps a ZMQ SUB socket for one topic and validates message ordering."""
    def __init__(self, socket, topic):
        self.sequence = 0
        self.socket = socket
        self.topic = topic
        import zmq
        self.socket.setsockopt(zmq.SUBSCRIBE, self.topic)
    def receive(self):
        recv_topic, payload, recv_seq = self.socket.recv_multipart()
        # Every message must arrive on the subscribed topic...
        assert_equal(recv_topic, self.topic)
        # ...and carry the next consecutive little-endian sequence number.
        assert_equal(struct.unpack('<I', recv_seq)[-1], self.sequence)
        self.sequence += 1
        return payload
class ZMQTest (DigiByteTestFramework):
    """Functional test of the node's ZMQ notification interface.

    Spins up two nodes, subscribes to all four pub topics on node 0, and
    checks that block/tx notifications arrive in the expected order, plus
    that only the new tip is announced after a reorg.
    """
    def set_test_params(self):
        # Two nodes: node 0 publishes ZMQ notifications, node 1 mines/relays.
        self.num_nodes = 2
    def skip_test_if_missing_module(self):
        self.skip_if_no_py3_zmq()
        self.skip_if_no_digibyted_zmq()
    def run_test(self):
        import zmq
        self.ctx = zmq.Context()
        try:
            self.test_basic()
            self.test_reorg()
        finally:
            # Destroy the ZMQ context.
            self.log.debug("Destroying ZMQ context")
            self.ctx.destroy(linger=None)
    def test_basic(self):
        # All messages are received in the same socket which means
        # that this test fails if the publishing order changes.
        # Note that the publishing order is not defined in the documentation and
        # is subject to change.
        import zmq
        # Invalid zmq arguments don't take down the node, see #17185.
        self.restart_node(0, ["-zmqpubrawtx=foo", "-zmqpubhashtx=bar"])
        address = 'tcp://127.0.0.1:28332'
        socket = self.ctx.socket(zmq.SUB)
        socket.set(zmq.RCVTIMEO, 60000)
        # Subscribe to all available topics.
        hashblock = ZMQSubscriber(socket, b"hashblock")
        hashtx = ZMQSubscriber(socket, b"hashtx")
        rawblock = ZMQSubscriber(socket, b"rawblock")
        rawtx = ZMQSubscriber(socket, b"rawtx")
        self.restart_node(0, ["-zmqpub%s=%s" % (sub.topic.decode(), address) for sub in [hashblock, hashtx, rawblock, rawtx]])
        connect_nodes(self.nodes[0], 1)
        socket.connect(address)
        # Relax so that the subscriber is ready before publishing zmq messages
        sleep(0.2)
        num_blocks = 5
        self.log.info("Generate %(n)d blocks (and %(n)d coinbase txes)" % {"n": num_blocks})
        genhashes = self.nodes[0].generatetoaddress(num_blocks, ADDRESS_BCRT1_UNSPENDABLE)
        self.sync_all()
        # For each mined block we expect, in order: coinbase txid, coinbase
        # raw tx, block hash, raw block.
        for x in range(num_blocks):
            # Should receive the coinbase txid.
            txid = hashtx.receive()
            # Should receive the coinbase raw transaction.
            hex = rawtx.receive()
            tx = CTransaction()
            tx.deserialize(BytesIO(hex))
            tx.calc_sha256()
            assert_equal(tx.hash, txid.hex())
            # Should receive the generated block hash.
            hash = hashblock.receive().hex()
            assert_equal(genhashes[x], hash)
            # The block should only have the coinbase txid.
            assert_equal([txid.hex()], self.nodes[1].getblock(hash)["tx"])
            # Should receive the generated raw block.
            block = rawblock.receive()
            assert_equal(genhashes[x], hash256_reversed(block[:80]).hex())
        if self.is_wallet_compiled():
            self.log.info("Wait for tx from second node")
            payment_txid = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1.0)
            self.sync_all()
            # Should receive the broadcasted txid.
            txid = hashtx.receive()
            assert_equal(payment_txid, txid.hex())
            # Should receive the broadcasted raw transaction.
            hex = rawtx.receive()
            assert_equal(payment_txid, hash256_reversed(hex).hex())
        self.log.info("Test the getzmqnotifications RPC")
        assert_equal(self.nodes[0].getzmqnotifications(), [
            {"type": "pubhashblock", "address": address, "hwm": 1000},
            {"type": "pubhashtx", "address": address, "hwm": 1000},
            {"type": "pubrawblock", "address": address, "hwm": 1000},
            {"type": "pubrawtx", "address": address, "hwm": 1000},
        ])
        assert_equal(self.nodes[1].getzmqnotifications(), [])
    def test_reorg(self):
        import zmq
        # Separate port so leftover messages from test_basic can't interfere.
        address = 'tcp://127.0.0.1:28333'
        socket = self.ctx.socket(zmq.SUB)
        socket.set(zmq.RCVTIMEO, 60000)
        hashblock = ZMQSubscriber(socket, b'hashblock')
        # Should only notify the tip if a reorg occurs
        self.restart_node(0, ['-zmqpub%s=%s' % (hashblock.topic.decode(), address)])
        socket.connect(address)
        # Relax so that the subscriber is ready before publishing zmq messages
        sleep(0.2)
        # Generate 1 block in nodes[0] and receive all notifications
        self.nodes[0].generatetoaddress(1, ADDRESS_BCRT1_UNSPENDABLE)
        assert_equal(self.nodes[0].getbestblockhash(), hashblock.receive().hex())
        # Generate 2 blocks in nodes[1]
        self.nodes[1].generatetoaddress(2, ADDRESS_BCRT1_UNSPENDABLE)
        # nodes[0] will reorg chain after connecting back nodes[1]
        connect_nodes(self.nodes[0], 1)
        # Should receive nodes[1] tip
        assert_equal(self.nodes[1].getbestblockhash(), hashblock.receive().hex())
if __name__ == '__main__':
ZMQTest().main()
| 37.279503 | 126 | 0.635788 |
aceb03e0a1251eb36dd2f39fc0d8c8c256136b55 | 3,063 | py | Python | code/pytorch/LAMPO/core/config.py | hzm2016/assistive-gym-robosuite | 5c529f4444cc386383618bfa584341740a8468f9 | [
"MIT"
] | 1 | 2021-11-22T07:45:28.000Z | 2021-11-22T07:45:28.000Z | code/pytorch/LAMPO/core/config.py | hzm2016/assistive-gym-robosuite | 5c529f4444cc386383618bfa584341740a8468f9 | [
"MIT"
] | null | null | null | code/pytorch/LAMPO/core/config.py | hzm2016/assistive-gym-robosuite | 5c529f4444cc386383618bfa584341740a8468f9 | [
"MIT"
] | null | null | null | # from code.pytorch.LAMPO.core.augmented_tasks.tasks import ReachTarget, CloseDrawer, WaterPlants
# from code.pytorch.LAMPO.core.lab_connection import TCPTask
from code.pytorch.LAMPO.core.rl_bench_box import *
# from code.pytorch.LAMPO.core.rl_bench_box import Reacher2D, ObstacleReacher2d, MujocoEnv
from envs.gym_kuka_mujoco.envs.percision_peg_insertion_env import *
# RLBenchBox,
# Per-experiment configuration: maps an experiment name to its environment
# class/factory and the LAMPO hyper-parameters (number of mixture clusters,
# latent dimensionality, DMP features, context/state size, dataset size).
# Commented-out entries are kept as a record of previously used setups.
config = {
    "reach_target": {
        "task_class": MujocoEnv,
        "task_box": PercisionPegInsertionEnv,
        "n_cluster": 6,
        "latent_dim": 3,
        "n_features": 20,
        "state_dim": 3,
        "n_samples": 1000
    },
    # NOTE(review): "dmp_assitive" looks like a typo for "dmp_assistive",
    # but the key may be referenced by callers, so it is left unchanged.
    "dmp_assitive": {
        "task_class": PybulletEnv,
        "task_box": PercisionPegInsertionEnv,
        "n_cluster": 6,
        "latent_dim": 3,
        "n_features": 20,
        "state_dim": 3,
        "n_samples": 1000
    },
    # "tcp": {
    #     "task_box": lambda headless: TCPTask(5056, 20),
    #     "n_features": 20,
    #     "n_cluster": 4,
    #     "latent_dim": 2,
    #     "state_dim": 3,
    #     "n_samples": 50
    # },
    # "tcp_pouring": {
    #     "task_box": lambda headless: TCPTask(5056, 20),
    #     "n_features": 20,
    #     "n_cluster": 4,
    #     "latent_dim": 4,
    #     "state_dim": 1,
    #     "n_samples": 100
    # },
    # 2D reacher variants: the suffix is the number of goals/clusters.
    "reacher2d_1": {
        "task_box": lambda headless: Reacher2D(20, 1, headless),
        "n_features": 20,
        "n_cluster": 1,
        "latent_dim": 2,
        "state_dim": 2,
        "n_samples": 100
    },
    "reacher2d_2": {
        "task_box": lambda headless: Reacher2D(20, 2, headless),
        "n_features": 20,
        "n_cluster": 2,
        "latent_dim": 2,
        "state_dim": 2,
        "n_samples": 100
    },
    "reacher2d_3": {
        "task_box": lambda headless: Reacher2D(20, 3, headless),
        "n_features": 20,
        "n_cluster": 3,
        "latent_dim": 2,
        "state_dim": 2,
        "n_samples": 100
    },
    "reacher2d_4": {
        "task_box": lambda headless: Reacher2D(20, 4, headless),
        "n_features": 20,
        "n_cluster": 4,
        "latent_dim": 2,
        "state_dim": 2,
        "n_samples": 100
    },
    # "reacher2d_obstacle": {
    #     "task_box": lambda headless: ObstacleReacher2d(20, headless),
    #     "n_features": 20,
    #     "n_cluster": 25,
    #     "latent_dim": 5,
    #     "state_dim": 2,
    #     "n_samples": 9000
    # },
    "reacher2d_obstacle": {
        "task_box": lambda headless: ObstacleReacher2d(20, headless),
        "n_features": 20,
        "n_cluster": 40,
        "latent_dim": 5,
        "state_dim": 2,
        "n_samples": 16500
    },
    # "close_drawer": {
    #     "task_class": CloseDrawer, # TODO: remove
    #     "task_box": lambda headless: RLBenchBox(CloseDrawer, 94, 20, headless),
    #     "n_cluster": 10,
    #     "latent_dim": 4,
    #     "n_features": 20, # TODO: remove
    #     "state_dim": 94, # TODO: remove
    #     "n_samples": 200
    # },
    # "water_plants": {
    #     "task_class": WaterPlants, # TODO: remove
    #     "task_box": lambda headless: RLBenchBox(WaterPlants, 84, 20, headless),
    #     "n_cluster": 2,
    #     "latent_dim": 10,
    #     "n_features": 20, # TODO: remove
    #     "state_dim": 84, # TODO: remove
    #     "n_samples": 1000
    # },
    # NOTE(review): a second (commented) "reach_target" entry -- the active
    # one at the top of this dict shadows it.
    # "reach_target": {
    #     "task_class": ReachTarget,
    #     "task_box": lambda headless: RLBenchBox(ReachTarget, 3, 20, headless),
    #     "n_cluster": 6,
    #     "latent_dim": 3,
    #     "n_features": 20,
    #     "state_dim": 3,
    #     "n_samples": 1000
    # }
}
| 25.525 | 97 | 0.632387 |
aceb044f8de6f6dcceb87ca4f422976fac8d08d6 | 261 | py | Python | md2book/modules/__init__.py | webalorn/md2book | d907aa33b56bf93ecb4b58c2cbb25a6651753902 | [
"MIT"
] | 1 | 2021-03-24T00:02:52.000Z | 2021-03-24T00:02:52.000Z | md2book/modules/__init__.py | webalorn/md2book | d907aa33b56bf93ecb4b58c2cbb25a6651753902 | [
"MIT"
] | null | null | null | md2book/modules/__init__.py | webalorn/md2book | d907aa33b56bf93ecb4b58c2cbb25a6651753902 | [
"MIT"
] | null | null | null | from .base import *
from .mdfilemods import *
from .stylemods import *
# All module classes exposed by this package, as imported from .base,
# .mdfilemods and .stylemods above.
ALL_MODULES = [
	MainTargetDatasModule,
	ThemeModule,
	StyleModule,
	SepModule,
	FontModule,
	MetadataModule,
	LatexModule,
	ImagesModule,
	TitlePageModule,
	TocModule,
	HtmlBlocksModule,
] | 15.352941 | 25 | 0.777778 |
aceb05d2df83514e909c50539208935d0881dbf3 | 4,197 | py | Python | swagger_client/models/inline_response_201_1.py | scubawhere/scubawhere-api-python-client | 9f8578e251492c7667f785df7b7c9d66e71f5c8e | [
"Apache-2.0"
] | null | null | null | swagger_client/models/inline_response_201_1.py | scubawhere/scubawhere-api-python-client | 9f8578e251492c7667f785df7b7c9d66e71f5c8e | [
"Apache-2.0"
] | null | null | null | swagger_client/models/inline_response_201_1.py | scubawhere/scubawhere-api-python-client | 9f8578e251492c7667f785df7b7c9d66e71f5c8e | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Scubawhere API Documentation
This is the documentation for scubawhere's RMS API. This API is only to be used by authorized parties with valid auth tokens. [Learn about scubawhere](http://www.scubawhere.com) to become an authorized consumer of our API
OpenAPI spec version: 1.0.0
Contact: bryan@scubawhere.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class InlineResponse2011(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.

    API response model holding a request status string and the created
    Payment resource.
    """

    def __init__(self, status=None, payment=None):
        """
        InlineResponse2011 - a model defined in Swagger

        :param status: the response status string.
        :param payment: the created Payment model (or None).
        """
        # attribute name -> swagger type, used by to_dict() below.
        self.swagger_types = {
            'status': 'str',
            'payment': 'Payment'
        }
        # attribute name -> JSON key in the API definition.
        self.attribute_map = {
            'status': 'status',
            'payment': 'payment'
        }
        self._status = status
        self._payment = payment

    @property
    def status(self):
        """
        Gets the status of this InlineResponse2011.

        :return: The status of this InlineResponse2011.
        :rtype: str
        """
        return self._status

    @status.setter
    def status(self, status):
        """
        Sets the status of this InlineResponse2011.

        :param status: The status of this InlineResponse2011.
        :type: str
        """
        self._status = status

    @property
    def payment(self):
        """
        Gets the payment of this InlineResponse2011.

        :return: The payment of this InlineResponse2011.
        :rtype: Payment
        """
        return self._payment

    @payment.setter
    def payment(self, payment):
        """
        Sets the payment of this InlineResponse2011.

        :param payment: The payment of this InlineResponse2011.
        :type: Payment
        """
        self._payment = payment

    def to_dict(self):
        """
        Returns the model properties as a dict, recursively serializing any
        contained models (anything exposing a to_dict()).
        """
        result = {}
        # Plain dict iteration replaces the old six.iteritems() call; this
        # drops the six dependency with identical behavior on Python 3.
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal.

        Fixed to guard the type first: comparing against a non-model object
        (e.g. a plain dict) previously raised AttributeError on
        ``other.__dict__``; it now simply returns False.
        """
        if not isinstance(other, InlineResponse2011):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| 27.611842 | 227 | 0.575173 |
aceb063afdb13874ca8d76541667c6c3e00648c0 | 1,066 | py | Python | session2/nqueen1.py | MrCsabaToth/IK | 713f91c28af7b4a964ba854ede9fec73bf0c4682 | [
"Apache-2.0"
] | null | null | null | session2/nqueen1.py | MrCsabaToth/IK | 713f91c28af7b4a964ba854ede9fec73bf0c4682 | [
"Apache-2.0"
] | null | null | null | session2/nqueen1.py | MrCsabaToth/IK | 713f91c28af7b4a964ba854ede9fec73bf0c4682 | [
"Apache-2.0"
] | null | null | null | def nqueen_helper(cols, rows, colinv, rowinv, n):
    # Leaf node
    # All n queens are placed: render this placement as sorted "(colxrow)"
    # strings. NOTE(review): returning set(sol) and union-ing below flattens
    # every solution's cells into one combined set rather than keeping
    # solutions distinct -- confirm this is the intended output shape.
    if len(cols) == n:
        sol = sorted(["({}x{})".format(pos[0], pos[1]) for pos in zip(cols, rows)])
        return set(sol)
    result = set()
    # Internal node
    # Backtracking: try every remaining (column, row) pair; cols/rows hold the
    # queens placed so far, colinv/rowinv the still-unused columns/rows.
    # colinv and rowinv must be real lists -- remove()/append() are used below.
    for i in colinv[:]:
        for j in rowinv[:]:
            attack = False
            # Columns and rows are unique by construction, so only diagonal
            # attacks need checking: equal |dcol| and |drow| means same diagonal.
            for k in range(len(cols)):
                dcol = abs(i - cols[k])
                drow = abs(j - rows[k])
                if dcol == drow:
                    attack = True
                    break
            if not attack:
                # Place the queen, recurse, then undo the placement.
                cols.append(i)
                colinv.remove(i)
                rows.append(j)
                rowinv.remove(j)
                result = result.union(nqueen_helper(cols, rows, colinv, rowinv, n))
                rowinv.append(j)
                rows.pop()
                colinv.append(i)
                cols.pop()
    return result
def nqueen(n):
    """Solve the n-queens problem on an n x n board.

    Returns the set of "(colxrow)" position strings that nqueen_helper
    accumulates over all solutions.
    """
    cols = []
    rows = []
    # Fix: use concrete lists. nqueen_helper mutates these with remove() and
    # append(), which a Python 3 range object does not support (this crashed
    # with AttributeError on Python 3; list(range(...)) is identical on
    # Python 2).
    colinv = list(range(1, n + 1))
    rowinv = list(range(1, n + 1))
    return nqueen_helper(cols, rows, colinv, rowinv, n)
| 27.333333 | 83 | 0.453096 |
aceb070c94b32c442d4e65c7b337b3a29e147723 | 11,946 | py | Python | utils/data_reader.py | yoongi0428/spotify_recSys_challenge_2018 | e4cd527a3c24502201345475cb660d43c01a4ea1 | [
"Apache-2.0"
] | 1 | 2021-01-18T14:37:11.000Z | 2021-01-18T14:37:11.000Z | utils/data_reader.py | yoongi0428/spotify_recSys_challenge_2018 | e4cd527a3c24502201345475cb660d43c01a4ea1 | [
"Apache-2.0"
] | null | null | null | utils/data_reader.py | yoongi0428/spotify_recSys_challenge_2018 | e4cd527a3c24502201345475cb660d43c01a4ea1 | [
"Apache-2.0"
] | null | null | null | import random
import json
import numpy as np
import time
class data_reader:
    """Iterates a preprocessed playlist JSON file in training batches.

    Each playlist entry is ``[track_ids, artist_ids, title]``. Batches are
    returned as (row, col) index pairs -- row is the in-batch playlist index,
    col the item id -- suitable for building sparse input tensors.
    """
    def __init__(self, data_dir, filename, batch_size):
        with open(data_dir + '/' + filename) as data_file:
            data_tr = json.load(data_file)
        self.num_tracks = len(data_tr['track_uri2id'])
        # Artist ids are laid out after the track id range.
        self.num_items = self.num_tracks + len(data_tr['artist_uri2id'])
        self.max_title_len = data_tr['max_title_len']
        self.num_char = data_tr['num_char']
        self.playlists = data_tr['playlists']
        self.class_divpnt = data_tr['class_divpnt']
        del data_tr
        self.batch_size = batch_size
        self.train_idx = 0
    def next_batch(self):
        """Return (trk_positions, art_positions, y_positions, titles, trk_val, art_val).

        Wraps around (and reshuffles the playlists) when the end of the
        dataset is reached.
        """
        trk_positions = []
        art_positions = []
        titles = []
        for i in range(self.batch_size):
            train_trk, train_art, train_title = self.playlists[self.train_idx]
            trks = np.array([train_trk]).T
            # Fix: dtype=int -- the np.int alias was removed in NumPy 1.24.
            playlist = np.full_like(trks, fill_value=i, dtype=int)
            trk_positions.append(np.concatenate((playlist, trks), axis=1))
            arts = np.array([train_art]).T
            playlist = np.full_like(arts, fill_value=i, dtype=int)
            art_positions.append(np.concatenate((playlist, arts), axis=1))
            titles.append(train_title)
            self.train_idx += 1
            if self.train_idx == len(self.playlists):
                # Epoch boundary: restart and reshuffle.
                self.train_idx = 0
                random.shuffle(self.playlists)
        trk_positions = np.concatenate(trk_positions)
        art_positions = np.concatenate(art_positions)
        y_positions = np.concatenate((trk_positions, art_positions), 0)
        trk_val = [1] * len(trk_positions)
        art_val = [1] * len(art_positions)
        return trk_positions, art_positions, y_positions, titles, trk_val, art_val
class data_reader_firstN:
    """Training reader that flags only a random prefix of items as "given".

    Like ``data_reader``, but for each playlist a prefix length is drawn from
    ``from_to``: absolute counts when ``from_to[0] >= 1``, otherwise
    fractions of the playlist length (always at least 1). The value vectors
    mark that prefix with 1s and the remainder with 0s.
    """
    def __init__(self, data_dir, filename, batch_size, from_to):
        with open(data_dir + '/' + filename) as data_file:
            data_tr = json.load(data_file)
        self.num_tracks = len(data_tr['track_uri2id'])
        self.num_items = self.num_tracks + len(data_tr['artist_uri2id'])
        self.max_title_len = data_tr['max_title_len']
        self.num_char = data_tr['num_char']
        self.playlists = data_tr['playlists']
        del data_tr
        self.batch_size = batch_size
        self.train_idx = 0
        self.from_to = from_to
    def next_batch(self):
        """Return (trk_positions, art_positions, y_positions, titles, trk_val, art_val)."""
        trk_positions = []
        art_positions = []
        trk_val = []
        art_val = []
        titles = []
        for i in range(self.batch_size):
            train_trk, train_art, train_title = self.playlists[self.train_idx]
            len_t = len(train_trk)
            if len_t != 0:
                if self.from_to[0] >= 1:
                    # Absolute range of given tracks, clipped to the playlist.
                    n = int(self.from_to[0])
                    m = int(min(len_t, self.from_to[1]))
                else:
                    # Fractional range of the playlist length (at least 1).
                    n = int(max(len_t * self.from_to[0], 1))
                    m = int(max(len_t * self.from_to[1], 1))
                given_num = random.randrange(n, m + 1)
                trks = np.array([train_trk]).T
                # Fix: dtype=int -- np.int was removed in NumPy 1.24.
                playlist = np.full_like(trks, fill_value=i, dtype=int)
                trk_positions.append(np.concatenate((playlist, trks), axis=1))
                trk_val += [1] * given_num + [0] * (len_t - given_num)
            len_a = len(train_art)
            if len_a != 0:
                if self.from_to[0] >= 1:
                    n = int(self.from_to[0])
                    m = int(min(len_a, self.from_to[1]))
                else:
                    n = int(max(len_a * self.from_to[0], 1))
                    m = int(max(len_a * self.from_to[1], 1))
                given_num = random.randrange(n, m + 1)
                arts = np.array([train_art]).T
                playlist = np.full_like(arts, fill_value=i, dtype=int)
                art_positions.append(np.concatenate((playlist, arts), axis=1))
                art_val += [1] * given_num + [0] * (len_a - given_num)
            titles.append(train_title)
            self.train_idx += 1
            if self.train_idx == len(self.playlists):
                self.train_idx = 0
                random.shuffle(self.playlists)
        trk_positions = np.concatenate(trk_positions)
        art_positions = np.concatenate(art_positions)
        y_positions = np.concatenate((trk_positions, art_positions), 0)
        return trk_positions, art_positions, y_positions, titles, trk_val, art_val
class data_reader_test:
def __init__(self, data_dir, filename, batch_size, test_num):
print("now processing: " + filename)
with open(data_dir + '/' + filename) as data_file:
data_te = json.load(data_file)
self.playlists = data_te['playlists'][:test_num]
del data_te
test_num = test_num
if test_num > len(self.playlists):
test_num = len(self.playlists)
print("the number of test will be changed to %d" % test_num)
self.batch_size = batch_size
self.test_idx = 0
def next_batch_test(self):
trk_positions = []
answers_for_grad = []
# art_positions = []
test_seed = []
test_answer = []
test_answer_cls = []
# start_time = time.time()
for i in range(self.batch_size):
seed, seed_art, answer, seed_cls, answer_cls = self.playlists[self.test_idx]
trks = np.array([seed], dtype=np.int64).T
playlist = np.full_like(trks, fill_value=i, dtype=np.int)
conc = np.concatenate((playlist, trks), axis=1)
trk_positions.append(conc)
test_seed.append(seed)
test_answer.append(answer)
test_answer_cls.append(answer_cls)
answer_for_grad = seed[:]
for a in answer:
if a != -1:
answer_for_grad.append(i)
trks = np.array([answer_for_grad], dtype=np.int64).T
playlist = np.full_like(trks, fill_value=i, dtype=np.int)
conc = np.concatenate((playlist, trks), axis=1)
answers_for_grad.append(conc)
'''
arts = np.array([seed_art]).T
playlist = np.full_like(arts, fill_value=i, dtype=np.int)
conc = np.concatenate((playlist, arts), axis=1)
art_positions.append(conc)
'''
self.test_idx += 1
if self.test_idx == len(self.playlists):
self.test_idx = 0
break
# print(self.test_idx)
trk_positions = np.concatenate(trk_positions)
answers_for_grad = np.concatenate(answers_for_grad)
# art_positions = np.concatenate(art_positions)
# x_positions = np.concatenate((trk_positions, art_positions), 0)
# x_ones = [1]*len(trk_positions) + [0.5]*len(art_positions)
return trk_positions, test_seed, test_answer, test_answer_cls, answers_for_grad
def next_batch_test_cls(self, cls_list):
trk_positions = []
answers_for_grad = []
# art_positions = []
test_seed = []
test_answer = []
test_answer_cls = []
test_titles = []
# start_time = time.time()
for i in range(self.batch_size):
seed, seed_art, answer, seed_cls, answer_cls = self.playlists[self.test_idx]
_seed = []
for c, s in zip(seed_cls, seed):
if c in cls_list:
_seed.append(s)
seed = _seed
trks = np.array([seed], dtype=np.int64).T
playlist = np.full_like(trks, fill_value=i, dtype=np.int)
conc = np.concatenate((playlist, trks), axis=1)
trk_positions.append(conc)
test_seed.append(seed)
test_answer.append(answer)
test_answer_cls.append(answer_cls)
answer_for_grad = test_seed[:]
for i in test_answer:
if i != -1:
answer_for_grad.append(i)
trks = np.array([answer_for_grad], dtype=np.int64).T
playlist = np.full_like(trks, fill_value=i, dtype=np.int)
conc = np.concatenate((playlist, trks), axis=1)
answers_for_grad.append(conc)
arts = np.array([seed_art]).T
playlist = np.full_like(arts, fill_value=i, dtype=np.int)
conc = np.concatenate((playlist, arts), axis=1)
art_positions.append(conc)
test_titles.append(title)
self.test_idx += 1
if self.test_idx == len(self.playlists):
self.test_idx = 0
break
# print(self.test_idx)
trk_positions = np.concatenate(trk_positions)
<<<<<<< HEAD
art_positions = np.concatenate(art_positions)
x_positions = np.concatenate((trk_positions, art_positions), 0)
x_ones = [1]*len(trk_positions) + [0.5]*len(art_positions)
return x_positions, test_seed, test_answer, test_titles, x_ones
class data_reader_challenge:
def __init__(self, data_dir, filename, batch_size):
print("now processing: " + filename)
with open(data_dir + '/' + filename) as data_file:
data_ch = json.load(data_file)
self.playlists = data_ch['playlists']
self.id2uri = data_ch['id2uri']
self.num_tracks = data_ch['num_tracks']
self.num_items = data_ch['num_items']
self.is_in_order = data_ch['in_order']
self.max_title_len = data_ch['max_title_len']
self.num_char = data_ch['num_char']
del data_ch
self.batch_size = batch_size
self.ch_idx = 0
def next_batch(self):
trk_positions = []
trk_ones = []
art_positions = []
ch_seed = []
ch_titles = []
ch_titles_exist = []
ch_pid = []
# start_time = time.time()
for i in range(self.batch_size):
seed, seed_art, title, title_exist, pid = self.playlists[self.ch_idx]
len_s = len(seed)
if (len_s > 50) and self.is_in_order:
trk_ones += [0.15]*(len_s-15) + [1.0]*15
else:
trk_ones += [1.0] * len_s
trks = np.array([seed]).T
playlist = np.full_like(trks, fill_value=i, dtype=np.int)
conc = np.concatenate((playlist, trks), axis=1)
trk_positions.append(conc)
ch_seed.append(seed)
arts = np.array([seed_art]).T
playlist = np.full_like(arts, fill_value=i, dtype=np.int)
conc = np.concatenate((playlist, arts), axis=1)
art_positions.append(conc)
ch_titles.append(title)
ch_titles_exist.append(title_exist)
ch_pid.append(pid)
self.ch_idx += 1
if self.ch_idx == len(self.playlists):
self.ch_idx = 0
break
trk_positions = np.concatenate(trk_positions)
art_positions = np.concatenate(art_positions)
x_positions = np.concatenate((trk_positions, art_positions), 0)
x_ones = trk_ones + [0.5] * len(art_positions)
return x_positions, ch_seed, ch_titles, ch_titles_exist, ch_pid, x_ones
=======
answers_for_grad = np.concatenate(answers_for_grad)
# art_positions = np.concatenate(art_positions)
# x_positions = np.concatenate((trk_positions, art_positions), 0)
# x_ones = [1]*len(trk_positions) + [0.5]*len(art_positions)
return trk_positions, test_seed, test_answer, test_answer_cls, answers_for_grad
>>>>>>> e699f7b47d3f20b6e90a780eb2af4224ea75800b
| 36.31003 | 88 | 0.566549 |
aceb07ec2fa1b871308d07e2d6b17de7c3ada33b | 2,094 | py | Python | getinfoheroku.py | katie608/softdes_final_project | 083ad052ab32a7df0664ff943780891197cf3f5d | [
"MIT"
] | null | null | null | getinfoheroku.py | katie608/softdes_final_project | 083ad052ab32a7df0664ff943780891197cf3f5d | [
"MIT"
] | null | null | null | getinfoheroku.py | katie608/softdes_final_project | 083ad052ab32a7df0664ff943780891197cf3f5d | [
"MIT"
] | null | null | null | """ Code to display a different image based on the user's scene selection
on heroku. Will only display one of 4 images with same day background.
This is just for demonstration purposes and does not actually generate an image
"""
def get_hour(date):
    """Extract the two-digit hour from a JavaScript-style date string.

    The hour is taken as the two characters immediately before the first
    ':' in the string.

    >>> get_hour("Tue Apr 23 2019 23:19:57 GMT-0400 (Eastern Daylight Time)")
    '23'
    >>> get_hour("Wed Apr 24 2019 06:59:38 GMT+0300 (Asia Qatar Standard Time)")
    '06'
    """
    colon_at = date.find(":")
    return date[colon_at - 2:colon_at]
"""________________________Code for making Flask work________________________"""
# code for heroku deployment
import os
import time
HOST = '0.0.0.0' if 'PORT' in os.environ else '127.0.0.1'
PORT = int(os.environ.get('PORT', 5000))
# imports flask class
from flask import Flask, render_template, request
import requests
# created an instance of the flask class
# first argument is the name of the application’s module or package
app = Flask(__name__)
# tells which url should trigger our function. In this case, it is / because
# it is the homepage of the website.
@app.route('/', methods = ["GET","POST"])
def get_input():
    """Serve the landing page containing the scene-selection form."""
    homepage = "index.html"
    return render_template(homepage)
@app.route('/output', methods = ["GET","POST"])
def display_output():
    """Render the output page with the image matching the submitted scene/time."""
    # Pull the form values posted by the landing page.
    form = request.form
    scene = form.get('scene')
    date = form.get('date')
    dims = form.get("dims")
    hour = int(get_hour(date))
    # Pick the background folder: night, day, or the exact hour.
    if hour <= 4 or hour > 21:
        folder = "Night"
    elif 10 <= hour <= 15:
        folder = "Day"
    else:
        folder = str(hour)
    pic = "static/" + folder + "/" + str(scene) + ".png"
    time.sleep(0.5)
    return render_template('output.html', scene=scene, date=date, dims=dims, pic=pic)
if __name__ == '__main__':
# runs all doctests
import doctest
doctest.testmod()
# runs local server with flask app
# app.run()
# runs on heroku
app.run(host=HOST, port=PORT)
| 27.92 | 85 | 0.664756 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.