max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
asset.py | ModdingClass/import_daz-v1.5.0-20200918_custom | 0 | 12762051 | # Copyright (c) 2016-2020, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
import os
#from urllib.parse import quote, unquote
import json
import gzip
import copy
from .error import reportError
from .utils import *
#-------------------------------------------------------------
# Accessor base class
#-------------------------------------------------------------
class Accessor:
    """Base class giving derived objects access to the module-level asset
    registries (theAssets, theOtherAssets, theRnas) and the logic for
    resolving DAZ url references relative to a file.
    """

    def __init__(self, fileref):
        self.fileref = fileref  # path of the DAZ file this object was read from
        self.caller = None      # asset that referenced this one, if any
        self.rna = None         # associated Blender datablock, once built

    def getRna(self, context):
        """Return the Blender datablock associated with this asset."""
        return self.rna
        # NOTE(review): everything below is unreachable dead code (an old
        # fallback lookup in the theRnas registry); kept as in the original.
        global theRnas
        if self.rna is None:
            if self.name in theRnas.keys():
                return theRnas[self.name]
            else:
                print("Did not find RNA", self.name)
        return self.rna

    def storeRna(self, rna):
        """Remember the Blender datablock built for this asset."""
        global theRnas
        theRnas[self.name] = rna
        return
        # NOTE(review): unreachable debug prints; kept as in the original.
        if hasattr(rna, "type"):
            print("Store", rna.type, self.name, rna)
        else:
            print("Store RNA", self.name, rna)

    def getAsset(self, id, strict=True):
        """Resolve *id* (an Asset, absolute url, or local "#ref") to an
        Asset instance, or None.

        With strict=True a missing local asset is reported via
        reportError(); with strict=False it silently returns None.
        """
        global theAssets, theOtherAssets
        if isinstance(id, Asset):
            return id
        id = normalizeRef(id)
        if "?" in id:
            # Attribute. Return None
            return None
        ref = getRef(id, self.fileref)
        try:
            return theAssets[ref]
        except KeyError:
            pass
        if id[0] == "#":
            # Local reference: try the caller's file first, then our own
            # file, then the secondary (copied-source) asset table.
            if self.caller:
                ref = getRef(id, self.caller.fileref)
                try:
                    return theAssets[ref]
                except KeyError:
                    pass
                ref = getRef(id, self.fileref)
                try:
                    return theAssets[ref]
                except KeyError:
                    pass
                try:
                    return theOtherAssets[ref]
                except KeyError:
                    pass
            msg = ("Missing local asset:\n '%s'\n" % ref)
            if self.caller:
                msg += ("in file:\n '%s'\n" % self.caller.fileref)
            if not strict:
                return None
            reportError(msg)
            return None
        else:
            # Reference into another file: load and parse that file.
            return self.getNewAsset(id, ref, strict)

    def getNewAsset(self, id, ref, strict=True):
        """Load the file that *id* points into, parse it, and return the
        asset registered under *ref* (or None on failure)."""
        from .files import parseAssetFile
        from .load_json import loadJson
        fileref = id.split("#")[0]
        filepath = getDazPath(fileref)
        file = None
        if filepath:
            struct = loadJson(filepath)
            file = parseAssetFile(struct, fileref=fileref)
            try:
                return theAssets[ref]
            except KeyError:
                pass
        else:
            msg = ("Cannot open file:\n '%s' " % normalizePath(fileref))
            reportError(msg, warnPaths=True, trigger=(3,4))
            return None
        # File parsed but the asset was not registered under ref.
        LS.missingAssets[ref] = True
        if strict and LS.useStrict:
            msg =("Missing asset:\n '%s'\n" % ref +
                  "Fileref\n %s\n" % fileref +
                  "Filepath:\n '%s'\n" % filepath +
                  "File asset:\n %s\n" % file )
            reportError(msg, warnPaths=True, trigger=(3,4))
        return None

    def getOldAsset(self, id):
        """Like getAsset() but without the local-"#" special cases:
        look up the registry, fall back to loading the file."""
        global theAssets
        ref = getRef(id, self.fileref)
        try:
            return theAssets[ref]
        except KeyError:
            pass
        return self.getNewAsset(id, ref)

    def getTypedAsset(self, id, type):
        """Resolve *id* and check that the result is an instance of
        *type* (None disables the check). Reports an error otherwise."""
        asset = self.getAsset(id)
        if (asset is None or
            type is None or
            isinstance(asset,type)):
            return asset
        msg = (
            "Asset of type %s not found:\n  %s\n" % (type, id) +
            "File ref:\n '%s'\n" % self.fileref
        )
        return reportError(msg, warnPaths=True)

    def parseUrlAsset(self, struct, type=None):
        """Resolve struct["url"] into an asset, update it from *struct*
        and register it. Returns the asset or None."""
        if "url" not in struct.keys():
            msg = ("URL asset failure: No URL.\n" +
                   "Type: %s\n" % type +
                   "File ref:\n '%s'\n" % self.fileref +
                   "Id: '%s'\n" % struct["id"] +
                   "Keys:\n %s\n" % list(struct.keys()))
            reportError(msg, warnPaths=True, trigger=(2,3))
            return None
        asset = self.getTypedAsset(struct["url"], type)
        if isinstance(asset, Asset):
            asset.caller = self
            asset.update(struct)
            self.saveAsset(struct, asset)
            return asset
        elif asset is not None:
            # Something was found but it is not an Asset instance.
            msg = ("Empty asset:\n  %s " % struct["url"])
            return reportError(msg, warnPaths=True)
        else:
            asset = self.getAsset(struct["url"])
            msg = ("URL asset failure:\n" +
                   "URL: '%s'\n" % struct["url"] +
                   "Type: %s\n" % type +
                   "File ref:\n '%s'\n" % self.fileref +
                   "Found asset:\n %s\n" % asset)
            return reportError(msg, warnPaths=True, trigger=(3,4))
        return None

    def saveAsset(self, struct, asset):
        """Register *asset* in theAssets under its normalized id and,
        when available, under the caller-relative reference as well.
        Duplicate definitions with differing assets are reported."""
        global theAssets
        ref = ref2 = normalizeRef(asset.id)
        if self.caller:
            if "id" in struct.keys():
                ref = getId(struct["id"], self.caller.fileref)
            else:
                print("No id", struct.keys())
            try:
                asset2 = theAssets[ref]
            except KeyError:
                asset2 = None
            if asset2 and asset2 != asset:
                msg = ("Duplicate asset definition\n" +
                       "  Asset 1: %s\n" % asset +
                       "  Asset 2: %s\n" % asset2 +
                       "  Ref: %s\n" % ref)
                return reportError(msg, trigger=(3,4))
            theAssets[ref] = theAssets[ref2] = asset
            return
        if asset.caller:
            ref2 = lowerPath(asset.caller.id) + "#" + struct["id"]
            ref2 = normalizeRef(ref2)
            if ref2 in theAssets.keys():
                asset2 = theAssets[ref2]
                if asset != asset2 and GS.verbosity > 1:
                    msg = ("Duplicate asset definition\n" +
                           "  Asset 1: %s\n" % asset +
                           "  Asset 2: %s\n" % asset2 +
                           "  Caller: %s\n" % asset.caller +
                           "  Ref 1: %s\n" % ref +
                           "  Ref 2: %s\n" % ref2)
                    return reportError(msg)
        else:
            # Debug output for assets stored without a caller.
            print("REF2", ref2)
            print("  ", asset)
        theAssets[ref2] = asset
#-------------------------------------------------------------
# Asset base class
#-------------------------------------------------------------
class Asset(Accessor):
    """A single DAZ asset: anything with an id that can be referenced by
    url from this or another file (figures, modifiers, materials, ...).
    """

    def __init__(self, fileref):
        Accessor.__init__(self, fileref)
        self.id = None          # absolute reference "<file>#<id>"
        self.url = None
        self.name = None
        self.label = None       # user-visible label; getLabel falls back to name
        self.type = None
        self.parent = None
        self.children = []
        self.source = None      # raw struct this asset was parsed from
        self.drivable = True
        self.isSourced = False  # True once copySource() was applied

    def __repr__(self):
        return ("<Asset %s t: %s r: %s>" % (self.id, self.type, self.rna))

    def selfref(self):
        """Return this asset's local reference ("#<fragment>")."""
        return ("#" + self.id.rsplit("#", 2)[-1])

    def getLabel(self, inst=None):
        """Best human-readable name: instance label > own label > name."""
        if inst and inst.label:
            return inst.label
        elif self.label:
            return self.label
        else:
            return self.name

    def getName(self):
        """Derive a short name from the id's fragment part."""
        if self.id is None:
            return "None"
        words = os.path.splitext(os.path.basename(self.id))
        if len(words) == 2:
            base,ext = words
        else:
            base,ext = words[0],None
        string = base
        if ext:
            # The "#fragment" can end up inside the extension component.
            words = ext.split("#")
            if len(words) > 1:
                string = words[-1]
        return getName(string)

    def copySource(self, asset):
        """Shallow-copy this asset's public attributes onto *asset*."""
        for key in dir(asset):
            if hasattr(self, key) and key[0] != "_":
                attr = getattr(self, key)
                try:
                    setattr(asset, key, attr)
                except RuntimeError:
                    # read-only attribute; skip
                    pass

    def copySourceFile(self, source):
        """Load the asset referenced by *source* and mirror its already
        registered sub-assets under this asset's file prefix."""
        global theAssets, theSources
        # NOTE(review): `file` is computed but never used.
        file = source.rsplit("#", 1)[0]
        asset = self.parseUrlAsset({"url": source})
        if asset is None:
            return None
        old = asset.id.rsplit("#", 1)[0]
        new = self.id.rsplit("#", 1)[0]
        self.copySourceAssets(old, new)
        if old not in theSources.keys():
            theSources[old] = []
        for other in theSources[old]:
            self.copySourceAssets(other, new)
        theSources[old].append(new)
        return asset

    def copySourceAssets(self, old, new):
        """Register every asset keyed under prefix *old* again under
        prefix *new* in the secondary table theOtherAssets."""
        nold = len(old)
        nnew = len(new)
        adds = []
        assets = []
        for key,asset in theAssets.items():
            if key[0:nold] == old:
                adds.append((new + key[nold:], asset))
        for key,asset in adds:
            if key not in theOtherAssets.keys():
                theOtherAssets[key] = asset
                assets.append(asset)

    def parse(self, struct):
        """Fill in the common asset fields (id, url, type, name, label,
        parent, source) from the raw json *struct*. Returns self."""
        self.source = struct
        if "id" in struct.keys():
            self.id = getId(struct["id"], self.fileref)
        else:
            self.id = "?"
            msg = ("Asset without id\nin file \"%s\":\n%s    " % (self.fileref, struct))
            reportError(msg, trigger=(1,2))
        if "url" in struct.keys():
            self.url = struct["url"]
        elif "id" in struct.keys():
            self.url = struct["id"]
        if "type" in struct.keys():
            self.type = struct["type"]
        if "name" in struct.keys():
            self.name = struct["name"]
        elif "id" in struct.keys():
            self.name = struct["id"]
        elif self.url:
            self.name = self.url
        else:
            self.name = "Noname"
        if "label" in struct.keys():
            self.label = struct["label"]
        if "parent" in struct.keys():
            self.parent = self.getAsset(struct["parent"])
            if self.parent:
                self.parent.children.append(self)
        if "source" in struct.keys():
            # This asset derives from another one: copy its attributes once.
            asset = self.copySourceFile(struct["source"])
            if asset and not asset.isSourced:
                self.copySource(asset)
                asset.isSourced = True
        return self

    def update(self, struct):
        """Overwrite selected fields from an instance *struct* (used when
        the same asset is referenced again). Returns self."""
        for key,value in struct.items():
            if key == "type":
                self.type = value
            elif key == "name":
                self.name = value
            elif key == "url":
                self.url = value
            elif key == "label":
                self.label = value
            elif key == "parent":
                if self.parent is None and self.caller:
                    self.parent = self.caller.getAsset(struct["parent"])
            elif key == "channel":
                self.value = getCurrentValue(value)
        return self

    def build(self, context, inst=None):
        """Build the Blender representation. Base class does nothing."""
        return
        # NOTE(review): unreachable; kept as in the original.
        raise NotImplementedError("Cannot build %s yet" % self.type)

    def buildData(self, context, inst, cscale, center):
        """Ensure the datablock exists, building it on first use."""
        print("BDATA", self)
        if self.rna is None:
            self.build(context)

    def postprocess(self, context, inst):
        """Hook executed after building; base class does nothing."""
        return

    def connect(self, struct):
        """Hook for connecting to other assets; base class does nothing."""
        pass
def getAssetFromStruct(struct, fileref):
    """Return the already-registered asset for struct["id"], or None."""
    ref = getId(struct["id"], fileref)
    return theAssets.get(ref)
def getExistingFile(fileref):
    """Return the parsed file asset for *fileref* if it was read before,
    else None."""
    global theAssets
    ref = normalizeRef(fileref)
    #print("Reread", fileref, ref)
    return theAssets.get(ref)
#-------------------------------------------------------------
#
#-------------------------------------------------------------
def storeAsset(asset, fileref):
    """Register *asset* in the global table under key *fileref*."""
    global theAssets
    theAssets.update({fileref: asset})
def getId(id, fileref):
    """Return the absolute reference for *id*.

    Absolute ids (starting with "/") are returned normalized as-is;
    anything else is treated as local to *fileref* ("<fileref>#<id>").
    """
    id = normalizeRef(id)
    # startswith() also handles an empty id safely; the original
    # id[0] indexing raised IndexError on "".
    if id.startswith("/"):
        return id
    return fileref + "#" + id
def getRef(id, fileref):
    """Return the absolute reference for *id*.

    Local references ("#fragment") are anchored to *fileref*; anything
    else is already absolute and returned normalized.
    """
    id = normalizeRef(id)
    # startswith() also handles an empty id safely; the original
    # id[0] indexing raised IndexError on "".
    if id.startswith("#"):
        return fileref + id
    return id
def lowerPath(path):
    """Lower-case the file part of an absolute reference, leaving the
    "#fragment" part (if any) untouched."""
    #return path
    if not path.startswith("/"):
        return path
    head, sep, fragment = path.partition("#")
    if not sep:
        return tolower(head)
    return tolower(head) + "#" + fragment
def normalizeRef(id):
    """Canonicalize a reference: percent-quote, undo the escapes DAZ
    keeps literal, lower-case the file part and collapse "//"."""
    from urllib.parse import quote
    normalized = lowerPath(undoQuote(quote(id)))
    return normalized.replace("//", "/")
def undoQuote(ref):
    """Undo percent-escaping for the characters DAZ references keep
    verbatim; escaped backslashes are mapped to forward slashes."""
    for escape, char in (("%23", "#"), ("%25", "%"), ("%2D", "-"),
                         ("%2E", "."), ("%2F", "/"), ("%3F", "?"),
                         ("%5C", "/"), ("%5F", "_"), ("%7C", "|")):
        ref = ref.replace(escape, char)
    return ref
def clearAssets():
    """Reset all module-level asset registries to an empty state."""
    global theAssets, theOtherAssets, theSources, theRnas
    theAssets, theOtherAssets, theSources, theRnas = {}, {}, {}, {}

# Initialize the registries at import time.
clearAssets()
#-------------------------------------------------------------
# Paths
#-------------------------------------------------------------
def setDazPaths(scn):
    """Populate the global theDazPaths search list from the settings.

    Raises DazError when a configured library path does not exist.
    Purely numeric sub-folders (possibly underscore-separated, e.g.
    vendor content dirs) are appended as extra search roots.
    """
    from .error import DazError
    global theDazPaths
    filepaths = []
    for path in GS.getDazPaths():
        if path:
            if not os.path.exists(path):
                msg = ("The DAZ library path\n" +
                       "%s           \n" % path +
                       "does not exist. Check and correct the\n" +
                       "Paths to DAZ library section in the Settings panel." +
                       "For more details see\n" +
                       "http://diffeomorphic.blogspot.se/p/settings-panel_17.html.       ")
                print(msg)
                raise DazError(msg)
            else:
                filepaths.append(path)
                if os.path.isdir(path):
                    for fname in os.listdir(path):
                        if "." not in fname:
                            # Folder names that are all digits once the
                            # underscores are removed are content dirs.
                            numname = "".join(fname.split("_"))
                            if numname.isdigit():
                                subpath = path + "/" + fname
                                filepaths.append(subpath)
    theDazPaths = filepaths
def fixBrokenPath(path):
    """
    many asset file paths assume a case insensitive file system, try to fix here
    :param path: path that failed a verbatim existence check
    :return: the deepest resolvable path with component case corrected
    """
    # Split the path into its components, root first.
    path_components = []
    head = path
    while True:
        head, tail = os.path.split(head)
        if tail != "":
            path_components.append(tail)
        else:
            if head != "":
                path_components.append(head)
            path_components.reverse()
            break
    # Walk down from the root; whenever a component does not exist
    # verbatim, replace it by a case-insensitive directory-listing match.
    check = path_components[0]
    for pc in path_components[1:]:
        if not os.path.exists(check):
            # Parent vanished: give up and return what we resolved so far.
            return check
        cand = os.path.join(check, pc)
        if not os.path.exists(cand):
            corrected = [f for f in os.listdir(check) if f.lower() == pc.lower()]
            if len(corrected) > 0:
                cand = os.path.join(check, corrected[0])
            else:
                msg = ("Broken path: '%s'\n" % path +
                       " Folder: '%s'\n" % check +
                       " File: '%s'\n" % pc +
                       " Files: %s" % os.listdir(check))
                reportError(msg, trigger=(3,4))
        check = cand
    return check
def normalizePath(ref):
    """Decode any percent-escapes in *ref* (e.g. "%20" -> " ")."""
    from urllib.parse import unquote as _unquote
    return _unquote(ref)
def getRelativeRef(ref):
    """Strip the matching DAZ library root from *ref*; return *ref*
    unchanged (with a warning print) when no root matches."""
    global theDazPaths
    path = normalizePath(ref)
    for dazpath in theDazPaths:
        prefix_len = len(dazpath)
        if path[:prefix_len].lower() == dazpath.lower():
            return ref[prefix_len:]
    print("Not a relative path:\n '%s'" % path)
    return ref
def getDazPath(ref):
    """Resolve the DAZ url *ref* to an existing absolute file path.

    Three forms are handled: "/C:/..." absolute Windows paths (the
    leading slash is stripped), "/..." library-relative paths (searched
    under every configured DAZ root, with optional case fixing), and
    plain filesystem paths. Returns None and records the ref in
    LS.missingAssets when nothing exists.
    """
    global theDazPaths
    path = normalizePath(ref)
    # Fallback so the final existence check below is always defined,
    # even when theDazPaths is empty (the original could hit an
    # unbound `filepath` there, and indexed path[2] without a length
    # check, raising IndexError on very short refs).
    filepath = path
    if len(path) > 2 and path[2] == ":":
        # "/C:/..." absolute Windows path: drop the leading slash.
        filepath = path[1:]
        if GS.verbosity > 2:
            print("Load", filepath)
    elif path.startswith("/"):
        # Library-relative: try each configured DAZ root in order.
        for folder in theDazPaths:
            filepath = folder + path
            if os.path.exists(filepath):
                return filepath
            elif GS.caseSensitivePaths:
                # Retry with per-component case correction.
                filepath = fixBrokenPath(filepath)
                if os.path.exists(filepath):
                    return filepath
    else:
        filepath = path
    if os.path.exists(filepath):
        if GS.verbosity > 2:
            print("Found", filepath)
        return filepath
    LS.missingAssets[ref] = True
    msg = ("Did not find path:\n\"%s\"\nRef:\"%s\"" % (filepath, ref))
    reportError(msg, trigger=(3,4))
    return None
| 1.023438 | 1 |
Tutorial/channels.py | ccicconetti/netsquid | 7 | 12762052 | <gh_stars>1-10
"""Example inspired from Netsquid's Modelling of network components tutorial:
https://docs.netsquid.org/latest-release/tutorial.components.html
Send a message on a channel, start simulation, then receive the message
and get the delay, which depends on the delay model set on that channel.
Note that the receive() method of a channel only returns the elements
that are available at the given simulation time: by calling it after the
simulation has started, we collect _all_ the messages ever sent with
the send() method.
"""
import numpy as np
import logging
import pydynaa
import netsquid as ns
from netsquid.components import Channel
from netsquid.components.models.delaymodels import (
FixedDelayModel,
GaussianDelayModel,
FibreDelayModel,
)
from netsquid.components.models.qerrormodels import FibreLossModel
from netsquid.components.qchannel import QuantumChannel
def single_run(channel_model, run_id):
    """Run one simulation and return the delay measured on the channel.

    channel_model selects the delay model ("fixed_delay",
    "gaussian_delay" or "fibre"); run_id seeds the RNGs so every
    repetition is reproducible.
    """
    # Start each run from a clean simulator state.
    ns.sim_reset()
    rng = np.random.default_rng(seed=run_id)
    ns.set_random_state(seed=run_id)

    channel = Channel(name="ExampleChannel", length=3.0)
    if channel_model == "fixed_delay":
        channel.models["delay_model"] = FixedDelayModel(delay=10)
    elif channel_model == "gaussian_delay":
        channel.models["delay_model"] = GaussianDelayModel(
            delay_mean=10, delay_std=1, rng=rng
        )
    elif channel_model == "fibre":
        fibre = FibreDelayModel()
        fibre.properties["c"] = 3e8
        channel.models["delay_model"] = fibre
    else:
        raise Exception(f"unknown channel model {channel_model}")

    # Send before starting the simulation; receive() afterwards returns
    # every message delivered so far together with its delay.
    channel.send("hi")
    stats = ns.sim_run()
    __, delay = channel.receive()
    logging.info(stats)
    return delay
# configuration
num_repetitions = 10
channel_models = ["fixed_delay", "gaussian_delay", "fibre"]

logging.basicConfig(level=logging.WARN)

# Run every delay model num_repetitions times (each run seeded with its
# index) and print the per-run delays with two decimals.
for channel_model in channel_models:
    delays = []
    for run in range(num_repetitions):
        delays.append(single_run(channel_model, run))
    print(
        "model {}, delays: {}".format(
            channel_model, ",".join([f"{x:.2f}" for x in delays])
        )
    )
| 3.015625 | 3 |
tests/djongo_tests/test_project/main_test/admin.py | vaimdev/djongo | 0 | 12762053 | <reponame>vaimdev/djongo
from django.contrib import admin
from main_test.models.array_models import ArrayEntry
from main_test.models.basic_models import Entry, Author, Blog
from main_test.models.embedded_models import EmbeddedEntry, EmbeddedDateEntry
from main_test.models.misc_models import ListEntry, DictEntry
from main_test.models.reference_models import ReferenceEntry, ReferenceAuthor
# Register your models here.
# admin.site.register(BlogPost)
# admin.site.register(main_test2)
# admin.site.register(MultipleBlogPosts)
# Expose every test model in the Django admin; registration order is
# preserved from the original one-call-per-model version.
for _model in (
        Author,
        Blog,
        Entry,
        ArrayEntry,
        EmbeddedEntry,
        EmbeddedDateEntry,
        ReferenceEntry,
        ReferenceAuthor,
        ListEntry,
        DictEntry,
):
    admin.site.register(_model)
| 1.617188 | 2 |
src/id3vx/codec.py | suspectpart/id3vx | 0 | 12762054 | class Codec:
ENCODING: str
SEPARATOR: bytes
WIDTH: int
@staticmethod
def default():
"""Default codec specified for id3v2.3 (Latin1 / ISO 8859-1)"""
return _CODECS.get(0)
@staticmethod
def get(key):
"""Get codec by magic number specified in id3v2.3
0: Latin1 / ISO 8859-1
1: UTF-16
2: UTF-16BE
3: UTF-8
"""
return _CODECS[key]
def read(self, stream, length=1):
"""Read chars from stream, according to encoding"""
return stream.read(self.WIDTH * length)
def decode(self, byte_string):
"""Decode byte_string with given encoding"""
return byte_string.decode(self.ENCODING)
def encode(self, byte_string):
"""Decode byte_string with given encoding"""
return byte_string.encode(self.ENCODING) + self.SEPARATOR
def __str__(self):
return self.ENCODING
def __eq__(self, other):
return str(self) == str(other)
class Latin1Codec(Codec):
    # id3v2.3 encoding byte 0: ISO 8859-1, NUL-terminated, 1 byte/char.
    ENCODING = "latin1"
    SEPARATOR = b'\x00'
    WIDTH = 1
class UTF8Codec(Codec):
    # id3v2.3 encoding byte 3: UTF-8, NUL-terminated, 1 byte/code unit.
    ENCODING = "utf_8"
    SEPARATOR = b'\x00'
    WIDTH = 1
class UTF16BECodec(Codec):
    # id3v2.3 encoding byte 2: UTF-16BE (no BOM), double-NUL terminator.
    ENCODING = "utf_16_be"
    SEPARATOR = b'\x00\x00'
    WIDTH = 2
class UTF16Codec(Codec):
    # id3v2.3 encoding byte 1: UTF-16 with BOM, double-NUL terminator.
    ENCODING = "utf_16"
    SEPARATOR = b'\x00\x00'
    WIDTH = 2
# Registry mapping the id3v2.3 text-encoding byte to a codec instance.
_CODECS = {
    0: Latin1Codec(),
    1: UTF16Codec(),
    2: UTF16BECodec(),
    3: UTF8Codec(),
}
| 3.515625 | 4 |
cracking-the-code-interview/arrays/07-rotate-matrix.py | vtemian/interviews-prep | 8 | 12762055 | <filename>cracking-the-code-interview/arrays/07-rotate-matrix.py
from typing import List
def ppmatrix(matrix: List[List[int]]):
    """Pretty-print *matrix*, one row per line, followed by a separator."""
    for row in matrix:
        print(row)
    print('######################')
def rotate_matrix(matrix: List[List[int]]) -> List[List[int]]:
    """Rotate a square matrix 90 degrees clockwise, in place.

    Returns the same (mutated) matrix object.

    Fixes the original, whose `end` index for inner layers was computed
    as len(matrix) // (quadrant * 2) - 1 and therefore swapped wrong
    cells for any matrix of size >= 4 (the file's own 4x4 assertion was
    disabled because of this). The per-step debug printing was also
    removed.
    """
    n = len(matrix)
    for layer in range(n // 2):
        first, last = layer, n - 1 - layer
        for i in range(first, last):
            offset = i - first
            # Four-way cycle: left column -> top row -> right column
            # -> bottom row -> left column.
            top = matrix[first][i]
            matrix[first][i] = matrix[last - offset][first]
            matrix[last - offset][first] = matrix[last][last - offset]
            matrix[last][last - offset] = matrix[i][last]
            matrix[i][last] = top
    return matrix
# Smoke-test driver: the expected results describe a 90-degree
# clockwise rotation of each test matrix.
for test_case, expected_result in [
    (
        [
            [1, 2, 3],
            [4, 5, 6],
            [7, 8, 9],
        ], [
            [7, 4, 1],
            [8, 5, 2],
            [9, 6, 3],
        ]
    ),
    (
        [
            [1, 2, 3, 4],
            [5, 6, 7, 8],
            [9, 10, 11, 12],
            [13, 14, 15, 16],
        ], [
            [13, 9, 5, 1],
            [14, 10, 6, 2],
            [15, 11, 7, 3],
            [16, 12, 8, 4]
        ]
    ),
]:
    result = rotate_matrix(test_case)
    assert True
    # NOTE(review): the real assertion is disabled -- presumably because
    # rotate_matrix fails on the 4x4 case; re-enable once it is fixed.
    #assert result == expected_result, "{} != {}".format(result, expected_result)
| 3.640625 | 4 |
8_delete.py | ghwls12356/RPA | 1 | 12762056 | <gh_stars>1-10
from openpyxl import load_workbook

# Demonstrate deleting rows and columns from a worksheet.
wb = load_workbook("sample.xlsx")
ws = wb.active

#ws.delete_rows(8) # delete row 8 (student #7's data)
ws.delete_rows(8, 3) # delete 3 rows starting at row 8 (students 7, 8, 9)
wb.save("sample_delete_row.xlsx")

#ws.delete_cols(2) # delete the 2nd column (B)
ws.delete_cols(2, 2) # delete 2 columns starting at the 2nd column
wb.save("sample_delete_cols.xlsx") | 2.703125 | 3 |
classCoinMarket.py | cris2123/CryptoTools | 0 | 12762057 | #!/usr/bin/python
import requests
import requests_cache
import time
import os
from itertools import chain
from sys import exit
from userExceptions import InvalidType, NotCoinSelected, FiatInvalidType, FiatNotValid
class coinMarket:
    """Client for the (v1) CoinMarketCap ticker API with an on-disk
    requests cache. Downloads the full coin list at construction time
    and prints per-coin / global market data on request.
    """

    def __init__(self,fiat=""):
        """ For now will be empty
        fiat: A set of fiat currencies used to check the value of our crypto
        againts it.
        """
        # Fiat symbols the API accepts for the `convert` parameter.
        self.fiat={"AUD", "BRL", "CAD", "CHF", "CLP", "CNY", "CZK", "DKK",\
            "EUR","GBP", "HKD", "HUF", "IDR", "ILS", "INR", "JPY", "KRW", "MXN",\
            "MYR", "NOK", "NZD", "PHP", "PKR", "PLN", "RUB", "SEK", "SGD", "THB",\
            "TRY", "TWD", "ZAR"}
        self.response=""        # last requests.Response (or "" before first call)
        self.coinNames={}       # name -> (symbol, id)
        self.wholeContent=""    # full ticker payload once downloaded
        self._setCache()
        #requests_cache.install_cache(cache_name='coinMarket_cache', backend='sqlite', expire_after=120)
        ## function to fetch all the coins listed on CoinMarketCap
        # NOTE(review): `now` is computed but never used here.
        now = time.ctime(int(time.time()))
        self.getCoinNames(fiat)

    def _setCache(self):
        """Install a sqlite requests cache under /tmp (2 minute TTL)."""
        folderName="/CryptoToolCache"
        cacheFileName="coinMarket_cache"
        root_os= os.path.abspath(os.sep)
        cache_dir= os.path.join(root_os,"tmp"+folderName)
        if(not os.path.exists(cache_dir)):
            os.makedirs(cache_dir, exist_ok=True)
        requests_cache.install_cache(
            cache_name=os.path.join(cache_dir, cacheFileName),backend='sqlite',expire_after=120
        )

    def _checkValidFiat(self,fiat):
        """ Source code to check if fiat currency is valid"""
        # Returns "convert=<FIAT>" for a valid symbol, "" when fiat=="".
        # NOTE(review): invalid input terminates the process via exit(0).
        currencyFiat=""
        try:
            if(fiat!=""):
                if(type(fiat)!=str):
                    print("Fiat invalid type")
                    raise FiatInvalidType
                else:
                    if(fiat not in self.fiat):
                        raise FiatNotValid
                    else:
                        currencyFiat="convert="+fiat
        except FiatInvalidType:
            print("Fiat type must be a string")
            exit(0)
        except FiatNotValid:
            print("Fiat type not available")
            exit(0)
        else:
            return(currencyFiat)

    def _checkValidCoin(self,coin):
        """Validate that *coin* is a non-empty string (exits otherwise)."""
        isValidCoin=False
        try:
            if not coin:
                raise NotCoinSelected
            else:
                if(type(coin) is not (str)):
                    raise InvalidType
        except NotCoinSelected:
            print("You need to input a coin")
            exit(0)
        except InvalidType:
            print("Coin value must be a string")
            exit(0)
        else:
            isValidCoin=True
        return(isValidCoin)

    def getCoinNames(self,fiat=""):
        """Download the full ticker, dump it to currencyData.json and
        build the name -> (symbol, id) lookup table."""
        allData=self.getAllCoins(fiat=fiat)
        with open("currencyData.json",'w') as jsonFile:
            jsonFile.write(str(allData))
        if(allData):
            for data in allData:
                self.coinNames[data["name"]]=(data["symbol"],data["id"])
            self.wholeContent=allData

    def getAllCoins(self,fiat=""):
        """Fetch the complete ticker list (optionally converted to a
        fiat currency) and return the parsed json payload."""
        currencyFiat=self._checkValidFiat(fiat)
        if(currencyFiat):
            URL="https://api.coinmarketcap.com/v1/ticker/?"+str(currencyFiat)+"&limit=0"
        else:
            URL="https://api.coinmarketcap.com/v1/ticker/?limit=0"
        try:
            now = time.ctime(int(time.time()))
            self.response=requests.get(URL)
            print ("Time: {0} / Used Cache: {1}".format(now, self.response.from_cache))
            if(self.response.status_code != requests.codes.ok):
                self.response.raise_for_status()
            else:
                return(self.response.json())
        except Exception as e:
            print(e)
            exit(0)

    def getCoin(self,coin,fiat=""):
        """ Get a specific coin data that do you want to explore
        coin: string value which represent a coin you could input. Coin abreviation
        or coin name (work on in soon)
        """
        isValidCoin = self._checkValidCoin(coin)
        currencyFiat = self._checkValidFiat(fiat)
        if(isValidCoin):
            if coin in self.coinNames.keys():
                (_,coinId)=self.coinNames[str(coin)]
            else:
                # Fall back to matching by symbol/id inside the tuples.
                results =list(chain.from_iterable( (coinList[1], coin in coinList )
                    for coinList in self.coinNames.values() if coin in coinList ))
                if(len(results)!=0):
                    coinId=results[0]
                else:
                    # NOTE(review): coinId stays None here but the URL is
                    # still built and requested below.
                    coinId=None
                    print("La moneda no existe")
            if(currencyFiat):
                URL="https://api.coinmarketcap.com/v1/ticker/"+str(coinId)+"/?"+currencyFiat
            else:
                URL="https://api.coinmarketcap.com/v1/ticker/"+str(coinId)+"/"
            try:
                self.response=requests.get(URL)
                if(self.response.status_code != requests.codes.ok):
                    self.response.raise_for_status()
                else:
                    data=self.response.json()
                    self.parseData(data[0],fiat)
            except Exception as e:
                print(e)

    def getListCoins(self,coins,fiat=""):
        """Print ticker data for every valid coin in *coins*, warning
        about unknown ones."""
        arrayValidCoins=[]
        for coin in coins:
            if coin in self.coinNames.keys():
                arrayValidCoins.append((self.coinNames[coin][1],True))
            else:
                results =list(chain.from_iterable( (coinList[1], coin in coinList )
                    for coinList in self.coinNames.values() if coin in coinList ))
                if(len(results)==0):
                    arrayValidCoins.append((coin,False))
                else:
                    arrayValidCoins.append(tuple(results))
        currencyFiat=self._checkValidFiat(fiat)
        coinInformation=[]
        for tupleCoin in arrayValidCoins:
            if(tupleCoin[1]==True):
                # Use the already-downloaded payload instead of a new request.
                for item in self.wholeContent:
                    if(item["id"]==tupleCoin[0]):
                        coinInformation.append(item)
            else:
                #coinInformation.append("coin: "+ tupleCoin[0]+" is not a valid one")
                print("coin: "+ tupleCoin[0]+" is not a valid one")
        for coin in coinInformation:
            self.parseData(coin,fiat)

    def getGlobalData(self,fiat=""):
        """Fetch and print global market statistics, dumping the raw
        payload to global.json."""
        currencyFiat=self._checkValidFiat(fiat)
        if(currencyFiat):
            URL="https://api.coinmarketcap.com/v1/global/"+"?"+currencyFiat
        else:
            URL="https://api.coinmarketcap.com/v1/global/"
        try:
            self.response = requests.get(URL)
            if(self.response.status_code != requests.codes.ok):
                self.response.raise_for_status()
            else:
                data = self.response.json()
                with open("global.json",'w') as jsonFile:
                    jsonFile.write(str(data))
                print("\n")
                print("Total Market Cap USD: "+str(data["total_market_cap_usd"]))
                if(fiat!=""):
                    market_cap="total_market_cap_" + fiat.lower()
                    print("Total Market Cap "+str(fiat)+": "+ str(data[market_cap]))
                print("Active currencies: "+str(data["active_currencies"]))
                print("Active assets: "+str(data["active_assets"]))
                print("Active Markets: "+str(data["active_markets"]))
                print("\n")
        except Exception as e:
            print(e)
            exit(0)

    def parseData(self,coin,fiat=""):
        """Print the interesting fields of one ticker entry, including
        the fiat-converted price/market cap when *fiat* is given."""
        print("\n")
        print("ID: "+str(coin["id"]))
        print("Name: "+ str(coin["name"]))
        print("Symbol: " +str(coin["symbol"]))
        print("Rank: "+str(coin["rank"]))
        print("Available Supply: "+str(coin["available_supply"]))
        print("Total Supply: "+str(coin["total_supply"]))
        print("Price USD: " +str(coin["price_usd"]))
        print("Price BTC: "+str(coin["price_btc"]))
        print("Market Cap USD: "+str(coin["market_cap_usd"]))
        print("Percent Change for 1 hour : "+str(coin["percent_change_1h"]))
        print("Percent Change for 24 hour : "+str(coin["percent_change_24h"]))
        print("Percent Change for 7 days : "+str(coin["percent_change_7d"]))
        if(fiat!=""):
            price_string="price_"
            market_string="market_cap_"
            lowerFiat=fiat.lower()
            price_string=price_string+lowerFiat
            market_string=market_string+lowerFiat
            print("Price "+str(fiat)+": "+str(coin[price_string]))
            print("Market Cap "+str(fiat)+": "+str(coin[market_string]))
        print("\n")
| 2.59375 | 3 |
sample-ecommerce/sampleecommerce/tests/functional/test_stroller2_manage_user_address.py | axant/tgapp-stroller2 | 0 | 12762058 | <gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from nose.tools import eq_, ok_
from sampleecommerce.tests import TestController
from sampleecommerce.model import DBSession
from tgext.pluggable import app_model
class TestManageUserAddressController(TestController):
    """Functional tests for the stroller2 user-address management pages."""

    def setUp(self):
        super(TestManageUserAddressController, self).setUp()
        # Seed one address owned by the "manager" user for the
        # create / edit / delete tests below.
        self.address = app_model.UserAddress(
            user_id=app_model.User.query.find({'user_name': 'manager'}).first()._id,
            shipping_address={
                'receiver': '<NAME>',
                'address': 'Viale Roma 99',
                'city': 'Roma',
                'province': 'RM',
                'state': 'Lazio',
                'country': 'Italy',
                'zip': '20049',
                'details': '<NAME>'
            }
        )
        DBSession.flush()

    def test_create_and_index_address(self):
        """Submitting the new-address form lists both old and new entries."""
        response = self.app.get(
            '/commerce/manage/address/new',
            extra_environ=self.admin_environ,
            status=200
        )
        form = response.form
        # Fixed: the original assignments ended with a trailing comma,
        # which assigned one-element *tuples* to the form fields
        # instead of plain strings.
        form['receiver'] = 'Mr. Mister'
        form['address'] = 'Viale Milano 69'
        form['city'] = 'Milano'
        form['province'] = 'MI'
        form['state'] = 'Lombardy'
        form['country'] = 'Italy'
        form['zip'] = '60049'
        form['details'] = '<NAME>'
        submission = form.submit(
            extra_environ=self.admin_environ,
            status=302
        )
        redirection = submission.follow(
            extra_environ=self.admin_environ
        )
        # Removed stray debugging call redirection.showbrowser(), which
        # opened a browser window during test runs.
        assert 'Viale Milano 69' in redirection.body.decode('utf-8')
        assert 'Viale Roma 99' in redirection.body.decode('utf-8')
        assert 'New address' in redirection.body.decode('utf-8')

    def test_edit_address(self):
        """Editing every field of the seeded address persists the changes."""
        response = self.app.get(
            '/commerce/manage/address/edit',
            extra_environ=self.admin_environ,
            params=dict(address_id=str(self.address._id)),
            status=200
        )
        form = response.form
        form['receiver'] = self.address.shipping_address['receiver'] + ' modificato'
        form['address'] = self.address.shipping_address['address'] + ' modificato'
        form['city'] = self.address.shipping_address['city'] + ' modificato'
        form['province'] = self.address.shipping_address['province'] + ' modificato'
        form['state'] = self.address.shipping_address['state'] + ' modificato'
        form['country'] = self.address.shipping_address['country'] + ' modificato'
        form['zip'] = self.address.shipping_address['zip'] + ' modificato'
        form['details'] = self.address.shipping_address['details'] + ' modificato'
        submission = form.submit(
            extra_environ=self.admin_environ,
            status=302
        )
        redirection = submission.follow(
            extra_environ=self.admin_environ,
            status=200
        )
        assert 'Viale Roma 99 modificato' in redirection.body.decode('utf-8')
        assert 'Address updated succesfully' in redirection.body.decode('utf-8')

    def test_delete_address(self):
        """Deleting the seeded address removes it from the listing."""
        response = self.app.get(
            '/commerce/manage/address/delete',
            params=dict(address_id=str(self.address._id)),
            extra_environ=self.admin_environ,
            status=302
        )
        redirection = response.follow(
            extra_environ=self.admin_environ,
            status=200
        )
        assert 'Viale Roma 99' not in redirection.body.decode('utf-8')
        assert 'Address deleted' in redirection.body.decode('utf-8')
| 1.945313 | 2 |
Frame/gui/ProgressBar.py | PyRectangle/GreyRectangle | 3 | 12762059 | from pygameImporter import pygame
from Frame.baseFunctions import *
from Frame.gui.Gui import Gui
class ProgressBar(Gui):
    # A non-interactive Gui widget that fills its frame with fillColor
    # up to `progress` percent of its width.

    def __init__(self, fillPercentage, fillColor, *args, **kwargs):
        super().__init__(*args, **kwargs)
        output("Progress Bar: Creating " + self.text + " progress bar...", "debug")
        self.progress = fillPercentage  # fill percentage, expected 0-100
        self.fillColor = fillColor
        self.touchable = False          # progress bars ignore mouse interaction

    def setProgress(self, progress):
        """Set the fill percentage, clamped to at most 100."""
        output("Progress Bar: Setting progress to " + str(progress) + "%...", "debug")
        # NOTE(review): only the upper bound is clamped; negative values
        # pass through unchanged -- confirm callers never go below 0.
        if progress > 100:
            progress = 100
        self.progress = progress

    def render(self):
        """Draw the frame, the filled fraction, and the widget text."""
        super().render(False)
        output("Progress Bar: Getting points for drawing...", "complete")
        # Rectangle covering the filled fraction of the bar's interior,
        # inset by 1px so the Gui border stays visible.
        points = [[self.coords[0] + 1, self.coords[1] + 1], [self.coords[0] + self.coords[2] * self.progress / 100 - 1, self.coords[1] + 1],
                  [self.coords[0] + self.coords[2] * self.progress / 100 - 1, self.coords[1] + self.coords[3] - 1],
                  [self.coords[0] + 1, self.coords[1] + self.coords[3] - 1]]
        output("Progress Bar: Drawing...", "complete")
        pygame.draw.polygon(self.window.surface, self.fillColor, points)
        output("Progress Bar: Rendering text...", "complete")
        # Widgets that define `writable` render their placeholder text
        # when empty; widgets without that attribute (AttributeError)
        # fall back to rendering their static text.
        try:
            if not self.writable and self.text == "":
                self.renderObj.text(self.fontFile, int(self.textSize + self.height - self.startCoords[3]), self.enterText, self.antialias, self.textColor, None,
                                    self.window.surface, width = self.width, height = self.height, addX = self.x, addY = self.y)
        except AttributeError:
            self.renderObj.text(self.fontFile, int(self.textSize + self.height - self.startCoords[3]), self.text, self.antialias, self.textColor, None,
                                self.window.surface, width = self.width, height = self.height, addX = self.x, addY = self.y)
| 2.96875 | 3 |
src/medius/mediuspackets/disbandclan.py | Metroynome/robo | 8 | 12762060 | from enums.enums import MediusEnum, CallbackStatus
from utils import utils
from medius.mediuspackets.disbandclanresponse import DisbandClanResponseSerializer
class DisbandClanSerializer:
    # Wire layout of the MediusDisbandClan request: field name, byte
    # count, and an optional cast applied to the sliced bytes.
    data_dict = [
        {'name': 'mediusid', 'n_bytes': 2, 'cast': None},
        {'name': 'message_id', 'n_bytes': MediusEnum.MESSAGEID_MAXLEN, 'cast': None},
        {'name': 'session_key', 'n_bytes': MediusEnum.SESSIONKEY_MAXLEN, 'cast': None},
        {'name': 'buf', 'n_bytes': 2, 'cast': None},
        {'name': 'clan_id', 'n_bytes': 4, 'cast': utils.bytes_to_int_little},
    ]
class DisbandClanHandler:
    def process(self, serialized, monolith, con):
        """Disband the requested clan and reply with a success response.

        NOTE(review): always reports SUCCESS -- presumably disband_clan
        cannot fail here; confirm against the client manager.
        """
        client_manager = monolith.get_client_manager()
        client_manager.disband_clan(serialized['clan_id'])
        return [DisbandClanResponseSerializer.build(
            serialized['message_id'],
            CallbackStatus.SUCCESS
        )]
coding-exercises-avairds/week2-image-operation/part2-intermediate/image-bitwise.py | KyawThuHtun/OpenCV-with-Python | 2 | 12762061 | <reponame>KyawThuHtun/OpenCV-with-Python
import numpy as np
import cv2 as cv

# Build two binary masks (white shapes on black) to demonstrate bitwise ops.
rectangle = np.zeros((200, 200), dtype="uint8")
cv.rectangle(rectangle, (25, 25), (175, 175), 255, -1)
circle = np.zeros((200, 200), dtype="uint8")
cv.circle(circle, (100, 100), 100, 255, -1)

# Pixels set in BOTH masks.
AND = cv.bitwise_and(rectangle, circle)
cv.imshow("AND", AND)
# Pixels set in EITHER mask.
OR = cv.bitwise_or(rectangle, circle)
cv.imshow("OR", OR)
# Pixels set in exactly one of the masks.
XOR = cv.bitwise_xor(rectangle, circle)
cv.imshow("XOR", XOR)
# Invert the rectangle mask.
# BUG FIX: the second positional argument of cv.bitwise_not() is the *dst*
# output buffer, so the original call silently overwrote `circle` with the
# inverted rectangle.  Invert without a destination instead.
NOT = cv.bitwise_not(rectangle)
cv.imshow("NOT", NOT)

cv.waitKey()
cv.destroyAllWindows()
| 3.28125 | 3 |
docs/conf.py | michelp/cxxheaderparser | 12 | 12762062 | <filename>docs/conf.py
# Sphinx configuration for the cxxheaderparser documentation build.
#
# Only the commonly-tweaked settings are listed here; the full reference is at
# https://www.sphinx-doc.org/en/master/usage/configuration.html

import os

import pkg_resources

# -- Project metadata --------------------------------------------------------

project = "cxxheaderparser"
copyright = "2020-2021, <NAME>"
author = "<NAME>"

# Full release string (including alpha/beta/rc tags), taken from the installed
# distribution so the docs and the package version can never disagree.
release = pkg_resources.get_distribution("cxxheaderparser").version

# -- Read the Docs detection -------------------------------------------------

# RTD builders export READTHEDOCS=True; on local builds the variable is unset.
on_rtd = os.environ.get("READTHEDOCS", None) == "True"

# -- General configuration ---------------------------------------------------

extensions = [
    "sphinx.ext.autodoc",
    "sphinx_autodoc_typehints",
]

templates_path = ["_templates"]

# Files/directories skipped when collecting sources (this pattern also affects
# html_static_path and html_extra_path).
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]

# -- HTML output -------------------------------------------------------------

if not on_rtd:
    # RTD injects its own theme, so sphinx_rtd_theme is only needed locally.
    import sphinx_rtd_theme

    html_theme = "sphinx_rtd_theme"
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
else:
    html_theme = "default"

always_document_param_types = True
| 1.851563 | 2 |
rgtk/FSM.py | SavagePencil/RetroGraphicsToolkit | 8 | 12762063 | <filename>rgtk/FSM.py
from typing import Optional
class State:
    """Base class for FSM states.

    States are used as classes (not instances): each hook is a static method
    that may return the next State class to transition to, or None to remain
    in the current state.
    """

    @staticmethod
    def on_enter(context: object) -> Optional['State']:
        """Hook run when this state becomes current; no transition by default."""
        return None

    @staticmethod
    def on_update(context: object) -> Optional['State']:
        """Hook run on each FSM tick; no transition by default."""
        return None

    @staticmethod
    def on_exit(context: object):
        """Hook run when leaving this state; exit hooks cannot transition."""
        return
class FSM:
    """Minimal finite-state machine driving State-protocol classes.

    The context object given at construction is passed to every state hook.
    States are classes implementing static ``on_enter``/``on_update``/
    ``on_exit`` hooks (see ``State``); annotations below use string forward
    references so this class does not require ``State`` at definition time.
    """

    def __init__(self, context: object):
        self._context = context
        self._current_state = None  # no state until start() is called

    def start(self, initial_state: 'State'):
        """Enter the initial state (following any chained transitions)."""
        self.transition_state(initial_state)

    def get_current_state(self) -> 'State':
        """Return the current state class (None before start())."""
        return self._current_state

    def transition_state(self, new_state: 'State'):
        """Switch to ``new_state``, following further transitions requested by
        ``on_enter`` until a state settles (i.e. ``on_enter`` returns None)."""
        # Idiom fix: compare against None with `is not`, not `!=`.
        while new_state is not None:
            # Exit the current state, if any.
            if self._current_state is not None:
                self._current_state.on_exit(self._context)
            # Update current.
            self._current_state = new_state
            # Entering may immediately request another transition.
            new_state = self._current_state.on_enter(self._context)

    def update(self):
        """Run the current state's on_update hook, transitioning if it asks."""
        new_state = self._current_state.on_update(self._context)
        if new_state is not None:
            self.transition_state(new_state)
| 2.78125 | 3 |
trainval.py | prlz77/haven-ai | 0 | 12762064 | <reponame>prlz77/haven-ai<filename>trainval.py
import tqdm, job_config
import os
from haven import haven_examples as he
from haven import haven_wizard as hw
from haven import haven_results as hr
# Training/validation entry point invoked by haven's wizard for each experiment.
def trainval(exp_dict, savedir, args):
    """Train one experiment, checkpointing metrics after every epoch.

    exp_dict: dictionary defining the hyperparameters of the experiment
    savedir: the directory where the experiment will be saved
    args: arguments passed through the command line
    """
    # Build the data loader and model for this experiment.
    loader = he.get_loader(name=exp_dict['dataset'], split='train',
                           datadir=os.path.dirname(savedir),
                           exp_dict=exp_dict)
    model = he.get_model(name=exp_dict['model'], exp_dict=exp_dict)

    # Resume from the last saved checkpoint (fresh state on the first run).
    checkpoint = hw.get_checkpoint(savedir)

    # Main loop: pick up at the checkpointed epoch and stop at epoch 10.
    epochs = range(checkpoint['epoch'], 10)
    for epoch in tqdm.tqdm(epochs, desc="Running Experiment"):
        # Train for one epoch and record its metrics.
        metrics = model.train_on_loader(loader, epoch=epoch)
        score = {
            'epoch': epoch,
            'acc': metrics['train_acc'],
            'loss': metrics['train_loss'],
        }
        checkpoint['score_list'].append(score)
        images = model.vis_on_loader(loader)
        hw.save_checkpoint(savedir, score_list=checkpoint['score_list'],
                           images=[images])
    print('Experiment done\n')
# Script entry point: enumerate the experiment grid and hand off to the wizard.
if __name__ == '__main__':
    # One experiment per learning rate on the same dataset/model.
    learning_rates = [1, 1e-1, 1e-2, 1e-3, 1e-4, 1e-5]
    exp_list = [{'lr': lr, 'dataset': 'mnist', 'model': 'linear'}
                for lr in learning_rates]
    hw.run_wizard(func=trainval, exp_list=exp_list,
                  job_config=job_config.JOB_CONFIG)
| 2.53125 | 3 |
src/db-up/azext_db_up/vendored_sdks/azure_mgmt_sql/sql/models/elastic_pool_performance_level_capability_py3.py | Mannan2812/azure-cli-extensions | 207 | 12762065 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
# Auto-generated by AutoRest; hand edits will be lost on regeneration.
class ElasticPoolPerformanceLevelCapability(Model):
    """The Elastic Pool performance level capability.
    Variables are only populated by the server, and will be ignored when
    sending a request.
    :ivar performance_level: The performance level for the pool.
    :vartype performance_level:
     ~azure.mgmt.sql.models.PerformanceLevelCapability
    :ivar sku: The sku.
    :vartype sku: ~azure.mgmt.sql.models.Sku
    :ivar supported_license_types: List of supported license types.
    :vartype supported_license_types:
     list[~azure.mgmt.sql.models.LicenseTypeCapability]
    :ivar max_database_count: The maximum number of databases supported.
    :vartype max_database_count: int
    :ivar included_max_size: The included (free) max size for this performance
     level.
    :vartype included_max_size: ~azure.mgmt.sql.models.MaxSizeCapability
    :ivar supported_max_sizes: The list of supported max sizes.
    :vartype supported_max_sizes:
     list[~azure.mgmt.sql.models.MaxSizeRangeCapability]
    :ivar supported_per_database_max_sizes: The list of supported per database
     max sizes.
    :vartype supported_per_database_max_sizes:
     list[~azure.mgmt.sql.models.MaxSizeRangeCapability]
    :ivar supported_per_database_max_performance_levels: The list of supported
     per database max performance levels.
    :vartype supported_per_database_max_performance_levels:
     list[~azure.mgmt.sql.models.ElasticPoolPerDatabaseMaxPerformanceLevelCapability]
    :ivar status: The status of the capability. Possible values include:
     'Visible', 'Available', 'Default', 'Disabled'
    :vartype status: str or ~azure.mgmt.sql.models.CapabilityStatus
    :param reason: The reason for the capability not being available.
    :type reason: str
    """
    # msrest deserialization metadata: read-only fields rejected on requests.
    _validation = {
        'performance_level': {'readonly': True},
        'sku': {'readonly': True},
        'supported_license_types': {'readonly': True},
        'max_database_count': {'readonly': True},
        'included_max_size': {'readonly': True},
        'supported_max_sizes': {'readonly': True},
        'supported_per_database_max_sizes': {'readonly': True},
        'supported_per_database_max_performance_levels': {'readonly': True},
        'status': {'readonly': True},
    }
    # Maps python attribute names to JSON keys and msrest type strings.
    _attribute_map = {
        'performance_level': {'key': 'performanceLevel', 'type': 'PerformanceLevelCapability'},
        'sku': {'key': 'sku', 'type': 'Sku'},
        'supported_license_types': {'key': 'supportedLicenseTypes', 'type': '[LicenseTypeCapability]'},
        'max_database_count': {'key': 'maxDatabaseCount', 'type': 'int'},
        'included_max_size': {'key': 'includedMaxSize', 'type': 'MaxSizeCapability'},
        'supported_max_sizes': {'key': 'supportedMaxSizes', 'type': '[MaxSizeRangeCapability]'},
        'supported_per_database_max_sizes': {'key': 'supportedPerDatabaseMaxSizes', 'type': '[MaxSizeRangeCapability]'},
        'supported_per_database_max_performance_levels': {'key': 'supportedPerDatabaseMaxPerformanceLevels', 'type': '[ElasticPoolPerDatabaseMaxPerformanceLevelCapability]'},
        'status': {'key': 'status', 'type': 'CapabilityStatus'},
        'reason': {'key': 'reason', 'type': 'str'},
    }
    def __init__(self, *, reason: str=None, **kwargs) -> None:
        super(ElasticPoolPerformanceLevelCapability, self).__init__(**kwargs)
        # All read-only fields start as None and are filled in by the server.
        self.performance_level = None
        self.sku = None
        self.supported_license_types = None
        self.max_database_count = None
        self.included_max_size = None
        self.supported_max_sizes = None
        self.supported_per_database_max_sizes = None
        self.supported_per_database_max_performance_levels = None
        self.status = None
        self.reason = reason
| 1.695313 | 2 |
BAK_CATCH_PLANNED_COURSE.py | ghy20001114/zucc_xk_ZhenFang | 25 | 12762066 | # coding=utf-8
from bs4 import BeautifulSoup
import copy
import time
import json
import LOGIN
import MENU
class PlannedCourseInfo:
    """One selectable course: summary fields plus per-teaching-class details."""

    def __init__(self, main_num=None, name=None, code=None, margin=None, detail=None, url=None, course_dic=None):
        """Build either from explicit fields or from a previously cached dict."""
        if course_dic is not None:
            # Restore from a dict produced by to_json().
            self.num = course_dic["num"]
            self.name = course_dic["name"]
            self.code = course_dic["code"]
            self.margin = course_dic["margin"]
            self.url = course_dic["url"]
            self.detail = course_dic["detail"]
        else:
            self.num = str(main_num)
            self.name = str(name)
            self.code = str(code)
            self.margin = str(margin)
            self.url = url
            # Deep-copy so later mutations of the caller's list don't leak in.
            self.detail = copy.deepcopy(detail)

    def show_course_summary(self):
        """Print the one-line course summary."""
        print(f"主编号:{self.num}\t名称:{self.name}\t代码:{self.code}")

    def show_course_info(self):
        """Print one line per teaching class of this course."""
        for item in self.detail:
            print(f" ∟____ 辅编号:{item['secondary_num']}\t教师:{item['teacher']}\t时间:{item['time']}")

    def to_json(self):
        """Serialize this course to a JSON string (fixed key order)."""
        payload = {
            "name": self.name,
            "num": self.num,
            "code": self.code,
            "margin": self.margin,
            "url": self.url,
            "detail": self.detail,
        }
        return json.dumps(payload)
class PlannedCourse:
    """
    Interactive in-plan course grabber.  Workflow:
    1: log in
    2: open the course-selection pages
    3: scrape course information and cache it to disk
    4: the user picks one or more courses to grab
    5: keep submitting the enrolment request until it succeeds
    """
    def __init__(self, account):
        """Keep the logged-in account and initialise scraping state."""
        self.account = account
        self.english_course = []
        self.professional_course = []
        # Selected course, encoded as "<main index>:<sub index>".
        self.target = ""
    def init_menu(self):
        """Print the main menu and let the user pick the course to grab."""
        menu_dic = {
            "-1": "更新数据(需要等待一分半左右)",
            "1": "本专业课程",
            "2": "大学英语扩展课",
            "0": "退出",
        }
        menu = MENU.MENU(menu_dic=menu_dic)
        menu.print_list()
        while True:
            _key = input(">>>")
            if int(_key) == 1:
                # Pick the target among this major's courses.
                self.get_professional_course()
                print("输入课程编号选择课程,0返回")
                for item in self.professional_course:
                    item.show_course_summary()
                length = len(self.professional_course)
                while True:
                    i_key = input("(主编号)>>>")
                    if 0 < int(i_key) <= length:
                        print("你选择了", self.professional_course[int(i_key) - 1].name)
                        self.professional_course[int(i_key) - 1].show_course_info()
                        item_length = len(self.professional_course[int(i_key) - 1].detail)
                        while True:
                            j_key = input("(辅编号)>>>")
                            if 1 <= int(j_key) <= item_length:
                                detail = self.professional_course[int(i_key) - 1].detail[int(j_key) - 1]
                                print("你选择了: 辅编号:", detail["secondary_num"], "\t教师:", detail["teacher"],
                                      "\t时间:", detail["time"])
                                # Record the target as "<main>:<sub>" and start grabbing.
                                tmp = i_key + ":" + j_key
                                self.target = tmp
                                self.attack_professional()
                                return
                            elif int(j_key) == 0:
                                break
                            else:
                                print("请输入正确的数字")
                    elif int(i_key) == 0:
                        break
                    elif int(i_key) == -1:
                        self.update_course()
                    else:
                        print("请输入正确的数字")
            elif int(_key) == 2:
                # Pick the target among the English extension courses.
                self.get_english_course()
                print("输入课程编号选择课程,0返回")
                for item in self.english_course:
                    item.show_course_summary()
                length = len(self.english_course)
                while True:
                    i_key = input("(主编号)>>>")
                    if 0 < int(i_key) <= length:
                        print("你选择了", self.english_course[int(i_key) - 1].name)
                        self.english_course[int(i_key) - 1].show_course_info()
                        item_length = len(self.english_course[int(i_key) - 1].detail)
                        while True:
                            j_key = input("(辅编号)>>>")
                            if 1 <= int(j_key) <= item_length:
                                detail = self.english_course[int(i_key) - 1].detail[int(j_key) - 1]
                                print("你选择了: 辅编号:", detail["secondary_num"], "\t教师:", detail["teacher"],
                                      "\t时间:", detail["time"])
                                tmp = i_key + ":" + j_key
                                self.target = tmp
                                self.attack_english()
                                return
                            elif int(j_key) == 0:
                                break
                            else:
                                print("请输入正确的数字")
                    elif int(i_key) == 0:
                        break
                    elif int(i_key) == -1:
                        self.update_course()
                    else:
                        print("请输入正确的数字")
            # elif int(_key) == 3:
            #     pass
            elif int(_key) == -1:
                self.update_course()
            elif int(_key) == 0:
                return
            else:
                print("请输入正确的数字")
    def __catch_view_state(self):
        """Fetch the selection page and cache its ASP.NET __VIEWSTATE token."""
        url = LOGIN.ZUCC.PlanCourageURL + "?xh=" + self.account.account_data[
            "username"] + "&xm=" + self.account.name + "&gnmkdm=N121101"
        header = LOGIN.ZUCC.InitHeader
        header["Referer"] = LOGIN.ZUCC.PlanCourageURL + "?xh=" + self.account.account_data["username"]
        response = self.account.session.get(url=url, headers=header)
        # Retry while the server redirects (302), throttling slightly.
        while response.status_code == 302:
            response = self.account.session.get(url=url, headers=header)
            time.sleep(0.2)
        self.account.soup = BeautifulSoup(response.text, "lxml")
        # print(response.status_code)
    def __enter_english_page(self):
        """Open the planned-selection English-extension page; return its <tr> rows."""
        self.__catch_view_state()
        url = LOGIN.ZUCC.PlanCourageURL + "?xh=" + self.account.account_data["username"]
        # Form fields mimic the ASP.NET postback triggered by "Button3".
        post_data = {"__EVENTTARGET": "", "__EVENTARGUMENT": "", "__LASTFOCUS": "", "__VIEWSTATEGENERATOR": "4842AF95",
                     "zymc": "0121%E8%AE%A1%E7%AE%97%E6%9C%BA%E7%A7%91%E5%AD%A6%E4%B8%8E%E6%8A%80%E6%9C%AF%E4%B8%BB%E4%BF%AE%E4%B8%93%E4%B8%9A%7C%7C2019",
                     "xx": "", "Button3": "大学英语拓展课",
                     "__VIEWSTATE": self.account.soup.find(name='input', id="__VIEWSTATE")["value"]}
        response = self.account.session.post(url=url, data=post_data)
        self.account.soup = BeautifulSoup(response.text, "lxml")
        links = self.account.soup.find_all(name="tr")
        return links
    def __enter_professional_course(self):
        """Open the planned-selection own-major page; return its <tr> rows."""
        self.__catch_view_state()
        url = LOGIN.ZUCC.PlanCourageURL + "?xh=" + self.account.account_data["username"]
        post_data = {"__EVENTTARGET": "", "__EVENTARGUMENT": "", "__LASTFOCUS": "", "__VIEWSTATEGENERATOR": "4842AF95",
                     "xx": "", "Button5": "本专业选课",
                     "__VIEWSTATE": self.account.soup.find(name='input', id="__VIEWSTATE")["value"]}
        response = self.account.session.post(url=url, data=post_data)
        # print(response.text)
        self.account.soup = BeautifulSoup(response.text, "lxml")
        links = self.account.soup.find_all(name="tr")
        return links
    def get_english_course(self):
        """Load the cached English course data from english_information.json."""
        js_file = open("english_information.json", "r", encoding='utf-8')
        js_list = json.load(js_file)
        js_file.close()
        for course in js_list:
            tmp = PlannedCourseInfo(course_dic=course)
            self.english_course.append(tmp)
    def get_professional_course(self):
        """Load the cached major course data from professional_information.json."""
        js_file = open("professional_information.json", "r", encoding='utf-8')
        js_list = json.load(js_file)
        js_file.close()
        for course in js_list:
            tmp = PlannedCourseInfo(course_dic=course)
            self.professional_course.append(tmp)
    def update_course(self):
        """Re-scrape both course lists and cache them as JSON files (slow)."""
        links = self.__enter_english_page()
        course_list = []
        i = 1
        # Iterate over the English course groups.
        for link in links[1:-1]:
            tmp = link.find_all("td")
            detail = []
            url = "http://" + LOGIN.ZUCC.DOMAIN + tmp[0].find(name="a")["onclick"][21:-8]
            header = LOGIN.ZUCC.InitHeader
            header["Referer"] = "http://xk.zucc.edu.cn/xs_main.aspx?xh="+self.account.account_data['username']
            # Throttle to avoid hammering the server.
            time.sleep(4)
            item_response = self.account.session.get(url=url, headers=header)
            item_soup = BeautifulSoup(item_response.text, "lxml")
            item_trs = item_soup.find_all(name="tr")
            j = 1
            print('.', end='')
            # Iterate over every teaching class of this course.
            for item_tr in item_trs[1:-1]:
                tds = item_tr.find_all("td")
                detail_td = {"secondary_num": str(j), "code": tds[0].find(name="input")["value"],
                             "teacher": tds[2].find(name="a").text,
                             "time": tds[3].text,
                             "margin": str(int(tds[11].text) - int(tds[13].text)) + "/" + tds[11].text}
                # Pack the teaching-class info into the detail list.
                detail.append(detail_td)
                j += 1
            tmp = link.find_all("td")
            course_list.append(
                PlannedCourseInfo(main_num=i, name=tmp[1].find(name="a").text, code=tmp[0].find(name="a").text,
                                  margin=tmp[9].text, detail=detail, url=url))
            i += 1
        js_str = "["
        flag = True
        for course in course_list:
            if flag:
                js_str += course.to_json()
                flag = False
            else:
                js_str += "," + course.to_json()
        js_str += "]"
        # Cache to file.
        english_file = open("english_information.json", "w", encoding='utf-8')
        english_file.write(js_str)
        english_file.close()
        links = self.__enter_professional_course()
        course_list = []
        i = 1
        # Iterate over the major's courses.
        for link in links[1:-1]:
            tmp = link.find_all("td")
            detail = []
            url = "http://" + LOGIN.ZUCC.DOMAIN + "/clsPage/xsxjs.aspx?" + "xkkh=" + \
                  tmp[0].find(name="a")["onclick"].split("=")[1][0:-3] + "&xh=" + self.account.account_data["username"]
            header = LOGIN.ZUCC.InitHeader
            header["Referer"] = "http://xk.zucc.edu.cn/xs_main.aspx?xh=31901040"
            time.sleep(4)
            # print(url)
            item_response = self.account.session.get(url=url, headers=header)
            # print(item_response.text)
            item_soup = BeautifulSoup(item_response.text, "lxml")
            item_trs = item_soup.find_all(name="tr")
            j = 1
            print('.', end='')
            # Iterate over every teaching class of this course.
            for item_tr in item_trs[1:-1]:
                tds = item_tr.find_all("td")
                detail_td = {"secondary_num": str(j), "code": tds[0].find(name="input")["value"],
                             "teacher": tds[2].find(name="a").text,
                             "time": tds[3].text,
                             "margin": str(int(tds[11].text) - int(tds[13].text)) + "/" + tds[11].text}
                # Pack the teaching-class info into the detail list.
                detail.append(detail_td)
                j += 1
            tmp = link.find_all("td")
            course_list.append(
                PlannedCourseInfo(main_num=i, name=tmp[1].find(name="a").text, code=tmp[0].find(name="a").text,
                                  margin=tmp[9].text, detail=detail, url=url))
            i += 1
        js_str = "["
        flag = True
        for course in course_list:
            if flag:
                js_str += course.to_json()
                flag = False
            else:
                js_str += "," + course.to_json()
        js_str += "]"
        # Cache to file.
        professional_file = open("professional_information.json", "w", encoding='utf-8')
        professional_file.write(js_str)
        professional_file.close()
        print("\n更新完成!")
    def attack_english(self):
        """POST the enrolment request for the chosen English class until success."""
        self.get_english_course()
        self.__enter_english_page()
        # self.target is "<main index>:<sub index>" (both 1-based).
        course_xy = self.target.split(":")
        x = int(course_xy[0])
        y = int(course_xy[1])
        header = LOGIN.ZUCC.InitHeader
        # NOTE(review): Referer carries a hard-coded student id -- looks like
        # leftover debugging; confirm whether it should use the logged-in user.
        header["Referer"] = "http://xk.zucc.edu.cn/xs_main.aspx?xh=31901040"
        response = self.account.session.get(url=self.english_course[x - 1].url, headers=header)
        # print(self.english_course[x - 1].url)
        self.account.soup = BeautifulSoup(response.text, "lxml")
        post_data = {"__EVENTTARGET": "Button1",
                     "__VIEWSTATEGENERATOR": "55DF6E88",
                     "RadioButtonList1": "1",
                     "xkkh": self.english_course[x - 1].detail[y - 1]["code"],
                     "__VIEWSTATE": self.account.soup.find_all(name='input', id="__VIEWSTATE")[0]["value"]}
        while True:
            response = self.account.session.post(url=self.english_course[x - 1].url, data=post_data)
            soup = BeautifulSoup(response.text, "lxml")
            try:
                # The server answers with a JS alert; its quoted text is the verdict.
                reply = soup.find(name="script").text.split("'")[1]
            except BaseException:
                reply = "未知错误"
            print(reply+"\t\t"+str(time.strftime('%m-%d-%H-%M-%S',time.localtime(time.time()))))
            if reply == "选课成功!":
                return
    def attack_professional(self):
        """POST the enrolment request for the chosen major class until success."""
        self.get_professional_course()
        self.__enter_professional_course()
        course_xy = self.target.split(":")
        x = int(course_xy[0])
        y = int(course_xy[1])
        header = LOGIN.ZUCC.InitHeader
        header["Referer"] = "http://xk.zucc.edu.cn/xs_main.aspx?xh=31901040"
        response = self.account.session.get(url=self.professional_course[x - 1].url, headers=header)
        # print(self.professional_course[x - 1].url)
        # print(response.text)
        self.account.soup = BeautifulSoup(response.text, "lxml")
        post_data = {"__EVENTTARGET": "Button1",
                     "__VIEWSTATEGENERATOR": "55DF6E88",
                     "RadioButtonList1": "1",
                     "xkkh": self.professional_course[x - 1].detail[y - 1]["code"],
                     "__VIEWSTATE": self.account.soup.find_all(name='input', id="__VIEWSTATE")[0]["value"]}
        while True:
            response = self.account.session.post(url=self.professional_course[x - 1].url, data=post_data)
            soup = BeautifulSoup(response.text, "lxml")
            try:
                reply = soup.find(name="script").text.split("'")[1]
            except BaseException:
                reply = "未知错误"
            print(reply)
            if reply == "选课成功!":
                return
if __name__ == "__main__":
account = LOGIN.Account()
account.login()
planned_course_spider = PlannedCourse(account)
# planned_course_spider.update_course()
planned_course_spider.init_menu()
# planned_course_spider.catch_english_course()
# planned_course_spider.update_course()
| 3.046875 | 3 |
visual_novel/core/models.py | dolamroth/visual_novel | 9 | 12762067 | import os
from bitfield import BitField
from constance import config
from django.db import models
from django.contrib.auth.models import User
from django.conf import settings
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db.models.signals import post_save
import django.db.models.options as options
from django.dispatch import receiver
from timezone_field import TimeZoneField
from notifications.vk import VK
# Bitmask with all seven weekday flags set (0b1111111) -- "send every day".
ALL_WEEKDAYS_BITMAP = 127
# Allow models to declare a custom Meta option `file_fields`, used by
# PublishFileModel subclasses to list their file-bearing fields.
options.DEFAULT_NAMES = options.DEFAULT_NAMES + ('file_fields',)
class PublishModel(models.Model):
    """Abstract base adding an is_published flag with publish/unpublish helpers."""
    is_published = models.BooleanField(verbose_name='публикация', default=True)

    class Meta:
        abstract = True

    def publish(self):
        """Mark the object as published and persist it."""
        self.is_published = True
        super().save()

    def unpublish(self):
        """Mark the object as unpublished and persist it."""
        self.is_published = False
        super().save()
class PublishFileQuerySet(models.query.QuerySet):
    """QuerySet that removes on-disk files before bulk-deleting rows."""

    def delete(self):
        for obj in self:
            # File-bearing fields are declared via the custom Meta option
            # `file_fields` (see options.DEFAULT_NAMES at module level).
            field_names = [entry['field_name']
                           for entry in obj._meta.__dict__.get('file_fields', [])]
            obj.delete_files(field_names)
        super().delete()
class PublishFileManager(models.Manager):
    # Manager whose querysets clean up associated files on bulk delete.
    def get_queryset(self):
        # Use the file-aware queryset so queryset.delete() also removes files.
        return PublishFileQuerySet(self.model, using=self._db)
class PublishFileModel(PublishModel):
    """Publishable model whose file fields are kept in sync with the disk.

    Subclasses list their file-bearing fields through the custom Meta option
    ``file_fields`` (a list of dicts with a 'field_name' key; see the
    options.DEFAULT_NAMES extension at module level).  Replaced files are
    removed from disk on save, and all files are removed on delete.
    """
    objects = PublishFileManager()

    class Meta:
        abstract = True

    def delete_files(self, list_of_fieldnames=()):
        """Delete from disk the files referenced by the given field names.

        Operates on the *database* copy of this instance, so it removes files
        stored before any in-memory reassignment.  (Default changed from a
        mutable ``list()`` to an immutable tuple; behavior is unchanged.)
        """
        model = self.__class__
        try:
            obj = model.objects.get(pk=self.pk)
        except model.DoesNotExist:
            return
        # Delete each referenced file that still exists on disk.
        for field in list_of_fieldnames:
            try:
                path = getattr(obj, field).path
                if os.path.isfile(path):
                    os.remove(path)
            except ValueError:
                # A FileField with no file raises ValueError on .path access.
                pass

    def get_old_file_path_if_changed(self):
        """Return names of file fields whose path differs from the DB copy."""
        model = self.__class__
        list_of_field_names = list()
        try:
            instance = model.objects.get(pk=self.pk)
        except model.DoesNotExist:
            return list()
        for field in instance._meta.__dict__.get('file_fields', []):
            fieldname = field['field_name']
            try:
                new_path = getattr(self, fieldname).path
            except ValueError:
                new_path = ''
            try:
                old_path = getattr(instance, fieldname).path
            except ValueError:
                old_path = ''
            if new_path != old_path:
                list_of_field_names.append(fieldname)
        return list_of_field_names

    def additional_action_on_save(self, list_of_changed_image_fields, created):
        """Subclass hook; runs between the first and second save."""
        pass

    def save(self, *args, **kwargs):
        created = not self.id
        # Remove files that are about to be replaced, then save normally.
        list_of_changed_image_fields = self.get_old_file_path_if_changed()
        self.delete_files(list_of_changed_image_fields)
        super(PublishModel, self).save(*args, **kwargs)
        self.additional_action_on_save(list_of_changed_image_fields, created)
        # Second save persists any changes made by the subclass hook.
        super(PublishModel, self).save()

    def delete(self, *args, **kwargs):
        """Delete the row after removing every associated file from disk."""
        list_of_image_fields = [d['field_name'] for d in self._meta.__dict__.get('file_fields', [])]
        self.delete_files(list_of_image_fields)
        # BUG FIX: the original called save(*args, **kwargs) here, forwarding
        # delete()'s arguments (e.g. keep_parents) to save() -- a TypeError
        # waiting to happen, and a pointless write right before deletion.
        super(PublishModel, self).delete(*args, **kwargs)
class Profile(models.Model):
    """Per-user settings: timezone, email confirmation and distribution schedule."""
    # One profile per Django user; deleted together with the user.
    user = models.OneToOneField(User, on_delete=models.CASCADE, verbose_name='Пользователь')
    timezone = TimeZoneField(default=settings.DEFAULT_TIME_ZONE, verbose_name='Временная зона')
    email_confirmed = models.BooleanField(default=False, verbose_name='Email подтвержден')
    # Whether the user receives the periodic distribution emails.
    send_distributions = models.BooleanField(verbose_name='Отправлять рассылку', default=False)
    # Hour of day (0-23, in the user's timezone) at which to send.
    send_hour = models.IntegerField(verbose_name='Час рассылки', default=16,
                                    validators=[MaxValueValidator(23), MinValueValidator(0)])
    # One bit per weekday; defaults to all seven days (ALL_WEEKDAYS_BITMAP).
    weekdays = BitField(verbose_name='Битовый код дней рассылки',
                        flags=(('monday', 'Понедельник'), ('tuesday', 'Вторник'), ('wednesday', 'Среда'),
                               ('thursday', 'Четверг'), ('friday', 'Пятница'), ('saturday', 'Суббота'),
                               ('sunday', 'Воскресенье')), default=ALL_WEEKDAYS_BITMAP)
    class Meta:
        db_table = 'user_profile'
        verbose_name = 'Профиль пользователя'
        verbose_name_plural = 'Профили пользователей'
    def __str__(self):
        return self.user.username
    def is_staff(self):
        """Proxy to the user's staff flag (shown as a column in the admin)."""
        return self.user.is_staff
    is_staff.short_description = 'Модератор'
    def is_superuser(self):
        """Proxy to the user's superuser flag (shown as a column in the admin)."""
        return self.user.is_superuser
    is_superuser.short_description = 'Суперпользователь'
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
    """Keep a Profile row in lockstep with every User.

    On first save a Profile is created (staff accounts start with a confirmed
    email); on every save the profile itself is re-saved.
    """
    if created:
        Profile.objects.create(user=instance,
                               email_confirmed=instance.is_staff)
    instance.profile.save()
| 1.828125 | 2 |
doc/tutorial_python/src/tut5-3/tut.py | XpressAI/frovedis | 63 | 12762068 | import os
import numpy as np
from frovedis.exrpc.server import FrovedisServer
from frovedis.matrix.dvector import FrovedisDvector
from frovedis.matrix.dense import FrovedisRowmajorMatrix
# Launch the Frovedis server as a 2-process MPI job; FROVEDIS_SERVER must
# point at the server binary.
FrovedisServer.initialize("mpirun -np 2 {}".format(os.environ['FROVEDIS_SERVER']))
# Create a distributed vector on the server and print it server-side.
dv = FrovedisDvector([1,2,3,4,5,6,7,8],dtype=np.float64)
dv.debug_print()
# Always shut the server down to release the MPI processes.
FrovedisServer.shut_down()
| 2.28125 | 2 |
class FlaskRequest(object):
    """Adapter exposing a Flask request through the interface piwikapi expects
    (a Django-like object with ``META`` and ``is_secure()``)."""

    def __init__(self, request):
        """
        :param request: Flask request object.
        :type request: flask.Request
        :rtype: None
        """
        self.request = request

    @property
    def META(self):
        """Request headers, standing in for Django's ``request.META``.

        :rtype: dict
        """
        headers = self.request.headers
        return headers

    def is_secure(self):
        """Whether the underlying connection is secured (HTTPS).

        :rtype: bool
        """
        secure = self.request.is_secure
        return secure
| 3.265625 | 3 |
manageXML/management/commands/import_giella_xml.py | mikahama/verdd | 5 | 12762070 | from manageXML.management.commands._giella_xml import GiellaXML
from django.core.management.base import BaseCommand, CommandError
import os, glob, sys
from manageXML.models import *
from django.conf import settings
from collections import defaultdict
# Module-level switch: when True, skip the (slow) akusanat affiliation lookup
# during import.  Set from the --ignore-affiliations command-line flag.
ignore_affiliations = False
def create_lexeme(ll: GiellaXML.Item, lang: Language, datafile: DataFile = None):
    """Get or create a Lexeme from a parsed Giella <l>/<t> item.

    Also attaches the item's filtered XML attributes as LexemeMetadata rows
    and, unless the module-level ``ignore_affiliations`` flag is set, links
    the lexeme's akusanat affiliation.
    """
    try:
        _l = Lexeme.objects.get(lexeme=ll.text.strip(), pos=ll.pos.strip(), homoId=ll.homoId, language=lang)
    except Exception:
        # Lookup failed (typically DoesNotExist): create the lexeme.
        # BUG FIX: this was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; narrowed to Exception.
        _l = Lexeme.objects.create(
            lexeme=ll.text.strip(), pos=ll.pos.strip(), homoId=ll.homoId, language=lang,
            contlex=ll.contlex.strip(),
            imported_from=datafile)
    _filtered_attributes = ll.filtered_attributes()
    for _k, _v in _filtered_attributes.items():
        _metadata_type = None
        if _k == 'gen':
            _metadata_type = GENDER
        elif _k == 'type':
            _metadata_type = LEXEME_TYPE
        elif _k == 'ignore':
            _metadata_type = IGNORE_TAG
        else:
            # Unknown attribute: store it as untyped "key,value" text.
            _v = "{},{}".format(_k, _v.strip())
        _lmd, created = LexemeMetadata.objects.get_or_create(lexeme=_l, type=_metadata_type, text=_v)
    if ignore_affiliations:
        return _l
    title = _l.find_akusanat_affiliation()
    # link it
    if title:
        a, created = Affiliation.objects.get_or_create(lexeme=_l, title=title, type=AKUSANAT,
                                                       link="{}{}".format(settings.WIKI_URL, title))
    return _l
def parseXML(filename, filepos):
    """Import one Giella XML dictionary file into the database.

    Creates a DataFile record, then for each entry: the lemma Lexeme and its
    stems, the per-meaning-group translations (Relations) and examples.
    ``filepos`` is currently unused by this function.
    """
    print("processing: " + filename)
    g = GiellaXML.parse_file(filename)
    gl = Language.objects.get(id=g.lang)  # src_language
    # Cache of Language objects, seeded with the source language.
    langs = {
        g.lang: gl
    }
    filename_only = os.path.splitext(os.path.basename(filename))[0]
    df = DataFile(lang_source=gl, lang_target=None, name=filename_only)
    df.save()
    for e in g.elements:
        _ll = None
        _l = None
        try:
            for lg in e.get('lg', []):
                _l = lg.get('l', [])
                if not _l:
                    continue
                # Add ignore=fst to the lexeme
                if e.ignore:
                    # NOTE(review): _l is a *list* here, so setting
                    # _l.attributes raises and is caught by the outer
                    # except; presumably _l[0].attributes was intended.
                    _l.attributes['ignore'] = e.ignore
                _ll = create_lexeme(_l[0], gl, df)  # create the lemma
                for stg in lg.get('stg', []):
                    for st in stg.get('st', []):  # stems
                        s, created = Stem.objects.get_or_create(lexeme=_ll, text=st.text.strip(), homoId=st.homoId,
                                                                contlex=st.contlex)  # add the stems
            if not _ll:  # shouldn't happen but if it did, then we shouldn't get it there
                continue
            for mg in e.get('mg', []):
                # Per-language queues of Relations; popped in order when
                # pairing example translations below.
                l_relations = defaultdict(list)
                for tg in mg.get('tg', []):  # translations
                    _lang = tg.attributes.get('xml:lang')
                    if _lang and _lang not in langs:
                        try:
                            langs[_lang] = Language.objects.get(id=_lang)
                        except:
                            # Unknown target language: skip this group.
                            continue
                    for t in tg.get('t', []):
                        _t = create_lexeme(t, langs[_lang], df)
                        r, created = Relation.objects.get_or_create(lexeme_from=_ll, lexeme_to=_t)
                        l_relations[_lang].append(r)
                for xg in mg.get('xg', []):  # examples
                    x = xg.get('x', [])
                    if not x:
                        continue
                    x = x[0].text
                    _xl, created = Example.objects.get_or_create(lexeme=_ll, text=x)
                    for xt in xg.get('xt', []):
                        _lang = xt.attributes.get('xml:lang')
                        if _lang not in l_relations:
                            continue
                        # Pair this example with the next queued relation.
                        _r = l_relations[_lang].pop(0)
                        re_src, created = RelationExample.objects.get_or_create(relation=_r, text=x, language=gl)
                        xtt = xt.text
                        re_tgt, created = RelationExample.objects.get_or_create(relation=_r, text=xtt,
                                                                                language=langs[_lang])
                        # add the link between the relations here
                        # RelationExampleRelation.objects.get_or_create(...)
                for semantic in mg.get('semantics', []):
                    pass  # semantics not imported yet
                for defNative in mg.get('defNative', []):
                    if not defNative or not defNative.text:
                        continue
                    _lmd, created = LexemeMetadata.objects.get_or_create(lexeme=_ll, type=DEF_NATIVE,
                                                                         text=defNative.text.strip())
            for source in e.get('sources', []):
                pass  # sources not imported yet
        except Exception as err:
            # Log and continue with the next entry.
            sys.stderr.write("Error @ %s: %s\n" % (str(_l[0].text) if _l and len(_l) > 0 else '', str(err)))
class Command(BaseCommand):
    '''
    Example: python manage.py import_xml -d ../saame/
    Add --ignore-affiliations when debugging and want to speed up imports.
    '''
    # BUG FIX: help text read "a all Giella XML documents".
    help = 'This command imports the content of all Giella XML documents in a directory.'

    def add_arguments(self, parser):
        """Register the command-line options."""
        parser.add_argument('-d', '--dir', type=str, help='The directory containing XML files.', )
        parser.add_argument('--ignore-affiliations', dest='ignore_affiliations', action='store_true')
        parser.set_defaults(ignore_affiliations=False)

    def handle(self, *args, **options):
        """Parse every *.xml file in the given directory, reporting failures."""
        global ignore_affiliations
        xml_dir = options['dir']  # the directory containing the XML files
        ignore_affiliations = options['ignore_affiliations']
        if not os.path.isdir(xml_dir):
            raise CommandError('Directory "%s" does not exist.' % xml_dir)
        for filename in glob.glob(os.path.join(xml_dir, '*.xml')):  # read each file and parse it
            filepos = filename.split('/')[-1].split('_')[:-1]
            try:
                parseXML(filename, filepos)
            except Exception as err:
                # One bad file should not abort the whole import run.
                self.stderr.write(self.style.ERROR('Error processing %s: %s' % (filename, str(err))))
        self.stdout.write(self.style.SUCCESS('Successfully imported the files in %s.' % (xml_dir,)))
| 1.890625 | 2 |
data/templates/account/mycyberweb.mako.py | sumukh210991/Cyberweb | 0 | 12762071 | <reponame>sumukh210991/Cyberweb
# -*- coding:utf-8 -*-
from mako import runtime, filters, cache
# NOTE: this module was auto-generated by Mako from mycyberweb.mako --
# do not edit by hand; regenerate from the template instead.
UNDEFINED = runtime.UNDEFINED
STOP_RENDERING = runtime.STOP_RENDERING
__M_dict_builtin = dict
__M_locals_builtin = locals
# Cache/compatibility metadata checked by Mako's template machinery.
_magic_number = 10
_modified_time = 1467226902.691616
_enable_loop = True
_template_filename = '/home/sumukh/Documents/thesis/Cyberweb/cyberweb/cyberweb/templates/account/mycyberweb.mako'
_template_uri = '/account/mycyberweb.mako'
_source_encoding = 'utf-8'
from webhelpers.html import escape
# Template defs this module exports to inheriting/including templates.
_exports = ['col1main']
def _mako_get_namespace(context, name):
    # Return the namespace registered for (module, name), lazily generating
    # the namespace table on first access.
    try:
        return context.namespaces[(__name__, name)]
    except KeyError:
        _mako_generate_namespaces(context)
    return context.namespaces[(__name__, name)]
def _mako_generate_namespaces(context):
    # This template declares no <%namespace> tags, so nothing to register.
    pass
def _mako_inherit(template, context):
    # Wire up template inheritance: this page extends the /1col.mako layout.
    _mako_generate_namespaces(context)
    return runtime._inherit_from(context, u'/1col.mako', _template_uri)
def render_body(context,**pageargs):
    # Generated renderer for the template's top-level body (only whitespace
    # here; the page content lives in the `col1main` def rendered by the
    # inherited layout).
    __M_caller = context.caller_stack._push_frame()
    try:
        __M_locals = __M_dict_builtin(pageargs=pageargs)
        __M_writer = context.writer()
        __M_writer(u'\n\n')
        __M_writer(u'\n')
        return ''
    finally:
        # Always pop the caller frame, even if rendering raised.
        context.caller_stack._pop_frame()
def render_col1main(context):
    """Render the 'col1main' def: the MyCyberWeb dashboard HTML — subscriber
    info, groups, remote SSH accounts, recent messages and the job table."""
    __M_caller = context.caller_stack._push_frame()
    try:
        # Names resolved through the template context; note len/enumerate/
        # reversed are looked up from the context, falling back to UNDEFINED.
        c = context.get('c', UNDEFINED)
        reversed = context.get('reversed', UNDEFINED)
        len = context.get('len', UNDEFINED)
        enumerate = context.get('enumerate', UNDEFINED)
        __M_writer = context.writer()
        __M_writer(u'\n <style type="text/css">\n .infobar {\n background:#cccccc;\n padding-left:2px;\n margin-bottom:2px;\n }\n\n table, td, th {\n border:1px solid black;\n }\n th {\n vertical-align:top;\n }\n td {\n vertical-align:top;\n }\n </style>\n\n<h3>MyCyberWeb: ')
        __M_writer(escape(c.title))
        __M_writer(u' </h3>\n<hr>\n\n<table width=90%>\n <tr>\n <!---------------- LEFT COL ------------------->\n <td>\n <table style="width:350px">\n <tr style="text-align:top;"> <td>\n <div class="infobar">My Information</div>\n <br>Last login: ')
        __M_writer(escape(c.info['Last login']))
        __M_writer(u' \n <br>from ip address: ')
        __M_writer(escape(c.info['from']))
        __M_writer(u'\n </td> </tr>\n <tr> <td>\n <div class="infobar">My Groups & Projects</div>\n No group information available at this time.\n </td> </tr>\n <tr> <td>\n <div class="infobar">My Remote Accounts </div>\n ')
        l = len(c.user_resources )
        __M_writer(u'\n [length(c.user_resources)] = [- ')
        __M_writer(escape(l))
        __M_writer(u' -] <br>\n <hr>\n')
        # List the subscriber's SSH-connected compute resources, if any.
        if l == 0 :
            __M_writer(u' You currently have no SSH connected resources.<br>\n To add compute resource accounts, see MyCyberWeb-->Authentication. \n')
        else:
            for index, item in enumerate(c.user_resources):
                __M_writer(u' ')
                __M_writer(escape(item['account_name']))
                __M_writer(u' @ ')
                __M_writer(escape(item['hostname']))
                __M_writer(u' <br>\n')
        __M_writer(u' </td> </tr>\n <tr> <td>\n <div class="infobar">Recent Messages </div>\n')
        # Message table: one column per header, blank cell when a message
        # lacks that key.
        if len(c.messages):
            __M_writer(u' <table>\n <tr>\n')
            for j in c.messageheaders:
                __M_writer(u' <th>')
                __M_writer(escape(j))
                __M_writer(u'</th>\n')
            __M_writer(u' </tr>\n')
            for i in c.messages:
                __M_writer(u' <tr>\n')
                for j in c.messageheaders:
                    if i.has_key(j):
                        __M_writer(u' <td>')
                        __M_writer(escape(i[j]))
                        __M_writer(u'</td>\n')
                    else:
                        __M_writer(u' <td></td>\n')
                __M_writer(u' </tr>\n')
            __M_writer(u' </table>\n')
        else:
            __M_writer(u' No messages. \n')
        __M_writer(u' [More >]\n </td> </tr>\n </table>\n </td>\n\n <!---------------- RIGHT COL ------------------->\n <td>\n <table>\n <tr align=left valign=top> <td>\n <div class="infobar">MyJobs</div>\n </td></tr>\n <tr align=left valign=top> <td>\n <form action="/user" method="post">\n <input type="submit" name="jobsummary" value="Update Jobs" />\n </form>\n </td></tr>\n <tr align=left valign=top><td>\n <table>\n <tr align=left valign=top>\n <th>ID</th>\n <th>Job Name</th>\n <th>Status</th>\n <th>Resource</th>\n <th>Submit Time</th>\n <th>Start Time</th>\n <th>End Time</th>\n </tr>\n')
        # Sort jobs by Name before rendering the job table rows.
        sort_on = "Name"
        jsort = [(dict_[sort_on], dict_) for dict_ in c.jobs]
        jsort.sort()
        sorted_jobs = [dict_ for (key, dict_) in jsort]
        ##% for job in c.jobs:
        __M_writer(u'\n')
        for job in reversed(sorted_jobs):
            __M_writer(u' <tr align=center valign=top>\n <td>')
            __M_writer(escape(job['ID']))
            __M_writer(u'</td>\n <td>')
            __M_writer(escape(job['Name']))
            __M_writer(u'</td>\n <td>')
            __M_writer(escape(job['StatusKey']))
            __M_writer(u' </td>\n <td>')
            __M_writer(escape(job['Resource']))
            __M_writer(u'</td>\n <td>')
            __M_writer(escape(job['Submit Time']))
            __M_writer(u'</td>\n <td>')
            __M_writer(escape(job['Start Time']))
            __M_writer(u'</td>\n <td>')
            __M_writer(escape(job['End Time']))
            __M_writer(u'</td>\n </tr>\n')
        __M_writer(u' </table>\n\n </td> </tr>\n <tr align=left valign=top>\n <td>\n <div class="infobar">My Resources & Services</div>\n\n </td> </tr>\n </table>\n\n\n <!------- end right column -->\n </td> </tr>\n <!------- end main table ----->\n</table>\n\n')
        # The same sort is recomputed for the plain debug list below the table.
        sort_on = "Name"
        jsort = [(dict_[sort_on], dict_) for dict_ in c.jobs]
        jsort.sort()
        sorted_jobs = [dict_ for (key, dict_) in jsort]
        __M_writer(u'\n<hr>\n===========================================================<br>\n')
        for j in reversed(sorted_jobs):
            __M_writer(u'JOB: ')
            __M_writer(escape(j['Name']))
            __M_writer(u' <br>\n')
        __M_writer(u'<hr>\n===========================================================<br>\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
"""
__M_BEGIN_METADATA
{"source_encoding": "utf-8", "line_map": {"128": 148, "129": 150, "135": 129, "28": 0, "33": 2, "34": 152, "40": 4, "48": 4, "49": 23, "50": 23, "51": 33, "52": 33, "53": 34, "54": 34, "55": 42, "57": 42, "58": 43, "59": 43, "60": 45, "61": 46, "62": 48, "63": 49, "64": 50, "65": 50, "66": 50, "67": 50, "68": 50, "69": 53, "70": 56, "71": 57, "72": 59, "73": 60, "74": 60, "75": 60, "76": 62, "77": 63, "78": 64, "79": 65, "80": 66, "81": 67, "82": 67, "83": 67, "84": 68, "85": 69, "86": 72, "87": 74, "88": 75, "89": 76, "90": 78, "91": 105, "99": 111, "100": 112, "101": 113, "102": 114, "103": 114, "104": 115, "105": 115, "106": 116, "107": 116, "108": 117, "109": 117, "110": 118, "111": 118, "112": 119, "113": 119, "114": 120, "115": 120, "116": 123, "117": 139, "124": 144, "125": 147, "126": 148, "127": 148}, "uri": "/account/mycyberweb.mako", "filename": "/home/sumukh/Documents/thesis/Cyberweb/cyberweb/cyberweb/templates/account/mycyberweb.mako"}
__M_END_METADATA
"""
| 2.078125 | 2 |
intro-to-programming/python-for-everyone/14-web-services/solution1.py | udpsunil/computer-science | 0 | 12762072 | <filename>intro-to-programming/python-for-everyone/14-web-services/solution1.py
import urllib.request, urllib.response, urllib.error
import xml.etree.ElementTree as ET
# Prompt for a service URL, fetch the document and report statistics over the
# <comment><count>...</count></comment> elements it contains.
service_url = input("Enter location: ")
response = urllib.request.urlopen(service_url)
payload = response.read()
print("Retrieved {} characters".format(len(payload)))

document = ET.fromstring(payload)
comment_nodes = document.findall('.//comment')
counts = [int(node.find('count').text) for node in comment_nodes]
print('Count: {}'.format(len(counts)))
print('Sum: {}'.format(sum(counts)))
cache/models.py | ZHAISHENKING/django_caching | 1 | 12762073 | <gh_stars>1-10
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models.signals import (
pre_save,
pre_delete,
m2m_changed,
)
from cache.listeners import (
invalidate_model_cache,
invalidate_m2m_cache,
)
class Invalidation(models.Model):
    """
    Invalidation is for storing the cached object and the related keys
    An db object could be cached for in many queries, for example:
    1. Problem.cached_objects.get(id=1)
    2. Problem.cached_objects.get(unique_name='a-b-plus')
    3. Problem.cached_objects.all()
    we will generate three keys and store three different results in cache.
    once the object is changed, we need to invalidate the three related keys
    in the cache.
    so we can query from the Invalidation table and find the three keys.
    """
    # Cache key under which a query result involving this object is stored.
    key = models.CharField(max_length=255, help_text='cache key', db_index=True)
    # Generic FK to the cached model instance.
    object_id = models.IntegerField()
    content_type = models.ForeignKey(ContentType, null=True)
    cached_object = GenericForeignKey('content_type', 'object_id')
    # for admin
    sql = models.TextField(null=True)
    count = models.IntegerField(null=True)
    created_at = models.DateTimeField(null=True, auto_now_add=True)
    # deprecated
    class_name = models.CharField(max_length=255, null=True)

    class Meta:
        # Lookups by (content_type, object_id) happen on every invalidation.
        index_together = ['content_type', 'object_id']

    def __str__(self):
        return '{}.{}.{}'.format(self.content_type, self.object_id, self.key)

    def __unicode__(self):
        return u'{}'.format(self.__str__())
# Invalidate cached entries whenever any model instance is about to be saved
# or deleted, and whenever a many-to-many relation changes.
pre_save.connect(invalidate_model_cache)
pre_delete.connect(invalidate_model_cache)
m2m_changed.connect(invalidate_m2m_cache)
| 2.09375 | 2 |
mountequist/clients/__init__.py | ginjeni1/mountequist | 0 | 12762074 | from mountequist.clients.httpclient import Http
| 1.140625 | 1 |
posts/forms.py | AlexxSandbox/MySite | 1 | 12762075 | from django import forms
from .models import Post, Comment
class PostForm(forms.ModelForm):
    """Model form for creating/editing a Post, including an optional image."""

    class Meta:
        model = Post
        fields = ('group','title', 'text', 'image')
        labels = {'group': 'Group', 'title': 'Title', 'text': 'Description', 'image': 'Picture'}
        help_texts = {
            'group': 'Here you choose what your post will be about.',
            'title': 'Write title to you log.',
            'text': 'Write something interesting here and don’t forget to save.',
            'image': 'Add picture to your log.'
        }
class CommentForm(forms.ModelForm):
    """Model form for adding a Comment (text only) to a post."""

    class Meta:
        model = Comment
        fields = ('text',)
        labels = {'text': 'Comment text'}
        help_texts = {'text': 'Think about it well'}
| 2.5625 | 3 |
continual_rl/policies/prototype/prototype_policy.py | AGI-Labs/continual_rl | 19 | 12762076 | from continual_rl.policies.policy_base import PolicyBase
from .prototype_policy_config import PrototypePolicyConfig # Switch to your config type
class PrototypePolicy(PolicyBase):
    """
    A simple implementation of policy as a sample of how policies can be created.
    Refer to policy_base itself for more detailed descriptions of the method signatures.
    """
    def __init__(self, config: PrototypePolicyConfig, observation_space, action_spaces): # Switch to your config type
        super().__init__()
        self._config = config
        self._observation_space = observation_space
        self._action_spaces = action_spaces

    def get_environment_runner(self, task_spec):
        # Return the runner that collects experience for the given task spec.
        raise NotImplementedError

    def compute_action(self, observation, task_id, action_space_id, last_timestep_data, eval_mode):
        # Select an action for the observation (eval_mode disables exploration).
        raise NotImplementedError

    def train(self, storage_buffer):
        # Update the policy from collected experience.
        raise NotImplementedError

    def save(self, output_path_dir, cycle_id, task_id, task_total_steps):
        # Persist policy state to output_path_dir.
        raise NotImplementedError

    def load(self, output_path_dir):
        # Restore policy state from output_path_dir.
        raise NotImplementedError
| 2.453125 | 2 |
src/test/subscriber/subscriberTest.py | huseyinbolt/cord-tester | 0 | 12762077 | <reponame>huseyinbolt/cord-tester
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2016-present Ciena Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from nose.tools import *
from nose.twistedtools import reactor, deferred
from twisted.internet import defer
import time, monotonic
import os, sys
import tempfile
import random
import threading
from Stats import Stats
from OnosCtrl import OnosCtrl
from DHCP import DHCPTest
from EapTLS import TLSAuthTest
from Channels import Channels, IgmpChannel
from subscriberDb import SubscriberDB
from threadPool import ThreadPool
from portmaps import g_subscriber_port_map
from OltConfig import *
from CordContainer import *
from CordTestServer import cord_test_radius_restart
from CordLogger import CordLogger
from CordTestUtils import log_test as log
import copy
# Default log verbosity for this module and the number of multicast channels a
# subscriber joins unless a test overrides it.
log.setLevel('INFO')
DEFAULT_NO_CHANNELS = 1
class Subscriber(Channels):
    """Emulated subscriber endpoint: a set of multicast channels plus the
    services (DHCP/IGMP/TLS) provisioned for it, with per-channel join/rx
    statistics."""
    PORT_TX_DEFAULT = 2
    PORT_RX_DEFAULT = 1
    INTF_TX_DEFAULT = 'veth2'
    INTF_RX_DEFAULT = 'veth0'
    # Indices into the per-channel stats tuple stored in join_map.
    STATS_RX = 0
    STATS_TX = 1
    STATS_JOIN = 2
    STATS_LEAVE = 3
    SUBSCRIBER_SERVICES = 'DHCP IGMP TLS'

    def __init__(self, name = 'sub', service = SUBSCRIBER_SERVICES, port_map = None,
                 num = 1, channel_start = 0,
                 tx_port = PORT_TX_DEFAULT, rx_port = PORT_RX_DEFAULT,
                 iface = INTF_RX_DEFAULT, iface_mcast = INTF_TX_DEFAULT,
                 mcast_cb = None, loginType = 'wireless'):
        self.tx_port = tx_port
        self.rx_port = rx_port
        self.port_map = port_map or g_subscriber_port_map
        try:
            self.tx_intf = self.port_map[tx_port]
            self.rx_intf = self.port_map[rx_port]
        except:
            # Fall back to the default ports when the supplied ones are unmapped.
            self.tx_intf = self.port_map[self.PORT_TX_DEFAULT]
            self.rx_intf = self.port_map[self.PORT_RX_DEFAULT]
        Channels.__init__(self, num, channel_start = channel_start,
                          iface = self.rx_intf, iface_mcast = self.tx_intf, mcast_cb = mcast_cb)
        self.name = name
        self.service = service
        # Map of service name -> provisioned flag, built from the space-
        # separated service string.
        self.service_map = {}
        services = self.service.strip().split(' ')
        for s in services:
            self.service_map[s] = True
        self.loginType = loginType
        ##start streaming channels
        self.join_map = {}
        ##accumulated join recv stats
        self.join_rx_stats = Stats()

    def has_service(self, service):
        """Return True when *service* (case-insensitive) is provisioned."""
        if self.service_map.has_key(service):
            return self.service_map[service]
        if self.service_map.has_key(service.upper()):
            return self.service_map[service.upper()]
        return False

    def channel_join_update(self, chan, join_time):
        # Fresh stats tuple per channel: (RX, TX, JOIN, LEAVE) slots.
        self.join_map[chan] = ( Stats(), Stats(), Stats(), Stats() )
        self.channel_update(chan, self.STATS_JOIN, 1, t = join_time)

    def channel_join(self, chan = 0, delay = 2):
        '''Join a channel and create a send/recv stats map'''
        if self.join_map.has_key(chan):
            del self.join_map[chan]
        self.delay = delay
        chan, join_time = self.join(chan)
        self.channel_join_update(chan, join_time)
        return chan

    def channel_join_next(self, delay = 2):
        '''Joins the next channel leaving the last channel'''
        # NOTE(review): truthiness test skips channel 0, unlike channel_jump's
        # 'is not None' — confirm which behavior is intended.
        if self.last_chan:
            if self.join_map.has_key(self.last_chan):
                del self.join_map[self.last_chan]
        self.delay = delay
        chan, join_time = self.join_next()
        self.channel_join_update(chan, join_time)
        return chan

    def channel_jump(self, delay = 2):
        '''Jumps randomly to the next channel leaving the last channel'''
        log.info("Jumps randomly to the next channel leaving the last channel")
        if self.last_chan is not None:
            if self.join_map.has_key(self.last_chan):
                del self.join_map[self.last_chan]
        self.delay = delay
        chan, join_time = self.jump()
        self.channel_join_update(chan, join_time)
        return chan

    def channel_leave(self, chan = 0):
        """Leave *chan* and drop its stats entry."""
        if self.join_map.has_key(chan):
            del self.join_map[chan]
        self.leave(chan)

    def channel_update(self, chan, stats_type, packets, t=0):
        """Accumulate *packets*/time into the given stats slot; *chan* may be
        a single channel number or an iterable of channels."""
        if type(chan) == type(0):
            chan_list = (chan,)
        else:
            chan_list = chan
        for c in chan_list:
            if self.join_map.has_key(c):
                self.join_map[c][stats_type].update(packets = packets, t = t)

    def channel_receive(self, chan, cb = None, count = 1):
        log.info('Subscriber %s receiving from group %s, channel %d' %(self.name, self.gaddr(chan), chan))
        self.recv(chan, cb = cb, count = count)

    def recv_channel_cb(self, pkt):
        ##First verify that we have received the packet for the joined instance
        log.debug('Packet received for group %s, subscriber %s' %(pkt[IP].dst, self.name))
        chan = self.caddr(pkt[IP].dst)
        assert_equal(chan in self.join_map.keys(), True)
        # Join-to-first-packet latency in microseconds.
        recv_time = monotonic.monotonic() * 1000000
        join_time = self.join_map[chan][self.STATS_JOIN].start
        delta = recv_time - join_time
        self.join_rx_stats.update(packets=1, t = delta, usecs = True)
        self.channel_update(chan, self.STATS_RX, 1, t = delta)
        log.debug('Packet received in %.3f usecs for group %s after join' %(delta, pkt[IP].dst))
class subscriber_pool:
    """Bundle a subscriber with its per-service verification callbacks so the
    whole sequence can be queued on the thread pool as one task."""
    def __init__(self, subscriber, test_cbs, test_status):
        self.subscriber = subscriber
        self.test_cbs = test_cbs
        self.test_status = test_status

    def pool_cb(self):
        """Run each service callback in order, stopping at the first failure."""
        for cb in self.test_cbs:
            if cb:
                self.test_status = cb(self.subscriber)
                # cb(self.subscriber)
                if self.test_status is not True:
                    log.info('This service is failed and other services will not run for this subscriber')
                    break
        log.info('This Subscriber is tested for multiple service elgibility ')
        # NOTE(review): this unconditionally overwrites the status with True,
        # masking any failure recorded above — confirm whether intended.
        self.test_status = True
class subscriber_exchange(CordLogger):
    """Test suite exercising subscriber services (AAA/TLS, DHCP, IGMP) against
    ONOS through emulated subscriber interfaces."""

    # ONOS applications required by the base and OLT test variants.
    apps = [ 'org.opencord.aaa', 'org.onosproject.dhcp' ]
    dhcp_app = 'org.onosproject.dhcp'
    olt_apps = [ 'org.opencord.igmp', 'org.opencord.cordmcast' ]
    # Baseline DHCP server configuration pushed to the ONOS DHCP app; tests
    # overlay their own ranges via onos_dhcp_table_load().
    dhcp_server_config = {
        "ip": "10.1.11.50",
        "mac": "ca:fe:ca:fe:ca:fe",
        "subnet": "255.255.252.0",
        "broadcast": "10.1.11.255",
        "router": "10.1.8.1",
        "domain": "8.8.8.8",
        "ttl": "63",
        "delay": "2",
        "startip": "10.1.11.51",
        "endip": "10.1.11.100"
    }
    # Class-level flag so AAA config is pushed to ONOS only once per run.
    aaa_loaded = False
    INTF_TX_DEFAULT = 'veth2'
    INTF_RX_DEFAULT = 'veth0'
    SUBSCRIBER_TIMEOUT = 20
    # Certificates for the TLS positive/negative scenarios (contents redacted).
    CLIENT_CERT = """-----BEGIN CER<KEY>
-----END CERTIFICATE-----"""
    CLIENT_CERT_INVALID = '''-----BEGIN CERTIFICATE-----
<KEY>
-----END CERTIFICATE-----'''
    def setUp(self):
        '''Load the OLT config and activate relevant apps'''
        super(subscriber_exchange, self).setUp()
        self.olt = OltConfig()
        self.port_map, _ = self.olt.olt_port_map()
        ##if no olt config, fall back to ovs port map
        if not self.port_map:
            self.port_map = g_subscriber_port_map
        else:
            log.info('Using OLT Port configuration for test setup')
            log.info('Configuring CORD OLT access device information')
            OnosCtrl.cord_olt_config(self.olt)
            self.activate_apps(self.olt_apps)
        # The base AAA/DHCP apps are required in both configurations.
        self.activate_apps(self.apps)
    def tearDown(self):
        '''Deactivate the dhcp app'''
        super(subscriber_exchange, self).tearDown()
        for app in self.apps:
            onos_ctrl = OnosCtrl(app)
            onos_ctrl.deactivate()
        # Reset RADIUS state between tests so authentication runs are isolated.
        log.info('Restarting the Radius container in the setup after running every subscriber test cases by default')
        cord_test_radius_restart()
        #os.system('ifconfig '+INTF_RX_DEFAULT+' up')
def activate_apps(self, apps):
for app in apps:
onos_ctrl = OnosCtrl(app)
status, _ = onos_ctrl.activate()
assert_equal(status, True)
time.sleep(2)
def onos_aaa_load(self):
if self.aaa_loaded:
return
OnosCtrl.aaa_load_config()
self.aaa_loaded = True
def onos_dhcp_table_load(self, config = None):
dhcp_dict = {'apps' : { 'org.onosproject.dhcp' : { 'dhcp' : copy.copy(self.dhcp_server_config) } } }
dhcp_config = dhcp_dict['apps']['org.onosproject.dhcp']['dhcp']
if config:
for k in config.keys():
if dhcp_config.has_key(k):
dhcp_config[k] = config[k]
self.onos_load_config('org.onosproject.dhcp', dhcp_dict)
    def send_recv(self, mac = None, update_seed = False, validate = True):
        """Run a DHCP discover on self.dhcp and return (client_ip, server_ip);
        when *validate* is set, assert both addresses were obtained."""
        cip, sip = self.dhcp.discover(mac = mac, update_seed = update_seed)
        if validate:
            assert_not_equal(cip, None)
            assert_not_equal(sip, None)
        log.info('Got dhcp client IP %s from server %s for mac %s' %
                 (cip, sip, self.dhcp.get_mac(cip)[0]))
        return cip,sip
    def onos_load_config(self, app, config):
        """POST *config* to ONOS for *app*, assert acceptance, and pause so
        the change takes effect."""
        status, code = OnosCtrl.config(config)
        if status is False:
            log.info('JSON config request for app %s returned status %d' %(app, code))
        assert_equal(status, True)
        time.sleep(2)
    def dhcp_sndrcv(self, dhcp, update_seed = False):
        """Run a discover on the given DHCPTest client and return the
        (client_ip, server_ip) pair; both must be non-None."""
        cip, sip = dhcp.discover(update_seed = update_seed)
        assert_not_equal(cip, None)
        assert_not_equal(sip, None)
        log.info('Got dhcp client IP %s from server %s for mac %s' %
                 (cip, sip, dhcp.get_mac(cip)[0]))
        return cip,sip
    def dhcp_request(self, subscriber, seed_ip = '10.10.10.1', update_seed = False):
        """Configure a 10.10.10.x DHCP pool in ONOS and obtain a lease on the
        subscriber's interface; returns (client_ip, server_ip)."""
        config = {'startip':'10.10.10.20', 'endip':'10.10.10.200',
                  'ip':'10.10.10.2', 'mac': "ca:fe:ca:fe:ca:fe",
                  'subnet': '255.255.255.0', 'broadcast':'10.10.10.255', 'router':'10.10.10.1'}
        self.onos_dhcp_table_load(config)
        dhcp = DHCPTest(seed_ip = seed_ip, iface = subscriber.iface)
        cip, sip = self.dhcp_sndrcv(dhcp, update_seed = update_seed)
        return cip, sip
    def recv_channel_cb(self, pkt):
        """Multicast receive callback: validate the packet belongs to a joined
        channel and record the join-to-receive latency."""
        ##First verify that we have received the packet for the joined instance
        chan = self.subscriber.caddr(pkt[IP].dst)
        assert_equal(chan in self.subscriber.join_map.keys(), True)
        # Latency from join to first received packet, in microseconds.
        recv_time = monotonic.monotonic() * 1000000
        join_time = self.subscriber.join_map[chan][self.subscriber.STATS_JOIN].start
        delta = recv_time - join_time
        self.subscriber.join_rx_stats.update(packets=1, t = delta, usecs = True)
        self.subscriber.channel_update(chan, self.subscriber.STATS_RX, 1, t = delta)
        log.debug('Packet received in %.3f usecs for group %s after join' %(delta, pkt[IP].dst))
        self.test_status = True
    def tls_verify(self, subscriber):
        """Run EAP-TLS authentication for the subscriber when TLS is
        provisioned; returns the updated test status."""
        if subscriber.has_service('TLS'):
            time.sleep(2)
            tls = TLSAuthTest()
            log.info('Running subscriber %s tls auth test' %subscriber.name)
            tls.runTest()
            self.test_status = True
        return self.test_status
    def dhcp_verify(self, subscriber):
        """Obtain a DHCP lease for the subscriber and record its source IP."""
        cip, sip = self.dhcp_request(subscriber, update_seed = True)
        log.info('Subscriber %s got client ip %s from server %s' %(subscriber.name, cip, sip))
        subscriber.src_list = [cip]
        self.test_status = True
        return self.test_status
    def dhcp_jump_verify(self, subscriber):
        """DHCP verification variant used by the channel-jump scenarios; seeds
        the client address search from 10.10.200.1."""
        cip, sip = self.dhcp_request(subscriber, seed_ip = '10.10.200.1')
        log.info('Subscriber %s got client ip %s from server %s' %(subscriber.name, cip, sip))
        subscriber.src_list = [cip]
        self.test_status = True
        return self.test_status
    def dhcp_next_verify(self, subscriber):
        """DHCP verification variant used by the channel-next scenarios; seeds
        the client address search from 10.10.150.1."""
        cip, sip = self.dhcp_request(subscriber, seed_ip = '10.10.150.1')
        log.info('Subscriber %s got client ip %s from server %s' %(subscriber.name, cip, sip))
        subscriber.src_list = [cip]
        self.test_status = True
        return self.test_status
    def igmp_verify(self, subscriber):
        """Join/leave channel 0 five times, verifying multicast traffic is
        received after each join."""
        chan = 0
        if subscriber.has_service('IGMP'):
            for i in range(5):
                log.info('Joining channel %d for subscriber %s' %(chan, subscriber.name))
                subscriber.channel_join(chan, delay = 0)
                subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count = 1)
                log.info('Leaving channel %d for subscriber %s' %(chan, subscriber.name))
                subscriber.channel_leave(chan)
                time.sleep(3)
                log.info('Interface %s Join RX stats for subscriber %s, %s' %(subscriber.iface, subscriber.name,subscriber.join_rx_stats))
            self.test_status = True
        return self.test_status
    def igmp_verify_multiChannel(self, subscriber):
        """Join/verify/leave each of the DEFAULT_NO_CHANNELS channels in turn."""
        if subscriber.has_service('IGMP'):
            for chan in range(DEFAULT_NO_CHANNELS):
                log.info('Joining channel %d for subscriber %s' %(chan, subscriber.name))
                subscriber.channel_join(chan, delay = 0)
                subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count = 1)
                log.info('Leaving channel %d for subscriber %s' %(chan, subscriber.name))
                subscriber.channel_leave(chan)
                time.sleep(3)
                log.info('Interface %s Join RX stats for subscriber %s, %s' %(subscriber.iface, subscriber.name,subscriber.join_rx_stats))
            self.test_status = True
        return self.test_status
    def igmp_jump_verify(self, subscriber):
        """Randomly jump across the subscriber's channels, verifying receive
        on each newly joined channel."""
        if subscriber.has_service('IGMP'):
            for i in xrange(subscriber.num):
                log.info('Subscriber %s jumping channel' %subscriber.name)
                chan = subscriber.channel_jump(delay=0)
                subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count = 1)
                log.info('Verified receive for channel %d, subscriber %s' %(chan, subscriber.name))
                time.sleep(3)
            log.info('Interface %s Jump RX stats for subscriber %s, %s' %(subscriber.iface, subscriber.name, subscriber.join_rx_stats))
            self.test_status = True
        return self.test_status
    def igmp_next_verify(self, subscriber):
        """Walk the subscriber's channels sequentially (join-next), verifying
        receive on each channel."""
        if subscriber.has_service('IGMP'):
            for i in xrange(subscriber.num):
                if i:
                    chan = subscriber.channel_join_next(delay=0)
                else:
                    # First iteration joins channel 0 explicitly.
                    chan = subscriber.channel_join(i, delay=0)
                log.info('Joined next channel %d for subscriber %s' %(chan, subscriber.name))
                subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count=1)
                log.info('Verified receive for channel %d, subscriber %s' %(chan, subscriber.name))
                time.sleep(3)
            log.info('Interface %s Join Next RX stats for subscriber %s, %s' %(subscriber.iface, subscriber.name, subscriber.join_rx_stats))
            self.test_status = True
        return self.test_status
def generate_port_list(self, subscribers, channels):
port_list = []
for i in xrange(subscribers):
if channels > 1:
rx_port = 2*i+1
tx_port = 2*i+2
else:
rx_port = Subscriber.PORT_RX_DEFAULT
tx_port = Subscriber.PORT_TX_DEFAULT
port_list.append((tx_port, rx_port))
return port_list
    def subscriber_load(self, create = True, num = 10, num_channels = 1, channel_start = 0, port_list = []):
        '''Load the subscriber from the database'''
        self.subscriber_db = SubscriberDB(create = create)
        if create is True:
            self.subscriber_db.generate(num)
        self.subscriber_info = self.subscriber_db.read(num)
        self.subscriber_list = []
        if not port_list:
            port_list = self.generate_port_list(num, num_channels)
        index = 0
        for info in self.subscriber_info:
            self.subscriber_list.append(Subscriber(name=info['Name'],
                                                   service=info['Service'],
                                                   port_map = self.port_map,
                                                   num=num_channels,
                                                   channel_start = channel_start,
                                                   tx_port = port_list[index][0],
                                                   rx_port = port_list[index][1]))
            # Give each subscriber a disjoint channel range when more than
            # one channel is requested.
            if num_channels > 1:
                channel_start += num_channels
            index += 1
        #load the ssm list for all subscriber channels
        igmpChannel = IgmpChannel()
        ssm_groups = map(lambda sub: sub.channels, self.subscriber_list)
        ssm_list = reduce(lambda ssm1, ssm2: ssm1+ssm2, ssm_groups)
        igmpChannel.igmp_load_ssm_config(ssm_list)
        #load the subscriber to mcast port map for cord
        cord_port_map = {}
        for sub in self.subscriber_list:
            for chan in sub.channels:
                cord_port_map[chan] = (sub.tx_port, sub.rx_port)
        igmpChannel.cord_port_table_load(cord_port_map)
def subscriber_join_verify( self, num_subscribers = 10, num_channels = 1,
channel_start = 0, cbs = None, port_list = [], negative_subscriber_auth = None):
self.test_status = False
self.num_subscribers = num_subscribers
self.sub_loop_count = num_subscribers
self.subscriber_load(create = True, num = num_subscribers,
num_channels = num_channels, channel_start = channel_start, port_list = port_list)
self.onos_aaa_load()
self.thread_pool = ThreadPool(min(100, self.num_subscribers), queue_size=1, wait_timeout=1)
if cbs and negative_subscriber_auth is None:
cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify)
cbs_negative = cbs
for subscriber in self.subscriber_list:
subscriber.start()
if negative_subscriber_auth is 'half' and self.sub_loop_count%2 is not 0:
cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify)
elif negative_subscriber_auth is 'onethird' and self.sub_loop_count%3 is not 0:
cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify)
else:
cbs = cbs_negative
self.sub_loop_count = self.sub_loop_count - 1
pool_object = subscriber_pool(subscriber, cbs, self.test_status)
self.thread_pool.addTask(pool_object.pool_cb)
self.thread_pool.cleanUpThreads()
for subscriber in self.subscriber_list:
subscriber.stop()
print "self.test_status %s\n"%(self.test_status)
return self.test_status
    def tls_invalid_cert(self, subscriber):
        """Negative TLS scenario: authenticate with an invalid certificate;
        a failed handshake marks the test status False."""
        if subscriber.has_service('TLS'):
            time.sleep(2)
            log.info('Running subscriber %s tls auth test' %subscriber.name)
            tls = TLSAuthTest(client_cert = self.CLIENT_CERT_INVALID)
            tls.runTest()
            if tls.failTest == True:
                self.test_status = False
        return self.test_status
    def tls_no_cert(self, subscriber):
        """Negative TLS scenario: authenticate with an empty certificate;
        a failed handshake marks the test status False."""
        if subscriber.has_service('TLS'):
            time.sleep(2)
            log.info('Running subscriber %s tls auth test' %subscriber.name)
            tls = TLSAuthTest(client_cert = '')
            tls.runTest()
            if tls.failTest == True:
                self.test_status = False
        return self.test_status
    def tls_self_signed_cert(self, subscriber):
        """TLS scenario with the self-signed CLIENT_CERT; success (no handshake
        failure) marks the test status True."""
        if subscriber.has_service('TLS'):
            time.sleep(2)
            log.info('Running subscriber %s tls auth test' %subscriber.name)
            tls = TLSAuthTest(client_cert = self.CLIENT_CERT)
            tls.runTest()
            if tls.failTest == False:
                self.test_status = True
        return self.test_status
    def tls_Nsubscribers_use_same_valid_cert(self, subscriber):
        """TLS scenario: several users (on veth0/veth2/veth4) authenticate
        with the same valid certificate."""
        if subscriber.has_service('TLS'):
            time.sleep(2)
            log.info('Running subscriber %s tls auth test' %subscriber.name)
            num_users = 3
            for i in xrange(num_users):
                tls = TLSAuthTest(intf = 'veth{}'.format(i*2))
                tls.runTest()
                if tls.failTest == False:
                    self.test_status = True
        return self.test_status
    def dhcp_discover_scenario(self, subscriber):
        """Wrapper: run the release/rediscover DHCP scenario when the
        subscriber has the DHCP service."""
        if subscriber.has_service('DHCP'):
            time.sleep(2)
            log.info('Running subscriber %s DHCP rediscover scenario test' %subscriber.name)
            t1 = self.subscriber_dhcp_1release()
            self.test_status = True
        return self.test_status
    def subscriber_dhcp_1release(self, iface = INTF_RX_DEFAULT):
        """Obtain a lease, release it, and verify a rediscover hands the same
        address back."""
        config = {'startip':'10.10.100.20', 'endip':'10.10.100.21',
                  'ip':'10.10.100.2', 'mac': "ca:fe:ca:fe:8a:fe",
                  'subnet': '255.255.255.0', 'broadcast':'10.10.100.255', 'router':'10.10.100.1'}
        self.onos_dhcp_table_load(config)
        self.dhcp = DHCPTest(seed_ip = '10.10.100.10', iface = iface)
        cip, sip = self.send_recv()
        log.info('Releasing ip %s to server %s' %(cip, sip))
        assert_equal(self.dhcp.release(cip), True)
        log.info('Triggering DHCP discover again after release')
        cip2, sip2 = self.send_recv(update_seed = True)
        log.info('Verifying released IP was given back on rediscover')
        assert_equal(cip, cip2)
        log.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
        assert_equal(self.dhcp.release(cip2), True)
    def dhcp_client_reboot_scenario(self, subscriber):
        """Wrapper: run the client-reboot DHCP scenario when the subscriber
        has the DHCP service."""
        if subscriber.has_service('DHCP'):
            time.sleep(2)
            log.info('Running subscriber %s DHCP rediscover scenario test' %subscriber.name)
            tl = self.subscriber_dhcp_client_request_after_reboot()
            self.test_status = True
        return self.test_status
    def subscriber_dhcp_client_request_after_reboot(self, iface = INTF_RX_DEFAULT):
        #''' Client sends DHCP Request after reboot.'''
        config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
                  'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
                  'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
        self.onos_dhcp_table_load(config)
        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
        cip, sip, mac, lval = self.dhcp.only_discover()
        log.info('Got dhcp client IP %s from server %s for mac %s .' %
                 (cip, sip, mac) )
        log.info("Verifying Client 's IP and mac in DHCP Offer packet. Those should not be none, which is expected.")
        if (cip == None and mac != None):
            log.info("Verified that Client 's IP and mac in DHCP Offer packet are none, which is not expected behavior.")
            assert_not_equal(cip, None)
        else:
            new_cip, new_sip = self.dhcp.only_request(cip, mac)
            if new_cip == None:
                log.info("Got DHCP server NAK.")
            # Simulate a client reboot by bouncing the interface.
            os.system('ifconfig '+iface+' down')
            log.info('Client goes down.')
            log.info('Delay for 5 seconds.')
            time.sleep(5)
            os.system('ifconfig '+iface+' up')
            log.info('Client is up now.')
            # Re-request the previously offered address after "reboot".
            new_cip, new_sip = self.dhcp.only_request(cip, mac)
            if new_cip == None:
                log.info("Got DHCP server NAK.")
                assert_not_equal(new_cip, None)
            elif new_cip != None:
                log.info("Got DHCP ACK.")
    def dhcp_client_renew_scenario(self, subscriber):
        """Wrapper: run the lease-renewal DHCP scenario when the subscriber
        has the DHCP service."""
        if subscriber.has_service('DHCP'):
            time.sleep(2)
            log.info('Running subscriber %s DHCP rediscover scenario test' %subscriber.name)
            tl = self.subscriber_dhcp_client_renew_time()
            self.test_status = True
        return self.test_status
    def subscriber_dhcp_client_renew_time(self, iface = INTF_RX_DEFAULT):
        """Obtain a lease, wait until the renewal (T1) time, then renew via a
        unicast DHCP request."""
        config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
                  'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
                  'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
        self.onos_dhcp_table_load(config)
        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
        cip, sip, mac , lval = self.dhcp.only_discover()
        log.info('Got dhcp client IP %s from server %s for mac %s .' %
                 (cip, sip, mac) )
        log.info("Verifying Client 's IP and mac in DHCP Offer packet. Those should not be none, which is expected.")
        if (cip == None and mac != None):
            log.info("Verified that Client 's IP and mac in DHCP Offer packet are none, which is not expected behavior.")
            assert_not_equal(cip, None)
        elif cip and sip and mac:
            log.info("Triggering DHCP Request.")
            new_cip, new_sip, lval = self.dhcp.only_request(cip, mac, renew_time = True)
            if new_cip and new_sip and lval:
                log.info("Client 's Renewal time is :%s",lval)
                log.info("Generating delay till renewal time.")
                # Sleep through the renewal interval reported by the server.
                time.sleep(lval)
                log.info("Client Sending Unicast DHCP request.")
                latest_cip, latest_sip = self.dhcp.only_request(new_cip, mac, unicast = True)
                if latest_cip and latest_sip:
                    log.info("Got DHCP Ack. Lease Renewed for ip %s and mac %s from server %s." %
                             (latest_cip, mac, latest_sip) )
                elif latest_cip == None:
                    log.info("Got DHCP NAK. Lease not renewed.")
            elif new_cip == None or new_sip == None or lval == None:
                log.info("Got DHCP NAK.")
    def dhcp_server_reboot_scenario(self, subscriber):
        """Wrapper: run the server-reboot DHCP scenario when the subscriber
        has the DHCP service."""
        if subscriber.has_service('DHCP'):
            time.sleep(2)
            log.info('Running subscriber %s DHCP rediscover scenario test' %subscriber.name)
            tl = self.subscriber_dhcp_server_after_reboot()
            self.test_status = True
        return self.test_status
def subscriber_dhcp_server_after_reboot(self, iface = INTF_RX_DEFAULT):
''' DHCP server goes down.'''
config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
self.onos_dhcp_table_load(config)
self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
cip, sip, mac, lval = self.dhcp.only_discover()
log.info('Got dhcp client IP %s from server %s for mac %s .' %
(cip, sip, mac) )
log.info("Verifying Client 's IP and mac in DHCP Offer packet. Those should not be none, which is expected.")
if (cip == None and mac != None):
log.info("Verified that Client 's IP and mac in DHCP Offer packet are none, which is not expected behavior.")
assert_not_equal(cip, None)
else:
new_cip, new_sip = self.dhcp.only_request(cip, mac)
if new_cip == None:
log.info("Got DHCP server NAK.")
assert_not_equal(new_cip, None)
log.info('Getting DHCP server Down.')
onos_ctrl = OnosCtrl(self.dhcp_app)
onos_ctrl.deactivate()
for i in range(0,4):
log.info("Sending DHCP Request.")
log.info('')
new_cip, new_sip = self.dhcp.only_request(cip, mac)
if new_cip == None and new_sip == None:
log.info('')
log.info("DHCP Request timed out.")
elif new_cip and new_sip:
log.info("Got Reply from DHCP server.")
assert_equal(new_cip,None) #Neagtive Test Case
log.info('Getting DHCP server Up.')
# self.activate_apps(self.dhcp_app)
onos_ctrl = OnosCtrl(self.dhcp_app)
status, _ = onos_ctrl.activate()
assert_equal(status, True)
time.sleep(3)
for i in range(0,4):
log.info("Sending DHCP Request after DHCP server is up.")
log.info('')
new_cip, new_sip = self.dhcp.only_request(cip, mac)
if new_cip == None and new_sip == None:
log.info('')
log.info("DHCP Request timed out.")
elif new_cip and new_sip:
log.info("Got Reply from DHCP server.")
assert_equal(new_cip,None) #Neagtive Test Case
def dhcp_client_rebind_scenario(self, subscriber):
    # subscriber_join_verify callback: run the lease-rebind (T2) check for a
    # subscriber carrying the 'DHCP' service.
    if subscriber.has_service('DHCP'):
        time.sleep(2)
        log.info('Running subscriber %s DHCP rediscover scenario test' %subscriber.name)
        tl = self.subscriber_dhcp_client_rebind_time()
        self.test_status = True
        return self.test_status

def subscriber_dhcp_client_rebind_time(self, iface = INTF_RX_DEFAULT):
    # Discover an address, request it asking for the rebind (T2) time, sleep
    # past T2, then broadcast renewing requests until one is ACKed.
    config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
              'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
              'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
    self.onos_dhcp_table_load(config)
    self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
    cip, sip, mac, lval = self.dhcp.only_discover()
    log.info('Got dhcp client IP %s from server %s for mac %s .' %
             (cip, sip, mac) )
    log.info("Verifying Client 's IP and mac in DHCP Offer packet. Those should not be none, which is expected.")
    if (cip == None and mac != None):
        log.info("Verified that Client 's IP and mac in DHCP Offer packet are none, which is not expected behavior.")
        assert_not_equal(cip, None)
    elif cip and sip and mac:
        log.info("Triggering DHCP Request.")
        # rebind_time = True makes only_request also return the T2 interval
        new_cip, new_sip, lval = self.dhcp.only_request(cip, mac, rebind_time = True)
        if new_cip and new_sip and lval:
            log.info("Client 's Rebind time is :%s",lval)
            log.info("Generating delay till rebind time.")
            time.sleep(lval)
            log.info("Client Sending broadcast DHCP requests for renewing lease or for getting new ip.")
            self.dhcp.after_T2 = True  # past T2: client must broadcast, not unicast
            for i in range(0,4):
                latest_cip, latest_sip = self.dhcp.only_request(new_cip, mac)
                if latest_cip and latest_sip:
                    log.info("Got DHCP Ack. Lease Renewed for ip %s and mac %s from server %s." %
                             (latest_cip, mac, latest_sip) )
                    break
                elif latest_cip == None:
                    log.info("Got DHCP NAK. Lease not renewed.")
            # fail if no ACK arrived within the retries above
            assert_not_equal(latest_cip, None)
        elif new_cip == None or new_sip == None or lval == None:
            log.info("Got DHCP NAK.Lease not Renewed.")
def dhcp_starvation_scenario(self, subscriber):
    # subscriber_join_verify callback: run the address-pool starvation check
    # for a subscriber carrying the 'DHCP' service.
    if subscriber.has_service('DHCP'):
        time.sleep(2)
        log.info('Running subscriber %s DHCP rediscover scenario test' %subscriber.name)
        tl = self.subscriber_dhcp_starvation()
        self.test_status = True
        return self.test_status

def subscriber_dhcp_starvation(self, iface = INTF_RX_DEFAULT):
    '''DHCP starve: exhaust the pool with 50 random MACs, then verify a
    further discover gets no address.'''
    config = {'startip':'172.16.31.10', 'endip':'192.168.127.12',
              'ip':'192.168.3.11', 'mac': "ca:fe:c3:fe:ca:fe",
              'subnet': '255.255.255.0', 'broadcast':'172.16.17.32', 'router':'172.16.31.10'}
    self.onos_dhcp_table_load(config)
    self.dhcp = DHCPTest(seed_ip = '172.16.31.10', iface = iface)
    log.info('Verifying 1 ')
    # grab leases with 50 distinct random MACs to drain the pool
    for x in xrange(50):
        mac = RandMAC()._fix()
        self.send_recv(mac = mac)
    log.info('Verifying 2 ')
    # the pool should now be empty: expect no offer at all
    cip, sip = self.send_recv(update_seed = True, validate = False)
    assert_equal(cip, None)
    assert_equal(sip, None)
def dhcp_same_client_multi_discovers_scenario(self, subscriber):
    # subscriber_join_verify callback: repeated-DISCOVER check for a
    # subscriber carrying the 'DHCP' service.
    if subscriber.has_service('DHCP'):
        time.sleep(2)
        log.info('Running subscriber %s DHCP rediscover scenario test' %subscriber.name)
        tl = self.subscriber_dhcp_same_client_multiple_discover()
        self.test_status = True
        return self.test_status

def subscriber_dhcp_same_client_multiple_discover(self, iface = INTF_RX_DEFAULT):
    ''' DHCP Client sending multiple discover: the server should offer the
    same address both times for the same client. '''
    config = {'startip':'10.10.10.20', 'endip':'10.10.10.69',
              'ip':'10.10.10.2', 'mac': "ca:fe:ca:fe:ca:fe",
              'subnet': '255.255.255.0', 'broadcast':'10.10.10.255', 'router':'10.10.10.1'}
    self.onos_dhcp_table_load(config)
    self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
    cip, sip, mac, lval = self.dhcp.only_discover()
    log.info('Got dhcp client IP %s from server %s for mac %s . Not going to send DHCPREQUEST.' %
             (cip, sip, mac) )
    log.info('Triggering DHCP discover again.')
    # second discover from the same client, no REQUEST in between
    new_cip, new_sip, new_mac , lval = self.dhcp.only_discover()
    if cip == new_cip:
        log.info('Got same ip for 2nd DHCP discover for client IP %s from server %s for mac %s. Triggering DHCP Request. '
                 % (new_cip, new_sip, new_mac) )
    elif cip != new_cip:
        log.info('Ip after 1st discover %s' %cip)
        log.info('Map after 2nd discover %s' %new_cip)
        assert_equal(cip, new_cip)  # fail: offers differed
def dhcp_same_client_multi_request_scenario(self, subscriber):
    # subscriber_join_verify callback: repeated-DHCPREQUEST check for a
    # subscriber carrying the 'DHCP' service.
    if subscriber.has_service('DHCP'):
        time.sleep(2)
        log.info('Running subscriber %s DHCP rediscover scenario test' %subscriber.name)
        tl = self.subscriber_dhcp_same_client_multiple_request()
        self.test_status = True
        return self.test_status

def subscriber_dhcp_same_client_multiple_request(self, iface = INTF_RX_DEFAULT):
    ''' DHCP Client sending multiple repeat DHCP requests: a repeated
    REQUEST from the same client must be ACKed with the same lease. '''
    config = {'startip':'10.10.10.20', 'endip':'10.10.10.69',
              'ip':'10.10.10.2', 'mac': "ca:fe:ca:fe:ca:fe",
              'subnet': '255.255.255.0', 'broadcast':'10.10.10.255', 'router':'10.10.10.1'}
    self.onos_dhcp_table_load(config)
    self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
    log.info('Sending DHCP discover and DHCP request.')
    cip, sip = self.send_recv()
    mac = self.dhcp.get_mac(cip)[0]
    log.info("Sending DHCP request again.")
    new_cip, new_sip = self.dhcp.only_request(cip, mac)
    if (new_cip, new_sip) == (cip, sip):
        log.info('Got same ip for 2nd DHCP Request for client IP %s from server %s for mac %s.'
                 % (new_cip, new_sip, mac) )
    elif new_cip is None or new_sip is None:
        # No ACK at all: fail via the assertions below.  Bug fix: the
        # original guard was ``elif (new_cip,new_sip):`` -- a non-empty tuple,
        # hence always true -- which made the final branch unreachable and
        # labelled a *different* lease as 'No DHCP ACK'.
        log.info('No DHCP ACK')
        assert_equal(new_cip, None)
        assert_equal(new_sip, None)
    else:
        # Server ACKed but with a different address/server-id than the first
        # exchange; fail explicitly.  (The original had an unreachable
        # Python-2 ``print`` statement here.)
        log.info("Something went wrong.")
        assert_equal((new_cip, new_sip), (cip, sip))
def dhcp_client_desired_ip_scenario(self, subscriber):
    # subscriber_join_verify callback: requested-IP check for a subscriber
    # carrying the 'DHCP' service.
    if subscriber.has_service('DHCP'):
        time.sleep(2)
        log.info('Running subscriber %s DHCP rediscover scenario test' %subscriber.name)
        tl = self.subscriber_dhcp_client_desired_address()
        self.test_status = True
        return self.test_status

def subscriber_dhcp_client_desired_address(self, iface = INTF_RX_DEFAULT):
    '''DHCP Client asking for desired IP address: the seed ip (in-pool)
    is sent as the requested address and must be the one offered.'''
    config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
              'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
              'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
    self.onos_dhcp_table_load(config)
    self.dhcp = DHCPTest(seed_ip = '20.20.20.31', iface = iface)
    # desired = True sends seed_ip as the Requested IP Address option
    cip, sip, mac , lval = self.dhcp.only_discover(desired = True)
    log.info('Got dhcp client IP %s from server %s for mac %s .' %
             (cip, sip, mac) )
    if cip == self.dhcp.seed_ip:
        log.info('Got dhcp client IP %s from server %s for mac %s as desired .' %
                 (cip, sip, mac) )
    elif cip != self.dhcp.seed_ip:
        log.info('Got dhcp client IP %s from server %s for mac %s .' %
                 (cip, sip, mac) )
        log.info('The desired ip was: %s .' % self.dhcp.seed_ip)
        assert_equal(cip, self.dhcp.seed_ip)  # fail: desired ip not honored
def dhcp_client_request_pkt_with_non_offered_ip_scenario(self, subscriber):
    # subscriber_join_verify callback: server-NAK check (requesting an IP
    # that was never offered) for a 'DHCP' subscriber.
    if subscriber.has_service('DHCP'):
        time.sleep(2)
        log.info('Running subscriber %s DHCP rediscover scenario test' %subscriber.name)
        tl = self.subscriber_dhcp_server_nak_packet()
        self.test_status = True
        return self.test_status

def subscriber_dhcp_server_nak_packet(self, iface = INTF_RX_DEFAULT):
    # After a normal discover, REQUEST a different address ('20.20.20.31')
    # than the one offered and expect the server to NAK (new_cip is None).
    config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
              'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
              'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
    self.onos_dhcp_table_load(config)
    self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
    cip, sip, mac, lval = self.dhcp.only_discover()
    log.info('Got dhcp client IP %s from server %s for mac %s .' %
             (cip, sip, mac) )
    log.info("Verifying Client 's IP and mac in DHCP Offer packet. Those should not be none, which is expected.")
    if (cip == None and mac != None):
        log.info("Verified that Client 's IP and mac in DHCP Offer packet are none, which is not expected behavior.")
        assert_not_equal(cip, None)
    else:
        new_cip, new_sip = self.dhcp.only_request('20.20.20.31', mac)
        if new_cip == None:
            log.info("Got DHCP server NAK.")
        assert_equal(new_cip, None)  #Negative Test Case
def dhcp_client_requested_out_pool_ip_scenario(self, subscriber):
    # subscriber_join_verify callback: out-of-pool requested-IP check for a
    # subscriber carrying the 'DHCP' service.
    if subscriber.has_service('DHCP'):
        time.sleep(2)
        log.info('Running subscriber %s DHCP rediscover scenario test' %subscriber.name)
        tl = self.subscriber_dhcp_client_desired_address_out_of_pool()
        self.test_status = True
        return self.test_status

def subscriber_dhcp_client_desired_address_out_of_pool(self, iface = INTF_RX_DEFAULT):
    '''DHCP Client asking for desired IP address from out of pool: the
    server must either NAK or hand out a different (in-pool) address.'''
    config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
              'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
              'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
    self.onos_dhcp_table_load(config)
    self.dhcp = DHCPTest(seed_ip = '20.20.20.35', iface = iface)
    # desired = True sends seed_ip as the Requested IP Address option
    cip, sip, mac, lval = self.dhcp.only_discover(desired = True)
    log.info('Got dhcp client IP %s from server %s for mac %s .' %
             (cip, sip, mac) )
    # Bug fix: test ``cip == None`` first.  The original checked it last,
    # after ``cip != self.dhcp.seed_ip`` -- which is true for None -- so the
    # NAK branch was unreachable and a NAK was logged as if an ordinary
    # different-address offer had arrived.
    if cip == None:
        log.info('Got DHCP NAK')
    elif cip == self.dhcp.seed_ip:
        log.info('Got dhcp client IP %s from server %s for mac %s as desired .' %
                 (cip, sip, mac) )
        assert_equal(cip, self.dhcp.seed_ip)  #Negative Test Case
    else:
        log.info('Got dhcp client IP %s from server %s for mac %s .' %
                 (cip, sip, mac) )
        log.info('The desired ip was: %s .' % self.dhcp.seed_ip)
        assert_not_equal(cip, self.dhcp.seed_ip)
def dhcp_client_specific_lease_scenario(self, subscriber):
    # subscriber_join_verify callback: client-specified lease-time check for
    # a subscriber carrying the 'DHCP' service.
    if subscriber.has_service('DHCP'):
        time.sleep(2)
        log.info('Running subscriber %s DHCP rediscover scenario test' %subscriber.name)
        tl = self.subscriber_dhcp_specific_lease_packet()
        self.test_status = True
        return self.test_status

def subscriber_dhcp_specific_lease_packet(self, iface = INTF_RX_DEFAULT):
    ''' Client sends DHCP Discover packet for particular lease time (700s)
    and checks the lease value returned in the offer.'''
    config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
              'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
              'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
    self.onos_dhcp_table_load(config)
    self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
    log.info('Sending DHCP discover with lease time of 700')
    cip, sip, mac, lval = self.dhcp.only_discover(lease_time = True)
    log.info("Verifying Client 's IP and mac in DHCP Offer packet.")
    if (cip == None and mac != None):
        log.info("Verified that Client 's IP and mac in DHCP Offer packet are none, which is not expected behavior.")
        assert_not_equal(cip, None)
    elif lval != 700:
        log.info('Getting dhcp client IP %s from server %s for mac %s with lease time %s. That is not 700.' %
                 (cip, sip, mac, lval) )
        # NOTE(review): assert_not_equal(lval, 700) always passes inside this
        # branch (lval != 700 is already known); assert_equal was probably
        # intended -- confirm before changing, as it would flip pass/fail.
        assert_not_equal(lval, 700)
def test_subscriber_join_recv_channel(self):
    ###"""Test subscriber join and receive"""
    # 1 subscriber, 1 channel: auth (tls), address (dhcp), then igmp receive.
    num_subscribers = 1
    num_channels = 1
    test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
                                              num_channels = num_channels,
                                              cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify),
                                              port_list = self.generate_port_list(num_subscribers, num_channels))
    assert_equal(test_status, True)

def test_subscriber_join_jump_channel(self):
    ###"""Test subscriber join and receive for channel surfing"""
    # same as above but with the channel-jump callback variants
    num_subscribers = 1
    num_channels = 1
    test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
                                              num_channels = num_channels,
                                              cbs = (self.tls_verify, self.dhcp_jump_verify, self.igmp_jump_verify),
                                              port_list = self.generate_port_list(num_subscribers, num_channels))
    assert_equal(test_status, True)

def test_subscriber_join_next_channel(self):
    ###"""Test subscriber join next for channels"""
    # same as above but with the channel-next callback variants
    num_subscribers = 1
    num_channels = 1
    test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
                                              num_channels = num_channels,
                                              cbs = (self.tls_verify, self.dhcp_next_verify, self.igmp_next_verify),
                                              port_list = self.generate_port_list(num_subscribers, num_channels))
    assert_equal(test_status, True)
#@deferred(SUBSCRIBER_TIMEOUT)
def test_subscriber_authentication_with_invalid_certificate_and_channel_surfing(self):
    ### """Test subscriber to auth with invalidCertification and join channel"""
    # Twisted-style test: the work runs in a callLater callback and the
    # deferred is returned; auth with an invalid cert must make the whole
    # join fail (test_status False).
    num_subscribers = 1
    num_channels = 1
    df = defer.Deferred()
    def sub_auth_invalid_cert(df):
        test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
                                                  num_channels = num_channels,
                                                  cbs = (self.tls_invalid_cert, self.dhcp_verify, self.igmp_verify),
                                                  port_list = self.generate_port_list(num_subscribers, num_channels), negative_subscriber_auth = 'all')
        assert_equal(test_status, False)
        df.callback(0)
    reactor.callLater(0, sub_auth_invalid_cert, df)
    return df

#@deferred(SUBSCRIBER_TIMEOUT)
def test_subscriber_authentication_with_no_certificate_and_channel_surfing(self):
    ### """Test subscriber to auth with No Certification and join channel"""
    # As above, but the client presents no certificate at all.
    num_subscribers = 1
    num_channels = 1
    df = defer.Deferred()
    def sub_auth_no_cert(df):
        test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
                                                  num_channels = num_channels,
                                                  cbs = (self.tls_no_cert, self.dhcp_verify, self.igmp_verify),
                                                  port_list = self.generate_port_list(num_subscribers, num_channels),
                                                  negative_subscriber_auth = 'all')
        assert_equal(test_status, False)
        df.callback(0)
    reactor.callLater(0, sub_auth_no_cert, df)
    return df
def test_subscriber_authentication_with_self_signed_certificate_and_channel_surfing(self):
    ### """Test subscriber to auth with Self Signed Certification and join channel"""
    # self-signed cert is accepted: overall status must be True
    num_subscribers = 1
    num_channels = 1
    test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
                                              num_channels = num_channels,
                                              cbs = (self.tls_self_signed_cert, self.dhcp_verify, self.igmp_verify),
                                              port_list = self.generate_port_list(num_subscribers, num_channels),
                                              negative_subscriber_auth = 'all')
    assert_equal(test_status, True)

def test_subscriber_authentication_with_dhcp_discover_and_channel_surfing(self):
    ### """Test subscriber auth success, DHCP re-discover with DHCP server and join channel"""
    num_subscribers = 1
    num_channels = 1
    test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
                                              num_channels = num_channels,
                                              cbs = (self.tls_verify, self.dhcp_discover_scenario, self.igmp_verify),
                                              port_list = self.generate_port_list(num_subscribers, num_channels), negative_subscriber_auth = 'all')
    assert_equal(test_status, True)

def test_subscriber_authentication_with_dhcp_client_reboot_scenario_and_channel_surfing(self):
    ### """Test subscriber auth success, DHCP client got re-booted and join channel"""
    num_subscribers = 1
    num_channels = 1
    test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
                                              num_channels = num_channels,
                                              cbs = (self.tls_verify, self.dhcp_client_reboot_scenario, self.igmp_verify),
                                              port_list = self.generate_port_list(num_subscribers, num_channels), negative_subscriber_auth = 'all')
    assert_equal(test_status, True)

def test_subscriber_authentication_with_dhcp_server_reboot_scenario_and_channel_surfing(self):
    ### """Test subscriber auth , DHCP server re-boot during DHCP process and join channel"""
    num_subscribers = 1
    num_channels = 1
    test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
                                              num_channels = num_channels,
                                              cbs = (self.tls_verify, self.dhcp_server_reboot_scenario, self.igmp_verify),
                                              port_list = self.generate_port_list(num_subscribers, num_channels), negative_subscriber_auth = 'all')
    assert_equal(test_status, True)

def test_subscriber_authentication_with_dhcp_client_rebind_and_channel_surfing(self):
    ### """Test subscriber auth , DHCP client rebind IP and join channel"""
    num_subscribers = 1
    num_channels = 1
    test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
                                              num_channels = num_channels,
                                              cbs = (self.tls_verify, self.dhcp_client_rebind_scenario, self.igmp_verify),
                                              port_list = self.generate_port_list(num_subscribers, num_channels), negative_subscriber_auth = 'all')
    assert_equal(test_status, True)

def test_subscriber_authentication_with_dhcp_starvation_scenario_and_channel_surfing(self):
    ### """Test subscriber auth , DHCP starvation and join channel"""
    num_subscribers = 1
    num_channels = 1
    test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
                                              num_channels = num_channels,
                                              cbs = (self.tls_verify, self.dhcp_starvation_scenario, self.igmp_verify),
                                              port_list = self.generate_port_list(num_subscribers, num_channels), negative_subscriber_auth = 'all')
    assert_equal(test_status, True)
def test_subscriber_authentication_with_multiple_dhcp_discover_for_same_subscriber_and_channel_surfing(self):
    ### """Test subscriber auth , sending same DHCP client discover multiple times and join channel"""
    num_subscribers = 1
    num_channels = 1
    test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
                                              num_channels = num_channels,
                                              cbs = (self.tls_verify, self.dhcp_same_client_multi_discovers_scenario, self.igmp_verify),
                                              port_list = self.generate_port_list(num_subscribers, num_channels), negative_subscriber_auth = 'all')
    assert_equal(test_status, True)

def test_subscriber_authentication_with_multiple_dhcp_request_for_same_subscriber_and_channel_surfing(self):
    ### """Test subscriber auth , same DHCP client multiple requerts times and join channel"""
    num_subscribers = 1
    num_channels = 1
    test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
                                              num_channels = num_channels,
                                              cbs = (self.tls_verify, self.dhcp_same_client_multi_request_scenario, self.igmp_verify),
                                              port_list = self.generate_port_list(num_subscribers, num_channels), negative_subscriber_auth = 'all')
    assert_equal(test_status, True)

def test_subscriber_authentication_with_dhcp_client_requested_ip_and_channel_surfing(self):
    ### """Test subscriber auth with DHCP client requesting ip and join channel"""
    num_subscribers = 1
    num_channels = 1
    test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
                                              num_channels = num_channels,
                                              cbs = (self.tls_verify, self.dhcp_client_desired_ip_scenario, self.igmp_verify),
                                              port_list = self.generate_port_list(num_subscribers, num_channels), negative_subscriber_auth = 'all')
    assert_equal(test_status, True)

def test_subscriber_authentication_with_dhcp_non_offered_ip_and_channel_surfing(self):
    ### """Test subscriber auth with DHCP client request for non-offered ip and join channel"""
    num_subscribers = 1
    num_channels = 1
    test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
                                              num_channels = num_channels,
                                              cbs = (self.tls_verify, self.dhcp_client_request_pkt_with_non_offered_ip_scenario, self.igmp_verify),
                                              port_list = self.generate_port_list(num_subscribers, num_channels), negative_subscriber_auth = 'all')
    assert_equal(test_status, True)

def test_subscriber_authentication_with_dhcp_request_out_of_pool_ip_by_client_and_channel_surfing(self):
    ### """Test subscriber auth with DHCP client requesting out of pool ip and join channel"""
    num_subscribers = 1
    num_channels = 1
    test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
                                              num_channels = num_channels,
                                              cbs = (self.tls_verify, self.dhcp_client_requested_out_pool_ip_scenario, self.igmp_verify),
                                              port_list = self.generate_port_list(num_subscribers, num_channels), negative_subscriber_auth = 'all')
    assert_equal(test_status, True)

def test_subscriber_authentication_with_dhcp_specified_lease_time_functionality_and_channel_surfing(self):
    ### """Test subscriber auth with DHCP client specifying lease time and join channel"""
    num_subscribers = 1
    num_channels = 1
    test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
                                              num_channels = num_channels,
                                              cbs = (self.tls_verify, self.dhcp_client_specific_lease_scenario, self.igmp_verify),
                                              port_list = self.generate_port_list(num_subscribers, num_channels), negative_subscriber_auth = 'all')
    assert_equal(test_status, True)
# Scale variants of test_subscriber_join_recv_channel / _jump_channel:
# one subscriber against increasing channel counts.
def test_subscriber_join_recv_100channels(self):
    num_subscribers = 1
    num_channels = 100
    test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
                                              num_channels = num_channels,
                                              cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify),
                                              port_list = self.generate_port_list(num_subscribers, num_channels),
                                              negative_subscriber_auth = 'all')
    assert_equal(test_status, True)

def test_subscriber_join_recv_400channels(self):
    num_subscribers = 1
    num_channels = 400
    test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
                                              num_channels = num_channels,
                                              cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify),
                                              port_list = self.generate_port_list(num_subscribers, num_channels),
                                              negative_subscriber_auth = 'all')
    assert_equal(test_status, True)

def test_subscriber_join_recv_800channels(self):
    num_subscribers = 1
    num_channels = 800
    test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
                                              num_channels = num_channels,
                                              cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify),
                                              port_list = self.generate_port_list(num_subscribers, num_channels),
                                              negative_subscriber_auth = 'all')
    assert_equal(test_status, True)

def test_subscriber_join_recv_1200channels(self):
    num_subscribers = 1
    num_channels = 1200
    test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
                                              num_channels = num_channels,
                                              cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify),
                                              port_list = self.generate_port_list(num_subscribers, num_channels),
                                              negative_subscriber_auth = 'all')
    assert_equal(test_status, True)

def test_subscriber_join_recv_1500channels(self):
    num_subscribers = 1
    num_channels = 1500
    test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
                                              num_channels = num_channels,
                                              cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify),
                                              port_list = self.generate_port_list(num_subscribers, num_channels),
                                              negative_subscriber_auth = 'all')
    assert_equal(test_status, True)

def test_subscriber_join_jump_100channels(self):
    num_subscribers = 1
    num_channels = 100
    test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
                                              num_channels = num_channels,
                                              cbs = (self.tls_verify, self.dhcp_jump_verify, self.igmp_jump_verify),
                                              port_list = self.generate_port_list(num_subscribers, num_channels),
                                              negative_subscriber_auth = 'all')
    assert_equal(test_status, True)

def test_subscriber_join_jump_400channels(self):
    num_subscribers = 1
    num_channels = 400
    test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
                                              num_channels = num_channels,
                                              cbs = (self.tls_verify, self.dhcp_jump_verify, self.igmp_jump_verify),
                                              port_list = self.generate_port_list(num_subscribers, num_channels),
                                              negative_subscriber_auth = 'all')
    assert_equal(test_status, True)

def test_subscriber_join_jump_800channels(self):
    num_subscribers = 1
    num_channels = 800
    test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
                                              num_channels = num_channels,
                                              cbs = (self.tls_verify, self.dhcp_jump_verify, self.igmp_jump_verify),
                                              port_list = self.generate_port_list(num_subscribers, num_channels),
                                              negative_subscriber_auth = 'all')
    assert_equal(test_status, True)
def test_subscriber_join_jump_1200channel(self):
    # Channel-jump verification for 1200 channels.
    # Bug fix: the parameter was misspelled ``sself`` while the body used
    # ``self``, so the test raised NameError as soon as it ran.
    num_subscribers = 1
    num_channels = 1200
    test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
                                              num_channels = num_channels,
                                              cbs = (self.tls_verify, self.dhcp_jump_verify, self.igmp_jump_verify),
                                              port_list = self.generate_port_list(num_subscribers, num_channels),
                                              negative_subscriber_auth = 'all')
    assert_equal(test_status, True)
# Remaining scale variants: channel-jump at 1500, channel-next at 100-1500.
def test_subscriber_join_jump_1500channels(self):
    num_subscribers = 1
    num_channels = 1500
    test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
                                              num_channels = num_channels,
                                              cbs = (self.tls_verify, self.dhcp_jump_verify, self.igmp_jump_verify),
                                              port_list = self.generate_port_list(num_subscribers, num_channels),
                                              negative_subscriber_auth = 'all')
    assert_equal(test_status, True)

def test_subscriber_join_next_100channels(self):
    num_subscribers = 1
    num_channels = 100
    test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
                                              num_channels = num_channels,
                                              cbs = (self.tls_verify, self.dhcp_next_verify, self.igmp_next_verify),
                                              port_list = self.generate_port_list(num_subscribers, num_channels),
                                              negative_subscriber_auth = 'all')
    assert_equal(test_status, True)

def test_subscriber_join_next_400channels(self):
    num_subscribers = 1
    num_channels = 400
    test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
                                              num_channels = num_channels,
                                              cbs = (self.tls_verify, self.dhcp_next_verify, self.igmp_next_verify),
                                              port_list = self.generate_port_list(num_subscribers, num_channels),
                                              negative_subscriber_auth = 'all')
    assert_equal(test_status, True)

def test_subscriber_join_next_800channels(self):
    num_subscribers = 1
    num_channels = 800
    test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
                                              num_channels = num_channels,
                                              cbs = (self.tls_verify, self.dhcp_next_verify, self.igmp_next_verify),
                                              port_list = self.generate_port_list(num_subscribers, num_channels),
                                              negative_subscriber_auth = 'all')
    assert_equal(test_status, True)

def test_subscriber_join_next_1200channels(self):
    num_subscribers = 1
    num_channels = 1200
    test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
                                              num_channels = num_channels,
                                              cbs = (self.tls_verify, self.dhcp_next_verify, self.igmp_next_verify),
                                              port_list = self.generate_port_list(num_subscribers, num_channels),
                                              negative_subscriber_auth = 'all')
    assert_equal(test_status, True)

def test_subscriber_join_next_1500channels(self):
    num_subscribers = 1
    num_channels = 1500
    test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
                                              num_channels = num_channels,
                                              cbs = (self.tls_verify, self.dhcp_next_verify, self.igmp_next_verify),
                                              port_list = self.generate_port_list(num_subscribers, num_channels),
                                              negative_subscriber_auth = 'all')
    assert_equal(test_status, True)
# tests/test_playback.py (from mopidy-spotify)
import threading
from unittest import mock
import pytest
from mopidy import audio
from mopidy import backend as backend_api
from mopidy import models
import spotify
from mopidy_spotify import backend, playback
@pytest.fixture
def audio_mock():
    """A mock standing in for mopidy's Audio actor."""
    return mock.Mock(spec=audio.Audio)
@pytest.fixture
def audio_lib_mock():
    """Patch the ``audio`` module used inside ``mopidy_spotify.playback``.

    Uses plain ``@pytest.fixture`` (which supports ``yield`` fixtures)
    instead of the ``@pytest.yield_fixture`` alias deprecated since
    pytest 3.0, and a ``with`` block so the patch is undone even if the
    test errors out.
    """
    with mock.patch.object(playback, "audio", spec=audio) as patched:
        yield patched
@pytest.fixture
def session_mock():
    """A mock libspotify Session."""
    return mock.Mock(spec=spotify.Session)
@pytest.fixture
def backend_mock(config, session_mock):
    """A SpotifyBackend mock wired up with config and a session mock."""
    mocked = mock.Mock(spec=backend.SpotifyBackend)
    mocked._config = config
    mocked._actor_proxy = None
    mocked._session = session_mock
    return mocked
@pytest.fixture
def provider(audio_mock, backend_mock):
    """The SpotifyPlaybackProvider under test, built from the mock fixtures."""
    return playback.SpotifyPlaybackProvider(
        backend=backend_mock, audio=audio_mock
    )
def test_is_a_playback_provider(provider):
    # Sanity: the provider implements mopidy's PlaybackProvider interface.
    assert isinstance(provider, backend_api.PlaybackProvider)


def test_connect_events_adds_music_delivery_handler_to_session(
    session_mock, provider, audio_mock
):
    playback_provider = provider

    playback_provider._connect_events()

    # _connect_events() must register music_delivery_callback for
    # MUSIC_DELIVERY with exactly the objects the callback needs.
    assert (
        mock.call(
            spotify.SessionEvent.MUSIC_DELIVERY,
            playback.music_delivery_callback,
            audio_mock,
            playback_provider._seeking_event,
            playback_provider._push_audio_data_event,
            playback_provider._buffer_timestamp,
        )
        in session_mock.on.call_args_list
    )


def test_connect_events_adds_end_of_track_handler_to_session(
    session_mock, provider, audio_mock
):
    playback_provider = provider

    playback_provider._connect_events()

    # likewise for the END_OF_TRACK handler
    assert (
        mock.call(
            spotify.SessionEvent.END_OF_TRACK,
            playback.end_of_track_callback,
            playback_provider._end_of_track_event,
            audio_mock,
        )
        in session_mock.on.call_args_list
    )


def test_change_track_aborts_if_no_track_uri(provider):
    # A track without a URI cannot be looked up; change_track must refuse it.
    track = models.Track()

    assert provider.change_track(track) is False
def test_change_track_loads_and_plays_spotify_track(session_mock, provider):
    uri = "spotify:track:test"
    track = models.Track(uri=uri)

    assert provider.change_track(track) is True

    # change_track resolves the uri, loads the sp track (10s timeout),
    # hands it to the player and starts playback
    session_mock.get_track.assert_called_once_with(uri)
    sp_track_mock = session_mock.get_track.return_value
    sp_track_mock.load.assert_called_once_with(10)
    session_mock.player.load.assert_called_once_with(sp_track_mock)
    session_mock.player.play.assert_called_once_with()


def test_change_track_aborts_on_spotify_error(session_mock, provider):
    # NOTE(review): uri reads 'spotfy' (sic) -- harmless here since
    # get_track is mocked to raise regardless of the argument.
    track = models.Track(uri="spotfy:track:test")
    session_mock.get_track.side_effect = spotify.Error

    assert provider.change_track(track) is False


def test_change_track_sets_up_appsrc(audio_mock, provider):
    track = models.Track(uri="spotfy:track:test")

    assert provider.change_track(track) is True

    # buffer timestamp resets to 0 and an appsrc with the module caps is
    # installed; prepare_change/start_playback are left to the base class
    assert provider._buffer_timestamp.get() == 0
    assert audio_mock.prepare_change.call_count == 0
    audio_mock.set_appsrc.assert_called_once_with(
        playback.GST_CAPS,
        need_data=mock.ANY,
        enough_data=mock.ANY,
        seek_data=mock.ANY,
    )
    assert audio_mock.start_playback.call_count == 0
    audio_mock.set_metadata.assert_called_once_with(track)


def test_resume_starts_spotify_playback(session_mock, provider):
    provider.resume()

    session_mock.player.play.assert_called_once_with()


def test_stop_pauses_spotify_playback(session_mock, provider):
    # stop() maps to a libspotify pause, not an unload
    provider.stop()

    session_mock.player.pause.assert_called_once_with()


def test_pause_pauses_spotify_playback(session_mock, provider):
    provider.pause()

    session_mock.player.pause.assert_called_once_with()
def test_on_seek_data_updates_timestamp_and_seeks_in_spotify(
    session_mock, provider
):
    provider.on_seek_data(1780)

    # timestamp is tracked in nanoseconds; libspotify seeks in milliseconds
    assert provider._buffer_timestamp.get() == 1780000000
    session_mock.player.seek.assert_called_once_with(1780)


def test_on_seek_data_ignores_first_seek_to_zero_on_every_play(
    session_mock, provider
):
    # change_track() sets _seeking_event; the first seek-to-0 after it
    # must only clear the flag, not forward the seek to libspotify
    provider._seeking_event.set()
    track = models.Track(uri="spotfy:track:test")

    provider.change_track(track)
    provider.on_seek_data(0)

    assert not provider._seeking_event.is_set()
    assert session_mock.player.seek.call_count == 0


def test_need_data_callback():
    # GStreamer need-data -> the push event gets set
    event = threading.Event()
    assert not event.is_set()

    playback.need_data_callback(event, 100)

    assert event.is_set()


def test_enough_data_callback():
    # GStreamer enough-data -> the push event gets cleared
    event = threading.Event()
    event.set()
    assert event.is_set()

    playback.enough_data_callback(event)

    assert not event.is_set()


def test_seek_data_callback():
    # GStreamer seek-data -> seeking flag set and seek forwarded (ms)
    seeking_event = threading.Event()
    backend_mock = mock.Mock()

    playback.seek_data_callback(seeking_event, backend_mock, 1340)

    assert seeking_event.is_set()
    backend_mock.playback.on_seek_data.assert_called_once_with(1340)
def test_music_delivery_rejects_data_when_seeking(session_mock, audio_mock):
audio_format = mock.Mock()
frames = b"123"
num_frames = 1
seeking_event = threading.Event()
seeking_event.set()
push_audio_data_event = threading.Event()
push_audio_data_event.set()
buffer_timestamp = mock.Mock()
assert seeking_event.is_set()
result = playback.music_delivery_callback(
session_mock,
audio_format,
frames,
num_frames,
audio_mock,
seeking_event,
push_audio_data_event,
buffer_timestamp,
)
assert seeking_event.is_set()
assert audio_mock.emit_data.call_count == 0
assert result == num_frames
def test_music_delivery_when_seeking_accepts_data_after_empty_delivery(
    session_mock, audio_mock
):
    """An empty delivery while seeking clears the seeking flag, so the
    next (real) delivery will be accepted."""
    audio_format = mock.Mock()
    frames = b""
    num_frames = 0
    seeking_event = threading.Event()
    seeking_event.set()
    push_audio_data_event = threading.Event()
    push_audio_data_event.set()
    buffer_timestamp = mock.Mock()
    assert seeking_event.is_set()
    result = playback.music_delivery_callback(
        session_mock,
        audio_format,
        frames,
        num_frames,
        audio_mock,
        seeking_event,
        push_audio_data_event,
        buffer_timestamp,
    )
    assert not seeking_event.is_set()
    assert audio_mock.emit_data.call_count == 0
    assert result == num_frames
def test_music_delivery_rejects_data_depending_on_push_audio_data_event(
    session_mock, audio_mock
):
    """When the push-audio-data event is cleared, the callback consumes
    nothing (returns 0) so libspotify will redeliver later."""
    audio_format = mock.Mock()
    frames = b"123"
    num_frames = 1
    seeking_event = threading.Event()
    push_audio_data_event = threading.Event()
    buffer_timestamp = mock.Mock()
    assert not push_audio_data_event.is_set()
    result = playback.music_delivery_callback(
        session_mock,
        audio_format,
        frames,
        num_frames,
        audio_mock,
        seeking_event,
        push_audio_data_event,
        buffer_timestamp,
    )
    assert audio_mock.emit_data.call_count == 0
    assert result == 0
def test_music_delivery_shortcuts_if_no_data_in_frames(
    session_mock, audio_lib_mock, audio_mock
):
    """An empty frame buffer returns early without building a GStreamer
    buffer or touching the audio actor."""
    audio_format = mock.Mock(channels=2, sample_rate=44100, sample_type=0)
    frames = b""
    num_frames = 1
    seeking_event = threading.Event()
    push_audio_data_event = threading.Event()
    push_audio_data_event.set()
    buffer_timestamp = mock.Mock()
    result = playback.music_delivery_callback(
        session_mock,
        audio_format,
        frames,
        num_frames,
        audio_mock,
        seeking_event,
        push_audio_data_event,
        buffer_timestamp,
    )
    assert result == 0
    assert audio_lib_mock.create_buffer.call_count == 0
    assert audio_mock.emit_data.call_count == 0
def test_music_delivery_rejects_unknown_audio_formats(session_mock, audio_mock):
    """A sample_type other than 16-bit signed int must raise an assertion."""
    audio_format = mock.Mock(sample_type=17)
    frames = b"123"
    num_frames = 1
    seeking_event = threading.Event()
    push_audio_data_event = threading.Event()
    push_audio_data_event.set()
    buffer_timestamp = mock.Mock()
    with pytest.raises(AssertionError) as excinfo:
        playback.music_delivery_callback(
            session_mock,
            audio_format,
            frames,
            num_frames,
            audio_mock,
            seeking_event,
            push_audio_data_event,
            buffer_timestamp,
        )
    assert "Expects 16-bit signed integer samples" in str(excinfo.value)
def test_music_delivery_creates_gstreamer_buffer_and_gives_it_to_audio(
    session_mock, audio_mock, audio_lib_mock
):
    """Happy path: frames are wrapped in a GStreamer buffer (with the
    current timestamp/duration), the timestamp advances, and the buffer
    is emitted to the audio actor."""
    audio_lib_mock.calculate_duration.return_value = mock.sentinel.duration
    audio_lib_mock.create_buffer.return_value = mock.sentinel.gst_buffer
    audio_format = mock.Mock(channels=2, sample_rate=44100, sample_type=0)
    frames = b"\x00\x00"
    num_frames = 1
    seeking_event = threading.Event()
    push_audio_data_event = threading.Event()
    push_audio_data_event.set()
    buffer_timestamp = mock.Mock()
    buffer_timestamp.get.return_value = mock.sentinel.timestamp
    result = playback.music_delivery_callback(
        session_mock,
        audio_format,
        frames,
        num_frames,
        audio_mock,
        seeking_event,
        push_audio_data_event,
        buffer_timestamp,
    )
    # duration computed from (num_frames, sample_rate)
    audio_lib_mock.calculate_duration.assert_called_once_with(1, 44100)
    audio_lib_mock.create_buffer.assert_called_once_with(
        frames,
        timestamp=mock.sentinel.timestamp,
        duration=mock.sentinel.duration,
    )
    buffer_timestamp.increase.assert_called_once_with(mock.sentinel.duration)
    audio_mock.emit_data.assert_called_once_with(mock.sentinel.gst_buffer)
    assert result == num_frames
def test_music_delivery_consumes_zero_frames_if_audio_fails(
    session_mock, audio_mock, audio_lib_mock
):
    """If the audio actor rejects the buffer, nothing is consumed and the
    buffer timestamp is left unchanged."""
    audio_mock.emit_data.return_value.get.return_value = False
    audio_format = mock.Mock(channels=2, sample_rate=44100, sample_type=0)
    frames = b"\x00\x00"
    num_frames = 1
    seeking_event = threading.Event()
    push_audio_data_event = threading.Event()
    push_audio_data_event.set()
    buffer_timestamp = mock.Mock()
    buffer_timestamp.get.return_value = mock.sentinel.timestamp
    result = playback.music_delivery_callback(
        session_mock,
        audio_format,
        frames,
        num_frames,
        audio_mock,
        seeking_event,
        push_audio_data_event,
        buffer_timestamp,
    )
    assert buffer_timestamp.increase.call_count == 0
    assert result == 0
def test_end_of_track_callback(session_mock, audio_mock):
    """End of track sets the event and emits None (end-of-stream marker)."""
    end_of_track_event = threading.Event()
    playback.end_of_track_callback(session_mock, end_of_track_event, audio_mock)
    assert end_of_track_event.is_set()
    audio_mock.emit_data.assert_called_once_with(None)
def test_duplicate_end_of_track_callback_is_ignored(session_mock, audio_mock):
    """A second end-of-track while the event is already set is a no-op."""
    end_of_track_event = threading.Event()
    end_of_track_event.set()
    playback.end_of_track_callback(session_mock, end_of_track_event, audio_mock)
    assert end_of_track_event.is_set()
    assert audio_mock.emit_data.call_count == 0
def test_buffer_timestamp_wrapper():
    """BufferTimestamp supports get/set and relative increase."""
    wrapper = playback.BufferTimestamp(0)
    assert wrapper.get() == 0
    wrapper.set(17)
    assert wrapper.get() == 17
    wrapper.increase(3)
    assert wrapper.get() == 20
| 2.15625 | 2 |
gravity_toolkit/read_ICGEM_harmonics.py | yaramohajerani/read-GRACE-harmonics | 0 | 12762079 | <gh_stars>0
#!/usr/bin/env python
u"""
read_ICGEM_harmonics.py
Written by <NAME> (07/2020)
Read gfc files and extract gravity model spherical harmonics from the GFZ ICGEM
GFZ International Centre for Global Earth Models (ICGEM)
http://icgem.gfz-potsdam.de/
INPUTS:
model_file: GFZ ICGEM gfc spherical harmonic data file
OPTIONS:
FLAG: string denoting data lines (default gfc)
OUTPUTS:
clm: cosine spherical harmonics of input data
slm: sine spherical harmonics of input data
eclm: cosine spherical harmonic standard deviations of type errors
eslm: sine spherical harmonic standard deviations of type errors
modelname: name of the gravity model
earth_gravity_constant: GM constant of the Earth for the gravity model
radius: semi-major axis of the Earth for the gravity model
max_degree: maximum degree and order for the gravity model
errors: error type of the gravity model
norm: normalization of the spherical harmonics
tide_system: tide system of gravity model (mean_tide, zero_tide, tide_free)
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python (https://numpy.org)
UPDATE HISTORY:
Updated 07/2020: added function docstrings
Updated 07/2017: include parameters to change the tide system
Written 12/2015
"""
import os
import re
import numpy as np
#-- PURPOSE: read spherical harmonic coefficients of a gravity model
def read_ICGEM_harmonics(model_file, FLAG='gfc'):
    """
    Extract gravity model spherical harmonics from GFZ ICGEM gfc files

    Arguments
    ---------
    model_file: GFZ ICGEM gfc spherical harmonic data file

    Keyword arguments
    -----------------
    FLAG: string denoting data lines

    Returns
    -------
    clm: cosine spherical harmonics of input data
    slm: sine spherical harmonics of input data
    eclm: cosine spherical harmonic standard deviations of type errors
    eslm: sine spherical harmonic standard deviations of type errors
    modelname: name of the gravity model
    earth_gravity_constant: GM constant of the Earth for gravity model
    radius: semi-major axis of the Earth for gravity model
    max_degree: maximum degree and order for gravity model
    errors: error type of the gravity model
    norm: normalization of the spherical harmonics
    tide_system: tide system of gravity model
    """
    #-- read input data
    with open(os.path.expanduser(model_file),'r') as f:
        file_contents = f.read().splitlines()
    #-- python dictionary with model input and headers
    model_input = {}
    #-- extract parameters from header
    header_parameters = ['modelname','earth_gravity_constant','radius',
        'max_degree','errors','norm','tide_system']
    parameters_regex = '(' + '|'.join(header_parameters) + ')'
    header = [l for l in file_contents if re.match(parameters_regex,l)]
    for line in header:
        #-- split the line into individual components
        line_contents = line.split()
        model_input[line_contents[0]] = line_contents[1]
    #-- set maximum spherical harmonic degree
    #-- use builtin int/float: the np.int and np.float aliases were
    #-- deprecated in NumPy 1.20 and removed in NumPy 1.24
    LMAX = int(model_input['max_degree'])
    #-- allocate for each Coefficient
    model_input['clm'] = np.zeros((LMAX+1,LMAX+1))
    model_input['slm'] = np.zeros((LMAX+1,LMAX+1))
    model_input['eclm'] = np.zeros((LMAX+1,LMAX+1))
    model_input['eslm'] = np.zeros((LMAX+1,LMAX+1))
    #-- reduce file_contents to input data using data marker flag
    input_data = [l for l in file_contents if re.match(FLAG,l)]
    #-- for each line of data in the gravity file
    for line in input_data:
        #-- split the line into individual components replacing fortran d
        line_contents = re.sub('d','e',line,flags=re.IGNORECASE).split()
        #-- degree and order for the line
        l1 = int(line_contents[1])
        m1 = int(line_contents[2])
        #-- read spherical harmonic coefficients
        model_input['clm'][l1,m1] = float(line_contents[3])
        model_input['slm'][l1,m1] = float(line_contents[4])
        model_input['eclm'][l1,m1] = float(line_contents[5])
        model_input['eslm'][l1,m1] = float(line_contents[6])
    #-- return the spherical harmonics and parameters
    return model_input
| 2.578125 | 3 |
test/test_data/recursive_test_extension/__init__.py | CuteFwan/dango.py | 30 | 12762080 | <gh_stars>10-100
from dango import dcog, Cog
from .cmds import SubModule # noqa pylint: disable=unused-import
@dcog()
class InModule(Cog):
    """Minimal cog declared in the package __init__; presumably fixture data
    for exercising recursive extension loading -- confirm against the test
    that loads this package."""
    def __init__(self, config):
        # config is accepted to satisfy the dcog interface but unused
        pass
| 1.414063 | 1 |
utils/reader.py | GT-AcerZhang/PaddlePaddle-SSD | 47 | 12762081 | import math
import os
import xml.etree.ElementTree
import numpy as np
import paddle
import six
from PIL import Image
from utils import image_util
class Settings(object):
    """Configuration container for the SSD data pipeline.

    Holds the class-label list, the target resize dimensions, the
    normalization constants, and the probabilities/magnitudes used by the
    random photometric distortions and the expand augmentation.
    """
    def __init__(self,
                 label_file_path=None,
                 resize_h=300,
                 resize_w=300,
                 mean_value=127.5,
                 std_value=0.007843,
                 apply_distort=True,
                 apply_expand=True,
                 ap_version='11point'):
        self._ap_version = ap_version
        self._label_list = []
        if label_file_path is not None:
            # label file: one class name per line
            with open(label_file_path, 'r', encoding='utf-8') as f:
                lines = f.readlines()
                for line in lines:
                    self._label_list.append(line.strip().replace('\n', ''))
        self._apply_distort = apply_distort
        self._apply_expand = apply_expand
        self._resize_height = resize_h
        self._resize_width = resize_w
        self._img_mean = mean_value
        self._img_std = std_value
        # expand augmentation parameters
        self._expand_prob = 0.5
        self._expand_max_ratio = 4
        # photometric distortion probabilities and magnitudes
        self._hue_prob = 0.5
        self._hue_delta = 18
        self._contrast_prob = 0.5
        self._contrast_delta = 0.5
        self._saturation_prob = 0.5
        self._saturation_delta = 0.5
        self._brightness_prob = 0.5
        self._brightness_delta = 0.125
    @property
    def ap_version(self):
        return self._ap_version
    @property
    def apply_expand(self):
        return self._apply_expand
    @property
    def apply_distort(self):
        return self._apply_distort
    @property
    def label_list(self):
        return self._label_list
    @property
    def resize_h(self):
        return self._resize_height
    @property
    def resize_w(self):
        return self._resize_width
    @property
    def img_mean(self):
        return self._img_mean
    @property
    def img_std(self):
        return self._img_std
def preprocess(img, bbox_labels, mode, settings):
    """Augment (train only), resize, and normalize one image.

    Arguments:
        img: PIL image.
        bbox_labels: list of [label, xmin, ymin, xmax, ymax, difficult],
            coordinates normalized to [0, 1] (see pascalvoc in this file).
        mode: 'train' enables distortion/expand/crop/mirror augmentation.
        settings: Settings instance with resize and normalization params.

    Returns:
        (CHW float32 image array, possibly-updated bbox labels)
    """
    img_width, img_height = img.size
    sampled_labels = bbox_labels
    if mode == 'train':
        if settings._apply_distort:
            img = image_util.distort_image(img, settings)
        if settings._apply_expand:
            img, bbox_labels, img_width, img_height = image_util.expand_image(
                img, bbox_labels, img_width, img_height, settings)
        # sampling, hard-code here
        batch_sampler = [image_util.sampler(1, 1, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0),
                         image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.1, 0.0),
                         image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.3, 0.0),
                         image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.5, 0.0),
                         image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.7, 0.0),
                         image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.9, 0.0),
                         image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.0, 1.0)]
        sampled_bbox = image_util.generate_batch_samples(batch_sampler, bbox_labels)
        img = np.array(img)
        if len(sampled_bbox) > 0:
            # pick one of the generated crop candidates at random
            idx = int(np.random.uniform(0, len(sampled_bbox)))
            img, sampled_labels = image_util.crop_image(img, bbox_labels, sampled_bbox[idx], img_width, img_height)
        img = Image.fromarray(img)
    img = img.resize((settings.resize_w, settings.resize_h), Image.ANTIALIAS)
    img = np.array(img)
    if mode == 'train':
        # random horizontal flip: mirror image and swap xmin/xmax
        mirror = int(np.random.uniform(0, 2))
        if mirror == 1:
            img = img[:, ::-1, :]
            for i in range(len(sampled_labels)):
                tmp = sampled_labels[i][1]
                sampled_labels[i][1] = 1 - sampled_labels[i][3]
                sampled_labels[i][3] = 1 - tmp
    # HWC to CHW
    if len(img.shape) == 3:
        img = np.swapaxes(img, 1, 2)
        img = np.swapaxes(img, 1, 0)
    img = img.astype('float32')
    img -= settings.img_mean
    img = img * settings.img_std
    return img, sampled_labels
def pascalvoc(settings, file_list, mode, batch_size, shuffle):
    """Build a batch reader over PASCAL VOC style image/annotation pairs.

    Each entry of *file_list* is "image_path<TAB>annotation_xml_path".
    Yields batches of (image, boxes, labels, difficults) tuples.
    """
    def reader():
        if mode == 'train' and shuffle:
            np.random.shuffle(file_list)
        batch_out = []
        cnt = 0
        for image in file_list:
            image_path, label_path = image.split('\t')
            if not os.path.exists(image_path):
                raise ValueError("%s is not exist, you should specify data path correctly." % image_path)
            im = Image.open(image_path)
            if im.mode == 'L':
                # grayscale images are promoted to 3-channel RGB
                im = im.convert('RGB')
            im_width, im_height = im.size
            # layout: label | xmin | ymin | xmax | ymax | difficult
            bbox_labels = []
            root = xml.etree.ElementTree.parse(label_path).getroot()
            for object in root.findall('object'):
                # start from 1
                bbox_sample = [float(settings.label_list.index(object.find('name').text))]
                bbox = object.find('bndbox')
                difficult = float(object.find('difficult').text)
                # coordinates normalized to [0, 1] by the image size
                bbox_sample.append(float(bbox.find('xmin').text) / im_width)
                bbox_sample.append(float(bbox.find('ymin').text) / im_height)
                bbox_sample.append(float(bbox.find('xmax').text) / im_width)
                bbox_sample.append(float(bbox.find('ymax').text) / im_height)
                bbox_sample.append(difficult)
                bbox_labels.append(bbox_sample)
            im, sample_labels = preprocess(im, bbox_labels, mode, settings)
            sample_labels = np.array(sample_labels)
            # images whose boxes were all cropped away are skipped
            if len(sample_labels) == 0: continue
            im = im.astype('float32')
            boxes = sample_labels[:, 1:5]
            lbls = sample_labels[:, 0].astype('int32')
            difficults = sample_labels[:, -1].astype('int32')
            batch_out.append((im, boxes, lbls, difficults))
            if len(batch_out) == batch_size:
                yield batch_out
                cnt += len(batch_out)
                batch_out = []
        # NOTE(review): the `> 1` below silently drops a final test batch of
        # exactly one sample -- looks like it should be `> 0`; confirm intent.
        if mode == 'test' and len(batch_out) > 1:
            yield batch_out
            cnt += len(batch_out)
    return reader
def train(settings, file_list_path, batch_size, shuffle=True, use_multiprocess=True, num_workers=4):
    """Create the training reader, optionally sharded across worker processes.

    Arguments:
        settings: Settings instance for the data pipeline.
        file_list_path: text file with one "image<TAB>annotation" pair per line.
        batch_size: samples per yielded batch.
        shuffle: shuffle the file list each epoch (train mode).
        use_multiprocess: split the list into up to *num_workers* shards and
            combine them with paddle's multiprocess reader.
        num_workers: number of shards/processes when multiprocessing.

    Returns:
        A paddle-style reader callable.
    """
    # close the file handle instead of leaking it
    with open(file_list_path) as f:
        images = [line.strip() for line in f]
    if not images:
        raise ValueError("file list '%s' is empty" % file_list_path)
    np.random.shuffle(images)
    if use_multiprocess:
        # Bug fix: the original applied math.ceil to *floor* division
        # (len // num_workers), which is a no-op -- producing an extra
        # shard when the list does not divide evenly and crashing with a
        # zero step when len(images) < num_workers.  Use true division,
        # clamped to at least 1 image per shard.
        n = max(1, int(math.ceil(len(images) / num_workers)))
    else:
        n = len(images)
    image_lists = [images[i:i + n] for i in range(0, len(images), n)]
    readers = [pascalvoc(settings, shard, 'train', batch_size, shuffle)
               for shard in image_lists]
    if use_multiprocess:
        return paddle.reader.multiprocess_reader(readers, False)
    return readers[0]
def test(settings, file_list_path, batch_size):
    """Create the evaluation reader over every image in *file_list_path*.

    Reads the file with a context manager so the handle is closed instead
    of being leaked (the original used a bare ``open`` in a comprehension).
    """
    with open(file_list_path) as f:
        image_list = [line.strip() for line in f]
    return pascalvoc(settings, image_list, 'test', batch_size, False)
| 2.5 | 2 |
_sadm/web/view/__init__.py | jrmsdev/pysadm | 1 | 12762082 | # Copyright (c) <NAME> <<EMAIL>>
# See LICENSE file.
| 0.933594 | 1 |
spectral/io/__init__.py | wwlswj/spectral | 398 | 12762083 | <gh_stars>100-1000
from __future__ import absolute_import, division, print_function, unicode_literals
from .spyfile import SpyFile
from ..io import aviris
from ..io import erdas
from ..io import envi
| 1.125 | 1 |
tests/LED_test.py | reneaaron/LightningATM | 0 | 12762084 | #!/usr/bin/python3
# LED Test
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)  # use Broadcom (BCM) pin numbering
GPIO.setwarnings(False)
LED = 13  # BCM pin driving the LED
GPIO.setup(LED, GPIO.OUT)
try:
    print("LED is now flashing..")
    print("Exit with CTRL+C")
    # blink at 1 Hz (0.5 s on, 0.5 s off) until interrupted
    while True:
        GPIO.output(LED,1)
        time.sleep(0.5)
        GPIO.output(LED,0)
        time.sleep(0.5)
except KeyboardInterrupt:
    # release the GPIO pins before exiting on Ctrl+C
    GPIO.cleanup()
    print(" Bye Bye")
| 3.328125 | 3 |
main.py | Chankyu-Lee/KpopStar-Data | 0 | 12762085 | <reponame>Chankyu-Lee/KpopStar-Data
#!/usr/bin/env python3
import sys
import yaml
import os
import pathlib
from utils import convert_num, display_num, download_image
from tweet import twitter_post, twitter_post_image, twitter_repost, set_test_mode
from birthdays import check_birthdays
from instagram import instagram_data
from youtube import youtube_data
from spotify import spotify_data
from billboard_charts import billboard_data
def load_group():
    """Load the group data from the YAML file ``data.yaml``.

    The group data is stored in ``data.yaml`` in the same directory as
    this script.

    Returns:
        dict with all the information about the group
    """
    print("YAML 파일에서 데이터 불러오는 중...")
    with open('data.yaml', encoding="utf-8") as file:
        group = yaml.load(file, Loader=yaml.FullLoader)
    # print the member roster as a confirmation of what was loaded
    out = "{} 멤버 목록 : ".format(group["name"])
    for artist in group["members"]:
        out += artist["name"]
        out += " "
    print(out + "\n")
    return group
def write_group(group):
    """Write the group data back to the YAML file ``data.yaml``.

    The group data is stored in ``data.yaml`` in the same directory as
    this script.

    Arguments:
        group: dict with all the information about the group
    """
    print("데이터를 YAML 파일에 저장하는 중...")
    with open('data.yaml', 'w', encoding="utf-8") as file:
        yaml.dump(group, file, sort_keys=False, allow_unicode=True)
def createYAML():
    """Create the YAML file ``data.yaml`` and populate it with the initial
    data entered interactively by the user.

    ``data.yaml`` is created in the same directory as this script.
    """
    name = input("그룹의 이름을 입력하세요: ")
    hashtags = input("해쉬태그를 입력하세요 (트윗에 추가됩니다): ")
    spotify_id = input(name + "의 spotify의 ID를 입력하세요: ")
    twitter_url = input(name + "의 트위터 계정 닉네임(@)을 입력하세요: ")
    instagram_url = input(name + "의 인스타그램 계정 url을 입력하세요: ")
    youtube_name = input(name + "의 youtube의 계정 이름을 입력하세요 (비워둘 수 있습니다): ")
    youtube_url = input(name + "의 youtube의 고유 ID를 입력하세요 (채널의 URL에서 찾을 수 있습니다): ")
    member = []
    # `check` is reused for several different Y/N prompts below; the loop
    # continues while the *last* prompt ("add another member?") answers 'Y'
    check = 'Y'
    while check == 'Y':
        print("멤버들의 데이터를 입력받습니다.")
        member_name = input("멤버의 이름을 입력하세요: ")
        member_years = int(input(member_name + "의 나이를 입력하세요: "))
        member_birthday = input(member_name + "의 생일을 다음 양식으로 작성하세요 (DD/MM/YYYY): ")
        member_hashtags = input("해쉬태그를 입력하세요 (트윗에 추가됩니다): ")
        member_instagram = input(member_name + "의 인스타그램 계정 url을 입력하세요: ")
        check = input(member_name + "의 유튜브 계정을 추가하시겠습니까? (Y/N): ")
        if check == 'Y':
            member_youtube_name = input(member_name + "의 youtube의 계정 이름을 입력하세요 (비워둘 수 있습니다): ")
            member_youtube_url = input(member_name + "의 youtube의 고유 ID를 입력하세요 (채널의 URL에서 찾을 수 있습니다): ")
        else:
            member_youtube_url = None;
        check = input(member_name + "의 스포티파이 계정을 추가하시겠습니까? (Y/N): ")
        if check == 'Y':
            member_spotify_id = input(member_name + "의 spotify의 ID를 입력하세요: ")
        else:
            member_spotify_id = None;
        member_dic = {
            "name" : member_name,
            "years" : member_years,
            "birthday" : member_birthday,
            "hashtags" : member_hashtags,
            "instagram" : {
                'url' : member_instagram
            },
        }
        # optional per-member YouTube section with zeroed counters
        if member_youtube_url is not None:
            member_dic["youtube"] = {
                'name' : member_youtube_name,
                'url' : member_youtube_url,
                'views_scale' : "B",
                'videos_scale' : "B",
                'subs' : '0',
                'total_views' : '0',
                'videos' : None
            }
        if member_spotify_id is not None:
            member_dic["spotify"] = {
                'id' : member_spotify_id,
                'followers' : 0
            }
        member.append(member_dic)
        check = input("멤버를 추가하시겠습니까? (Y/N): ")
    dic = {
        'name' : name,
        'hashtags' : hashtags,
        'spotify' : {
            'id' : spotify_id,
            'followers' : 0
        },
        'twitter' : {
            'url' : twitter_url
        },
        'instagram' : {
            'url' : instagram_url
        },
        'youtube' : {
            'name' : youtube_name,
            'url' : youtube_url,
            'views_scale' : "B",
            'videos_scale' : "B",
            'subs' : '0',
            'total_views' : '0',
            'videos' : None
        },
        'members' : member
    }
    with open('data.yaml', 'w', encoding="utf-8") as file:
        yaml.dump(dic, file, sort_keys=False, allow_unicode=True)
    print("data.yaml 파일이 생성되었습니다.")
    print("data.yaml 파일에 데이터를 불러오는 작업이 실행됩니다.")
    print("test모드(tweet이 게시되지 않음)가 권장됩니다.")
def check_args():
    """Check the parameters passed on the command line.

    One or more parameters can be passed to disable individual module
    sources.  The accepted parameters are:

    * ``-no-instagram``: disable the Instagram source.
    * ``-no-youtube``: disable the YouTube source.
    * ``-no-spotify``: disable the Spotify source.
    * ``-no-birthday``: disable the birthday-event source.
    * ``-no-twitter``: disable the Twitter source (used for reposting).
    * ``-no-tweet``: prevent the bot from actually tweeting the updates of
      the enabled sources.  The output is still visible on the console,
      which is very useful for **testing**.
      Remember that ``-no-twitter`` is different from ``-no-tweet``.

    Returns:
        dict mapping each source to its status, and whether writing the
        YAML file is enabled (True or False)
    """
    source = {"instagram": True, "youtube": True, "spotify": True, "birthday": True, "twitter": True, "billboard": True}
    write = True
    if len(sys.argv) > 1:
        for arg in sys.argv:
            if arg == "-no-tweet":
                print("-no-tweet 매개 변수가 전달되었습니다!\nTest 모드가 활성화됨: 봇이 아무 것도 트윗하지 않습니다.")
                set_test_mode()
            if arg == "-no-instagram":
                print("-no-instagram 매개 변수가 전달되었습니다!")
                source["instagram"] = False
            if arg == "-no-spotify":
                print("-no-spotify 매개 변수가 전달되었습니다!")
                source["spotify"] = False
            if arg == "-no-youtube":
                print("-no-youtube 매개 변수가 전달되었습니다!")
                source["youtube"] = False
            if arg == "-no-birthday":
                print("-no-birthday 매개 변수가 전달되었습니다!")
                source["birthday"] = False
            if arg == "-no-billboard":
                print("-no-billboard 매개 변수가 전달되었습니다!")
                source["billboard"] = False
            if arg == "-no-twitter":
                print("-no-twitter 매개 변수가 전달되었습니다!")
                source["twitter"] = False
            if arg == "-no-write":
                print("-no-write 매개 변수가 전달되었습니다!")
                write = False
        print()
    return source, write
def set_args(source):
    """Interactively toggle the module sources (menu-driven equivalent of
    the command-line flags; see check_args()).

    Arguments:
        source: dict with all the sources and their status; disabled
            sources are switched off **in place**.

    Returns:
        bool: True unless the user disabled YAML writing (option '8').
        Bug fix: previously the write flag was assigned to a dead local
        variable and silently discarded; it is now returned so callers
        can honor it.  Callers that ignore the return value keep working.
    """
    write = True
    check = -1
    while check != '0' :
        print("비활성화할 모듈 source를 설정합니다.")
        print("선택할 모듈에 해당하는 값을 입력하세요.")
        print("인스타그램 : 1 \n유튜브 : 2 \n스포티파이 : 3 \n생일 : 4 \n트위터 : 5 \n빌보드 : 6 \n테스트(트윗 방지)모드 : 7 \nyaml 파일 쓰기 방지 모드 : 8 \n설정 종료 : 0")
        check = input()
        if check == '7':
            print("-no-tweet 매개 변수가 전달되었습니다!\nTest 모드가 활성화됨: 봇이 아무 것도 트윗하지 않습니다.")
            set_test_mode()
        if check == '1':
            print("-no-instagram 매개 변수가 전달되었습니다!")
            source["instagram"] = False
        if check == '3':
            print("-no-spotify 매개 변수가 전달되었습니다!")
            source["spotify"] = False
        if check == '2':
            print("-no-youtube 매개 변수가 전달되었습니다!")
            source["youtube"] = False
        if check == '4':
            print("-no-birthday 매개 변수가 전달되었습니다!")
            source["birthday"] = False
        if check == '6':
            print("-no-billboard 매개 변수가 전달되었습니다!")
            source["billboard"] = False
        if check == '5':
            print("-no-twitter 매개 변수가 전달되었습니다!")
            source["twitter"] = False
        if check == '8':
            print("-no-write 매개 변수가 전달되었습니다!")
            write = False
        if check == '0':
            print("설정을 종료합니다.")
    return write
if __name__ == '__main__':
    # parse command-line flags for which sources to run
    source, write = check_args()
    # offer to (re)create the YAML data file before running the sources
    exists_file = pathlib.Path('data.yaml').exists()
    if exists_file is False:
        print("data.yaml 파일이 존재하지 않습니다.")
        answer = input("data.yaml 파일을 새로 생성하시겠습니까? (Y/N) : ")
        if (answer == 'Y'):
            createYAML()
    else:
        # the file exists, but the user may still want to rebuild it
        answer = input("data.yaml 파일을 새로 생성하시겠습니까? (Y/N) : ")
        if (answer == 'Y'):
            createYAML()
    # optional interactive menu to disable individual sources
    answer = input("비활성화할 모듈을 설정하시겠습니까? (Y/N) : ")
    if (answer == 'Y'):
        set_args(source)
    group = load_group()
    # run each enabled source; each returns the (possibly updated) group
    if source["birthday"]:
        group = check_birthdays(group)
    if source["youtube"]:
        group = youtube_data(group)
    if source["twitter"]:
        group = twitter_repost(group)
    if source["instagram"]:
        group = instagram_data(group)
    if source["spotify"]:
        group = spotify_data(group)
    if source["billboard"]:
        group = billboard_data(group)
    if write:
        write_group(group)
| 2.734375 | 3 |
Script/Commands/Messages/Useful/server_info.py | AIDRI/Clash-Of-Clans-Discord-Bot | 0 | 12762086 | from Script.import_emojis import Emojis
from Script.import_functions import create_embed, int_to_str
async def server_info(ctx):
    """Send an embed summarizing the current Discord guild.

    Counts humans vs bots, lists up to ~11 custom emojis and up to ~11
    administrators, and includes owner/creation/region/boost info.
    """
    # count human members (bot flag is falsy)
    nb_humans = 0
    for members in ctx.guild.members:
        if members.bot == 0:
            nb_humans += 1
    # count bot members
    nb_bots = 0
    for members in ctx.guild.members:
        if members.bot == 1:
            nb_bots += 1
    # collect a preview of the guild's custom emojis, truncated with "..."
    emojis = ""
    count = 0
    for emoji in ctx.guild.emojis:
        if count > 10:
            emojis += "..."
            break
        emojis += f"{emoji} "
        count += 1
    # collect a preview of members with administrator permission
    admins = ""
    count = 0
    for member in ctx.guild.members:
        if count > 10:
            admins += "..."
            break
        if member.guild_permissions.administrator:
            admins += f"{member.mention} "
            count += 1
    embed = create_embed(ctx.guild.name, f"{Emojis['Owner']} Owner : {ctx.guild.owner.mention}\n{Emojis['Calendar']} Created at : {ctx.guild.created_at.date().isoformat()}\n{Emojis['Members']} Humans : {int_to_str(nb_humans)}\n{Emojis['Bot']} Bots : {int_to_str(nb_bots)}\n{Emojis['Pin']} Region : {ctx.guild.region}\n{Emojis['Boost']} Boost level : {ctx.guild.premium_tier}/3\n{Emojis['Boost']} Boost number : {ctx.guild.premium_subscription_count}\n{Emojis['Emoji_ghost']} emojis : {emojis}\nAdministrators : {admins}", ctx.guild.me.color, "", ctx.guild.icon_url)
    embed.set_thumbnail(url=ctx.guild.icon_url)
    await ctx.send(embed=embed)
    return
| 2.4375 | 2 |
ppysal/release/adjacentpolygon.py | ElsevierSoftwareX/SOFTX_2018_242 | 22 | 12762087 | import collections
import itertools
import sys
import numpy as np
import pysal as ps
from mpi4py import MPI
import time
from globalsort import globalsort
# NOTE: this script is Python 2 (print statements) and requires mpi4py/pysal.
if __name__ == '__main__':
    t1 = time.time()
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    #Phase I: Compute Hi bounds and sort the points - this get the points local to the cores as well
    if rank == 0:
        fname = sys.argv[1]
        print "Using {} cores for {} polygons".format(comm.size, fname.split('.')[0])
        t2 = time.time()
        # read every polygon vertex (dropping the closing duplicate vertex)
        # into a flat (x, y, polygon_id) array
        shpfileobj = ps.open(fname)
        geoms = []
        x = []
        y = []
        for i, poly in enumerate(shpfileobj):
            for j in poly.vertices[:-1]:
                geoms.append(i)
                x.append(j[0])
                y.append(j[1])
        nvertices = len(x)
        pts = np.empty((nvertices, 3))
        pts[:,0] = x
        pts[:,1] = y
        pts[:,2] = geoms
        t3 = time.time()
        print "File I/O required {} seconds".format(t3 - t2)
    else:
        nvertices = None
    npivots = int(sys.argv[2])
    # broadcast the vertex count so every rank knows the global shape
    nvertices = comm.bcast(nvertices)
    shape = (nvertices, 3)
    comm.Barrier()
    # distributed sample sort on the y axis; each rank receives its slice
    if rank == 0:
        local_hi = globalsort(comm, rank, shape, pts=pts,
                              axis='y', samplesize = npivots)
    else:
        local_hi = globalsort(comm, rank, shape, pts=None,
                              axis='y', samplesize = npivots)
    '''
    for i in range(comm.size):
        if rank == i:
            print i, local_hi[local_hi[:,0].argsort()].shape
    sys.exit()
    '''
    comm.Barrier()
    if rank == 0:
        t4 = time.time()
        print "Global sort took {} seconds".format(t4 - t3)
    # sort locally by (y, x) so coincident points become adjacent rows
    local_hi = local_hi[np.lexsort((local_hi[:,0], local_hi[:,1]))]
    '''
    for i in range(comm.size):
        if i == rank:
            print len(local_hi)
    '''
    #if rank == 0:
    #a = local_hi
    #ua, uind = np.unique(np.ascontiguousarray(a[:,:2]).view(np.dtype((np.void,a[:,:2].dtype.itemsize * a[:,:2].shape[1]))),return_inverse=True)
    #for i in range(np.max(uind) + 1):
    #print local_hi
    # group consecutive identical coordinates: each group is the set of
    # polygon ids sharing that vertex (queen contiguity candidates)
    coincident = []
    seed = local_hi[0][:2]
    clist = set([])
    for i in local_hi:
        if np.array_equal(i[:2], seed):
            clist.add(i[2])
        else:
            coincident.append(clist)
            clist = set([i[2]])
            seed = i[:2]
    coincident.append(clist) #Have to get the final iteration
    # fold the coincidence sets into a polygon -> neighbor-set mapping
    neighbors = collections.defaultdict(set)
    for n in coincident:
        for c in n:
            neighbors[c] = neighbors[c].union(n)
    comm.Barrier()
    '''
    for i in range(comm.size):
        if i == rank:
            print i, neighbors
        comm.Barrier() # Just to get prints to be pretty
    '''
    if rank == 0:
        t5 = time.time()
        print "Computing local coincident points took {} seconds".format(t5 - t4)
    # gather per-rank neighbor maps on rank 0 and merge them
    neighbors_list = comm.gather(neighbors, root=0)
    if rank == 0:
        neighbors = neighbors_list[0]
        for n in neighbors_list[1:]:
            for k, v in n.iteritems():
                try:
                    neighbors[k] = neighbors[k].union(v)
                except KeyError:
                    neighbors[k] = v
        # a polygon is not its own neighbor
        for k, v in neighbors.iteritems():
            v.remove(k)
        t6 = time.time()
        print "Collecting and parsing neighbors took {} seconds".format(t6 - t5)
        t6a = time.time()
        w_mpi = ps.W(neighbors)
        t7 = time.time()
        print "Generating the PySAL W Object took {} seconds".format(t7-t6a)
        print "Total runtime was {} seconds".format(t7 - t1)
    '''
    t8 = time.time()
    w_ps = ps.queen_from_shapefile(sys.argv[1])
    t9 = time.time()
    print "Serial W generation took {} seconds".format(t9 - t8)
    for k, v in w_mpi.neighbors.iteritems():
        #print k, sorted(v), sorted(w_ps.neighbors[k])
        assert(sorted(v) == sorted(w_ps.neighbors[k]))
    t10 = time.time()
    print "Assertion that PySAL matches pPySAL took {} seconds".format(t10 - t9)
    '''
| 1.90625 | 2 |
server/edd/describe/exceptions/__init__.py | trussworks/edd | 13 | 12762088 | """ Module contains exceptions for edd.describe """
from .core import (
DescribeError,
DescribeWarning,
InvalidDescribeRequest,
MessagingMixin,
ReportableDescribeError,
ReportableDescribeWarning,
ReportingLimitWarning,
)
from .resolve import CommunicationError, ResolveError
# Public API of edd.describe.exceptions, re-exported above for star imports.
__all__ = [
    "CommunicationError",
    "DescribeError",
    "DescribeWarning",
    "InvalidDescribeRequest",
    "MessagingMixin",
    "ReportableDescribeError",
    "ReportableDescribeWarning",
    "ReportingLimitWarning",
    "ResolveError",
]
| 1.382813 | 1 |
geoportal/c2cgeoportal_geoportal/views/ogcproxy.py | rbovard/c2cgeoportal | 43 | 12762089 | # Copyright (c) 2011-2021, Camptocamp SA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
import logging
from typing import Dict, Optional, Set, cast
import pyramid.request
from pyramid.httpexceptions import HTTPBadRequest
from sqlalchemy.orm.exc import NoResultFound
from c2cgeoportal_commons.lib.url import Url, get_url2
from c2cgeoportal_commons.models import DBSession, main
from c2cgeoportal_geoportal.lib.caching import get_region
from c2cgeoportal_geoportal.views.proxy import Proxy
# Shared "std" cache region used to memoize OGC server lookups by name.
CACHE_REGION = get_region("std")
LOG = logging.getLogger(__name__)
class OGCProxy(Proxy):
    """
    Proxy implementation that manly manage the ogcserver parameter.

    Then load the corresponding OGCServer.
    """

    def __init__(self, request: pyramid.request.Request, has_default_ogc_server: bool = False):
        """Initialize from the request, resolving the target OGC server.

        Raises HTTPBadRequest when no ``ogcserver`` querystring argument is
        given and no default server is allowed.
        """
        Proxy.__init__(self, request)
        # params hold the parameters we"re going to send to backend
        self.params = dict(self.request.params)
        # reset possible value of role_id and user_id
        if "role_id" in self.params:
            del self.params["role_id"]
        if "user_id" in self.params:
            del self.params["user_id"]
        self.lower_params = self._get_lower_params(self.params)
        if not has_default_ogc_server and "ogcserver" not in self.params:
            raise HTTPBadRequest("The querystring argument 'ogcserver' is required")
        if "ogcserver" in self.params:
            self.ogc_server = self._get_ogcserver_byname(self.params["ogcserver"])

    @CACHE_REGION.cache_on_arguments()  # type: ignore
    def _get_ogcserver_byname(self, name: str) -> main.OGCServer:  # pylint: disable=no-self-use
        """Return the (cached, detached) OGCServer row named *name*.

        Raises HTTPBadRequest listing the known servers when it is missing.
        """
        try:
            result = DBSession.query(main.OGCServer).filter(main.OGCServer.name == name).one()
            # detach from the session so the cached object survives it
            DBSession.expunge(result)
            return cast(main.OGCServer, result)
        except NoResultFound:
            raise HTTPBadRequest(  # pylint: disable=raise-missing-from
                f"The OGC Server '{name}' does not exist (existing: "
                f"{','.join([t[0] for t in DBSession.query(main.OGCServer.name).all()])})."
            )

    def _get_wms_url(self, errors: Set[str]) -> Optional[Url]:
        """Resolve the WMS URL of the configured OGC server, logging errors."""
        ogc_server = self.ogc_server
        url = get_url2(f"The OGC server '{ogc_server.name}'", ogc_server.url, self.request, errors)
        if errors:
            LOG.error("\n".join(errors))
        return url

    def _get_wfs_url(self, errors: Set[str]) -> Optional[Url]:
        """Resolve the WFS URL (falling back to the generic URL), logging errors."""
        ogc_server = self.ogc_server
        url = get_url2(
            f"The OGC server (WFS) '{ogc_server.name}'",
            ogc_server.url_wfs or ogc_server.url,
            self.request,
            errors,
        )
        if errors:
            LOG.error("\n".join(errors))
        return url

    def get_headers(self) -> Dict[str, str]:
        """Return the proxied headers, advertising the service URL to QGIS Server."""
        headers: Dict[str, str] = super().get_headers()
        if self.ogc_server.type == main.OGCSERVER_TYPE_QGISSERVER:
            headers["X-Qgis-Service-Url"] = self.request.current_route_url(
                _query={"ogcserver": self.ogc_server.name}
            )
        return headers
| 1.25 | 1 |
napari/utils/_tests/test_notebook_display.py | quantumjot/napari | 0 | 12762090 | import numpy as np
from napari.utils import nbscreenshot
def test_nbscreenshot(viewer_factory):
    """A notebook screenshot yields a rich-display object carrying image data."""
    view, viewer = viewer_factory()
    np.random.seed(0)
    viewer.add_image(np.random.random((10, 15)))
    shot = nbscreenshot(viewer)
    assert hasattr(shot, '_repr_png_')
    # Simulate the rich-display hook a notebook cell would invoke automatically.
    shot._repr_png_()
    assert shot.image is not None
| 2.625 | 3 |
# RGUtil/RGRequestHelp.py
import base64
import random
import time
from RGUtil.RGCodeUtil import RGResCode
def get_data_with_request(_request):
    """Return the request payload: parsed JSON when present, else form/query values."""
    return _request.json if _request.is_json else _request.values
def request_value(_request, key, default=None):
    """Return the payload value for *key*, or *default* when the key is absent."""
    data = get_data_with_request(_request)
    return data[key] if key in data else default
def request_ip(_request, default=None):
    """Return the client IP from the ``X-Real-Ip`` header, or *default*."""
    hdrs = _request.headers
    return hdrs['X-Real-Ip'] if 'X-Real-Ip' in hdrs else default
def form_res(code, data=None):
    """Build the standard response dict ``{'code': int, 'data': ...}``.

    A zero *code* is resolved from the payload: ``ok`` when data is present,
    ``not_existed`` otherwise.  Non-dict/list payloads are serialized via
    their ``__dict__``.
    """
    if code == 0:
        code = RGResCode.ok if data is not None else RGResCode.not_existed
    if data is None:
        return {'code': int(code)}
    if not isinstance(data, (dict, list)):
        data = data.__dict__
    return {'code': int(code), 'data': data}
def is_int_number(s):
    """Return True when *s* parses as a number.

    NOTE(review): despite the name, any numeric string passes — including
    floats such as ``'1.5'`` and unicode numerals — confirm that is intended.
    """
    try:
        float(s)
    except ValueError:
        pass
    else:
        return True
    import unicodedata
    try:
        unicodedata.numeric(s)
    except (TypeError, ValueError):
        return False
    return True
def request_file_size(request):
    """Return the combined size in bytes of all uploaded files on *request*.

    Each stream's position is restored to the start afterwards so later
    consumers can still read the content.
    """
    total = 0
    for file_key in request.files:
        stream = request.files[file_key]
        # Seek to the end (whence=2) and read the offset: O(1) memory,
        # instead of loading each entire upload into RAM via read().
        stream.seek(0, 2)
        total += stream.tell()
        stream.seek(0)
    return total
def request_file_mine(request):
    """Return the combined size in bytes of all uploaded files on *request*.

    NOTE(review): despite the name hinting at MIME handling, this function has
    always been a byte-for-byte copy of ``request_file_size`` and returns a
    size; behaviour is kept for compatibility — confirm intent with callers.
    """
    total = 0
    for file_key in request.files:
        stream = request.files[file_key]
        # Seek to the end (whence=2) and read the offset: O(1) memory,
        # instead of loading each entire upload into RAM via read().
        stream.seek(0, 2)
        total += stream.tell()
        stream.seek(0)  # rewind so the file can still be consumed afterwards
    return total
# 58-character alphabet shared by encode()/decode(): digits 1-9, uppercase
# letters without I and O, lowercase letters without i.
baseList = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghjklmnopqrstuvwxyz'


def encode(n, b=58):
    """Encode the integer *n* as a string in base *b*.

    :param n: integer (or numeric string) to compress
    :param b: radix, at most 58
    :return: base-*b* string representation
    """
    digits = []
    value = int(n)
    while True:
        value, remainder = divmod(value, b)
        digits.append(baseList[remainder])
        if value <= 0:
            break
    return ''.join(reversed(digits))


def decode(n, b=58):
    """Decode a string produced by :func:`encode` back to an integer.

    :param n: base-*b* string
    :param b: radix, at most 58
    :return: the original integer
    """
    value = 0
    for ch in n:
        value = value * b + baseList.index(ch)
    return value
def did_encode(dir_id, uid):
    """Obfuscate a directory id + user id into a URL-safe token.

    Pre-base64 layout: ``<b58(dir_id+10)><b58(uid)>.<pad_count>.<len(b58(uid))>``.
    ``did_decode`` reverses the transformation.  ``random_index=0`` keeps the
    final base64 scrambling deterministic.
    """
    dir_id = int(dir_id) + 10  # offset so the stored id is never 0
    dir_id = str(dir_id)
    count = 0  # zero-padding count; the padding logic below is disabled
    # if len(dir_id) < 8:
    #     count = 8 - len(dir_id)
    #     dir_id = ''.join(['0', '0', '0', '0', '0', '0', '0', '0'][:count]) + dir_id
    count = str(count)
    uid = encode(uid)
    dir_id = encode(dir_id)
    content = '{}{}.{}.{}'.format(dir_id, uid, count, len(str(uid)))
    return safe_encode_b64(content, random_index=0)
def did_decode(content):
    """Inverse of ``did_encode``: return ``(dir_id, uid)`` as ints."""
    content = safe_decode_b64(content)
    contents = content.split(sep='.')
    uid_count = int(contents[-1])  # length of the base-58 encoded uid tail
    count = int(contents[-2])      # zero-padding count (currently always 0)
    content = contents[0]
    uid = content[-uid_count:]     # encoded uid is the tail of the first field
    dir_id = content[:-uid_count]
    dir_id = dir_id[count:]        # drop any leading zero padding
    return int(decode(dir_id)) - 10, int(decode(uid))
def fid_encode(f_id, uid):
    """Obfuscate a file id + user id into a URL-safe token.

    A microsecond-derived timestamp suffix is appended to the id so repeated
    calls for the same id yield different tokens; its digit count is stored
    so ``fid_decode`` can strip it again.
    """
    f_id = int(f_id) + 10  # offset so the stored id is never 0
    time_str = str((time.time_ns()//1000) % 10000000)  # up-to-7-digit rolling timestamp
    f_id = '{}{}'.format(f_id, time_str)
    f_id = encode(int(f_id))
    return safe_encode_b64('{}.{}.{}'.format(f_id, uid, len(time_str)))
def fid_decode(content):
    """Inverse of ``fid_encode``: return ``(f_id, uid)``.

    Note: ``uid`` is returned as the raw string embedded in the token
    (unlike ``did_decode``, which base-58 decodes it).
    """
    content = safe_decode_b64(content)
    contents = content.split(sep='.')
    length = int(contents[-1])  # digit count of the timestamp suffix
    uid = contents[-2]
    f_id = str(decode(contents[0]))
    f_id = f_id[0:-length]      # strip the timestamp suffix
    return int(f_id) - 10, uid
def safe_encode_b64(content, random_index=None):
    """URL-safe base64 encode *content* without trailing ``=`` padding.

    The number of stripped pad characters is hidden inside the output at a
    split index, and the index itself (one base-58 character) is prepended,
    so ``safe_decode_b64`` can undo the transformation.

    :param content: text to encode
    :param random_index: explicit split index (must be < 58); ``None`` picks
        a random one
    """
    content = base64.urlsafe_b64encode(content.encode("utf-8"))
    content = str(content, "utf-8")
    # Strip the '=' padding, remembering how many characters were removed.
    stripped = content.rstrip('=')
    del_count = len(content) - len(stripped)
    content = stripped
    if random_index is None:
        # safe_decode_b64 reads the index back from a single character, so it
        # must stay below 58 — the old unbounded randint broke round-trips
        # for payloads longer than 58 base64 characters.
        index = random.randint(0, min(len(content) - 1, 57))
    else:
        # Bug fix: the supplied index was previously ignored and forced to 0
        # (harmless for the in-file callers, which only ever pass 0).
        index = random_index
    return encode(index) + content[:index] + encode(del_count) + content[index:]
def safe_decode_b64(content):
    """Inverse of ``safe_encode_b64``.

    The first character encodes the split index; the character found at that
    index (after removing the first) encodes how many ``=`` pads to restore
    before base64-decoding.
    """
    index = decode(content[0])
    content = content[1:]
    del_count = decode(content[index])
    # Remove the embedded pad-count character and restore the padding.
    content = content[:index] + content[index+1:]
    for i in range(del_count):
        content += '='
    return str(base64.urlsafe_b64decode(content.encode("utf-8")), "utf-8")
def bytes_to_hex_string(bytes):
    """Return *bytes* as an uppercase hex string (two digits per byte).

    NOTE: the parameter name shadows the builtin ``bytes``; kept so keyword
    callers are unaffected.
    """
    # join() builds the result in one pass instead of the quadratic '+=' loop.
    return ''.join('%02X' % byte for byte in bytes)
def hex_string_to_bytes(hex_string):
    """Inverse of ``bytes_to_hex_string``: parse a hex string into a bytearray.

    A trailing odd hex digit is silently ignored, matching the historical
    behaviour (the string is consumed in whole pairs only).
    """
    out = bytearray()
    for start in range(0, (len(hex_string) // 2) * 2, 2):
        out.append(int(hex_string[start:start + 2], 16))
    return out
if __name__ == '__main__':
    # Smoke-test the base-58 codec and both id-obfuscation round-trips.
    code = encode(1000, 58)
    print('#58', code)
    print('#10', decode(code, 58))
    dir_id = 122134
    uid = 9812312332
    print('did', dir_id, 'user_id', uid)
    # Directory-id token: should round-trip back to (dir_id, uid).
    code = did_encode(dir_id=dir_id, uid=uid)
    print('did_encode', code)
    did, user_id = did_decode(code)
    print('did', did, 'user_id', user_id)
    # File-id token: should round-trip back to (dir_id, str(uid)).
    token = fid_encode(f_id=dir_id, uid=uid)
    print('fid_encode', token)
    did, user_id = fid_decode(token)
    print('did', did, 'user_id', user_id)
| 2.359375 | 2 |
# Repository: mjuenema/python-terrascript
# tests/test_provider_hashicorp_aws.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:12:25 UTC)
def test_provider_import():
    """The AWS provider module must be importable."""
    from terrascript.provider.hashicorp import aws  # noqa: F401
def test_resource_import():
from terrascript.resource.hashicorp.aws import aws_accessanalyzer_analyzer
from terrascript.resource.hashicorp.aws import aws_acm_certificate
from terrascript.resource.hashicorp.aws import aws_acm_certificate_validation
from terrascript.resource.hashicorp.aws import aws_acmpca_certificate
from terrascript.resource.hashicorp.aws import aws_acmpca_certificate_authority
from terrascript.resource.hashicorp.aws import (
aws_acmpca_certificate_authority_certificate,
)
from terrascript.resource.hashicorp.aws import aws_alb
from terrascript.resource.hashicorp.aws import aws_alb_listener
from terrascript.resource.hashicorp.aws import aws_alb_listener_certificate
from terrascript.resource.hashicorp.aws import aws_alb_listener_rule
from terrascript.resource.hashicorp.aws import aws_alb_target_group
from terrascript.resource.hashicorp.aws import aws_alb_target_group_attachment
from terrascript.resource.hashicorp.aws import aws_ami
from terrascript.resource.hashicorp.aws import aws_ami_copy
from terrascript.resource.hashicorp.aws import aws_ami_from_instance
from terrascript.resource.hashicorp.aws import aws_ami_launch_permission
from terrascript.resource.hashicorp.aws import aws_amplify_app
from terrascript.resource.hashicorp.aws import aws_amplify_backend_environment
from terrascript.resource.hashicorp.aws import aws_amplify_branch
from terrascript.resource.hashicorp.aws import aws_amplify_domain_association
from terrascript.resource.hashicorp.aws import aws_amplify_webhook
from terrascript.resource.hashicorp.aws import aws_api_gateway_account
from terrascript.resource.hashicorp.aws import aws_api_gateway_api_key
from terrascript.resource.hashicorp.aws import aws_api_gateway_authorizer
from terrascript.resource.hashicorp.aws import aws_api_gateway_base_path_mapping
from terrascript.resource.hashicorp.aws import aws_api_gateway_client_certificate
from terrascript.resource.hashicorp.aws import aws_api_gateway_deployment
from terrascript.resource.hashicorp.aws import aws_api_gateway_documentation_part
from terrascript.resource.hashicorp.aws import aws_api_gateway_documentation_version
from terrascript.resource.hashicorp.aws import aws_api_gateway_domain_name
from terrascript.resource.hashicorp.aws import aws_api_gateway_gateway_response
from terrascript.resource.hashicorp.aws import aws_api_gateway_integration
from terrascript.resource.hashicorp.aws import aws_api_gateway_integration_response
from terrascript.resource.hashicorp.aws import aws_api_gateway_method
from terrascript.resource.hashicorp.aws import aws_api_gateway_method_response
from terrascript.resource.hashicorp.aws import aws_api_gateway_method_settings
from terrascript.resource.hashicorp.aws import aws_api_gateway_model
from terrascript.resource.hashicorp.aws import aws_api_gateway_request_validator
from terrascript.resource.hashicorp.aws import aws_api_gateway_resource
from terrascript.resource.hashicorp.aws import aws_api_gateway_rest_api
from terrascript.resource.hashicorp.aws import aws_api_gateway_rest_api_policy
from terrascript.resource.hashicorp.aws import aws_api_gateway_stage
from terrascript.resource.hashicorp.aws import aws_api_gateway_usage_plan
from terrascript.resource.hashicorp.aws import aws_api_gateway_usage_plan_key
from terrascript.resource.hashicorp.aws import aws_api_gateway_vpc_link
from terrascript.resource.hashicorp.aws import aws_apigatewayv2_api
from terrascript.resource.hashicorp.aws import aws_apigatewayv2_api_mapping
from terrascript.resource.hashicorp.aws import aws_apigatewayv2_authorizer
from terrascript.resource.hashicorp.aws import aws_apigatewayv2_deployment
from terrascript.resource.hashicorp.aws import aws_apigatewayv2_domain_name
from terrascript.resource.hashicorp.aws import aws_apigatewayv2_integration
from terrascript.resource.hashicorp.aws import aws_apigatewayv2_integration_response
from terrascript.resource.hashicorp.aws import aws_apigatewayv2_model
from terrascript.resource.hashicorp.aws import aws_apigatewayv2_route
from terrascript.resource.hashicorp.aws import aws_apigatewayv2_route_response
from terrascript.resource.hashicorp.aws import aws_apigatewayv2_stage
from terrascript.resource.hashicorp.aws import aws_apigatewayv2_vpc_link
from terrascript.resource.hashicorp.aws import aws_app_cookie_stickiness_policy
from terrascript.resource.hashicorp.aws import aws_appautoscaling_policy
from terrascript.resource.hashicorp.aws import aws_appautoscaling_scheduled_action
from terrascript.resource.hashicorp.aws import aws_appautoscaling_target
from terrascript.resource.hashicorp.aws import aws_appconfig_application
from terrascript.resource.hashicorp.aws import aws_appconfig_configuration_profile
from terrascript.resource.hashicorp.aws import aws_appconfig_deployment
from terrascript.resource.hashicorp.aws import aws_appconfig_deployment_strategy
from terrascript.resource.hashicorp.aws import aws_appconfig_environment
from terrascript.resource.hashicorp.aws import (
aws_appconfig_hosted_configuration_version,
)
from terrascript.resource.hashicorp.aws import aws_appmesh_gateway_route
from terrascript.resource.hashicorp.aws import aws_appmesh_mesh
from terrascript.resource.hashicorp.aws import aws_appmesh_route
from terrascript.resource.hashicorp.aws import aws_appmesh_virtual_gateway
from terrascript.resource.hashicorp.aws import aws_appmesh_virtual_node
from terrascript.resource.hashicorp.aws import aws_appmesh_virtual_router
from terrascript.resource.hashicorp.aws import aws_appmesh_virtual_service
from terrascript.resource.hashicorp.aws import (
aws_apprunner_auto_scaling_configuration_version,
)
from terrascript.resource.hashicorp.aws import aws_apprunner_connection
from terrascript.resource.hashicorp.aws import (
aws_apprunner_custom_domain_association,
)
from terrascript.resource.hashicorp.aws import aws_apprunner_service
from terrascript.resource.hashicorp.aws import aws_appstream_fleet
from terrascript.resource.hashicorp.aws import aws_appstream_stack
from terrascript.resource.hashicorp.aws import aws_appsync_api_key
from terrascript.resource.hashicorp.aws import aws_appsync_datasource
from terrascript.resource.hashicorp.aws import aws_appsync_function
from terrascript.resource.hashicorp.aws import aws_appsync_graphql_api
from terrascript.resource.hashicorp.aws import aws_appsync_resolver
from terrascript.resource.hashicorp.aws import aws_athena_database
from terrascript.resource.hashicorp.aws import aws_athena_named_query
from terrascript.resource.hashicorp.aws import aws_athena_workgroup
from terrascript.resource.hashicorp.aws import aws_autoscaling_attachment
from terrascript.resource.hashicorp.aws import aws_autoscaling_group
from terrascript.resource.hashicorp.aws import aws_autoscaling_group_tag
from terrascript.resource.hashicorp.aws import aws_autoscaling_lifecycle_hook
from terrascript.resource.hashicorp.aws import aws_autoscaling_notification
from terrascript.resource.hashicorp.aws import aws_autoscaling_policy
from terrascript.resource.hashicorp.aws import aws_autoscaling_schedule
from terrascript.resource.hashicorp.aws import aws_autoscalingplans_scaling_plan
from terrascript.resource.hashicorp.aws import aws_backup_global_settings
from terrascript.resource.hashicorp.aws import aws_backup_plan
from terrascript.resource.hashicorp.aws import aws_backup_region_settings
from terrascript.resource.hashicorp.aws import aws_backup_selection
from terrascript.resource.hashicorp.aws import aws_backup_vault
from terrascript.resource.hashicorp.aws import aws_backup_vault_notifications
from terrascript.resource.hashicorp.aws import aws_backup_vault_policy
from terrascript.resource.hashicorp.aws import aws_batch_compute_environment
from terrascript.resource.hashicorp.aws import aws_batch_job_definition
from terrascript.resource.hashicorp.aws import aws_batch_job_queue
from terrascript.resource.hashicorp.aws import aws_budgets_budget
from terrascript.resource.hashicorp.aws import aws_budgets_budget_action
from terrascript.resource.hashicorp.aws import aws_chime_voice_connector
from terrascript.resource.hashicorp.aws import aws_chime_voice_connector_group
from terrascript.resource.hashicorp.aws import aws_chime_voice_connector_logging
from terrascript.resource.hashicorp.aws import aws_chime_voice_connector_origination
from terrascript.resource.hashicorp.aws import aws_chime_voice_connector_streaming
from terrascript.resource.hashicorp.aws import aws_chime_voice_connector_termination
from terrascript.resource.hashicorp.aws import aws_cloud9_environment_ec2
from terrascript.resource.hashicorp.aws import aws_cloudformation_stack
from terrascript.resource.hashicorp.aws import aws_cloudformation_stack_set
from terrascript.resource.hashicorp.aws import aws_cloudformation_stack_set_instance
from terrascript.resource.hashicorp.aws import aws_cloudformation_type
from terrascript.resource.hashicorp.aws import aws_cloudfront_cache_policy
from terrascript.resource.hashicorp.aws import aws_cloudfront_distribution
from terrascript.resource.hashicorp.aws import aws_cloudfront_function
from terrascript.resource.hashicorp.aws import aws_cloudfront_key_group
from terrascript.resource.hashicorp.aws import (
aws_cloudfront_monitoring_subscription,
)
from terrascript.resource.hashicorp.aws import aws_cloudfront_origin_access_identity
from terrascript.resource.hashicorp.aws import aws_cloudfront_origin_request_policy
from terrascript.resource.hashicorp.aws import aws_cloudfront_public_key
from terrascript.resource.hashicorp.aws import aws_cloudfront_realtime_log_config
from terrascript.resource.hashicorp.aws import aws_cloudhsm_v2_cluster
from terrascript.resource.hashicorp.aws import aws_cloudhsm_v2_hsm
from terrascript.resource.hashicorp.aws import aws_cloudtrail
from terrascript.resource.hashicorp.aws import aws_cloudwatch_composite_alarm
from terrascript.resource.hashicorp.aws import aws_cloudwatch_dashboard
from terrascript.resource.hashicorp.aws import aws_cloudwatch_event_api_destination
from terrascript.resource.hashicorp.aws import aws_cloudwatch_event_archive
from terrascript.resource.hashicorp.aws import aws_cloudwatch_event_bus
from terrascript.resource.hashicorp.aws import aws_cloudwatch_event_bus_policy
from terrascript.resource.hashicorp.aws import aws_cloudwatch_event_connection
from terrascript.resource.hashicorp.aws import aws_cloudwatch_event_permission
from terrascript.resource.hashicorp.aws import aws_cloudwatch_event_rule
from terrascript.resource.hashicorp.aws import aws_cloudwatch_event_target
from terrascript.resource.hashicorp.aws import aws_cloudwatch_log_destination
from terrascript.resource.hashicorp.aws import aws_cloudwatch_log_destination_policy
from terrascript.resource.hashicorp.aws import aws_cloudwatch_log_group
from terrascript.resource.hashicorp.aws import aws_cloudwatch_log_metric_filter
from terrascript.resource.hashicorp.aws import aws_cloudwatch_log_resource_policy
from terrascript.resource.hashicorp.aws import aws_cloudwatch_log_stream
from terrascript.resource.hashicorp.aws import (
aws_cloudwatch_log_subscription_filter,
)
from terrascript.resource.hashicorp.aws import aws_cloudwatch_metric_alarm
from terrascript.resource.hashicorp.aws import aws_cloudwatch_metric_stream
from terrascript.resource.hashicorp.aws import aws_cloudwatch_query_definition
from terrascript.resource.hashicorp.aws import aws_codeartifact_domain
from terrascript.resource.hashicorp.aws import (
aws_codeartifact_domain_permissions_policy,
)
from terrascript.resource.hashicorp.aws import aws_codeartifact_repository
from terrascript.resource.hashicorp.aws import (
aws_codeartifact_repository_permissions_policy,
)
from terrascript.resource.hashicorp.aws import aws_codebuild_project
from terrascript.resource.hashicorp.aws import aws_codebuild_report_group
from terrascript.resource.hashicorp.aws import aws_codebuild_source_credential
from terrascript.resource.hashicorp.aws import aws_codebuild_webhook
from terrascript.resource.hashicorp.aws import aws_codecommit_repository
from terrascript.resource.hashicorp.aws import aws_codecommit_trigger
from terrascript.resource.hashicorp.aws import aws_codedeploy_app
from terrascript.resource.hashicorp.aws import aws_codedeploy_deployment_config
from terrascript.resource.hashicorp.aws import aws_codedeploy_deployment_group
from terrascript.resource.hashicorp.aws import aws_codepipeline
from terrascript.resource.hashicorp.aws import aws_codepipeline_webhook
from terrascript.resource.hashicorp.aws import aws_codestarconnections_connection
from terrascript.resource.hashicorp.aws import aws_codestarconnections_host
from terrascript.resource.hashicorp.aws import (
aws_codestarnotifications_notification_rule,
)
from terrascript.resource.hashicorp.aws import aws_cognito_identity_pool
from terrascript.resource.hashicorp.aws import (
aws_cognito_identity_pool_roles_attachment,
)
from terrascript.resource.hashicorp.aws import aws_cognito_identity_provider
from terrascript.resource.hashicorp.aws import aws_cognito_resource_server
from terrascript.resource.hashicorp.aws import aws_cognito_user_group
from terrascript.resource.hashicorp.aws import aws_cognito_user_pool
from terrascript.resource.hashicorp.aws import aws_cognito_user_pool_client
from terrascript.resource.hashicorp.aws import aws_cognito_user_pool_domain
from terrascript.resource.hashicorp.aws import (
aws_cognito_user_pool_ui_customization,
)
from terrascript.resource.hashicorp.aws import aws_config_aggregate_authorization
from terrascript.resource.hashicorp.aws import aws_config_config_rule
from terrascript.resource.hashicorp.aws import aws_config_configuration_aggregator
from terrascript.resource.hashicorp.aws import aws_config_configuration_recorder
from terrascript.resource.hashicorp.aws import (
aws_config_configuration_recorder_status,
)
from terrascript.resource.hashicorp.aws import aws_config_conformance_pack
from terrascript.resource.hashicorp.aws import aws_config_delivery_channel
from terrascript.resource.hashicorp.aws import (
aws_config_organization_conformance_pack,
)
from terrascript.resource.hashicorp.aws import aws_config_organization_custom_rule
from terrascript.resource.hashicorp.aws import aws_config_organization_managed_rule
from terrascript.resource.hashicorp.aws import aws_config_remediation_configuration
from terrascript.resource.hashicorp.aws import aws_connect_contact_flow
from terrascript.resource.hashicorp.aws import aws_connect_instance
from terrascript.resource.hashicorp.aws import aws_cur_report_definition
from terrascript.resource.hashicorp.aws import aws_customer_gateway
from terrascript.resource.hashicorp.aws import aws_datapipeline_pipeline
from terrascript.resource.hashicorp.aws import aws_datasync_agent
from terrascript.resource.hashicorp.aws import aws_datasync_location_efs
from terrascript.resource.hashicorp.aws import (
aws_datasync_location_fsx_windows_file_system,
)
from terrascript.resource.hashicorp.aws import aws_datasync_location_nfs
from terrascript.resource.hashicorp.aws import aws_datasync_location_s3
from terrascript.resource.hashicorp.aws import aws_datasync_location_smb
from terrascript.resource.hashicorp.aws import aws_datasync_task
from terrascript.resource.hashicorp.aws import aws_dax_cluster
from terrascript.resource.hashicorp.aws import aws_dax_parameter_group
from terrascript.resource.hashicorp.aws import aws_dax_subnet_group
from terrascript.resource.hashicorp.aws import aws_db_cluster_snapshot
from terrascript.resource.hashicorp.aws import aws_db_event_subscription
from terrascript.resource.hashicorp.aws import aws_db_instance
from terrascript.resource.hashicorp.aws import aws_db_instance_role_association
from terrascript.resource.hashicorp.aws import aws_db_option_group
from terrascript.resource.hashicorp.aws import aws_db_parameter_group
from terrascript.resource.hashicorp.aws import aws_db_proxy
from terrascript.resource.hashicorp.aws import aws_db_proxy_default_target_group
from terrascript.resource.hashicorp.aws import aws_db_proxy_endpoint
from terrascript.resource.hashicorp.aws import aws_db_proxy_target
from terrascript.resource.hashicorp.aws import aws_db_security_group
from terrascript.resource.hashicorp.aws import aws_db_snapshot
from terrascript.resource.hashicorp.aws import aws_db_subnet_group
from terrascript.resource.hashicorp.aws import aws_default_network_acl
from terrascript.resource.hashicorp.aws import aws_default_route_table
from terrascript.resource.hashicorp.aws import aws_default_security_group
from terrascript.resource.hashicorp.aws import aws_default_subnet
from terrascript.resource.hashicorp.aws import aws_default_vpc
from terrascript.resource.hashicorp.aws import aws_default_vpc_dhcp_options
from terrascript.resource.hashicorp.aws import aws_devicefarm_project
from terrascript.resource.hashicorp.aws import (
aws_directory_service_conditional_forwarder,
)
from terrascript.resource.hashicorp.aws import aws_directory_service_directory
from terrascript.resource.hashicorp.aws import (
aws_directory_service_log_subscription,
)
from terrascript.resource.hashicorp.aws import aws_dlm_lifecycle_policy
from terrascript.resource.hashicorp.aws import aws_dms_certificate
from terrascript.resource.hashicorp.aws import aws_dms_endpoint
from terrascript.resource.hashicorp.aws import aws_dms_event_subscription
from terrascript.resource.hashicorp.aws import aws_dms_replication_instance
from terrascript.resource.hashicorp.aws import aws_dms_replication_subnet_group
from terrascript.resource.hashicorp.aws import aws_dms_replication_task
from terrascript.resource.hashicorp.aws import aws_docdb_cluster
from terrascript.resource.hashicorp.aws import aws_docdb_cluster_instance
from terrascript.resource.hashicorp.aws import aws_docdb_cluster_parameter_group
from terrascript.resource.hashicorp.aws import aws_docdb_cluster_snapshot
from terrascript.resource.hashicorp.aws import aws_docdb_subnet_group
from terrascript.resource.hashicorp.aws import aws_dx_bgp_peer
from terrascript.resource.hashicorp.aws import aws_dx_connection
from terrascript.resource.hashicorp.aws import aws_dx_connection_association
from terrascript.resource.hashicorp.aws import aws_dx_gateway
from terrascript.resource.hashicorp.aws import aws_dx_gateway_association
from terrascript.resource.hashicorp.aws import aws_dx_gateway_association_proposal
from terrascript.resource.hashicorp.aws import (
aws_dx_hosted_private_virtual_interface,
)
from terrascript.resource.hashicorp.aws import (
aws_dx_hosted_private_virtual_interface_accepter,
)
from terrascript.resource.hashicorp.aws import (
aws_dx_hosted_public_virtual_interface,
)
from terrascript.resource.hashicorp.aws import (
aws_dx_hosted_public_virtual_interface_accepter,
)
from terrascript.resource.hashicorp.aws import (
aws_dx_hosted_transit_virtual_interface,
)
from terrascript.resource.hashicorp.aws import (
aws_dx_hosted_transit_virtual_interface_accepter,
)
from terrascript.resource.hashicorp.aws import aws_dx_lag
from terrascript.resource.hashicorp.aws import aws_dx_private_virtual_interface
from terrascript.resource.hashicorp.aws import aws_dx_public_virtual_interface
from terrascript.resource.hashicorp.aws import aws_dx_transit_virtual_interface
from terrascript.resource.hashicorp.aws import aws_dynamodb_global_table
from terrascript.resource.hashicorp.aws import (
aws_dynamodb_kinesis_streaming_destination,
)
from terrascript.resource.hashicorp.aws import aws_dynamodb_table
from terrascript.resource.hashicorp.aws import aws_dynamodb_table_item
from terrascript.resource.hashicorp.aws import aws_dynamodb_tag
from terrascript.resource.hashicorp.aws import aws_ebs_default_kms_key
from terrascript.resource.hashicorp.aws import aws_ebs_encryption_by_default
from terrascript.resource.hashicorp.aws import aws_ebs_snapshot
from terrascript.resource.hashicorp.aws import aws_ebs_snapshot_copy
from terrascript.resource.hashicorp.aws import aws_ebs_snapshot_import
from terrascript.resource.hashicorp.aws import aws_ebs_volume
from terrascript.resource.hashicorp.aws import aws_ec2_availability_zone_group
from terrascript.resource.hashicorp.aws import aws_ec2_capacity_reservation
from terrascript.resource.hashicorp.aws import aws_ec2_carrier_gateway
from terrascript.resource.hashicorp.aws import aws_ec2_client_vpn_authorization_rule
from terrascript.resource.hashicorp.aws import aws_ec2_client_vpn_endpoint
from terrascript.resource.hashicorp.aws import (
aws_ec2_client_vpn_network_association,
)
from terrascript.resource.hashicorp.aws import aws_ec2_client_vpn_route
from terrascript.resource.hashicorp.aws import aws_ec2_fleet
from terrascript.resource.hashicorp.aws import aws_ec2_local_gateway_route
from terrascript.resource.hashicorp.aws import (
aws_ec2_local_gateway_route_table_vpc_association,
)
from terrascript.resource.hashicorp.aws import aws_ec2_managed_prefix_list
from terrascript.resource.hashicorp.aws import aws_ec2_managed_prefix_list_entry
from terrascript.resource.hashicorp.aws import aws_ec2_tag
from terrascript.resource.hashicorp.aws import aws_ec2_traffic_mirror_filter
from terrascript.resource.hashicorp.aws import aws_ec2_traffic_mirror_filter_rule
from terrascript.resource.hashicorp.aws import aws_ec2_traffic_mirror_session
from terrascript.resource.hashicorp.aws import aws_ec2_traffic_mirror_target
from terrascript.resource.hashicorp.aws import aws_ec2_transit_gateway
from terrascript.resource.hashicorp.aws import (
aws_ec2_transit_gateway_peering_attachment,
)
from terrascript.resource.hashicorp.aws import (
aws_ec2_transit_gateway_peering_attachment_accepter,
)
from terrascript.resource.hashicorp.aws import (
aws_ec2_transit_gateway_prefix_list_reference,
)
from terrascript.resource.hashicorp.aws import aws_ec2_transit_gateway_route
from terrascript.resource.hashicorp.aws import aws_ec2_transit_gateway_route_table
from terrascript.resource.hashicorp.aws import (
aws_ec2_transit_gateway_route_table_association,
)
from terrascript.resource.hashicorp.aws import (
aws_ec2_transit_gateway_route_table_propagation,
)
from terrascript.resource.hashicorp.aws import (
aws_ec2_transit_gateway_vpc_attachment,
)
from terrascript.resource.hashicorp.aws import (
aws_ec2_transit_gateway_vpc_attachment_accepter,
)
from terrascript.resource.hashicorp.aws import aws_ecr_lifecycle_policy
from terrascript.resource.hashicorp.aws import aws_ecr_registry_policy
from terrascript.resource.hashicorp.aws import aws_ecr_replication_configuration
from terrascript.resource.hashicorp.aws import aws_ecr_repository
from terrascript.resource.hashicorp.aws import aws_ecr_repository_policy
from terrascript.resource.hashicorp.aws import aws_ecrpublic_repository
from terrascript.resource.hashicorp.aws import aws_ecs_capacity_provider
from terrascript.resource.hashicorp.aws import aws_ecs_cluster
from terrascript.resource.hashicorp.aws import aws_ecs_service
from terrascript.resource.hashicorp.aws import aws_ecs_tag
from terrascript.resource.hashicorp.aws import aws_ecs_task_definition
from terrascript.resource.hashicorp.aws import aws_efs_access_point
from terrascript.resource.hashicorp.aws import aws_efs_backup_policy
from terrascript.resource.hashicorp.aws import aws_efs_file_system
from terrascript.resource.hashicorp.aws import aws_efs_file_system_policy
from terrascript.resource.hashicorp.aws import aws_efs_mount_target
from terrascript.resource.hashicorp.aws import aws_egress_only_internet_gateway
from terrascript.resource.hashicorp.aws import aws_eip
from terrascript.resource.hashicorp.aws import aws_eip_association
from terrascript.resource.hashicorp.aws import aws_eks_addon
from terrascript.resource.hashicorp.aws import aws_eks_cluster
from terrascript.resource.hashicorp.aws import aws_eks_fargate_profile
from terrascript.resource.hashicorp.aws import aws_eks_identity_provider_config
from terrascript.resource.hashicorp.aws import aws_eks_node_group
from terrascript.resource.hashicorp.aws import aws_elastic_beanstalk_application
from terrascript.resource.hashicorp.aws import (
aws_elastic_beanstalk_application_version,
)
from terrascript.resource.hashicorp.aws import (
aws_elastic_beanstalk_configuration_template,
)
from terrascript.resource.hashicorp.aws import aws_elastic_beanstalk_environment
from terrascript.resource.hashicorp.aws import aws_elasticache_cluster
from terrascript.resource.hashicorp.aws import (
aws_elasticache_global_replication_group,
)
from terrascript.resource.hashicorp.aws import aws_elasticache_parameter_group
from terrascript.resource.hashicorp.aws import aws_elasticache_replication_group
from terrascript.resource.hashicorp.aws import aws_elasticache_security_group
from terrascript.resource.hashicorp.aws import aws_elasticache_subnet_group
from terrascript.resource.hashicorp.aws import aws_elasticache_user
from terrascript.resource.hashicorp.aws import aws_elasticache_user_group
from terrascript.resource.hashicorp.aws import aws_elasticsearch_domain
from terrascript.resource.hashicorp.aws import aws_elasticsearch_domain_policy
from terrascript.resource.hashicorp.aws import aws_elasticsearch_domain_saml_options
from terrascript.resource.hashicorp.aws import aws_elastictranscoder_pipeline
from terrascript.resource.hashicorp.aws import aws_elastictranscoder_preset
from terrascript.resource.hashicorp.aws import aws_elb
from terrascript.resource.hashicorp.aws import aws_elb_attachment
from terrascript.resource.hashicorp.aws import aws_emr_cluster
from terrascript.resource.hashicorp.aws import aws_emr_instance_fleet
from terrascript.resource.hashicorp.aws import aws_emr_instance_group
from terrascript.resource.hashicorp.aws import aws_emr_managed_scaling_policy
from terrascript.resource.hashicorp.aws import aws_emr_security_configuration
from terrascript.resource.hashicorp.aws import aws_flow_log
from terrascript.resource.hashicorp.aws import aws_fms_admin_account
from terrascript.resource.hashicorp.aws import aws_fms_policy
from terrascript.resource.hashicorp.aws import aws_fsx_backup
from terrascript.resource.hashicorp.aws import aws_fsx_lustre_file_system
from terrascript.resource.hashicorp.aws import aws_fsx_ontap_file_system
from terrascript.resource.hashicorp.aws import aws_fsx_windows_file_system
from terrascript.resource.hashicorp.aws import aws_gamelift_alias
from terrascript.resource.hashicorp.aws import aws_gamelift_build
from terrascript.resource.hashicorp.aws import aws_gamelift_fleet
from terrascript.resource.hashicorp.aws import aws_gamelift_game_session_queue
from terrascript.resource.hashicorp.aws import aws_glacier_vault
from terrascript.resource.hashicorp.aws import aws_glacier_vault_lock
from terrascript.resource.hashicorp.aws import aws_globalaccelerator_accelerator
from terrascript.resource.hashicorp.aws import aws_globalaccelerator_endpoint_group
from terrascript.resource.hashicorp.aws import aws_globalaccelerator_listener
from terrascript.resource.hashicorp.aws import aws_glue_catalog_database
from terrascript.resource.hashicorp.aws import aws_glue_catalog_table
from terrascript.resource.hashicorp.aws import aws_glue_classifier
from terrascript.resource.hashicorp.aws import aws_glue_connection
from terrascript.resource.hashicorp.aws import aws_glue_crawler
from terrascript.resource.hashicorp.aws import (
aws_glue_data_catalog_encryption_settings,
)
from terrascript.resource.hashicorp.aws import aws_glue_dev_endpoint
from terrascript.resource.hashicorp.aws import aws_glue_job
from terrascript.resource.hashicorp.aws import aws_glue_ml_transform
from terrascript.resource.hashicorp.aws import aws_glue_partition
from terrascript.resource.hashicorp.aws import aws_glue_registry
from terrascript.resource.hashicorp.aws import aws_glue_resource_policy
from terrascript.resource.hashicorp.aws import aws_glue_schema
from terrascript.resource.hashicorp.aws import aws_glue_security_configuration
from terrascript.resource.hashicorp.aws import aws_glue_trigger
from terrascript.resource.hashicorp.aws import aws_glue_user_defined_function
from terrascript.resource.hashicorp.aws import aws_glue_workflow
from terrascript.resource.hashicorp.aws import aws_guardduty_detector
from terrascript.resource.hashicorp.aws import aws_guardduty_filter
from terrascript.resource.hashicorp.aws import aws_guardduty_invite_accepter
from terrascript.resource.hashicorp.aws import aws_guardduty_ipset
from terrascript.resource.hashicorp.aws import aws_guardduty_member
from terrascript.resource.hashicorp.aws import (
aws_guardduty_organization_admin_account,
)
from terrascript.resource.hashicorp.aws import (
aws_guardduty_organization_configuration,
)
from terrascript.resource.hashicorp.aws import aws_guardduty_publishing_destination
from terrascript.resource.hashicorp.aws import aws_guardduty_threatintelset
from terrascript.resource.hashicorp.aws import aws_iam_access_key
from terrascript.resource.hashicorp.aws import aws_iam_account_alias
from terrascript.resource.hashicorp.aws import aws_iam_account_password_policy
from terrascript.resource.hashicorp.aws import aws_iam_group
from terrascript.resource.hashicorp.aws import aws_iam_group_membership
from terrascript.resource.hashicorp.aws import aws_iam_group_policy
from terrascript.resource.hashicorp.aws import aws_iam_group_policy_attachment
from terrascript.resource.hashicorp.aws import aws_iam_instance_profile
from terrascript.resource.hashicorp.aws import aws_iam_openid_connect_provider
from terrascript.resource.hashicorp.aws import aws_iam_policy
from terrascript.resource.hashicorp.aws import aws_iam_policy_attachment
from terrascript.resource.hashicorp.aws import aws_iam_role
from terrascript.resource.hashicorp.aws import aws_iam_role_policy
from terrascript.resource.hashicorp.aws import aws_iam_role_policy_attachment
from terrascript.resource.hashicorp.aws import aws_iam_saml_provider
from terrascript.resource.hashicorp.aws import aws_iam_server_certificate
from terrascript.resource.hashicorp.aws import aws_iam_service_linked_role
from terrascript.resource.hashicorp.aws import aws_iam_user
from terrascript.resource.hashicorp.aws import aws_iam_user_group_membership
from terrascript.resource.hashicorp.aws import aws_iam_user_login_profile
from terrascript.resource.hashicorp.aws import aws_iam_user_policy
from terrascript.resource.hashicorp.aws import aws_iam_user_policy_attachment
from terrascript.resource.hashicorp.aws import aws_iam_user_ssh_key
from terrascript.resource.hashicorp.aws import aws_imagebuilder_component
from terrascript.resource.hashicorp.aws import (
aws_imagebuilder_distribution_configuration,
)
from terrascript.resource.hashicorp.aws import aws_imagebuilder_image
from terrascript.resource.hashicorp.aws import aws_imagebuilder_image_pipeline
from terrascript.resource.hashicorp.aws import aws_imagebuilder_image_recipe
from terrascript.resource.hashicorp.aws import (
aws_imagebuilder_infrastructure_configuration,
)
from terrascript.resource.hashicorp.aws import aws_inspector_assessment_target
from terrascript.resource.hashicorp.aws import aws_inspector_assessment_template
from terrascript.resource.hashicorp.aws import aws_inspector_resource_group
from terrascript.resource.hashicorp.aws import aws_instance
from terrascript.resource.hashicorp.aws import aws_internet_gateway
from terrascript.resource.hashicorp.aws import aws_iot_certificate
from terrascript.resource.hashicorp.aws import aws_iot_policy
from terrascript.resource.hashicorp.aws import aws_iot_policy_attachment
from terrascript.resource.hashicorp.aws import aws_iot_role_alias
from terrascript.resource.hashicorp.aws import aws_iot_thing
from terrascript.resource.hashicorp.aws import aws_iot_thing_principal_attachment
from terrascript.resource.hashicorp.aws import aws_iot_thing_type
from terrascript.resource.hashicorp.aws import aws_iot_topic_rule
from terrascript.resource.hashicorp.aws import aws_key_pair
from terrascript.resource.hashicorp.aws import aws_kinesis_analytics_application
from terrascript.resource.hashicorp.aws import aws_kinesis_firehose_delivery_stream
from terrascript.resource.hashicorp.aws import aws_kinesis_stream
from terrascript.resource.hashicorp.aws import aws_kinesis_stream_consumer
from terrascript.resource.hashicorp.aws import aws_kinesis_video_stream
from terrascript.resource.hashicorp.aws import aws_kinesisanalyticsv2_application
from terrascript.resource.hashicorp.aws import (
aws_kinesisanalyticsv2_application_snapshot,
)
from terrascript.resource.hashicorp.aws import aws_kms_alias
from terrascript.resource.hashicorp.aws import aws_kms_ciphertext
from terrascript.resource.hashicorp.aws import aws_kms_external_key
from terrascript.resource.hashicorp.aws import aws_kms_grant
from terrascript.resource.hashicorp.aws import aws_kms_key
from terrascript.resource.hashicorp.aws import aws_lakeformation_data_lake_settings
from terrascript.resource.hashicorp.aws import aws_lakeformation_permissions
from terrascript.resource.hashicorp.aws import aws_lakeformation_resource
from terrascript.resource.hashicorp.aws import aws_lambda_alias
from terrascript.resource.hashicorp.aws import aws_lambda_code_signing_config
from terrascript.resource.hashicorp.aws import aws_lambda_event_source_mapping
from terrascript.resource.hashicorp.aws import aws_lambda_function
from terrascript.resource.hashicorp.aws import (
aws_lambda_function_event_invoke_config,
)
from terrascript.resource.hashicorp.aws import aws_lambda_layer_version
from terrascript.resource.hashicorp.aws import aws_lambda_permission
from terrascript.resource.hashicorp.aws import (
aws_lambda_provisioned_concurrency_config,
)
from terrascript.resource.hashicorp.aws import aws_launch_configuration
from terrascript.resource.hashicorp.aws import aws_launch_template
from terrascript.resource.hashicorp.aws import aws_lb
from terrascript.resource.hashicorp.aws import aws_lb_cookie_stickiness_policy
from terrascript.resource.hashicorp.aws import aws_lb_listener
from terrascript.resource.hashicorp.aws import aws_lb_listener_certificate
from terrascript.resource.hashicorp.aws import aws_lb_listener_rule
from terrascript.resource.hashicorp.aws import aws_lb_ssl_negotiation_policy
from terrascript.resource.hashicorp.aws import aws_lb_target_group
from terrascript.resource.hashicorp.aws import aws_lb_target_group_attachment
from terrascript.resource.hashicorp.aws import aws_lex_bot
from terrascript.resource.hashicorp.aws import aws_lex_bot_alias
from terrascript.resource.hashicorp.aws import aws_lex_intent
from terrascript.resource.hashicorp.aws import aws_lex_slot_type
from terrascript.resource.hashicorp.aws import aws_licensemanager_association
from terrascript.resource.hashicorp.aws import (
aws_licensemanager_license_configuration,
)
from terrascript.resource.hashicorp.aws import aws_lightsail_domain
from terrascript.resource.hashicorp.aws import aws_lightsail_instance
from terrascript.resource.hashicorp.aws import aws_lightsail_instance_public_ports
from terrascript.resource.hashicorp.aws import aws_lightsail_key_pair
from terrascript.resource.hashicorp.aws import aws_lightsail_static_ip
from terrascript.resource.hashicorp.aws import aws_lightsail_static_ip_attachment
from terrascript.resource.hashicorp.aws import (
aws_load_balancer_backend_server_policy,
)
from terrascript.resource.hashicorp.aws import aws_load_balancer_listener_policy
from terrascript.resource.hashicorp.aws import aws_load_balancer_policy
from terrascript.resource.hashicorp.aws import aws_macie2_account
from terrascript.resource.hashicorp.aws import aws_macie2_classification_job
from terrascript.resource.hashicorp.aws import aws_macie2_custom_data_identifier
from terrascript.resource.hashicorp.aws import aws_macie2_findings_filter
from terrascript.resource.hashicorp.aws import aws_macie2_invitation_accepter
from terrascript.resource.hashicorp.aws import aws_macie2_member
from terrascript.resource.hashicorp.aws import aws_macie2_organization_admin_account
from terrascript.resource.hashicorp.aws import aws_macie_member_account_association
from terrascript.resource.hashicorp.aws import aws_macie_s3_bucket_association
from terrascript.resource.hashicorp.aws import aws_main_route_table_association
from terrascript.resource.hashicorp.aws import aws_media_convert_queue
from terrascript.resource.hashicorp.aws import aws_media_package_channel
from terrascript.resource.hashicorp.aws import aws_media_store_container
from terrascript.resource.hashicorp.aws import aws_media_store_container_policy
from terrascript.resource.hashicorp.aws import aws_mq_broker
from terrascript.resource.hashicorp.aws import aws_mq_configuration
from terrascript.resource.hashicorp.aws import aws_msk_cluster
from terrascript.resource.hashicorp.aws import aws_msk_configuration
from terrascript.resource.hashicorp.aws import aws_msk_scram_secret_association
from terrascript.resource.hashicorp.aws import aws_mwaa_environment
from terrascript.resource.hashicorp.aws import aws_nat_gateway
from terrascript.resource.hashicorp.aws import aws_neptune_cluster
from terrascript.resource.hashicorp.aws import aws_neptune_cluster_endpoint
from terrascript.resource.hashicorp.aws import aws_neptune_cluster_instance
from terrascript.resource.hashicorp.aws import aws_neptune_cluster_parameter_group
from terrascript.resource.hashicorp.aws import aws_neptune_cluster_snapshot
from terrascript.resource.hashicorp.aws import aws_neptune_event_subscription
from terrascript.resource.hashicorp.aws import aws_neptune_parameter_group
from terrascript.resource.hashicorp.aws import aws_neptune_subnet_group
from terrascript.resource.hashicorp.aws import aws_network_acl
from terrascript.resource.hashicorp.aws import aws_network_acl_rule
from terrascript.resource.hashicorp.aws import aws_network_interface
from terrascript.resource.hashicorp.aws import aws_network_interface_attachment
from terrascript.resource.hashicorp.aws import aws_network_interface_sg_attachment
from terrascript.resource.hashicorp.aws import aws_networkfirewall_firewall
from terrascript.resource.hashicorp.aws import aws_networkfirewall_firewall_policy
from terrascript.resource.hashicorp.aws import (
aws_networkfirewall_logging_configuration,
)
from terrascript.resource.hashicorp.aws import aws_networkfirewall_resource_policy
from terrascript.resource.hashicorp.aws import aws_networkfirewall_rule_group
from terrascript.resource.hashicorp.aws import aws_opsworks_application
from terrascript.resource.hashicorp.aws import aws_opsworks_custom_layer
from terrascript.resource.hashicorp.aws import aws_opsworks_ganglia_layer
from terrascript.resource.hashicorp.aws import aws_opsworks_haproxy_layer
from terrascript.resource.hashicorp.aws import aws_opsworks_instance
from terrascript.resource.hashicorp.aws import aws_opsworks_java_app_layer
from terrascript.resource.hashicorp.aws import aws_opsworks_memcached_layer
from terrascript.resource.hashicorp.aws import aws_opsworks_mysql_layer
from terrascript.resource.hashicorp.aws import aws_opsworks_nodejs_app_layer
from terrascript.resource.hashicorp.aws import aws_opsworks_permission
from terrascript.resource.hashicorp.aws import aws_opsworks_php_app_layer
from terrascript.resource.hashicorp.aws import aws_opsworks_rails_app_layer
from terrascript.resource.hashicorp.aws import aws_opsworks_rds_db_instance
from terrascript.resource.hashicorp.aws import aws_opsworks_stack
from terrascript.resource.hashicorp.aws import aws_opsworks_static_web_layer
from terrascript.resource.hashicorp.aws import aws_opsworks_user_profile
from terrascript.resource.hashicorp.aws import aws_organizations_account
from terrascript.resource.hashicorp.aws import (
aws_organizations_delegated_administrator,
)
from terrascript.resource.hashicorp.aws import aws_organizations_organization
from terrascript.resource.hashicorp.aws import aws_organizations_organizational_unit
from terrascript.resource.hashicorp.aws import aws_organizations_policy
from terrascript.resource.hashicorp.aws import aws_organizations_policy_attachment
from terrascript.resource.hashicorp.aws import aws_pinpoint_adm_channel
from terrascript.resource.hashicorp.aws import aws_pinpoint_apns_channel
from terrascript.resource.hashicorp.aws import aws_pinpoint_apns_sandbox_channel
from terrascript.resource.hashicorp.aws import aws_pinpoint_apns_voip_channel
from terrascript.resource.hashicorp.aws import (
aws_pinpoint_apns_voip_sandbox_channel,
)
from terrascript.resource.hashicorp.aws import aws_pinpoint_app
from terrascript.resource.hashicorp.aws import aws_pinpoint_baidu_channel
from terrascript.resource.hashicorp.aws import aws_pinpoint_email_channel
from terrascript.resource.hashicorp.aws import aws_pinpoint_event_stream
from terrascript.resource.hashicorp.aws import aws_pinpoint_gcm_channel
from terrascript.resource.hashicorp.aws import aws_pinpoint_sms_channel
from terrascript.resource.hashicorp.aws import aws_placement_group
from terrascript.resource.hashicorp.aws import aws_prometheus_workspace
from terrascript.resource.hashicorp.aws import aws_proxy_protocol_policy
from terrascript.resource.hashicorp.aws import aws_qldb_ledger
from terrascript.resource.hashicorp.aws import aws_quicksight_group
from terrascript.resource.hashicorp.aws import aws_quicksight_group_membership
from terrascript.resource.hashicorp.aws import aws_quicksight_user
from terrascript.resource.hashicorp.aws import aws_ram_principal_association
from terrascript.resource.hashicorp.aws import aws_ram_resource_association
from terrascript.resource.hashicorp.aws import aws_ram_resource_share
from terrascript.resource.hashicorp.aws import aws_ram_resource_share_accepter
from terrascript.resource.hashicorp.aws import aws_rds_cluster
from terrascript.resource.hashicorp.aws import aws_rds_cluster_endpoint
from terrascript.resource.hashicorp.aws import aws_rds_cluster_instance
from terrascript.resource.hashicorp.aws import aws_rds_cluster_parameter_group
from terrascript.resource.hashicorp.aws import aws_rds_cluster_role_association
from terrascript.resource.hashicorp.aws import aws_rds_global_cluster
from terrascript.resource.hashicorp.aws import aws_redshift_cluster
from terrascript.resource.hashicorp.aws import aws_redshift_event_subscription
from terrascript.resource.hashicorp.aws import aws_redshift_parameter_group
from terrascript.resource.hashicorp.aws import aws_redshift_security_group
from terrascript.resource.hashicorp.aws import aws_redshift_snapshot_copy_grant
from terrascript.resource.hashicorp.aws import aws_redshift_snapshot_schedule
from terrascript.resource.hashicorp.aws import (
aws_redshift_snapshot_schedule_association,
)
from terrascript.resource.hashicorp.aws import aws_redshift_subnet_group
from terrascript.resource.hashicorp.aws import aws_resourcegroups_group
from terrascript.resource.hashicorp.aws import aws_route
from terrascript.resource.hashicorp.aws import aws_route53_delegation_set
from terrascript.resource.hashicorp.aws import aws_route53_health_check
from terrascript.resource.hashicorp.aws import aws_route53_hosted_zone_dnssec
from terrascript.resource.hashicorp.aws import aws_route53_key_signing_key
from terrascript.resource.hashicorp.aws import aws_route53_query_log
from terrascript.resource.hashicorp.aws import aws_route53_record
from terrascript.resource.hashicorp.aws import aws_route53_resolver_dnssec_config
from terrascript.resource.hashicorp.aws import aws_route53_resolver_endpoint
from terrascript.resource.hashicorp.aws import aws_route53_resolver_firewall_config
from terrascript.resource.hashicorp.aws import (
aws_route53_resolver_firewall_domain_list,
)
from terrascript.resource.hashicorp.aws import aws_route53_resolver_firewall_rule
from terrascript.resource.hashicorp.aws import (
aws_route53_resolver_firewall_rule_group,
)
from terrascript.resource.hashicorp.aws import (
aws_route53_resolver_firewall_rule_group_association,
)
from terrascript.resource.hashicorp.aws import aws_route53_resolver_query_log_config
from terrascript.resource.hashicorp.aws import (
aws_route53_resolver_query_log_config_association,
)
from terrascript.resource.hashicorp.aws import aws_route53_resolver_rule
from terrascript.resource.hashicorp.aws import aws_route53_resolver_rule_association
from terrascript.resource.hashicorp.aws import (
aws_route53_vpc_association_authorization,
)
from terrascript.resource.hashicorp.aws import aws_route53_zone
from terrascript.resource.hashicorp.aws import aws_route53_zone_association
from terrascript.resource.hashicorp.aws import (
aws_route53recoverycontrolconfig_cluster,
)
from terrascript.resource.hashicorp.aws import (
aws_route53recoverycontrolconfig_control_panel,
)
from terrascript.resource.hashicorp.aws import (
aws_route53recoverycontrolconfig_routing_control,
)
from terrascript.resource.hashicorp.aws import (
aws_route53recoverycontrolconfig_safety_rule,
)
from terrascript.resource.hashicorp.aws import aws_route53recoveryreadiness_cell
from terrascript.resource.hashicorp.aws import (
aws_route53recoveryreadiness_readiness_check,
)
from terrascript.resource.hashicorp.aws import (
aws_route53recoveryreadiness_recovery_group,
)
from terrascript.resource.hashicorp.aws import (
aws_route53recoveryreadiness_resource_set,
)
from terrascript.resource.hashicorp.aws import aws_route_table
from terrascript.resource.hashicorp.aws import aws_route_table_association
from terrascript.resource.hashicorp.aws import aws_s3_access_point
from terrascript.resource.hashicorp.aws import aws_s3_account_public_access_block
from terrascript.resource.hashicorp.aws import aws_s3_bucket
from terrascript.resource.hashicorp.aws import aws_s3_bucket_analytics_configuration
from terrascript.resource.hashicorp.aws import aws_s3_bucket_inventory
from terrascript.resource.hashicorp.aws import aws_s3_bucket_metric
from terrascript.resource.hashicorp.aws import aws_s3_bucket_notification
from terrascript.resource.hashicorp.aws import aws_s3_bucket_object
from terrascript.resource.hashicorp.aws import aws_s3_bucket_ownership_controls
from terrascript.resource.hashicorp.aws import aws_s3_bucket_policy
from terrascript.resource.hashicorp.aws import aws_s3_bucket_public_access_block
from terrascript.resource.hashicorp.aws import aws_s3_object_copy
from terrascript.resource.hashicorp.aws import aws_s3control_bucket
from terrascript.resource.hashicorp.aws import (
aws_s3control_bucket_lifecycle_configuration,
)
from terrascript.resource.hashicorp.aws import aws_s3control_bucket_policy
from terrascript.resource.hashicorp.aws import aws_s3outposts_endpoint
from terrascript.resource.hashicorp.aws import aws_sagemaker_app
from terrascript.resource.hashicorp.aws import aws_sagemaker_app_image_config
from terrascript.resource.hashicorp.aws import aws_sagemaker_code_repository
from terrascript.resource.hashicorp.aws import aws_sagemaker_device_fleet
from terrascript.resource.hashicorp.aws import aws_sagemaker_domain
from terrascript.resource.hashicorp.aws import aws_sagemaker_endpoint
from terrascript.resource.hashicorp.aws import aws_sagemaker_endpoint_configuration
from terrascript.resource.hashicorp.aws import aws_sagemaker_feature_group
from terrascript.resource.hashicorp.aws import aws_sagemaker_flow_definition
from terrascript.resource.hashicorp.aws import aws_sagemaker_human_task_ui
from terrascript.resource.hashicorp.aws import aws_sagemaker_image
from terrascript.resource.hashicorp.aws import aws_sagemaker_image_version
from terrascript.resource.hashicorp.aws import aws_sagemaker_model
from terrascript.resource.hashicorp.aws import aws_sagemaker_model_package_group
from terrascript.resource.hashicorp.aws import aws_sagemaker_notebook_instance
from terrascript.resource.hashicorp.aws import (
aws_sagemaker_notebook_instance_lifecycle_configuration,
)
from terrascript.resource.hashicorp.aws import aws_sagemaker_user_profile
from terrascript.resource.hashicorp.aws import aws_sagemaker_workforce
from terrascript.resource.hashicorp.aws import aws_sagemaker_workteam
from terrascript.resource.hashicorp.aws import aws_schemas_discoverer
from terrascript.resource.hashicorp.aws import aws_schemas_registry
from terrascript.resource.hashicorp.aws import aws_schemas_schema
from terrascript.resource.hashicorp.aws import aws_secretsmanager_secret
from terrascript.resource.hashicorp.aws import aws_secretsmanager_secret_policy
from terrascript.resource.hashicorp.aws import aws_secretsmanager_secret_rotation
from terrascript.resource.hashicorp.aws import aws_secretsmanager_secret_version
from terrascript.resource.hashicorp.aws import aws_security_group
from terrascript.resource.hashicorp.aws import aws_security_group_rule
from terrascript.resource.hashicorp.aws import aws_securityhub_account
from terrascript.resource.hashicorp.aws import aws_securityhub_action_target
from terrascript.resource.hashicorp.aws import aws_securityhub_insight
from terrascript.resource.hashicorp.aws import aws_securityhub_invite_accepter
from terrascript.resource.hashicorp.aws import aws_securityhub_member
from terrascript.resource.hashicorp.aws import (
aws_securityhub_organization_admin_account,
)
from terrascript.resource.hashicorp.aws import (
aws_securityhub_organization_configuration,
)
from terrascript.resource.hashicorp.aws import aws_securityhub_product_subscription
from terrascript.resource.hashicorp.aws import aws_securityhub_standards_control
from terrascript.resource.hashicorp.aws import (
aws_securityhub_standards_subscription,
)
from terrascript.resource.hashicorp.aws import (
aws_serverlessapplicationrepository_cloudformation_stack,
)
from terrascript.resource.hashicorp.aws import aws_service_discovery_http_namespace
from terrascript.resource.hashicorp.aws import aws_service_discovery_instance
from terrascript.resource.hashicorp.aws import (
aws_service_discovery_private_dns_namespace,
)
from terrascript.resource.hashicorp.aws import (
aws_service_discovery_public_dns_namespace,
)
from terrascript.resource.hashicorp.aws import aws_service_discovery_service
from terrascript.resource.hashicorp.aws import (
aws_servicecatalog_budget_resource_association,
)
from terrascript.resource.hashicorp.aws import aws_servicecatalog_constraint
from terrascript.resource.hashicorp.aws import (
aws_servicecatalog_organizations_access,
)
from terrascript.resource.hashicorp.aws import aws_servicecatalog_portfolio
from terrascript.resource.hashicorp.aws import aws_servicecatalog_portfolio_share
from terrascript.resource.hashicorp.aws import (
aws_servicecatalog_principal_portfolio_association,
)
from terrascript.resource.hashicorp.aws import aws_servicecatalog_product
from terrascript.resource.hashicorp.aws import (
aws_servicecatalog_product_portfolio_association,
)
from terrascript.resource.hashicorp.aws import (
aws_servicecatalog_provisioned_product,
)
from terrascript.resource.hashicorp.aws import (
aws_servicecatalog_provisioning_artifact,
)
from terrascript.resource.hashicorp.aws import aws_servicecatalog_service_action
from terrascript.resource.hashicorp.aws import aws_servicecatalog_tag_option
from terrascript.resource.hashicorp.aws import (
aws_servicecatalog_tag_option_resource_association,
)
from terrascript.resource.hashicorp.aws import aws_servicequotas_service_quota
from terrascript.resource.hashicorp.aws import aws_ses_active_receipt_rule_set
from terrascript.resource.hashicorp.aws import aws_ses_configuration_set
from terrascript.resource.hashicorp.aws import aws_ses_domain_dkim
from terrascript.resource.hashicorp.aws import aws_ses_domain_identity
from terrascript.resource.hashicorp.aws import aws_ses_domain_identity_verification
from terrascript.resource.hashicorp.aws import aws_ses_domain_mail_from
from terrascript.resource.hashicorp.aws import aws_ses_email_identity
from terrascript.resource.hashicorp.aws import aws_ses_event_destination
from terrascript.resource.hashicorp.aws import aws_ses_identity_notification_topic
from terrascript.resource.hashicorp.aws import aws_ses_identity_policy
from terrascript.resource.hashicorp.aws import aws_ses_receipt_filter
from terrascript.resource.hashicorp.aws import aws_ses_receipt_rule
from terrascript.resource.hashicorp.aws import aws_ses_receipt_rule_set
from terrascript.resource.hashicorp.aws import aws_ses_template
from terrascript.resource.hashicorp.aws import aws_sfn_activity
from terrascript.resource.hashicorp.aws import aws_sfn_state_machine
from terrascript.resource.hashicorp.aws import aws_shield_protection
from terrascript.resource.hashicorp.aws import aws_shield_protection_group
from terrascript.resource.hashicorp.aws import aws_signer_signing_job
from terrascript.resource.hashicorp.aws import aws_signer_signing_profile
from terrascript.resource.hashicorp.aws import aws_signer_signing_profile_permission
from terrascript.resource.hashicorp.aws import aws_simpledb_domain
from terrascript.resource.hashicorp.aws import aws_snapshot_create_volume_permission
from terrascript.resource.hashicorp.aws import aws_sns_platform_application
from terrascript.resource.hashicorp.aws import aws_sns_sms_preferences
from terrascript.resource.hashicorp.aws import aws_sns_topic
from terrascript.resource.hashicorp.aws import aws_sns_topic_policy
from terrascript.resource.hashicorp.aws import aws_sns_topic_subscription
from terrascript.resource.hashicorp.aws import aws_spot_datafeed_subscription
from terrascript.resource.hashicorp.aws import aws_spot_fleet_request
from terrascript.resource.hashicorp.aws import aws_spot_instance_request
from terrascript.resource.hashicorp.aws import aws_sqs_queue
from terrascript.resource.hashicorp.aws import aws_sqs_queue_policy
from terrascript.resource.hashicorp.aws import aws_ssm_activation
from terrascript.resource.hashicorp.aws import aws_ssm_association
from terrascript.resource.hashicorp.aws import aws_ssm_document
from terrascript.resource.hashicorp.aws import aws_ssm_maintenance_window
from terrascript.resource.hashicorp.aws import aws_ssm_maintenance_window_target
from terrascript.resource.hashicorp.aws import aws_ssm_maintenance_window_task
from terrascript.resource.hashicorp.aws import aws_ssm_parameter
from terrascript.resource.hashicorp.aws import aws_ssm_patch_baseline
from terrascript.resource.hashicorp.aws import aws_ssm_patch_group
from terrascript.resource.hashicorp.aws import aws_ssm_resource_data_sync
from terrascript.resource.hashicorp.aws import aws_ssoadmin_account_assignment
from terrascript.resource.hashicorp.aws import (
aws_ssoadmin_managed_policy_attachment,
)
from terrascript.resource.hashicorp.aws import aws_ssoadmin_permission_set
from terrascript.resource.hashicorp.aws import (
aws_ssoadmin_permission_set_inline_policy,
)
from terrascript.resource.hashicorp.aws import aws_storagegateway_cache
from terrascript.resource.hashicorp.aws import (
aws_storagegateway_cached_iscsi_volume,
)
from terrascript.resource.hashicorp.aws import (
aws_storagegateway_file_system_association,
)
from terrascript.resource.hashicorp.aws import aws_storagegateway_gateway
from terrascript.resource.hashicorp.aws import aws_storagegateway_nfs_file_share
from terrascript.resource.hashicorp.aws import aws_storagegateway_smb_file_share
from terrascript.resource.hashicorp.aws import (
aws_storagegateway_stored_iscsi_volume,
)
from terrascript.resource.hashicorp.aws import aws_storagegateway_tape_pool
from terrascript.resource.hashicorp.aws import aws_storagegateway_upload_buffer
from terrascript.resource.hashicorp.aws import aws_storagegateway_working_storage
from terrascript.resource.hashicorp.aws import aws_subnet
from terrascript.resource.hashicorp.aws import aws_swf_domain
from terrascript.resource.hashicorp.aws import aws_synthetics_canary
from terrascript.resource.hashicorp.aws import aws_timestreamwrite_database
from terrascript.resource.hashicorp.aws import aws_timestreamwrite_table
from terrascript.resource.hashicorp.aws import aws_transfer_access
from terrascript.resource.hashicorp.aws import aws_transfer_server
from terrascript.resource.hashicorp.aws import aws_transfer_ssh_key
from terrascript.resource.hashicorp.aws import aws_transfer_user
from terrascript.resource.hashicorp.aws import aws_volume_attachment
from terrascript.resource.hashicorp.aws import aws_vpc
from terrascript.resource.hashicorp.aws import aws_vpc_dhcp_options
from terrascript.resource.hashicorp.aws import aws_vpc_dhcp_options_association
from terrascript.resource.hashicorp.aws import aws_vpc_endpoint
from terrascript.resource.hashicorp.aws import (
aws_vpc_endpoint_connection_notification,
)
from terrascript.resource.hashicorp.aws import (
aws_vpc_endpoint_route_table_association,
)
from terrascript.resource.hashicorp.aws import aws_vpc_endpoint_service
from terrascript.resource.hashicorp.aws import (
aws_vpc_endpoint_service_allowed_principal,
)
from terrascript.resource.hashicorp.aws import aws_vpc_endpoint_subnet_association
from terrascript.resource.hashicorp.aws import aws_vpc_ipv4_cidr_block_association
from terrascript.resource.hashicorp.aws import aws_vpc_peering_connection
from terrascript.resource.hashicorp.aws import aws_vpc_peering_connection_accepter
from terrascript.resource.hashicorp.aws import aws_vpc_peering_connection_options
from terrascript.resource.hashicorp.aws import aws_vpn_connection
from terrascript.resource.hashicorp.aws import aws_vpn_connection_route
from terrascript.resource.hashicorp.aws import aws_vpn_gateway
from terrascript.resource.hashicorp.aws import aws_vpn_gateway_attachment
from terrascript.resource.hashicorp.aws import aws_vpn_gateway_route_propagation
from terrascript.resource.hashicorp.aws import aws_waf_byte_match_set
from terrascript.resource.hashicorp.aws import aws_waf_geo_match_set
from terrascript.resource.hashicorp.aws import aws_waf_ipset
from terrascript.resource.hashicorp.aws import aws_waf_rate_based_rule
from terrascript.resource.hashicorp.aws import aws_waf_regex_match_set
from terrascript.resource.hashicorp.aws import aws_waf_regex_pattern_set
from terrascript.resource.hashicorp.aws import aws_waf_rule
from terrascript.resource.hashicorp.aws import aws_waf_rule_group
from terrascript.resource.hashicorp.aws import aws_waf_size_constraint_set
from terrascript.resource.hashicorp.aws import aws_waf_sql_injection_match_set
from terrascript.resource.hashicorp.aws import aws_waf_web_acl
from terrascript.resource.hashicorp.aws import aws_waf_xss_match_set
from terrascript.resource.hashicorp.aws import aws_wafregional_byte_match_set
from terrascript.resource.hashicorp.aws import aws_wafregional_geo_match_set
from terrascript.resource.hashicorp.aws import aws_wafregional_ipset
from terrascript.resource.hashicorp.aws import aws_wafregional_rate_based_rule
from terrascript.resource.hashicorp.aws import aws_wafregional_regex_match_set
from terrascript.resource.hashicorp.aws import aws_wafregional_regex_pattern_set
from terrascript.resource.hashicorp.aws import aws_wafregional_rule
from terrascript.resource.hashicorp.aws import aws_wafregional_rule_group
from terrascript.resource.hashicorp.aws import aws_wafregional_size_constraint_set
from terrascript.resource.hashicorp.aws import (
aws_wafregional_sql_injection_match_set,
)
from terrascript.resource.hashicorp.aws import aws_wafregional_web_acl
from terrascript.resource.hashicorp.aws import aws_wafregional_web_acl_association
from terrascript.resource.hashicorp.aws import aws_wafregional_xss_match_set
from terrascript.resource.hashicorp.aws import aws_wafv2_ip_set
from terrascript.resource.hashicorp.aws import aws_wafv2_regex_pattern_set
from terrascript.resource.hashicorp.aws import aws_wafv2_rule_group
from terrascript.resource.hashicorp.aws import aws_wafv2_web_acl
from terrascript.resource.hashicorp.aws import aws_wafv2_web_acl_association
from terrascript.resource.hashicorp.aws import (
aws_wafv2_web_acl_logging_configuration,
)
from terrascript.resource.hashicorp.aws import aws_worklink_fleet
from terrascript.resource.hashicorp.aws import (
aws_worklink_website_certificate_authority_association,
)
from terrascript.resource.hashicorp.aws import aws_workspaces_directory
from terrascript.resource.hashicorp.aws import aws_workspaces_ip_group
from terrascript.resource.hashicorp.aws import aws_workspaces_workspace
from terrascript.resource.hashicorp.aws import aws_xray_encryption_config
from terrascript.resource.hashicorp.aws import aws_xray_group
from terrascript.resource.hashicorp.aws import aws_xray_sampling_rule
def test_datasource_import():
    """Smoke-test that every AWS data source class is importable.

    The imports themselves are the assertions: if terrascript renames or
    drops any ``aws_*`` data source class, the corresponding import raises
    ImportError and pytest reports this test as failed.  The imported names
    are intentionally unused.
    """
    from terrascript.data.hashicorp.aws import aws_acm_certificate
    from terrascript.data.hashicorp.aws import aws_acmpca_certificate
    from terrascript.data.hashicorp.aws import aws_acmpca_certificate_authority
    from terrascript.data.hashicorp.aws import aws_alb
    from terrascript.data.hashicorp.aws import aws_alb_listener
    from terrascript.data.hashicorp.aws import aws_alb_target_group
    from terrascript.data.hashicorp.aws import aws_ami
    from terrascript.data.hashicorp.aws import aws_ami_ids
    from terrascript.data.hashicorp.aws import aws_api_gateway_api_key
    from terrascript.data.hashicorp.aws import aws_api_gateway_domain_name
    from terrascript.data.hashicorp.aws import aws_api_gateway_resource
    from terrascript.data.hashicorp.aws import aws_api_gateway_rest_api
    from terrascript.data.hashicorp.aws import aws_api_gateway_vpc_link
    from terrascript.data.hashicorp.aws import aws_apigatewayv2_api
    from terrascript.data.hashicorp.aws import aws_apigatewayv2_apis
    from terrascript.data.hashicorp.aws import aws_appmesh_mesh
    from terrascript.data.hashicorp.aws import aws_appmesh_virtual_service
    from terrascript.data.hashicorp.aws import aws_arn
    from terrascript.data.hashicorp.aws import aws_autoscaling_group
    from terrascript.data.hashicorp.aws import aws_autoscaling_groups
    from terrascript.data.hashicorp.aws import aws_availability_zone
    from terrascript.data.hashicorp.aws import aws_availability_zones
    from terrascript.data.hashicorp.aws import aws_backup_plan
    from terrascript.data.hashicorp.aws import aws_backup_selection
    from terrascript.data.hashicorp.aws import aws_backup_vault
    from terrascript.data.hashicorp.aws import aws_batch_compute_environment
    from terrascript.data.hashicorp.aws import aws_batch_job_queue
    from terrascript.data.hashicorp.aws import aws_billing_service_account
    from terrascript.data.hashicorp.aws import aws_caller_identity
    from terrascript.data.hashicorp.aws import aws_canonical_user_id
    from terrascript.data.hashicorp.aws import aws_cloudformation_export
    from terrascript.data.hashicorp.aws import aws_cloudformation_stack
    from terrascript.data.hashicorp.aws import aws_cloudformation_type
    from terrascript.data.hashicorp.aws import aws_cloudfront_cache_policy
    from terrascript.data.hashicorp.aws import aws_cloudfront_distribution
    from terrascript.data.hashicorp.aws import aws_cloudfront_function
    from terrascript.data.hashicorp.aws import (
        aws_cloudfront_log_delivery_canonical_user_id,
    )
    from terrascript.data.hashicorp.aws import aws_cloudfront_origin_request_policy
    from terrascript.data.hashicorp.aws import aws_cloudhsm_v2_cluster
    from terrascript.data.hashicorp.aws import aws_cloudtrail_service_account
    from terrascript.data.hashicorp.aws import aws_cloudwatch_event_connection
    from terrascript.data.hashicorp.aws import aws_cloudwatch_event_source
    from terrascript.data.hashicorp.aws import aws_cloudwatch_log_group
    from terrascript.data.hashicorp.aws import aws_cloudwatch_log_groups
    from terrascript.data.hashicorp.aws import aws_codeartifact_authorization_token
    from terrascript.data.hashicorp.aws import aws_codeartifact_repository_endpoint
    from terrascript.data.hashicorp.aws import aws_codecommit_repository
    from terrascript.data.hashicorp.aws import aws_codestarconnections_connection
    from terrascript.data.hashicorp.aws import aws_cognito_user_pools
    from terrascript.data.hashicorp.aws import aws_connect_contact_flow
    from terrascript.data.hashicorp.aws import aws_connect_instance
    from terrascript.data.hashicorp.aws import aws_cur_report_definition
    from terrascript.data.hashicorp.aws import aws_customer_gateway
    from terrascript.data.hashicorp.aws import aws_db_cluster_snapshot
    from terrascript.data.hashicorp.aws import aws_db_event_categories
    from terrascript.data.hashicorp.aws import aws_db_instance
    from terrascript.data.hashicorp.aws import aws_db_snapshot
    from terrascript.data.hashicorp.aws import aws_db_subnet_group
    from terrascript.data.hashicorp.aws import aws_default_tags
    from terrascript.data.hashicorp.aws import aws_directory_service_directory
    from terrascript.data.hashicorp.aws import aws_docdb_engine_version
    from terrascript.data.hashicorp.aws import aws_docdb_orderable_db_instance
    from terrascript.data.hashicorp.aws import aws_dx_connection
    from terrascript.data.hashicorp.aws import aws_dx_gateway
    from terrascript.data.hashicorp.aws import aws_dx_location
    from terrascript.data.hashicorp.aws import aws_dx_locations
    from terrascript.data.hashicorp.aws import aws_dynamodb_table
    from terrascript.data.hashicorp.aws import aws_ebs_default_kms_key
    from terrascript.data.hashicorp.aws import aws_ebs_encryption_by_default
    from terrascript.data.hashicorp.aws import aws_ebs_snapshot
    from terrascript.data.hashicorp.aws import aws_ebs_snapshot_ids
    from terrascript.data.hashicorp.aws import aws_ebs_volume
    from terrascript.data.hashicorp.aws import aws_ebs_volumes
    from terrascript.data.hashicorp.aws import aws_ec2_coip_pool
    from terrascript.data.hashicorp.aws import aws_ec2_coip_pools
    from terrascript.data.hashicorp.aws import aws_ec2_instance_type
    from terrascript.data.hashicorp.aws import aws_ec2_instance_type_offering
    from terrascript.data.hashicorp.aws import aws_ec2_instance_type_offerings
    from terrascript.data.hashicorp.aws import aws_ec2_local_gateway
    from terrascript.data.hashicorp.aws import aws_ec2_local_gateway_route_table
    from terrascript.data.hashicorp.aws import aws_ec2_local_gateway_route_tables
    from terrascript.data.hashicorp.aws import aws_ec2_local_gateway_virtual_interface
    from terrascript.data.hashicorp.aws import (
        aws_ec2_local_gateway_virtual_interface_group,
    )
    from terrascript.data.hashicorp.aws import (
        aws_ec2_local_gateway_virtual_interface_groups,
    )
    from terrascript.data.hashicorp.aws import aws_ec2_local_gateways
    from terrascript.data.hashicorp.aws import aws_ec2_managed_prefix_list
    from terrascript.data.hashicorp.aws import aws_ec2_spot_price
    from terrascript.data.hashicorp.aws import aws_ec2_transit_gateway
    from terrascript.data.hashicorp.aws import (
        aws_ec2_transit_gateway_dx_gateway_attachment,
    )
    from terrascript.data.hashicorp.aws import (
        aws_ec2_transit_gateway_peering_attachment,
    )
    from terrascript.data.hashicorp.aws import aws_ec2_transit_gateway_route_table
    from terrascript.data.hashicorp.aws import aws_ec2_transit_gateway_route_tables
    from terrascript.data.hashicorp.aws import aws_ec2_transit_gateway_vpc_attachment
    from terrascript.data.hashicorp.aws import aws_ec2_transit_gateway_vpn_attachment
    from terrascript.data.hashicorp.aws import aws_ecr_authorization_token
    from terrascript.data.hashicorp.aws import aws_ecr_image
    from terrascript.data.hashicorp.aws import aws_ecr_repository
    from terrascript.data.hashicorp.aws import aws_ecs_cluster
    from terrascript.data.hashicorp.aws import aws_ecs_container_definition
    from terrascript.data.hashicorp.aws import aws_ecs_service
    from terrascript.data.hashicorp.aws import aws_ecs_task_definition
    from terrascript.data.hashicorp.aws import aws_efs_access_point
    from terrascript.data.hashicorp.aws import aws_efs_access_points
    from terrascript.data.hashicorp.aws import aws_efs_file_system
    from terrascript.data.hashicorp.aws import aws_efs_mount_target
    from terrascript.data.hashicorp.aws import aws_eip
    from terrascript.data.hashicorp.aws import aws_eks_addon
    from terrascript.data.hashicorp.aws import aws_eks_cluster
    from terrascript.data.hashicorp.aws import aws_eks_cluster_auth
    from terrascript.data.hashicorp.aws import aws_eks_clusters
    from terrascript.data.hashicorp.aws import aws_eks_node_group
    from terrascript.data.hashicorp.aws import aws_eks_node_groups
    from terrascript.data.hashicorp.aws import aws_elastic_beanstalk_application
    from terrascript.data.hashicorp.aws import aws_elastic_beanstalk_hosted_zone
    from terrascript.data.hashicorp.aws import aws_elastic_beanstalk_solution_stack
    from terrascript.data.hashicorp.aws import aws_elasticache_cluster
    from terrascript.data.hashicorp.aws import aws_elasticache_replication_group
    from terrascript.data.hashicorp.aws import aws_elasticache_user
    from terrascript.data.hashicorp.aws import aws_elasticsearch_domain
    from terrascript.data.hashicorp.aws import aws_elb
    from terrascript.data.hashicorp.aws import aws_elb_hosted_zone_id
    from terrascript.data.hashicorp.aws import aws_elb_service_account
    from terrascript.data.hashicorp.aws import aws_globalaccelerator_accelerator
    from terrascript.data.hashicorp.aws import aws_glue_connection
    from terrascript.data.hashicorp.aws import aws_glue_data_catalog_encryption_settings
    from terrascript.data.hashicorp.aws import aws_glue_script
    from terrascript.data.hashicorp.aws import aws_guardduty_detector
    from terrascript.data.hashicorp.aws import aws_iam_account_alias
    from terrascript.data.hashicorp.aws import aws_iam_group
    from terrascript.data.hashicorp.aws import aws_iam_instance_profile
    from terrascript.data.hashicorp.aws import aws_iam_policy
    from terrascript.data.hashicorp.aws import aws_iam_policy_document
    from terrascript.data.hashicorp.aws import aws_iam_role
    from terrascript.data.hashicorp.aws import aws_iam_roles
    from terrascript.data.hashicorp.aws import aws_iam_server_certificate
    from terrascript.data.hashicorp.aws import aws_iam_session_context
    from terrascript.data.hashicorp.aws import aws_iam_user
    from terrascript.data.hashicorp.aws import aws_iam_users
    from terrascript.data.hashicorp.aws import aws_identitystore_group
    from terrascript.data.hashicorp.aws import aws_identitystore_user
    from terrascript.data.hashicorp.aws import aws_imagebuilder_component
    from terrascript.data.hashicorp.aws import (
        aws_imagebuilder_distribution_configuration,
    )
    from terrascript.data.hashicorp.aws import aws_imagebuilder_image
    from terrascript.data.hashicorp.aws import aws_imagebuilder_image_pipeline
    from terrascript.data.hashicorp.aws import aws_imagebuilder_image_recipe
    from terrascript.data.hashicorp.aws import (
        aws_imagebuilder_infrastructure_configuration,
    )
    from terrascript.data.hashicorp.aws import aws_inspector_rules_packages
    from terrascript.data.hashicorp.aws import aws_instance
    from terrascript.data.hashicorp.aws import aws_instances
    from terrascript.data.hashicorp.aws import aws_internet_gateway
    from terrascript.data.hashicorp.aws import aws_iot_endpoint
    from terrascript.data.hashicorp.aws import aws_ip_ranges
    from terrascript.data.hashicorp.aws import aws_kinesis_stream
    from terrascript.data.hashicorp.aws import aws_kinesis_stream_consumer
    from terrascript.data.hashicorp.aws import aws_kms_alias
    from terrascript.data.hashicorp.aws import aws_kms_ciphertext
    from terrascript.data.hashicorp.aws import aws_kms_key
    from terrascript.data.hashicorp.aws import aws_kms_public_key
    from terrascript.data.hashicorp.aws import aws_kms_secret
    from terrascript.data.hashicorp.aws import aws_kms_secrets
    from terrascript.data.hashicorp.aws import aws_lakeformation_data_lake_settings
    from terrascript.data.hashicorp.aws import aws_lakeformation_permissions
    from terrascript.data.hashicorp.aws import aws_lakeformation_resource
    from terrascript.data.hashicorp.aws import aws_lambda_alias
    from terrascript.data.hashicorp.aws import aws_lambda_code_signing_config
    from terrascript.data.hashicorp.aws import aws_lambda_function
    from terrascript.data.hashicorp.aws import aws_lambda_invocation
    from terrascript.data.hashicorp.aws import aws_lambda_layer_version
    from terrascript.data.hashicorp.aws import aws_launch_configuration
    from terrascript.data.hashicorp.aws import aws_launch_template
    from terrascript.data.hashicorp.aws import aws_lb
    from terrascript.data.hashicorp.aws import aws_lb_listener
    from terrascript.data.hashicorp.aws import aws_lb_target_group
    from terrascript.data.hashicorp.aws import aws_lex_bot
    from terrascript.data.hashicorp.aws import aws_lex_bot_alias
    from terrascript.data.hashicorp.aws import aws_lex_intent
    from terrascript.data.hashicorp.aws import aws_lex_slot_type
    from terrascript.data.hashicorp.aws import aws_mq_broker
    from terrascript.data.hashicorp.aws import aws_msk_broker_nodes
    from terrascript.data.hashicorp.aws import aws_msk_cluster
    from terrascript.data.hashicorp.aws import aws_msk_configuration
    from terrascript.data.hashicorp.aws import aws_msk_kafka_version
    from terrascript.data.hashicorp.aws import aws_nat_gateway
    from terrascript.data.hashicorp.aws import aws_neptune_engine_version
    from terrascript.data.hashicorp.aws import aws_neptune_orderable_db_instance
    from terrascript.data.hashicorp.aws import aws_network_acls
    from terrascript.data.hashicorp.aws import aws_network_interface
    from terrascript.data.hashicorp.aws import aws_network_interfaces
    from terrascript.data.hashicorp.aws import (
        aws_organizations_delegated_administrators,
    )
    from terrascript.data.hashicorp.aws import aws_organizations_delegated_services
    from terrascript.data.hashicorp.aws import aws_organizations_organization
    from terrascript.data.hashicorp.aws import aws_organizations_organizational_units
    from terrascript.data.hashicorp.aws import aws_outposts_outpost
    from terrascript.data.hashicorp.aws import aws_outposts_outpost_instance_type
    from terrascript.data.hashicorp.aws import aws_outposts_outpost_instance_types
    from terrascript.data.hashicorp.aws import aws_outposts_outposts
    from terrascript.data.hashicorp.aws import aws_outposts_site
    from terrascript.data.hashicorp.aws import aws_outposts_sites
    from terrascript.data.hashicorp.aws import aws_partition
    from terrascript.data.hashicorp.aws import aws_prefix_list
    from terrascript.data.hashicorp.aws import aws_pricing_product
    from terrascript.data.hashicorp.aws import aws_qldb_ledger
    from terrascript.data.hashicorp.aws import aws_ram_resource_share
    from terrascript.data.hashicorp.aws import aws_rds_certificate
    from terrascript.data.hashicorp.aws import aws_rds_cluster
    from terrascript.data.hashicorp.aws import aws_rds_engine_version
    from terrascript.data.hashicorp.aws import aws_rds_orderable_db_instance
    from terrascript.data.hashicorp.aws import aws_redshift_cluster
    from terrascript.data.hashicorp.aws import aws_redshift_orderable_cluster
    from terrascript.data.hashicorp.aws import aws_redshift_service_account
    from terrascript.data.hashicorp.aws import aws_region
    from terrascript.data.hashicorp.aws import aws_regions
    from terrascript.data.hashicorp.aws import aws_resourcegroupstaggingapi_resources
    from terrascript.data.hashicorp.aws import aws_route
    from terrascript.data.hashicorp.aws import aws_route53_delegation_set
    from terrascript.data.hashicorp.aws import aws_route53_resolver_endpoint
    from terrascript.data.hashicorp.aws import aws_route53_resolver_rule
    from terrascript.data.hashicorp.aws import aws_route53_resolver_rules
    from terrascript.data.hashicorp.aws import aws_route53_zone
    from terrascript.data.hashicorp.aws import aws_route_table
    from terrascript.data.hashicorp.aws import aws_route_tables
    from terrascript.data.hashicorp.aws import aws_s3_bucket
    from terrascript.data.hashicorp.aws import aws_s3_bucket_object
    from terrascript.data.hashicorp.aws import aws_s3_bucket_objects
    from terrascript.data.hashicorp.aws import aws_sagemaker_prebuilt_ecr_image
    from terrascript.data.hashicorp.aws import aws_secretsmanager_secret
    from terrascript.data.hashicorp.aws import aws_secretsmanager_secret_rotation
    from terrascript.data.hashicorp.aws import aws_secretsmanager_secret_version
    from terrascript.data.hashicorp.aws import aws_security_group
    from terrascript.data.hashicorp.aws import aws_security_groups
    from terrascript.data.hashicorp.aws import (
        aws_serverlessapplicationrepository_application,
    )
    from terrascript.data.hashicorp.aws import aws_service_discovery_dns_namespace
    from terrascript.data.hashicorp.aws import aws_servicecatalog_constraint
    from terrascript.data.hashicorp.aws import aws_servicecatalog_launch_paths
    from terrascript.data.hashicorp.aws import aws_servicecatalog_portfolio
    from terrascript.data.hashicorp.aws import aws_servicecatalog_portfolio_constraints
    from terrascript.data.hashicorp.aws import aws_servicecatalog_product
    from terrascript.data.hashicorp.aws import aws_servicequotas_service
    from terrascript.data.hashicorp.aws import aws_servicequotas_service_quota
    from terrascript.data.hashicorp.aws import aws_sfn_activity
    from terrascript.data.hashicorp.aws import aws_sfn_state_machine
    from terrascript.data.hashicorp.aws import aws_signer_signing_job
    from terrascript.data.hashicorp.aws import aws_signer_signing_profile
    from terrascript.data.hashicorp.aws import aws_sns_topic
    from terrascript.data.hashicorp.aws import aws_sqs_queue
    from terrascript.data.hashicorp.aws import aws_ssm_document
    from terrascript.data.hashicorp.aws import aws_ssm_parameter
    from terrascript.data.hashicorp.aws import aws_ssm_patch_baseline
    from terrascript.data.hashicorp.aws import aws_ssoadmin_instances
    from terrascript.data.hashicorp.aws import aws_ssoadmin_permission_set
    from terrascript.data.hashicorp.aws import aws_storagegateway_local_disk
    from terrascript.data.hashicorp.aws import aws_subnet
    from terrascript.data.hashicorp.aws import aws_subnet_ids
    from terrascript.data.hashicorp.aws import aws_subnets
    from terrascript.data.hashicorp.aws import aws_transfer_server
    from terrascript.data.hashicorp.aws import aws_vpc
    from terrascript.data.hashicorp.aws import aws_vpc_dhcp_options
    from terrascript.data.hashicorp.aws import aws_vpc_endpoint
    from terrascript.data.hashicorp.aws import aws_vpc_endpoint_service
    from terrascript.data.hashicorp.aws import aws_vpc_peering_connection
    from terrascript.data.hashicorp.aws import aws_vpc_peering_connections
    from terrascript.data.hashicorp.aws import aws_vpcs
    from terrascript.data.hashicorp.aws import aws_vpn_gateway
    from terrascript.data.hashicorp.aws import aws_waf_ipset
    from terrascript.data.hashicorp.aws import aws_waf_rate_based_rule
    from terrascript.data.hashicorp.aws import aws_waf_rule
    from terrascript.data.hashicorp.aws import aws_waf_web_acl
    from terrascript.data.hashicorp.aws import aws_wafregional_ipset
    from terrascript.data.hashicorp.aws import aws_wafregional_rate_based_rule
    from terrascript.data.hashicorp.aws import aws_wafregional_rule
    from terrascript.data.hashicorp.aws import aws_wafregional_web_acl
    from terrascript.data.hashicorp.aws import aws_wafv2_ip_set
    from terrascript.data.hashicorp.aws import aws_wafv2_regex_pattern_set
    from terrascript.data.hashicorp.aws import aws_wafv2_rule_group
    from terrascript.data.hashicorp.aws import aws_wafv2_web_acl
    from terrascript.data.hashicorp.aws import aws_workspaces_bundle
    from terrascript.data.hashicorp.aws import aws_workspaces_directory
    from terrascript.data.hashicorp.aws import aws_workspaces_image
    from terrascript.data.hashicorp.aws import aws_workspaces_workspace
# TODO: Shortcut imports without namespace for official and supported providers.
# TODO: This has to be moved into a required_providers block.
# def test_version_source():
#
# import terrascript.provider.hashicorp.aws
#
# t = terrascript.provider.hashicorp.aws.aws()
# s = str(t)
#
# assert 'https://github.com/hashicorp/terraform-provider-aws' in s
# assert '3.60.0' in s
| 1.710938 | 2 |
custom_components/alexa_media/alarm_control_panel.py | furetto72/alexa_media_player | 1 | 12762093 | <filename>custom_components/alexa_media/alarm_control_panel.py<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: Apache-2.0
"""
Alexa Devices Alarm Control Panel using Guard Mode.
For more details about this platform, please refer to the documentation at
https://community.home-assistant.io/t/echo-devices-alexa-as-media-player-testers-needed/58639
"""
import logging
from typing import List # noqa pylint: disable=unused-import
from homeassistant import util
from homeassistant.components.alarm_control_panel import AlarmControlPanel
from homeassistant.const import (STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME, STATE_ALARM_DISARMED)
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.event import call_later
from . import DATA_ALEXAMEDIA
from . import DOMAIN as ALEXA_DOMAIN
from . import MIN_TIME_BETWEEN_FORCED_SCANS, MIN_TIME_BETWEEN_SCANS, hide_email
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = [ALEXA_DOMAIN]
def setup_platform(hass, config, add_devices_callback,
                   discovery_info=None):
    """Set up the Alexa alarm control panel platform.

    Creates one AlexaAlarmControlPanel (Alexa Guard) entity per configured
    Alexa Media account and registers it with Home Assistant.  Always
    returns True so Home Assistant considers the platform loaded.
    """
    devices = []  # type: List[AlexaAlarmControlPanel]
    for account, account_dict in (hass.data[DATA_ALEXAMEDIA]
                                  ['accounts'].items()):
        alexa_client = AlexaAlarmControlPanel(account_dict['login_obj'],
                                              hass) \
            # type: AlexaAlarmControlPanel
        # Accounts without a Guard appliance leave unique_id unset; skip
        # them because entities without a unique_id cannot be registered.
        if not (alexa_client and alexa_client.unique_id):
            _LOGGER.debug("%s: Skipping creation of uninitialized device: %s",
                          hide_email(account),
                          alexa_client)
            continue
        devices.append(alexa_client)
        (hass.data[DATA_ALEXAMEDIA]
         ['accounts']
         [account]
         ['entities']
         ['alarm_control_panel']) = alexa_client
    if devices:
        _LOGGER.debug("Adding %s", devices)
        try:
            add_devices_callback(devices, True)
        except HomeAssistantError as exception_:
            # Python 3 exceptions have no ``.message`` attribute; the
            # original ``exception_.message`` raised AttributeError inside
            # this handler.  str() portably yields the error text.
            message = str(exception_)  # type: str
            if message.startswith("Entity id already exists"):
                _LOGGER.debug("Device already added: %s",
                              message)
            else:
                _LOGGER.debug("Unable to add devices: %s : %s",
                              devices,
                              message)
    return True
class AlexaAlarmControlPanel(AlarmControlPanel):
    """Implementation of Alexa Media Player alarm control panel.

    Wraps the Alexa Guard feature: the panel is "armed away" when Guard is
    active and "disarmed" otherwise.  Alexa has no true disarm state, so
    disarming maps to Guard's ARMED_STAY mode.
    """

    def __init__(self, login, hass):
        # pylint: disable=unexpected-keyword-arg
        """Initialize the Alexa device.

        Discovers the account's Guard appliance via the Alexa API.  If no
        Guard panel is found, _appliance_id/_guard_entity_id stay None and
        setup_platform skips this entity (unique_id is None).
        """
        from alexapy import AlexaAPI
        # Class info
        self._login = login
        self.alexa_api = AlexaAPI(self, login)
        self.alexa_api_session = login.session
        self.account = hide_email(login.email)
        self.hass = hass

        # Guard info
        self._appliance_id = None
        self._guard_entity_id = None
        self._friendly_name = "Alexa Guard"
        self._state = None
        self._should_poll = False
        self._attrs = {}
        try:
            from simplejson import JSONDecodeError
            data = self.alexa_api.get_guard_details(self._login)
            guard_dict = (data['locationDetails']
                          ['locationDetails']['Default_Location']
                          ['amazonBridgeDetails']['amazonBridgeDetails']
                          ['LambdaBridge_AAA/OnGuardSmartHomeBridgeService']
                          ['applianceDetails']['applianceDetails'])
        except (KeyError, TypeError, JSONDecodeError):
            # No Guard configured for this account, or the API response
            # shape changed; treat as "no Guard appliances".
            guard_dict = {}
        for key, value in guard_dict.items():
            if value['modelName'] == "REDROCK_GUARD_PANEL":
                self._appliance_id = value['applianceId']
                self._guard_entity_id = value['entityId']
                # Suffix the last 5 chars of the appliance id so multiple
                # Guard panels get distinguishable names.
                self._friendly_name += " " + self._appliance_id[-5:]
                _LOGGER.debug("%s: Discovered %s: %s %s",
                              self.account,
                              self._friendly_name,
                              self._appliance_id,
                              self._guard_entity_id)
        if not self._appliance_id:
            _LOGGER.debug("%s: No Alexa Guard entity found", self.account)
            return None
        # Register event handler on bus; the event name is truncated to 32
        # characters -- presumably a bus event-name limit, TODO confirm.
        hass.bus.listen(('{}_{}'.format(ALEXA_DOMAIN,
                                        hide_email(login.email)))[0:32],
                        self._handle_event)
        self.refresh(no_throttle=True)

    def _handle_event(self, event):
        """Handle websocket events.

        Used instead of polling.  A push activity means Guard state may
        have changed, so schedule a refresh shortly after.
        """
        if 'push_activity' in event.data:
            # Small delay lets the Alexa backend settle before we re-query.
            call_later(self.hass, 2, lambda _:
                       self.refresh(no_throttle=True))

    @util.Throttle(MIN_TIME_BETWEEN_SCANS, MIN_TIME_BETWEEN_FORCED_SCANS)
    def refresh(self):
        """Update Guard state from the Alexa API and push it to HA."""
        import json
        _LOGGER.debug("%s: Refreshing %s", self.account, self.name)
        state = None
        state_json = self.alexa_api.get_guard_state(self._login,
                                                    self._appliance_id)
        if (state_json and 'deviceStates' in state_json
                and state_json['deviceStates']):
            cap = state_json['deviceStates'][0]['capabilityStates']
            # capabilityStates entries are JSON-encoded strings.
            for item_json in cap:
                item = json.loads(item_json)
                if item['name'] == 'armState':
                    state = item['value']
        # Guard against state_json being None or lacking an 'errors' key;
        # the original ``state_json['errors']`` raised TypeError/KeyError
        # on exactly the failure responses this branch should log.
        elif state_json and state_json.get('errors'):
            _LOGGER.debug("%s: Error refreshing alarm_control_panel %s: %s",
                          self.account,
                          self.name,
                          json.dumps(state_json['errors']))
        if state is None:
            return
        if state == "ARMED_AWAY":
            self._state = STATE_ALARM_ARMED_AWAY
        elif state == "ARMED_STAY":
            # ARMED_STAY is used as the "disarmed" mode (see alarm_disarm).
            self._state = STATE_ALARM_DISARMED
        else:
            self._state = STATE_ALARM_DISARMED
        _LOGGER.debug("%s: Alarm State: %s", self.account, self.state)
        self.schedule_update_ha_state()

    def alarm_disarm(self, code=None):
        # pylint: disable=unexpected-keyword-arg
        """Send disarm command.

        We use the arm_home state as Alexa does not have disarm state.
        """
        self.alarm_arm_home()
        self.schedule_update_ha_state()

    def alarm_arm_home(self, code=None):
        """Send arm home command (Guard ARMED_STAY)."""
        self.alexa_api.set_guard_state(self._login,
                                       self._guard_entity_id,
                                       "ARMED_STAY")
        self.refresh(no_throttle=True)
        self.schedule_update_ha_state()

    def alarm_arm_away(self, code=None):
        """Send arm away command (Guard ARMED_AWAY)."""
        # pylint: disable=unexpected-keyword-arg
        self.alexa_api.set_guard_state(self._login,
                                       self._guard_entity_id,
                                       "ARMED_AWAY")
        self.refresh(no_throttle=True)
        self.schedule_update_ha_state()

    @property
    def unique_id(self):
        """Return the unique ID (the Guard entity id, or None)."""
        return self._guard_entity_id

    @property
    def name(self):
        """Return the name of the device."""
        return self._friendly_name

    @property
    def state(self):
        """Return the state of the device."""
        return self._state

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        return self._attrs

    @property
    def should_poll(self):
        """Return the polling state.

        Poll only when there is no websocket connection delivering push
        updates for this account.
        """
        return self._should_poll or not (self.hass.data[DATA_ALEXAMEDIA]
                                         ['accounts'][self._login.email]
                                         ['websocket'])
| 1.867188 | 2 |
pyeem/plots/base.py | drewmee/PyEEM | 4 | 12762094 | <gh_stars>1-10
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
from celluloid import Camera
from matplotlib.ticker import MultipleLocator
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpl_toolkits.mplot3d import Axes3D
def _get_subplot_dims(n):
"""[summary]
Args:
n (int): [description]
Returns:
tuple of int: [description]
"""
ncols = 4
if n % ncols:
nplots = n + (ncols - n % ncols)
else:
nplots = n
nrows = int(nplots / ncols)
return nrows, ncols
def _colorbar(mappable, units, cbar_kws=None):
    """Attach a colorbar to the axes that ``mappable`` was drawn on.

    Args:
        mappable (matplotlib.image.AxesImage or matplotlib.contour.QuadContourSet):
            The artist the colorbar describes.
        units (str): Colorbar label (intensity units).
        cbar_kws (dict, optional): Optional settings: ``cbar_ax_size``,
            ``cbar_ax_pad``, ``cbar_tick_params_labelsize``,
            ``cbar_label_size``, ``cbar_labelpad``. Defaults to None.

    Returns:
        matplotlib.colorbar.Colorbar: The created colorbar.
    """
    # None sentinel instead of a mutable ``{}`` default argument.
    if cbar_kws is None:
        cbar_kws = {}
    # https://joseph-long.com/writing/colorbars/
    last_axes = plt.gca()
    ax = mappable.axes
    fig = ax.figure
    divider = make_axes_locatable(ax)
    cbar_ax_size = cbar_kws.get("cbar_ax_size", "8%")
    cbar_ax_pad = cbar_kws.get("cbar_ax_pad", 0.05)
    cax = divider.append_axes("right", size=cbar_ax_size, pad=cbar_ax_pad)
    cbar = fig.colorbar(mappable, cax=cax)
    cbar_tick_params_labelsize = cbar_kws.get("cbar_tick_params_labelsize", 11)
    cbar.ax.tick_params(labelsize=cbar_tick_params_labelsize)
    # Switch to scientific notation outside [1e-2, 1e3] for compact ticks.
    cbar.formatter.set_powerlimits((-2, 3))
    # Restore the previously-active axes so later pyplot calls target it.
    plt.sca(last_axes)
    cbar_label_size = cbar_kws.get("cbar_label_size", 12)
    cbar_labelpad = cbar_kws.get("cbar_labelpad", 5)
    cbar.set_label(units, size=cbar_label_size, labelpad=cbar_labelpad)
    return cbar
def _eem_contour(
    eem, ax, intensity_units, include_cbar, plot_kws=None, cbar_kws=None, **kwargs
):
    """Plot an EEM as a filled contour plot on ``ax``.

    Args:
        eem (pandas.DataFrame): EEM with emission wavelengths as the index
            and excitation wavelengths as the columns.
        ax (matplotlib.axes.Axes): Axis to draw the contour plot on.
        intensity_units (str): Label for the colorbar.
        include_cbar (bool): If true, a colorbar will be attached.
        plot_kws (dict, optional): Keyword arguments forwarded to
            ``ax.contourf``; an ``aspect`` entry is applied via
            ``ax.set_aspect`` instead. Defaults to None.
        cbar_kws (dict, optional): Keyword arguments for the colorbar.
            Defaults to None.

    Returns:
        matplotlib.contour.QuadContourSet: The contour set.
    """
    # None sentinels instead of mutable ``{}`` default arguments.
    if plot_kws is None:
        plot_kws = {}
    if cbar_kws is None:
        cbar_kws = {}
    # contourf doesn't take aspect as a kwarg, so pull it out of plot_kws
    # and pass it separately via set_aspect(). Clunky but oh well.
    default_aspect = "equal"
    aspect = plot_kws.get("aspect", default_aspect)
    contour_kws = plot_kws.copy()
    contour_kws.pop("aspect", None)
    default_contour_kws = dict()
    contour_kws = dict(default_contour_kws, **contour_kws)

    fl = eem.to_numpy()
    excitation = eem.columns.to_numpy()
    emission = eem.index.to_numpy()
    hmap = ax.contourf(excitation, emission, fl, **contour_kws)
    ax.set_aspect(aspect)

    if include_cbar:
        _colorbar(hmap, units=intensity_units, cbar_kws=cbar_kws)
    return hmap
def _eem_imshow(
    eem, ax, intensity_units, include_cbar, plot_kws=None, cbar_kws=None, **kwargs
):
    """Plot an EEM as an image (heatmap) on ``ax``.

    Args:
        eem (pandas.DataFrame): EEM with emission wavelengths as the index
            and excitation wavelengths as the columns.
        ax (matplotlib.axes.Axes): Axis to draw the image on.
        intensity_units (str): Label for the colorbar.
        include_cbar (bool): If true, a colorbar will be attached.
        plot_kws (dict, optional): Keyword arguments forwarded to
            ``ax.imshow``; merged over the defaults below. Defaults to None.
        cbar_kws (dict, optional): Keyword arguments for the colorbar.
            Defaults to None.

    Returns:
        matplotlib.image.AxesImage: The rendered image.
    """
    # None sentinels instead of mutable ``{}`` default arguments.
    if plot_kws is None:
        plot_kws = {}
    if cbar_kws is None:
        cbar_kws = {}
    excitation = eem.columns.to_numpy()
    emission = eem.index.to_numpy()
    # Default to data coordinates (wavelengths) with the origin at the
    # lower-left so emission increases upward; caller kwargs override.
    default_plot_kws = dict(
        origin="lower",
        extent=[excitation[0], excitation[-1], emission[0], emission[-1]],
        aspect="equal",
    )
    plot_kws = dict(default_plot_kws, **plot_kws)

    hmap = ax.imshow(eem, **plot_kws)
    if include_cbar:
        _colorbar(hmap, intensity_units, cbar_kws=cbar_kws)
    return hmap
def _eem_surface_contour(
    eem,
    ax,
    intensity_units,
    include_cbar,
    plot_type="surface",
    surface_plot_kws={},
    contour_plot_kws={},
    cbar_kws={},
    **kwargs
):
    """Render an EEM as a 3D surface, optionally with a projected contour below.

    Args:
        eem (pandas.DataFrame): EEM with emission wavelengths as the index and
            excitation wavelengths as the columns.
        ax (matplotlib.axes.Axes): 3D axis the surface is plotted on.
            Otherwise, a new axis object will be created.
        intensity_units (str): Label used for the colorbar.
        include_cbar (bool): If true, colorbar will be included.
        plot_type (str, optional): "surface" for the surface alone, or
            "surface_contour" to also project a filled contour beneath it.
            Defaults to "surface".
        surface_plot_kws (dict, optional): Optional keyword arguments to include. They are sent as an
            argument to the matplotlib surface plot call. Defaults to {}.
        contour_plot_kws (dict, optional): Optional keyword arguments to include. They are sent as an
            argument to the matplotlib contour plot call. Defaults to {}.
        cbar_kws (dict, optional): Optional keyword arguments to include for the colorbar. Defaults to {}.
    Returns:
        mpl_toolkits.mplot3d.art3d.Poly3DCollection: The surface artist.
    """
    excitation = eem.columns.to_numpy()
    emission = eem.index.to_numpy()
    fl = eem.to_numpy()
    excitation, emission = np.meshgrid(excitation, emission)
    default_surface_plot_kws = dict(
        rstride=1, cstride=1, alpha=0.75, cmap="viridis", shade=False
    )
    # User-supplied kws override the defaults.
    surface_plot_kws = dict(default_surface_plot_kws, **surface_plot_kws)
    hmap = ax.plot_surface(excitation, emission, fl, **surface_plot_kws)
    zlim_min = kwargs.get("zlim_min", np.nanmin(fl))
    zlim_max = kwargs.get("zlim_max", np.nanmax(fl))
    # Negative offset places the projected contour well below the surface.
    z_offset = zlim_max * -2
    default_contour_plot_kws = dict(
        zdir="z", offset=z_offset, vmin=zlim_min, vmax=zlim_max
    )
    contour_plot_kws = dict(default_contour_plot_kws, **contour_plot_kws)
    if plot_type == "surface_contour":
        ax.contourf(excitation, emission, fl, **contour_plot_kws)
        # Extend the z-axis downward so the offset contour stays visible,
        # then hide z ticks since that axis no longer reads as intensity.
        zlim_min += z_offset
        ax.set_zlim(zlim_min, zlim_max)
        ax.zaxis.set_ticks_position("none")
        ax.set_zticks([])
    elev = kwargs.get("elev", 20)
    azim = kwargs.get("azim", 135)
    ax.view_init(elev=elev, azim=azim)
    # Grey pane edges with no fill give a light wireframe look.
    ax.xaxis.pane.set_edgecolor("grey")
    ax.yaxis.pane.set_edgecolor("grey")
    ax.zaxis.pane.set_edgecolor("grey")
    ax.xaxis.pane.fill = False
    ax.yaxis.pane.fill = False
    ax.zaxis.pane.fill = False
    title = kwargs.get("title", "Excitation Emission Matrix")
    title_fontsize = kwargs.get("title_fontsize", 14)
    title_fontweight = kwargs.get("title_fontweight", "bold")
    # NOTE(review): the kwarg key here is "pad", but eem_plot uses
    # "title_pad" for the same setting -- confirm which is intended.
    title_pad = kwargs.get("pad", 0)
    ax.set_title(
        title,
        wrap=True,
        fontsize=title_fontsize,
        fontweight=title_fontweight,
        pad=title_pad,
    )
    wavelength_units = kwargs.get("wavelength_units", "nm")
    xlabel = kwargs.get(
        "xlabel", "Excitation " + r"$\lambda$, %s" % str(wavelength_units)
    )
    ylabel = kwargs.get(
        "ylabel", "Emission " + r"$\lambda$, %s" % str(wavelength_units)
    )
    axis_label_fontsize = kwargs.get("axis_label_fontsize", 12)
    axis_labelpad = kwargs.get("axis_labelpad", 5)
    ax.set_xlabel(xlabel, fontsize=axis_label_fontsize, labelpad=axis_labelpad)
    ax.set_ylabel(ylabel, fontsize=axis_label_fontsize, labelpad=axis_labelpad)
    tick_params_labelsize = kwargs.get("tick_params_labelsize", 10)
    ax.tick_params(axis="both", which="major", pad=0, labelsize=tick_params_labelsize)
    xaxis_major_maxnlocator = kwargs.get("xaxis_major_maxnlocator", 4)
    yaxis_major_maxnlocator = kwargs.get("yaxis_major_maxnlocator", 4)
    ax.xaxis.set_major_locator(ticker.MaxNLocator(xaxis_major_maxnlocator))
    ax.yaxis.set_major_locator(ticker.MaxNLocator(yaxis_major_maxnlocator))
    if include_cbar:
        # 3D plots build their colorbar inline rather than via _colorbar.
        shrink = cbar_kws.get("shrink", 0.5)
        label_size = cbar_kws.get("size", 12)
        # NOTE(review): reads "labelsize" from kwargs, not cbar_kws -- confirm.
        tick_params_labelsize = kwargs.get("labelsize", 11)
        cbar = plt.colorbar(hmap, ax=ax, shrink=shrink)
        cbar.set_label(intensity_units, size=label_size)
        cbar.ax.ticklabel_format(
            style="scientific", scilimits=(-2, 3), useMathText=True
        )
        cbar.ax.tick_params(labelsize=tick_params_labelsize)
    return hmap
def eem_plot(
    eem_df,
    ax=None,
    plot_type="imshow",
    wavelength_units="nm",
    intensity_units="unspecified",
    include_cbar=True,
    aspect="equal",
    fig_kws=None,
    plot_kws=None,
    cbar_kws=None,
    **kwargs
):
    """Plot an excitation-emission matrix with the requested backend.

    Args:
        eem_df (pandas.DataFrame): An Excitation Emission matrix.
        ax (matplotlib.axes.Axes, optional): If an axis is provided, the EEM will be plotted on this
            axis. Otherwise, a new axis object will be created. Defaults to None.
        plot_type (str, optional): One of "imshow", "contour", "surface", or
            "surface_contour". Defaults to "imshow".
        wavelength_units (str, optional): Units used in the axis labels. Defaults to "nm".
        intensity_units (str, optional): Units used for the colorbar label.
            Defaults to "unspecified".
        include_cbar (bool): If true, colorbar will be included.
        aspect (str, optional): Defaults to "equal".
            NOTE(review): currently unused -- the imshow/contour helpers take
            aspect from plot_kws; kept for interface compatibility.
        fig_kws (dict, optional): Optional keyword arguments to include for the figure.
            Defaults to None (treated as {}).
        plot_kws (dict, optional): Optional keyword arguments to include. They are sent as an argument
            to the matplotlib plot call. Defaults to None (treated as {}).
        cbar_kws (dict, optional): Optional keyword arguments to include for the colorbar.
            Defaults to None (treated as {}).
    Raises:
        ValueError: If plot_type is not one of the supported values.
    Returns:
        matplotlib.contour.QuadContourSet, matplotlib.image.AxesImage, or
        mpl_toolkits.mplot3d.art3d.Poly3DCollection: The primary plot artist.
    """
    # Avoid mutable default arguments: normalize None to fresh dicts.
    fig_kws = {} if fig_kws is None else fig_kws
    plot_kws = {} if plot_kws is None else plot_kws
    cbar_kws = {} if cbar_kws is None else cbar_kws
    # Set the default figure kws.
    default_fig_kws = dict()
    fig_kws = dict(default_fig_kws, **fig_kws)
    if ax is None:
        projection = None
        if plot_type in ["surface", "surface_contour"]:
            projection = "3d"
        fig = plt.figure(**fig_kws)
        # BUG FIX: plt.gca(projection=...) was deprecated and removed in
        # matplotlib 3.6; create the axis on the figure directly instead.
        ax = fig.add_subplot(projection=projection)
    if plot_type == "contour":
        hmap = _eem_contour(
            eem_df,
            ax,
            intensity_units,
            include_cbar,
            plot_kws=plot_kws,
            cbar_kws=cbar_kws,
            **kwargs
        )
    elif plot_type == "imshow":
        hmap = _eem_imshow(
            eem_df,
            ax,
            intensity_units,
            include_cbar,
            plot_kws=plot_kws,
            cbar_kws=cbar_kws,
            **kwargs
        )
    elif plot_type in ["surface", "surface_contour"]:
        hmap = _eem_surface_contour(
            eem_df, ax, intensity_units, include_cbar, plot_type=plot_type, **kwargs
        )
        # The 3D helper applies its own title, labels, and tick styling,
        # so skip the 2D decoration below.
        return hmap
    else:
        # BUG FIX: the message previously omitted "surface", which is a
        # supported plot_type.
        raise ValueError(
            "plot_type must be imshow, contour, surface, or surface_contour")
    tick_params_labelsize = kwargs.get("tick_params_labelsize", 11)
    ax.tick_params(axis="both", which="major", labelsize=tick_params_labelsize)
    title = kwargs.get("title", "Excitation Emission Matrix")
    title_wrap = kwargs.get("title_wrap", True)
    title_fontsize = kwargs.get("title_fontsize", 14)
    title_pad = kwargs.get("title_pad", 20)
    fontweight = kwargs.get("title_fontweight", "bold")
    ax.set_title(
        title,
        wrap=title_wrap,
        fontsize=title_fontsize,
        fontweight=fontweight,
        pad=title_pad,
    )
    xlabel = kwargs.get(
        "xlabel", "Excitation " + r"$\lambda$, %s" % str(wavelength_units)
    )
    ylabel = kwargs.get(
        "ylabel", "Emission " + r"$\lambda$, %s" % str(wavelength_units)
    )
    axis_label_fontsize = kwargs.get("axis_label_fontsize", 12)
    axis_labelpad = kwargs.get("axis_labelpad", 5)
    ax.set_xlabel(xlabel, fontsize=axis_label_fontsize, labelpad=axis_labelpad)
    ax.set_ylabel(ylabel, fontsize=axis_label_fontsize, labelpad=axis_labelpad)
    return hmap
def plot_absorbance(ax=None, plot_kws={}, fig_kws={}, **kwargs):
    """Placeholder for absorbance plotting; currently a no-op that returns None."""
    return None
| 2.484375 | 2 |
UnityPy/classes/PPtr.py | yvsdrop/UnityPy | 313 | 12762095 | from ..files import ObjectReader
from ..streams import EndianBinaryWriter
from ..helpers import ImportHelper
from .. import files
from ..enums import FileType, ClassIDType
import os
from .. import environment
def save_ptr(obj, writer: EndianBinaryWriter):
    """Serialize a pointer as a (file_id, path_id) pair to *writer*."""
    # Real PPtr instances carry a file_id; anything else is written against
    # file 0, which it usually is anyway.
    file_id = obj.file_id if isinstance(obj, PPtr) else 0
    writer.write_int(file_id)
    # Before format version 14 path_id is written as a 32-bit int,
    # from 14 on as a 64-bit long.
    if obj._version < 14:
        writer.write_int(obj.path_id)
    else:
        writer.write_long(obj.path_id)
cached_managers = dict()
class PPtr:
    """Lazy pointer to another object, possibly stored in an external assets file."""

    def __init__(self, reader: ObjectReader):
        self._version = reader.version2
        self.index = -2
        self.file_id = reader.read_int()
        # Before format version 14 path_id is a 32-bit int, afterwards 64-bit.
        self.path_id = reader.read_int() if self._version < 14 else reader.read_long()
        self.assets_file = reader.assets_file
        # Resolved target, filled lazily by get_obj().
        self._obj = None

    def save(self, writer: EndianBinaryWriter):
        """Write this pointer back out via the shared save_ptr helper."""
        save_ptr(self, writer)

    def get_obj(self):
        """Resolve and cache the referenced object; return None if unresolvable."""
        if self._obj is not None:
            return self._obj
        manager = None
        if self.file_id == 0:
            # Pointer into the same assets file.
            manager = self.assets_file
        elif self.file_id > 0 and self.file_id - 1 < len(self.assets_file.externals):
            if self.index == -2:
                external_name = self.assets_file.externals[self.file_id - 1].name
                parent = self.assets_file.parent
                if parent is not None:
                    if external_name in parent.files:
                        manager = parent.files[external_name]
                    elif external_name.upper() in parent.files:
                        manager = parent.files[external_name.upper()]
                    else:
                        # Walk up to the root Environment and try to load the
                        # external file from its directory on demand.
                        while not isinstance(parent, environment.Environment):
                            parent = parent.parent
                        if parent.path:
                            path = parent.path
                            # BUG FIX: this local was named "files", shadowing
                            # the imported "files" module and making the
                            # fallback branch below raise UnboundLocalError.
                            dir_entries = os.listdir(path)
                            if external_name in dir_entries:
                                parent.load_files([os.path.join(path, external_name)])
                                manager = parent.files[external_name]
                else:
                    # No parent container: fall back to a module-level cache of
                    # independently opened serialized files.
                    if external_name not in cached_managers:
                        typ, reader = ImportHelper.check_file_type(external_name)
                        if typ == FileType.AssetsFile:
                            cached_managers[external_name] = files.SerializedFile(reader)
                    if external_name in cached_managers:
                        manager = cached_managers[external_name]
        if manager and self.path_id in manager.objects:
            self._obj = manager.objects[self.path_id]
        else:
            self._obj = None
        return self._obj

    def __getattr__(self, key):
        """Transparently proxy attribute access to the resolved object."""
        obj = self.get_obj()
        if obj is None:
            # Unresolvable pointers still report a type for callers that probe it.
            if key == "type":
                return ClassIDType.UnknownType
            raise AttributeError(key)
        return getattr(obj, key)

    def __repr__(self):
        return "<%s %s>" % (
            self.__class__.__name__,
            self._obj.__class__.__repr__(self.get_obj()) if self.get_obj() else "Not Found",
        )

    def __bool__(self):
        # A pointer is truthy only if it resolves to an object.
        return bool(self.get_obj())
| 2.484375 | 2 |
PhysicsEngine/NumericalThermoFluidHandler.py | RECIEM/Ballistica | 2 | 12762096 | <filename>PhysicsEngine/NumericalThermoFluidHandler.py
# Red Ciudadana de Estaciones Meteorologicas
#
# Copyright @ 2021
#
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
import numpy as np
import pandas as pd
from scipy.integrate import solve_ivp
from scipy.integrate import cumtrapz
from PhysicsEngine import PhysicsHandler
class NumericalThermoFluidHandler(PhysicsHandler):
    """Numerical 2D ballistics solver with drag for an ellipsoidal particle.

    Integrates projectile motion with a sphericity-dependent drag coefficient
    and Sutherland's viscosity model, storing the trajectory in a DataFrame.
    """
    # Constants for all instances of the solver
    ###########################################
    # Sphericity
    # NOTE(review): 1.6075 matches the exponent of Thomsen's ellipsoid
    # surface-area approximation -- confirm.
    sa_norm = 1.6075
    # Sutherland's viscosity model
    tref = 273.15
    tsuth = 110.4
    muref = 1.716e-5
    def __init__(self, v0=0, theta=0, dens=0.7, a=0.05, b=0.05, c=0.05, rho = 1.2754, temp = 293.0, h = 0, d = 0):
        """Store launch, particle-geometry and atmosphere parameters.

        v0: initial speed; theta: launch angle (radians, as consumed by
        np.cos/np.sin in compute()); dens: particle density; a, b, c:
        ellipsoid semi-axes; rho: air density; temp: air temperature (K);
        h: launch height; d: barrier distance (used when self.barrier is set).
        """
        self.v0 = v0
        self.theta = theta
        self.a = a
        self.b = b
        self.c = c
        self.rho = rho
        self.T = temp
        self.height = h
        self.distance = d
        # Wind components default to calm air.
        self.windx = 0
        self.windz = 0
        # Trajectory DataFrame, populated by compute().
        self.data = None
        self.computeIdeal = False
        self.barrier = False
        # Intermediate sphericity-related values, compute as early as possible to avoid constant recomputation
        self.phi = self.sphericity
        self.A = np.exp(2.3288 - (6.4581 * self.phi) + 2.4486 * (self.phi ** 2))
        self.B = 0.0964 + (0.5565 * self.phi)
        self.C = np.exp(4.905 - (13.8944 * self.phi) + (18.4222 * (self.phi ** 2)) - (10.2599 * (self.phi ** 3)))
        self.D = np.exp(1.4681 + (12.2584 * self.phi) - (20.7322 * (self.phi ** 2)) + (15.8855 * (self.phi ** 3)))
        self.dSph = np.power(self.a * self.b * self.c, 1.0/3)
        # Compute kinematic viscosity only once
        self.mu = self.compMu()
        self.dens = dens
        self.m = 0
    @property
    def sphericity(self):
        """Ratio of the volume-equivalent sphere's area to the particle's area."""
        radius = np.power(self.a * self.b * self.c, 1.0/3)
        sphArea = 4 * np.pi * (radius ** 2)
        parArea = self.surfArea
        return sphArea/parArea
    @property
    def sph_volume(self):
        """Volume of the volume-equivalent sphere (equals the ellipsoid volume)."""
        radius = np.power(self.a * self.b * self.c, 1.0 / 3)
        return (4.0/3.0) * np.pi * (radius ** 3)
    def compMu(self):
        """Dynamic viscosity of air at self.T via Sutherland's model."""
        mu = self.muref * np.power(self.T / self.tref, 3.0/2) * ((self.tref + self.tsuth) / (self.T + self.tsuth))
        return mu
    def Re(self, v):
        """Reynolds number at speed v, based on the equivalent-sphere diameter."""
        return (self.dSph * v * self.rho) / self.mu
    def Cd(self, v):
        # NOTE(review): this has the form of the Haider-Levenspiel drag
        # correlation for non-spherical particles -- confirm.
        return (24.0 / self.Re(v))*(1 + (self.A * (self.Re(v)**self.B))) + (self.C / (1 + (self.D / self.Re(v))))
    @staticmethod
    def norm(a, b):
        """Euclidean norm of a 2D vector (a, b)."""
        return np.sqrt(np.power(a, 2) + np.power(b, 2))
    @property
    def surfArea(self):
        """Approximate ellipsoid surface area (Thomsen-style formula)."""
        projareap = np.power(self.a*self.b, self.sa_norm) + np.power(self.a*self.c, self.sa_norm) + np.power(self.b*self.c, self.sa_norm)
        return 4*np.pi*np.power(projareap/3.0, 1.0/self.sa_norm)
    def E(self, v):
        """Drag pre-factor rho*v^2*A/(2m); multiplied by Cd in the ODE."""
        return (self.rho*v*v*self.surfArea)/(2 * self.m)
    def setMass(self):
        """Derive particle mass from density and equivalent-sphere volume."""
        self.m = self.dens * self.sph_volume
    def compute(self):
        """Integrate the trajectory and store it in self.data.

        Columns: t, x, z, vx, vz, v, cd, re. When self.barrier is set, rows
        beyond self.distance are truncated.
        """
        # Update mu value
        self.phi = self.sphericity
        self.A = np.exp(2.3288 - (6.4581 * self.phi) + 2.4486 * (self.phi ** 2))
        self.B = 0.0964 + (0.5565 * self.phi)
        self.C = np.exp(4.905 - (13.8944 * self.phi) + (18.4222 * (self.phi ** 2)) - (10.2599 * (self.phi ** 3)))
        self.D = np.exp(1.4681 + (12.2584 * self.phi) - (20.7322 * (self.phi ** 2)) + (15.8855 * (self.phi ** 3)))
        self.dSph = np.power(self.a * self.b * self.c, 1.0/3)
        self.mu = self.compMu()
        tstart = 0
        tend = 200
        tsamples = 10001
        trng = np.linspace(tstart, tend, tsamples)
        vx0 = self.v0 * np.cos(self.theta)
        vy0 = self.v0 * np.sin(self.theta)
        def acc(t, v):
            # Right-hand side of the velocity ODE: drag opposes the relative
            # air velocity; gravity acts on the vertical component.
            # NOTE(review): self.g is not set in this class -- presumably
            # provided by PhysicsHandler; confirm. Wind only enters the x
            # component here (windz is unused) -- confirm intended.
            vx = v[0]
            vy = v[1]
            v = self.norm(vx, vy)
            Enow = self.E(v)
            dvxdt = -Enow * self.Cd(v) * (vx - self.windx)/v
            dvydt = -Enow * self.Cd(v) * (vy / v) - self.g
            return [dvxdt, dvydt]
        # Integrate velocities
        vel0 = [vx0, vy0]
        vel = solve_ivp(acc, [0, 200], vel0, method='RK45', t_eval=trng).y
        vxrng = vel[0]
        vyrng = vel[1]
        # Integrate positions
        xrng = cumtrapz(vxrng, trng, initial=0)
        yrng = cumtrapz(vyrng, trng, initial=0)
        vrng = np.sqrt(np.power(vxrng, 2) + np.power(vyrng, 2))
        # Record Reynolds number
        cdrng = self.Cd(vrng)
        rerng = self.Re(vrng)
        darray = np.transpose(np.array([trng, xrng, yrng, vxrng, vyrng, vrng, cdrng, rerng]))
        self.data = pd.DataFrame(
            {'t': darray[:, 0], 'x': darray[:, 1], 'z': darray[:, 2], 'vx': darray[:, 3], 'vz': darray[:, 4],
             'v': darray[:, 5], 'cd': darray[:, 6], 're': darray[:, 7]})
        if self.barrier:
            self.data = self.data[self.data['x'] <= self.distance]
    def save_csv(self, filename):
        """Write the computed trajectory to *filename*; no-op if nothing to save."""
        if (filename == '') or (self.data is None):
            return
        else:
            self.data.to_csv(filename)
    def maxT(self):
        """Time at which the trajectory reaches its maximum height."""
        if self.data is None:
            return 0.0
        else:
            return self.data[self.data['z'] == self.data['z'].max()]['t'].values[0]
    def maxH(self):
        """Maximum height reached along the trajectory."""
        if self.data is None:
            return 0.0
        else:
            return self.data[self.data['z'] == self.data['z'].max()]['z'].values[0]
    def totalR(self):
        """Horizontal range: x of the last sample above ground level."""
        if self.data is None:
            return 0.0
        else:
            adjdata = self.data[self.data['z'] >= np.min([0, self.height])]
            return adjdata.tail(1)['x'].values[0]
    def maxDistance(self):
        """Largest horizontal distance among above-ground samples."""
        if self.data is None:
            return 0.0
        else:
            adjdata = self.data[self.data['z'] >= np.min([0, self.height])]
            return adjdata['x'].max()
    def totalT(self):
        """Total flight time until the last above-ground sample."""
        if self.data is None:
            return 0.0
        else:
            adjdata = self.data[self.data['z'] >= np.min([0, self.height])]
            return adjdata.tail(1)['t'].values[0]
    def finalTheta(self):
        """Impact angle in degrees (90 for a purely vertical impact)."""
        if self.data is None:
            return 0.0
        else:
            adjdata = self.data[self.data['z'] >= np.min([0, self.height])]
            if adjdata.tail(1)['vx'].values[0] == 0:
                return 90.0
            else:
                return -1 * np.rad2deg(np.arctan(adjdata.tail(1)['vz'].values[0] / adjdata.tail(1)['vx'].values[0]))
    def finalV(self):
        """Impact speed: |v| of the last above-ground sample."""
        if self.data is None:
            return 0.0
        else:
            adjdata = self.data[self.data['z'] >= np.min([0, self.height])]
            return adjdata.tail(1)['v'].values[0]
| 2.71875 | 3 |
CA.py | robustml-eurecom/quality_control_CMR | 2 | 12762097 | import os
import numpy as np
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
from medpy.metric import binary
# Use the GPU if available; model and batches in this module are moved here.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class AE(nn.Module):
    """Convolutional autoencoder over 4-channel (one-hot) segmentation maps.

    Trained with MSE + generalized Dice loss; evaluation reports per-class
    Dice coefficient and Hausdorff distance for BK/RV/MYO/LV labels.
    """
    def __init__(self, latent_size=100):
        super().__init__()
        self.init_layers(latent_size)
        # Kaiming-initialize all (transposed) conv weights.
        self.apply(self.weight_init)
        self.loss_function=self.Loss()
        self.metrics=self.Metrics()
        self.optimizer=torch.optim.Adam(self.parameters(),lr=2e-4,weight_decay=1e-5)
    def init_layers(self,latent_size):
        """Build the encoder/decoder stacks (mirror-image conv pyramids)."""
        # Encoder: 4-channel input downsampled (stride-2 convs) to a
        # latent_size-channel feature map.
        self.encoder = nn.Sequential(
            nn.Conv2d(in_channels=4,out_channels=32,kernel_size=4,stride=2,padding=1),
            nn.BatchNorm2d(num_features=32),
            nn.LeakyReLU(.2),
            nn.Dropout(0.5),
            nn.Conv2d(in_channels=32,out_channels=32,kernel_size=4,stride=2,padding=1),
            nn.BatchNorm2d(num_features=32),
            nn.LeakyReLU(.2),
            nn.Dropout(0.5),
            nn.Conv2d(in_channels=32,out_channels=32,kernel_size=4,stride=2,padding=1),
            nn.BatchNorm2d(num_features=32),
            nn.LeakyReLU(.2),
            nn.Dropout(0.5),
            nn.Conv2d(in_channels=32,out_channels=32,kernel_size=3,stride=1,padding=1),
            nn.BatchNorm2d(num_features=32),
            nn.LeakyReLU(.2),
            nn.Dropout(0.5),
            nn.Conv2d(in_channels=32,out_channels=64,kernel_size=4,stride=2,padding=1),
            nn.BatchNorm2d(num_features=64),
            nn.LeakyReLU(.2),
            nn.Dropout(0.5),
            nn.Conv2d(in_channels=64,out_channels=64,kernel_size=3,stride=1,padding=1),
            nn.BatchNorm2d(num_features=64),
            nn.LeakyReLU(.2),
            nn.Dropout(0.5),
            nn.Conv2d(in_channels=64,out_channels=128,kernel_size=4,stride=2,padding=1),
            nn.BatchNorm2d(num_features=128),
            nn.LeakyReLU(.2),
            nn.Dropout(0.5),
            nn.Conv2d(in_channels=128,out_channels=64,kernel_size=3,stride=1,padding=1),
            nn.BatchNorm2d(num_features=64),
            nn.LeakyReLU(.2),
            nn.Dropout(0.5),
            nn.Conv2d(in_channels=64,out_channels=32,kernel_size=3,stride=1,padding=1),
            nn.BatchNorm2d(num_features=32),
            nn.LeakyReLU(.2),
            nn.Dropout(0.5),
            nn.Conv2d(in_channels=32,out_channels=latent_size,kernel_size=4,stride=2,padding=1)
        )
        # Decoder: upsample back to 4 channels; Softmax yields per-pixel
        # class probabilities.
        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(in_channels=latent_size,out_channels=32,kernel_size=4,stride=2,padding=1),
            nn.BatchNorm2d(num_features=32),
            nn.LeakyReLU(.2),
            nn.Dropout(0.5),
            nn.ConvTranspose2d(in_channels=32,out_channels=64,kernel_size=3,stride=1,padding=1),
            nn.BatchNorm2d(num_features=64),
            nn.LeakyReLU(.2),
            nn.Dropout(0.5),
            nn.ConvTranspose2d(in_channels=64,out_channels=128,kernel_size=3,stride=1,padding=1),
            nn.BatchNorm2d(num_features=128),
            nn.LeakyReLU(.2),
            nn.Dropout(0.5),
            nn.ConvTranspose2d(in_channels=128,out_channels=64,kernel_size=4,stride=2,padding=1),
            nn.BatchNorm2d(num_features=64),
            nn.LeakyReLU(.2),
            nn.Dropout(0.5),
            nn.ConvTranspose2d(in_channels=64,out_channels=64,kernel_size=3,stride=1,padding=1),
            nn.BatchNorm2d(num_features=64),
            nn.LeakyReLU(.2),
            nn.Dropout(0.5),
            nn.ConvTranspose2d(in_channels=64,out_channels=32,kernel_size=4,stride=2,padding=1),
            nn.BatchNorm2d(num_features=32),
            nn.LeakyReLU(.2),
            nn.Dropout(0.5),
            nn.ConvTranspose2d(in_channels=32,out_channels=32,kernel_size=3,stride=1,padding=1),
            nn.BatchNorm2d(num_features=32),
            nn.LeakyReLU(.2),
            nn.Dropout(0.5),
            nn.ConvTranspose2d(in_channels=32,out_channels=32,kernel_size=4,stride=2,padding=1),
            nn.BatchNorm2d(num_features=32),
            nn.LeakyReLU(.2),
            nn.Dropout(0.5),
            nn.ConvTranspose2d(in_channels=32,out_channels=32,kernel_size=4,stride=2,padding=1),
            nn.BatchNorm2d(num_features=32),
            nn.LeakyReLU(.2),
            nn.Dropout(0.5),
            nn.ConvTranspose2d(in_channels=32,out_channels=4,kernel_size=4,stride=2,padding=1),
            nn.Softmax(dim=1)
        )
    def weight_init(self,m):
        # Kaiming init for conv layers only; BatchNorm keeps its defaults.
        if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
            nn.init.kaiming_uniform_(m.weight)
    def forward(self, x):
        """Encode then decode *x*; returns the softmax reconstruction."""
        latent = self.encoder(x)
        reconstruction = self.decoder(latent)
        return reconstruction
    class Loss():
        """Combined MSE + generalized Dice loss."""
        def __init__(self,call_id=0):
            # NOTE(review): call_id appears unused -- confirm.
            self.MSELoss=nn.MSELoss()
            self.GDLoss=self.GDLoss()
        class GDLoss:
            def __call__(self,x,y):
                # Soft Dice per channel; [1:] excludes the background channel,
                # and the sign is flipped so lower is better.
                tp=torch.sum(x*y,dim=(0,2,3))
                fp=torch.sum(x*(1-y),dim=(0,2,3))
                fn=torch.sum((1-x)*y,dim=(0,2,3))
                nominator=2*tp+1e-05
                denominator=2*tp+fp+fn+1e-05
                dice_score=-(nominator/(denominator+1e-8))[1:].mean()
                return dice_score
        def __call__(self,prediction,target,epoch=None,validation=False):
            # In validation mode return plain floats per term; in training
            # mode return the differentiable total for backprop.
            contributes={}
            contributes["MSELoss"]=self.MSELoss(prediction,target)
            contributes["GDLoss"]=self.GDLoss(prediction,target)
            contributes["Total"]=contributes["MSELoss"]+contributes["GDLoss"]
            if validation:
                return {k:v.item() for k,v in contributes.items()}
            return contributes["Total"]
    class Metrics():
        """Per-class Dice coefficient and Hausdorff distance (via medpy)."""
        def __init__(self):
            self.DC=self.DC()
            self.HD=self.HD()
        class DC:
            def __call__(self,prediction,target):
                # medpy raises on degenerate inputs; treat those as 0 overlap.
                try:
                    return binary.dc(prediction,target)
                except Exception:
                    return 0
        class HD:
            def __call__(self,prediction,target):
                # Hausdorff distance is undefined for empty masks; report NaN.
                try:
                    return binary.hd(prediction,target)
                except Exception:
                    return np.nan
        def __call__(self,prediction,target,validation=False):
            # One binary comparison per label channel index (0..3).
            metrics={}
            for c,key in enumerate(["BK_","RV_","MYO_","LV_"]):
                ref=np.copy(target)
                pred=np.copy(prediction)
                ref=np.where(ref!=c,0,1)
                pred=np.where(pred!=c,0,1)
                metrics[key+"dc"]=self.DC(pred,ref)
                metrics[key+"hd"]=self.HD(pred,ref)
            return metrics
    def training_routine(self,epochs,train_loader,val_loader,ckpt_folder):
        """Train over *epochs* (an iterable of epoch numbers), validating and
        checkpointing after each epoch; returns the per-epoch result history."""
        if not os.path.isdir(ckpt_folder):
            os.mkdir(ckpt_folder)
        history = []
        best_acc = None
        for epoch in epochs:
            #training
            self.train()
            for patient in train_loader:
                for batch in patient:
                    batch=batch.to(device)
                    self.optimizer.zero_grad()
                    reconstruction=self.forward(batch)
                    loss=self.loss_function(reconstruction,batch,epoch)
                    loss.backward()
                    self.optimizer.step()
            #validation
            self.eval()
            with torch.no_grad():
                result = self.evaluation_routine(val_loader)
            #checkpoint: save on new best total loss, and every 10th epoch.
            if(best_acc==None or result['Total']<best_acc or epoch%10==0):
                ckpt=os.path.join(ckpt_folder,"{:03d}.pth".format(epoch))
                if(best_acc==None or result['Total']<best_acc): best_acc=result['Total']; ckpt=ckpt.split(".pth")[0]+"_best.pth"
                torch.save({"AE": self.state_dict(),"AE_optim": self.optimizer.state_dict(),"epoch": epoch},ckpt)
            #report
            self.epoch_end(epoch, result)
            history.append(result)
        return history
    def evaluation_routine(self,val_loader):
        """Run validation; returns mean loss terms and ED/ES metrics."""
        epoch_summary={}
        for patient in val_loader:
            gt=[];reconstruction=[]
            #loss terms
            for batch in patient:
                batch={"gt":batch.to(device)}
                batch["reconstruction"]=self.forward(batch["gt"])
                gt=torch.cat([gt,batch["gt"]],dim=0) if len(gt)>0 else batch["gt"]
                reconstruction=torch.cat([reconstruction,batch["reconstruction"]],dim=0) if len(reconstruction)>0 else batch["reconstruction"]
                for k,v in self.loss_function(batch["reconstruction"],batch["gt"],validation=True).items():
                    if k not in epoch_summary.keys(): epoch_summary[k]=[]
                    epoch_summary[k].append(v)
            #validation metrics
            # NOTE(review): assumes the first half of each patient's slices
            # is the ED phase and the second half ES -- confirm loader order.
            gt=np.argmax(gt.cpu().numpy(),axis=1)
            gt={"ED":gt[:len(gt)//2],"ES":gt[len(gt)//2:]}
            reconstruction=np.argmax(reconstruction.cpu().numpy(),axis=1)
            reconstruction={"ED":reconstruction[:len(reconstruction)//2],"ES":reconstruction[len(reconstruction)//2:]}
            for phase in ["ED","ES"]:
                for k,v in self.metrics(reconstruction[phase],gt[phase]).items():
                    if k not in epoch_summary.keys(): epoch_summary[k]=[]
                    epoch_summary[k].append(v)
        epoch_summary={k:np.mean(v) for k,v in epoch_summary.items()}
        return epoch_summary
    def epoch_end(self,epoch,result):
        """Print a one-epoch summary table of losses and metrics."""
        print("\033[1mEpoch [{}]\033[0m".format(epoch))
        header,row="",""
        for k,v in result.items():
            header+="{:.6}\t".format(k);row+="{:.6}\t".format("{:.4f}".format(v))
        print(header);print(row)
def plot_history(history):
    """Plot the total validation loss for each epoch summary in *history*."""
    totals = [summary['Total'] for summary in history]
    plt.plot(totals, '-x', label="loss")
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.legend()
    plt.title('Losses vs. No. of epochs')
    plt.grid()
    plt.show()
| 2.375 | 2 |
scripts/trace.py | TomiBelan/fajr | 0 | 12762098 | #!/usr/bin/env python
import sys
from struct import unpack
from collections import namedtuple
# Fixed 12-byte record header decoded from the stream: (id, parent, payload length).
EntryHeader = namedtuple('EntryHeader', 'id parent length')
# Fully decoded trace entry; `children` starts empty and is filled by build_tree().
Entry = namedtuple('Entry', 'id parent message trace data children')
class DecodeError(Exception):
    """Raised when the trace byte stream violates the expected format."""

    def __init__(self, value):
        # Forward to Exception so .args is populated (repr/pickling behave).
        super(DecodeError, self).__init__(value)
        self.value = value

    def __str__(self):
        return repr(self.value)
def entry_stream(f):
    """Validate the FBTR magic on *f* and yield Entry records until EOF."""
    if f.read(4) != 'FBTR':
        raise DecodeError('Bad header')
    while True:
        entry = read_entry(f)
        if entry is None:
            return
        yield entry
def read_entry(f):
    """Read one 'BETR'-framed trace entry from *f*; return Entry, or None at EOF.

    Python 2 code: the stream yields str bytes, compared directly to literals.
    """
    ehdr_data = f.read(12)
    if ehdr_data == '':
        # eof
        return None
    if ehdr_data[:2] != 'BE':
        raise DecodeError('Bad trace entry header')
    if ehdr_data[2:4] != 'TR':
        raise DecodeError('Unknown trace entry type')
    # After the 4-byte magic: >HHI = big-endian id, parent, payload length.
    ehdr = EntryHeader(*unpack('>HHI', ehdr_data[4:]))
    edata = f.read(ehdr.length)
    # Payload holds three consecutive serialized values: message, trace, data.
    pos, msg = unserialize(edata)
    skip, trace = unserialize(edata[pos:])
    pos += skip
    skip, data = unserialize(edata[pos:])
    # children starts empty; build_tree() links entries to their parents.
    return Entry(ehdr.id, ehdr.parent, msg, trace, data, [])
def unserialize(data):
    """Decode one tagged value from the head of *data* (Python 2 str bytes).

    Returns (consumed_byte_count, value). Tags: 'S' length-prefixed string,
    'I' big-endian uint32, 'A' ordered association list, 'N' None.
    """
    if data[0] == 'S':
        # 'S' <u32 length> <raw bytes>
        length = unpack('>I', data[1:5])[0]
        s = data[5:5+length]
        return 5+length, s
    elif data[0] == 'I':
        # 'I' <u32 value>
        i = unpack('>I', data[1:5])[0]
        return 5, i
    elif data[0] == 'A':
        # 'A' <u32 count> followed by `count` key/value pairs, each decoded
        # recursively with this same function.
        cnt = unpack('>I', data[1:5])[0]
        # In PHP, order of keys matters, so use array of tuples
        # instead of a map
        vals = []
        pos = 5
        for i in range(cnt):
            skip, key = unserialize(data[pos:])
            pos += skip
            skip, value = unserialize(data[pos:])
            pos += skip
            vals.append((key, value))
        return pos, vals
    elif data[0] == 'N':
        return 1, None
    # NOTE(review): an unknown tag falls through and implicitly returns None,
    # which callers unpack as a 2-tuple -- confirm this cannot occur in
    # well-formed traces.
def build_tree(stream):
    """Build the id -> Entry map from *stream*, linking children to parents.

    Mutates each Entry's `children` list in place. Raises DecodeError on a
    duplicate id, an unknown parent id, or a missing root (id 0).
    """
    m = {}
    entries = list(stream)
    for entry in entries:
        if entry.id in m:
            raise DecodeError('Duplicate entry with id ' + str(entry.id))
        m[entry.id] = entry
    for entry in entries:
        if entry.parent not in m:
            # BUG FIX: the message previously reported entry.id instead of
            # the offending parent id.
            raise DecodeError('Unknown parent with id ' + str(entry.parent))
        # The root (id 0) is its own parent and is not linked anywhere.
        if entry.id != 0:
            parent = m[entry.parent]
            parent.children.append(entry)
    if 0 not in m:
        raise DecodeError('Root not present')
    return m
if __name__ == '__main__':
    # Read the binary trace from stdin and build the id -> Entry map.
    # NOTE: Python 2 script (print statements); not Python 3 compatible.
    entry_map = build_tree(entry_stream(sys.stdin))
    if len(sys.argv) == 1:
        # No arguments: print the whole call tree, one indented line per entry.
        def print_tree(entry, indent=1):
            print str(entry.id).zfill(4)+' '*indent + entry.message
            for child in entry.children:
                print_tree(child, indent+1)
        print_tree(entry_map[0])
    elif len(sys.argv) == 2:
        # One argument (an entry id): dump that entry's trace metadata to
        # stderr and its raw data payload to stdout.
        id = int(sys.argv[1])
        if not id in entry_map:
            sys.stderr.write('No such id: ' + str(id) + '\n')
            exit(1)
        entry = entry_map[int(sys.argv[1])]
        sys.stderr.write('Entry ' + str(id) + (' in ' + str(entry.parent) if entry.parent else '') + ': ' + entry.message + '\n')
        for key, value in entry.trace:
            sys.stderr.write(str(key) + ': ' + str(value) + '\n')
        if entry.data == None:
            sys.stdout.write('null\n')
        else:
            sys.stdout.write(str(entry.data))
| 2.484375 | 2 |
tdd_wallet/views/credit_amount/tests/__init__.py | kapeed2091/tdd_practice | 0 | 12762099 | # pylint: disable=wrong-import-position
# Identifiers for the API under test.
# NOTE(review): presumably consumed by shared test-case scaffolding to build
# the request URL/method -- confirm against the base test case.
APP_NAME = "tdd_wallet"
OPERATION_NAME = "credit_amount"
REQUEST_METHOD = "post"
URL_SUFFIX = "credit/v1/"
from .test_case_01 import TestCase01CreditAmountAPITestCase
# Explicit public API of this test package.
__all__ = [
    "TestCase01CreditAmountAPITestCase"
]
| 1.203125 | 1 |
tests/test_bql.py | almartin82/bayeslite | 964 | 12762100 | # -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import StringIO
import apsw
import pytest
import struct
import bayeslite
import bayeslite.ast as ast
import bayeslite.compiler as compiler
import bayeslite.core as core
import bayeslite.guess as guess
import bayeslite.backends.troll_rng as troll
import bayeslite.parse as parse
from bayeslite.exception import BQLError
from bayeslite.math_util import relerr
from bayeslite.backends.cgpm_backend import CGPM_Backend
from bayeslite.util import cursor_value
import test_core
import test_csv
from stochastic import stochastic
def bql2sql(string, setup=None):
    """Compile BQL *string* against the canonical t1/p1 fixture into SQL text.

    *setup*, if given, is called with the bdb before compilation.
    """
    with bayeslite.bayesdb_open(':memory:') as bdb:
        test_core.t1_schema(bdb)
        test_core.t1_data(bdb)
        bdb.execute('''
            create population p1 for t1 (
                id ignore;
                label nominal;
                age numerical;
                weight numerical
            )
        ''')
        if setup is not None:
            setup(bdb)
        phrases = parse.parse_bql_string(string)
        output = compiler.Output(0, {}, ())
        for phrase in phrases:
            assert ast.is_query(phrase)
            compiler.compile_query(bdb, phrase, output)
        output.write(';')
        return output.getvalue()
# XXX Kludgey mess. Please reorganize.
# XXX Kludgey mess. Please reorganize.
def bql2sqlparam(string):
    """Like bql2sql, but for parametrized BQL: binds all parameters to None.

    Non-parametrized phrases are compiled into a plain StringIO instead of a
    compiler.Output, and everything is concatenated into one SQL string.
    """
    with bayeslite.bayesdb_open(':memory:') as bdb:
        test_core.t1_schema(bdb)
        test_core.t1_data(bdb)
        bdb.execute('''
            create population p1 for t1 (
                id ignore;
                label nominal;
                age numerical;
                weight numerical
            )
        ''')
        phrases = parse.parse_bql_string(string)
        out0 = StringIO.StringIO()
        for phrase in phrases:
            out = None
            if isinstance(phrase, ast.Parametrized):
                # Bind every numbered parameter to None just to compile.
                bindings = (None,) * phrase.n_numpar
                out = compiler.Output(phrase.n_numpar, phrase.nampar_map,
                    bindings)
                phrase = phrase.phrase
            else:
                out = StringIO.StringIO()
            assert ast.is_query(phrase)
            compiler.compile_query(bdb, phrase, out)
            # XXX Do something about the parameters.
            out0.write(out.getvalue())
            out0.write(';')
        return out0.getvalue()
def bql_execute(bdb, string, bindings=()):
    """Execute a BQL query and coerce every row of the result to a tuple."""
    cursor = bdb.execute(string, bindings)
    return map(tuple, cursor)
def empty(cursor):
    # Assert that *cursor* is a valid result cursor with a zero-column
    # description and no rows -- the expected shape of a row-less statement.
    assert cursor is not None
    assert cursor.description is not None
    assert len(cursor.description) == 0
    with pytest.raises(StopIteration):
        cursor.next()
def test_trivial_population():
    """A population over a CSV-backed table can be created and dropped."""
    with test_csv.bayesdb_csv_file(test_csv.csv_data) as (bdb, fname):
        with open(fname, 'rU') as f:
            bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True)
        # XXX if (not) exists
        bdb.execute('''
            create population p for t (
                guess stattypes of (*);
                age numerical
            )
        ''')
        bdb.execute('drop population p')
def test_population_invalid_numerical():
    """Declaring a non-numeric column (gender) as numerical raises BQLError."""
    with test_csv.bayesdb_csv_file(test_csv.csv_data) as (bdb, fname):
        with open(fname, 'rU') as f:
            bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True)
        with pytest.raises(BQLError):
            bdb.execute('''
                create population p for t (
                    guess stattypes of (*);
                    gender numerical
                )
            ''')
def test_population_invalid_numerical_alterpop_addvar():
    """Adding an ignored non-numeric column as a numerical variable fails."""
    with test_csv.bayesdb_csv_file(test_csv.csv_data) as (bdb, fname):
        with open(fname, 'rU') as f:
            bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True)
        bdb.execute('''
            create population p for t (
                guess stattypes of (*);
                ignore gender
            )
        ''')
        with pytest.raises(BQLError):
            bdb.execute('alter population p add variable gender numerical')
        bdb.execute('drop population p')
def test_population_invalid_numerical_alterpop_stattype():
    """Re-typing a nominal non-numeric column to numerical fails."""
    with test_csv.bayesdb_csv_file(test_csv.csv_data) as (bdb, fname):
        with open(fname, 'rU') as f:
            bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True)
        bdb.execute('''
            create population p for t (
                guess stattypes of (*);
                gender nominal
            )
        ''')
        with pytest.raises(BQLError):
            bdb.execute('''
                alter population p set stattype of gender to numerical
            ''')
        bdb.execute('drop population p')
def test_similarity_identity():
    """Similarity of every row to itself is exactly 1."""
    with test_core.t1() as (bdb, population_id, _generator_id):
        bdb.execute('initialize 6 models for p1_cc;')
        rowids = bdb.sql_execute('select rowid from t1')
        for rowid in rowids:
            c = bdb.execute('''
                estimate similarity of (rowid=?) to (rowid=?)
                in the context of age by p1
            ''', (rowid[0], rowid[0])).fetchall()
            assert len(c) == 1
            assert c[0][0] == 1
def test_predictive_relevance():
    """PREDICTIVE RELEVANCE queries compile to the expected SQL, and the
    BY/FROM vs OF combinations are validated."""
    assert bql2sql('''
        estimate predictive relevance
        of (label = 'Uganda')
        to existing rows (rowid < 4)
        and hypothetical rows with values (
            ("age" = 82, "weight" = 14),
            ("age" = 74, label = 'Europe', "weight" = 7)
        )
        in the context of "weight"
        by p1
    ''') == \
        'SELECT bql_row_predictive_relevance(1, NULL, NULL, ' \
            '(SELECT _rowid_ FROM "t1" WHERE ("label" = \'Uganda\')), '\
            '\'[1, 2, 3]\', 3, '\
            '2, 82, 3, 14, NULL, 2, 74, 1, \'Europe\', 3, 7, NULL);'
    assert bql2sql('''
        estimate predictive relevance
        of (label = 'mumble')
        to existing rows (label = 'frotz' or age <= 4)
        in the context of "label"
        by p1
    ''') == \
        'SELECT bql_row_predictive_relevance(1, NULL, NULL, ' \
            '(SELECT _rowid_ FROM "t1" WHERE ("label" = \'mumble\')), '\
            '\'[5, 8]\', 1);'
    assert bql2sql('''
        estimate label,
            predictive relevance
            to hypothetical rows with values (
                ("age" = 82, "weight" = 14),
                ("age" = 74, label = 'hunf', "weight" = 7)
            )
            in the context of "age",
            _rowid_ + 1
        from p1
    ''') == \
        'SELECT "label", bql_row_predictive_relevance(1, NULL, NULL, _rowid_, '\
        '\'[]\', 2, 2, 82, 3, 14, NULL, 2, 74, 1, \'hunf\', 3, 7, NULL), '\
        '("_rowid_" + 1) FROM "t1";'
    # No matching rows should still compile.
    assert bql2sql('''
        estimate label,
            predictive relevance to existing rows (rowid < 0)
            in the context of "age"
        from p1
    ''') == \
        'SELECT "label", bql_row_predictive_relevance(1, NULL, NULL, _rowid_, '\
        '\'[]\', 2) FROM "t1";'
    # When using `BY`, require OF to be specified.
    with pytest.raises(BQLError):
        bql2sql('''
            estimate predictive relevance
            to hypothetical rows with values (
                ("age" = 82, "weight" = 14),
                ("age" = 74, label = 'Europe', "weight" = 7)
            )
            in the context of "age"
            by p1
        ''')
    # When using `FROM`, require OF to be unspecified.
    with pytest.raises(BQLError):
        bql2sql('''
            estimate predictive relevance
            of (name = 'mansour')
            to hypothetical rows with values (
                ("age" = 82, "weight" = 14)
            )
            in the context of "age"
            from p1
        ''')
    assert bql2sql('''
        estimate label from p1
        where
            (predictive relevance to existing rows (label = 'quux' and age < 5)
            in the context of "weight") > 1
        order by
            predictive relevance
            to hypothetical rows with values ((label='zot'))
            in the context of "age"
    ''') == \
        'SELECT "label" FROM "t1" WHERE '\
        '(bql_row_predictive_relevance(1, NULL, NULL, '\
            '_rowid_, \'[5]\', 3) > 1) '\
        'ORDER BY bql_row_predictive_relevance(1, NULL, NULL, '\
            '_rowid_, \'[]\', 2, 1, \'zot\', NULL);'
@stochastic(max_runs=2, min_passes=1)
def test_conditional_probability(seed):
    """End-to-end check of PROBABILITY DENSITY ... GIVEN on a live bdb.

    Rebuilds the population with only numeric variables, forces full
    dependence, and checks that conditioning on weight raises the density
    of age = 8 and that an empty GIVEN is a no-op.
    """
    with test_core.t1(seed=seed) as (bdb, _population_id, _generator_id):
        # Replace the fixture's population with one that models only
        # age and weight as numerical, so the densities below are
        # well-defined.
        bdb.execute('drop generator p1_cc')
        bdb.execute('drop population p1')
        bdb.execute('''
            create population p1 for t1 (
                ignore id, label;
                set stattype of age to numerical;
                set stattype of weight to numerical
            )
        ''')
        bdb.execute('''
            create generator p1_cond_prob_cc for p1;
        ''')
        bdb.execute('initialize 1 model for p1_cond_prob_cc')
        # Force dependence so that conditioning on weight must change
        # the density of age.
        bdb.execute('alter generator p1_cond_prob_cc '
            'ensure variables * dependent')
        bdb.execute('analyze p1_cond_prob_cc for 1 iteration')
        q0 = 'estimate probability density of age = 8 by p1'
        q1 = 'estimate probability density of age = 8 given () by p1'
        age_is_8 = bdb.execute(q0).fetchvalue()
        # An empty GIVEN clause is equivalent to no GIVEN clause at all.
        assert age_is_8 == bdb.execute(q1).fetchvalue()
        q2 = 'estimate probability density of age = 8 given (weight = 16)' \
            ' by p1'
        age_is_8_given_weight_is_16 = bdb.execute(q2).fetchvalue()
        assert age_is_8 < age_is_8_given_weight_is_16
        # The FROM COLUMNS OF form agrees with the scalar BY form for the
        # single remaining non-weight column.
        probs = bdb.execute(
            'estimate probability density of value 8 given (weight = 16)'
            ' from columns of p1 where v.name != \'weight\'').fetchall()
        assert [(age_is_8_given_weight_is_16,)] == probs
@stochastic(max_runs=2, min_passes=1)
def test_joint_probability(seed):
    """Joint and conditional PROBABILITY DENSITY queries behave consistently.

    Checks that parenthesized single targets equal the bare form, that an
    empty GIVEN is a no-op, and that joint densities are bounded by their
    marginals on a live bdb.
    """
    with test_core.t1(seed=seed) as (bdb, _population_id, _generator_id):
        bdb.execute('initialize 10 models for p1_cc')
        bdb.execute('analyze p1_cc for 10 iterations')
        q0 = 'estimate probability density of age = 8 by p1'
        q1 = 'estimate probability density of (age = 8) by p1'
        # Parenthesized single target is the same as the bare form.
        assert bdb.execute(q0).fetchvalue() == bdb.execute(q1).fetchvalue()
        q1 = 'estimate probability density of (age = 8) given () by p1'
        # Empty GIVEN clause is a no-op.
        assert bdb.execute(q0).fetchvalue() == bdb.execute(q1).fetchvalue()
        q2 = 'estimate probability density of age = 8 given (weight = 16)' \
            ' by p1'
        assert bdb.execute(q0).fetchvalue() < bdb.execute(q2).fetchvalue()
        q0 = 'estimate probability density of age = 8 by p1'
        q1 = 'estimate probability density of (age = 8, weight = 16) by p1'
        # A joint density cannot exceed either marginal.
        assert bdb.execute(q1).fetchvalue() < bdb.execute(q0).fetchvalue()
        q2 = 'estimate probability density of (age = 8, weight = 16)' \
            " given (label = 'mumble') by p1"
        assert bdb.execute(q1).fetchvalue() < bdb.execute(q2).fetchvalue()
def test_badbql():
    """Empty, bare-semicolon, and multi-statement strings raise ValueError."""
    with test_core.t1() as (bdb, _population_id, _generator_id):
        for bad_query in ('', ';', 'select 0; select 1'):
            with pytest.raises(ValueError):
                bdb.execute(bad_query)
def test_select_trivial():
assert bql2sql('select null;') == 'SELECT NULL;'
assert bql2sql("select 'x';") == "SELECT 'x';"
assert bql2sql("select 'x''y';") == "SELECT 'x''y';"
assert bql2sql('select "x";') == 'SELECT "x";'
assert bql2sql('select "x""y";') == 'SELECT "x""y";'
assert bql2sql('select 0;') == 'SELECT 0;'
assert bql2sql('select 0.;') == 'SELECT 0.0;'
assert bql2sql('select .0;') == 'SELECT 0.0;'
assert bql2sql('select 0.0;') == 'SELECT 0.0;'
assert bql2sql('select 1e0;') == 'SELECT 1.0;'
assert bql2sql('select 1e+1;') == 'SELECT 10.0;'
assert bql2sql('select 1e-1;') == 'SELECT 0.1;'
assert bql2sql('select -1e+1;') == 'SELECT (- 10.0);'
assert bql2sql('select +1e-1;') == 'SELECT (+ 0.1);'
assert bql2sql('select SQRT(1-EXP(-2*value)) FROM bm_mi;') == \
'SELECT "SQRT"((1 - "EXP"(((- 2) * "value")))) FROM "bm_mi";'
assert bql2sql('select .1e0;') == 'SELECT 0.1;'
assert bql2sql('select 1.e10;') == 'SELECT 10000000000.0;'
assert bql2sql('select all 0;') == 'SELECT 0;'
assert bql2sql('select distinct 0;') == 'SELECT DISTINCT 0;'
assert bql2sql('select 0 as z;') == 'SELECT 0 AS "z";'
assert bql2sql('select * from t;') == 'SELECT * FROM "t";'
assert bql2sql('select t.* from t;') == 'SELECT "t".* FROM "t";'
assert bql2sql('select c from t;') == 'SELECT "c" FROM "t";'
assert bql2sql('select c as d from t;') == 'SELECT "c" AS "d" FROM "t";'
assert bql2sql('select t.c as d from t;') == \
'SELECT "t"."c" AS "d" FROM "t";'
assert bql2sql('select t.c as d, p as q, x from t;') == \
'SELECT "t"."c" AS "d", "p" AS "q", "x" FROM "t";'
assert bql2sql('select * from t, u;') == 'SELECT * FROM "t", "u";'
assert bql2sql('select * from t as u;') == 'SELECT * FROM "t" AS "u";'
assert bql2sql('select * from (select 0);') == 'SELECT * FROM (SELECT 0);'
assert bql2sql('select t.c from (select d as c from u) as t;') == \
'SELECT "t"."c" FROM (SELECT "d" AS "c" FROM "u") AS "t";'
assert bql2sql('select * where x;') == 'SELECT * WHERE "x";'
assert bql2sql('select * from t where x;') == \
'SELECT * FROM "t" WHERE "x";'
assert bql2sql('select * group by x;') == 'SELECT * GROUP BY "x";'
assert bql2sql('select * from t where x group by y;') == \
'SELECT * FROM "t" WHERE "x" GROUP BY "y";'
assert bql2sql('select * from t where x group by y, z;') == \
'SELECT * FROM "t" WHERE "x" GROUP BY "y", "z";'
assert bql2sql('select * from t where x group by y having sum(z) < 1') == \
'SELECT * FROM "t" WHERE "x" GROUP BY "y" HAVING ("sum"("z") < 1);'
assert bql2sql('select * order by x;') == 'SELECT * ORDER BY "x";'
assert bql2sql('select * order by x asc;') == 'SELECT * ORDER BY "x";'
assert bql2sql('select * order by x desc;') == \
'SELECT * ORDER BY "x" DESC;'
assert bql2sql('select * order by x, y;') == 'SELECT * ORDER BY "x", "y";'
assert bql2sql('select * order by x desc, y;') == \
'SELECT * ORDER BY "x" DESC, "y";'
assert bql2sql('select * order by x, y asc;') == \
'SELECT * ORDER BY "x", "y";'
assert bql2sql('select * limit 32;') == 'SELECT * LIMIT 32;'
assert bql2sql('select * limit 32 offset 16;') == \
'SELECT * LIMIT 32 OFFSET 16;'
assert bql2sql('select * limit 16, 32;') == 'SELECT * LIMIT 32 OFFSET 16;'
assert bql2sql('select (select0);') == 'SELECT "select0";'
assert bql2sql('select (select 0);') == 'SELECT (SELECT 0);'
assert bql2sql('select f(f(), f(x), y);') == \
'SELECT "f"("f"(), "f"("x"), "y");'
assert bql2sql('select a and b or c or not d is e is not f like j;') == \
'SELECT ((("a" AND "b") OR "c") OR' \
+ ' (NOT ((("d" IS "e") IS NOT "f") LIKE "j")));'
assert bql2sql('select a like b not like c like d escape e;') == \
'SELECT ((("a" LIKE "b") NOT LIKE "c") LIKE "d" ESCAPE "e");'
assert bql2sql('select a like b escape c glob d not glob e;') == \
'SELECT ((("a" LIKE "b" ESCAPE "c") GLOB "d") NOT GLOB "e");'
assert bql2sql('select a not glob b glob c escape d;') == \
'SELECT (("a" NOT GLOB "b") GLOB "c" ESCAPE "d");'
assert bql2sql('select a glob b escape c regexp e not regexp f;') == \
'SELECT ((("a" GLOB "b" ESCAPE "c") REGEXP "e") NOT REGEXP "f");'
assert bql2sql('select a not regexp b regexp c escape d;') == \
'SELECT (("a" NOT REGEXP "b") REGEXP "c" ESCAPE "d");'
assert bql2sql('select a regexp b escape c not regexp d escape e;') == \
'SELECT (("a" REGEXP "b" ESCAPE "c") NOT REGEXP "d" ESCAPE "e");'
assert bql2sql('select a not regexp b escape c match e not match f;') == \
'SELECT ((("a" NOT REGEXP "b" ESCAPE "c") MATCH "e") NOT MATCH "f");'
assert bql2sql('select a not match b match c escape d;') == \
'SELECT (("a" NOT MATCH "b") MATCH "c" ESCAPE "d");'
assert bql2sql('select a match b escape c not match d escape e;') == \
'SELECT (("a" MATCH "b" ESCAPE "c") NOT MATCH "d" ESCAPE "e");'
assert bql2sql('select a not match b escape c between d and e;') == \
'SELECT (("a" NOT MATCH "b" ESCAPE "c") BETWEEN "d" AND "e");'
assert bql2sql('select a between b and c and d;') == \
'SELECT (("a" BETWEEN "b" AND "c") AND "d");'
assert bql2sql('select a like b like c escape d between e and f;') == \
'SELECT ((("a" LIKE "b") LIKE "c" ESCAPE "d") BETWEEN "e" AND "f");'
assert bql2sql('select a between b and c not between d and e;') == \
'SELECT (("a" BETWEEN "b" AND "c") NOT BETWEEN "d" AND "e");'
assert bql2sql('select a not between b and c in (select f);') == \
'SELECT (("a" NOT BETWEEN "b" AND "c") IN (SELECT "f"));'
assert bql2sql('select a in (select b) and c not in (select d);') == \
'SELECT (("a" IN (SELECT "b")) AND ("c" NOT IN (SELECT "d")));'
assert bql2sql("select a in (1 + 2, '3') and b not in (select c);") == \
'SELECT (("a" IN ((1 + 2), \'3\')) AND ("b" NOT IN (SELECT "c")));'
assert bql2sql('select a in (select b) isnull notnull!=c<>d<e<=f>g;') == \
'SELECT ((((("a" IN (SELECT "b")) ISNULL) NOTNULL) != "c") !=' \
+ ' ((("d" < "e") <= "f") > "g"));'
assert bql2sql('select a>b>=c<<d>>e&f|g+h-i*j/k;') == \
'SELECT (("a" > "b") >= (((("c" << "d") >> "e") & "f") |' \
+ ' (("g" + "h") - (("i" * "j") / "k"))));'
assert bql2sql('select a/b%c||~~d collate e collate\'f\'||1;') == \
'SELECT (("a" / "b") % (("c" || (((~ (~ "d")) COLLATE "e")' \
+ ' COLLATE "f")) || 1));'
assert bql2sql('select cast(f(x) as binary blob);') == \
'SELECT CAST("f"("x") AS "binary" "blob");'
assert bql2sql('select cast(42 as varint(73));') == \
'SELECT CAST(42 AS "varint"(73));'
assert bql2sql('select cast(f(x, y, z) as varchar(12 ,34));') == \
'SELECT CAST("f"("x", "y", "z") AS "varchar"(12, 34));'
assert bql2sql('select exists (select a) and not exists (select b);') == \
'SELECT (EXISTS (SELECT "a") AND (NOT EXISTS (SELECT "b")));'
assert bql2sql('select case when a - b then c else d end from t;') == \
'SELECT CASE WHEN ("a" - "b") THEN "c" ELSE "d" END FROM "t";'
assert bql2sql('select case f(a) when b + c then d else e end from t;') \
== \
'SELECT CASE "f"("a") WHEN ("b" + "c") THEN "d" ELSE "e" END FROM "t";'
def test_estimate_bql():
    """ESTIMATE row/column functions compile to the expected bql_* SQL.

    Covers PREDICTIVE PROBABILITY, PROBABILITY DENSITY, SIMILARITY,
    DEPENDENCE PROBABILITY, MUTUAL INFORMATION, and CORRELATION, including
    the parse/compile errors each one is required to raise.
    """
    # PREDICTIVE PROBABILITY
    assert bql2sql('estimate predictive probability of weight from p1;') == \
        'SELECT bql_row_column_predictive_probability(1, NULL, NULL, _rowid_, '\
        '\'[3]\', \'[]\')' \
        ' FROM "t1";'
    assert bql2sql('estimate predictive probability of (age, weight) '
            'from p1;') == \
        'SELECT bql_row_column_predictive_probability(1, NULL, NULL, _rowid_, '\
        '\'[2, 3]\', \'[]\')' \
        ' FROM "t1";'
    assert bql2sql('estimate predictive probability of (age, weight) given '
            '(label) from p1;') == \
        'SELECT bql_row_column_predictive_probability(1, NULL, NULL, _rowid_, '\
        '\'[2, 3]\', \'[1]\')' \
        ' FROM "t1";'
    # (*) expands to every modeled variable not already constrained.
    assert bql2sql('estimate predictive probability of (*) from p1;') == \
        'SELECT bql_row_column_predictive_probability(1, NULL, NULL, _rowid_, '\
        '\'[1, 2, 3]\', \'[]\')' \
        ' FROM "t1";'
    assert bql2sql('estimate predictive probability of (*) given (age, weight) '
            'from p1;') == \
        'SELECT bql_row_column_predictive_probability(1, NULL, NULL, _rowid_, '\
        '\'[1]\', \'[2, 3]\')' \
        ' FROM "t1";'
    assert bql2sql('estimate predictive probability of age given (*) '
            'from p1;') == \
        'SELECT bql_row_column_predictive_probability(1, NULL, NULL, _rowid_, '\
        '\'[2]\', \'[1, 3]\')' \
        ' FROM "t1";'
    assert bql2sql('estimate label, predictive probability of weight'
            ' from p1;') \
        == \
        'SELECT "label", ' \
        'bql_row_column_predictive_probability(1, NULL, NULL, _rowid_, '\
        '\'[3]\', \'[]\')' \
        ' FROM "t1";'
    assert bql2sql('estimate predictive probability of weight, label'
            ' from p1;') \
        == \
        'SELECT bql_row_column_predictive_probability(1, NULL, NULL, _rowid_, '\
        '\'[3]\', \'[]\'),' \
        ' "label"' \
        ' FROM "t1";'
    assert bql2sql('estimate predictive probability of weight + 1'
            ' from p1;') == \
        'SELECT (bql_row_column_predictive_probability(1, NULL, NULL, '\
        '_rowid_, \'[3]\', \'[]\') + 1)' \
        ' FROM "t1";'
    assert bql2sql('estimate predictive probability of weight given (*) + 1'
            ' from p1;') == \
        'SELECT (bql_row_column_predictive_probability(1, NULL, NULL, '\
        '_rowid_, \'[3]\', \'[1, 2]\') + 1)' \
        ' FROM "t1";'
    # PREDICTIVE PROBABILITY parse and compilation errors.
    with pytest.raises(parse.BQLParseError):
        # Need a table.
        bql2sql('estimate predictive probability of weight;')
    with pytest.raises(parse.BQLParseError):
        # Need at most one generator.
        bql2sql('estimate predictive probability of weight'
            ' from p1, p1;')
    with pytest.raises(parse.BQLParseError):
        # Need a generator name, not a subquery.
        bql2sql('estimate predictive probability of weight'
            ' from (select 0);')
    with pytest.raises(parse.BQLParseError):
        # Need a column.
        bql2sql('estimate predictive probability from p1;')
    with pytest.raises(bayeslite.BQLError):
        # Using (*) in both targets and constraints.
        bql2sql('estimate predictive probability of (*) given (*) from p1;')
    with pytest.raises(bayeslite.BQLError):
        # Using (weight, *) in targets.
        bql2sql('estimate predictive probability of (weight, *) given (age) '
            'from p1;')
    with pytest.raises(bayeslite.BQLError):
        # Using (age, *) in constraints.
        bql2sql('estimate predictive probability of weight given (*, age) '
            'from p1;')
    with pytest.raises(bayeslite.BQLError):
        # Using duplicate column age.
        bql2sql('estimate predictive probability of age given (weight, age) '
            'from p1;')
    # PROBABILITY DENSITY.
    assert bql2sql('estimate probability density of weight = 20 from p1;') == \
        'SELECT bql_pdf_joint(1, NULL, NULL, 3, 20) FROM "t1";'
    assert bql2sql('estimate probability density of weight = 20'
            ' given (age = 8)'
            ' from p1;') == \
        'SELECT bql_pdf_joint(1, NULL, NULL, 3, 20, NULL, 2, 8) FROM "t1";'
    assert bql2sql('estimate probability density of (weight = 20, age = 8)'
            ' from p1;') == \
        'SELECT bql_pdf_joint(1, NULL, NULL, 3, 20, 2, 8) FROM "t1";'
    assert bql2sql('estimate probability density of (weight = 20, age = 8)'
            " given (label = 'mumble') from p1;") == \
        "SELECT bql_pdf_joint(1, NULL, NULL, 3, 20, 2, 8, NULL, 1, 'mumble')" \
        ' FROM "t1";'
    # Arbitrary expressions are allowed as the target value.
    assert bql2sql('estimate probability density of weight = (c + 1)'
            ' from p1;') == \
        'SELECT bql_pdf_joint(1, NULL, NULL, 3, ("c" + 1)) FROM "t1";'
    assert bql2sql('estimate probability density of weight = f(c)'
            ' from p1;') == \
        'SELECT bql_pdf_joint(1, NULL, NULL, 3, "f"("c")) FROM "t1";'
    # SIMILARITY: target rows are located by subqueries on _rowid_.
    assert bql2sql('estimate similarity to (rowid = 5) '
            'in the context of weight from p1;') == \
        'SELECT bql_row_similarity(1, NULL, NULL, _rowid_,' \
        ' (SELECT _rowid_ FROM "t1" WHERE ("rowid" = 5)), 3) FROM "t1";'
    assert bql2sql(
            'estimate similarity of (rowid = 12) to (rowid = 5) '
            'in the context of weight from p1;') == \
        'SELECT bql_row_similarity(1, NULL, NULL,' \
        ' (SELECT _rowid_ FROM "t1" WHERE ("rowid" = 12)),' \
        ' (SELECT _rowid_ FROM "t1" WHERE ("rowid" = 5)), 3) FROM "t1";'
    assert bql2sql('estimate similarity to (rowid = 5) in the context of age'
            ' from p1') == \
        'SELECT bql_row_similarity(1, NULL, NULL, _rowid_,' \
        ' (SELECT _rowid_ FROM "t1" WHERE ("rowid" = 5)), 2) FROM "t1";'
    assert bql2sql(
            'estimate similarity of (rowid = 5) to (height = 7 and age < 10)'
            ' in the context of weight from p1;') == \
        'SELECT bql_row_similarity(1, NULL, NULL,' \
        ' (SELECT _rowid_ FROM "t1" WHERE ("rowid" = 5)),' \
        ' (SELECT _rowid_ FROM "t1" WHERE (("height" = 7) AND ("age" < 10))),' \
        ' 3) FROM "t1";'
    with pytest.raises(bayeslite.BQLError):
        # Cannot use all variables for similarity.
        bql2sql(
            'estimate similarity to (rowid = 5) in the context of * from p1;')
    assert bql2sql('estimate similarity to (rowid = 5)'
            ' in the context of age from p1;') == \
        'SELECT bql_row_similarity(1, NULL, NULL, _rowid_,' \
        ' (SELECT _rowid_ FROM "t1" WHERE ("rowid" = 5)), 2) FROM "t1";'
    # DEPENDENCE PROBABILITY takes two fixed column numbers.
    assert bql2sql('estimate dependence probability of age with weight'
            ' from p1;') == \
        'SELECT bql_column_dependence_probability(1, NULL, NULL, 2, 3) '\
        'FROM "t1";'
    with pytest.raises(bayeslite.BQLError):
        # Need both rows fixed.
        bql2sql('estimate similarity to (rowid=2) in the context of r by p1')
    with pytest.raises(bayeslite.BQLError):
        # Need both rows fixed.
        bql2sql('estimate similarity in the context of r within p1')
    with pytest.raises(bayeslite.BQLError):
        # Need both columns fixed.
        bql2sql('estimate dependence probability with age from p1;')
    with pytest.raises(bayeslite.BQLError):
        # Need both columns fixed.
        bql2sql('estimate dependence probability from p1;')
    # MUTUAL INFORMATION: column sets are passed as JSON lists; the last
    # argument is the optional number of samples.
    assert bql2sql('estimate mutual information of age with weight' +
            ' from p1;') == \
        'SELECT bql_column_mutual_information('\
        '1, NULL, NULL, \'[2]\', \'[3]\', NULL)'\
        ' FROM "t1";'
    assert bql2sql('estimate mutual information of age with weight' +
            ' using 42 samples from p1;') == \
        'SELECT bql_column_mutual_information('\
        '1, NULL, NULL, \'[2]\', \'[3]\', 42)'\
        ' FROM "t1";'
    with pytest.raises(bayeslite.BQLError):
        # Need both columns fixed.
        bql2sql('estimate mutual information with age from p1;')
    with pytest.raises(bayeslite.BQLError):
        # Need both columns fixed.
        bql2sql('estimate mutual information from p1;')
    with pytest.raises(bayeslite.BQLError):
        # Need both columns fixed.
        bql2sql('estimate mutual information with age using 42 samples'
            ' from p1;')
    with pytest.raises(bayeslite.BQLError):
        # Need both columns fixed.
        bql2sql('estimate mutual information using 42 samples from p1;')
    # XXX Should be SELECT, not ESTIMATE, here?
    assert bql2sql('estimate correlation of age with weight from p1;') == \
        'SELECT bql_column_correlation(1, NULL, NULL, 2, 3) FROM "t1";'
    with pytest.raises(bayeslite.BQLError):
        # Need both columns fixed.
        bql2sql('estimate correlation with age from p1;')
    with pytest.raises(bayeslite.BQLError):
        # Need both columns fixed.
        bql2sql('estimate correlation from p1;')
    with pytest.raises(BQLError):
        # Variable must exist.
        bql2sql('estimate correlation with agee from variables of p1')
def test_predict_outside_infer():
    """PREDICT is only legal inside an INFER query, not a bare ESTIMATE."""
    query = 'estimate predict age with confidence 0.9 from p1;'
    with pytest.raises(bayeslite.BQLError):
        bql2sql(query)
def test_infer_explicit_predict_confidence():
    """PREDICT ... WITH CONFIDENCE compiles to bql_predict (NULL nsamples)."""
    query = 'infer explicit predict age with confidence 0.9 from p1;'
    expected = ('SELECT bql_predict(1, NULL, NULL, _rowid_, 2, 0.9, NULL)'
                ' FROM "t1";')
    assert bql2sql(query) == expected
def test_infer_explicit_predict_confidence_nsamples():
    """USING n SAMPLES is threaded through as the last bql_predict argument."""
    query = ('infer explicit'
             ' predict age with confidence 0.9 using 42 samples'
             ' from p1;')
    expected = ('SELECT bql_predict(1, NULL, NULL, _rowid_, 2, 0.9, 42)'
                ' FROM "t1";')
    assert bql2sql(query) == expected
def test_infer_explicit_verbatim_and_predict_confidence():
    """Verbatim columns plus PREDICT ... CONFIDENCE expand via a subquery."""
    query = ('infer explicit rowid, age,'
             ' predict age confidence age_conf from p1')
    expected = ''.join([
        'SELECT c0 AS "rowid", c1 AS "age",',
        " bql_json_get(c2, 'value') AS \"age\",",
        " bql_json_get(c2, 'confidence') AS \"age_conf\"",
        ' FROM (SELECT "rowid" AS c0, "age" AS c1,',
        ' bql_predict_confidence(1, NULL, NULL, _rowid_, 2, NULL)',
        ' AS c2 FROM "t1");',
    ])
    assert bql2sql(query) == expected
def test_infer_explicit_verbatim_and_predict_noconfidence():
    """PREDICT without CONFIDENCE extracts only the 'value' field."""
    query = ('infer explicit rowid, age,'
             ' predict age from p1')
    expected = ''.join([
        'SELECT c0 AS "rowid", c1 AS "age",',
        " bql_json_get(c2, 'value') AS \"age\"",
        ' FROM (SELECT "rowid" AS c0, "age" AS c1,',
        ' bql_predict_confidence(1, NULL, NULL, _rowid_, 2, NULL)',
        ' AS c2 FROM "t1");',
    ])
    assert bql2sql(query) == expected
def test_infer_explicit_verbatim_and_predict_confidence_nsamples():
    """USING n SAMPLES propagates into bql_predict_confidence."""
    query = ('infer explicit rowid, age,'
             ' predict age confidence age_conf using 42 samples from p1')
    expected = ''.join([
        'SELECT c0 AS "rowid", c1 AS "age",',
        " bql_json_get(c2, 'value') AS \"age\",",
        " bql_json_get(c2, 'confidence') AS \"age_conf\"",
        ' FROM (SELECT "rowid" AS c0, "age" AS c1,',
        ' bql_predict_confidence(1, NULL, NULL, _rowid_, 2, 42)',
        ' AS c2 FROM "t1");',
    ])
    assert bql2sql(query) == expected
def test_infer_explicit_verbatim_and_predict_noconfidence_nsamples():
    """USING n SAMPLES works without a CONFIDENCE output column."""
    query = ('infer explicit rowid, age,'
             ' predict age using 42 samples from p1')
    expected = ''.join([
        'SELECT c0 AS "rowid", c1 AS "age",',
        " bql_json_get(c2, 'value') AS \"age\"",
        ' FROM (SELECT "rowid" AS c0, "age" AS c1,',
        ' bql_predict_confidence(1, NULL, NULL, _rowid_, 2, 42)',
        ' AS c2 FROM "t1");',
    ])
    assert bql2sql(query) == expected
def test_infer_explicit_verbatim_and_predict_confidence_as():
    """AS alias names the predicted value column instead of the variable."""
    query = ('infer explicit rowid, age,'
             ' predict age as age_inf confidence age_conf from p1')
    expected = ''.join([
        'SELECT c0 AS "rowid", c1 AS "age",',
        " bql_json_get(c2, 'value') AS \"age_inf\",",
        " bql_json_get(c2, 'confidence') AS \"age_conf\"",
        ' FROM (SELECT "rowid" AS c0, "age" AS c1,',
        ' bql_predict_confidence(1, NULL, NULL, _rowid_, 2, NULL)',
        ' AS c2 FROM "t1");',
    ])
    assert bql2sql(query) == expected
def test_infer_explicit_verbatim_and_predict_noconfidence_as():
    """AS alias without CONFIDENCE yields just the aliased value column."""
    query = ('infer explicit rowid, age,'
             ' predict age as age_inf from p1')
    expected = ''.join([
        'SELECT c0 AS "rowid", c1 AS "age",',
        " bql_json_get(c2, 'value') AS \"age_inf\"",
        ' FROM (SELECT "rowid" AS c0, "age" AS c1,',
        ' bql_predict_confidence(1, NULL, NULL, _rowid_, 2, NULL)',
        ' AS c2 FROM "t1");',
    ])
    assert bql2sql(query) == expected
def test_infer_explicit_verbatim_and_predict_confidence_as_nsamples():
    """AS alias, CONFIDENCE, and USING n SAMPLES combine correctly."""
    query = ('infer explicit rowid, age,'
             ' predict age as age_inf confidence age_conf using 87 samples'
             ' from p1')
    expected = ''.join([
        'SELECT c0 AS "rowid", c1 AS "age",',
        " bql_json_get(c2, 'value') AS \"age_inf\",",
        " bql_json_get(c2, 'confidence') AS \"age_conf\"",
        ' FROM (SELECT "rowid" AS c0, "age" AS c1,',
        ' bql_predict_confidence(1, NULL, NULL, _rowid_, 2, 87)',
        ' AS c2 FROM "t1");',
    ])
    assert bql2sql(query) == expected
def test_infer_explicit_verbatim_and_predict_noconfidence_as_nsamples():
    """AS alias with USING n SAMPLES but no CONFIDENCE output column."""
    query = ('infer explicit rowid, age,'
             ' predict age as age_inf using 87 samples'
             ' from p1')
    expected = ''.join([
        'SELECT c0 AS "rowid", c1 AS "age",',
        " bql_json_get(c2, 'value') AS \"age_inf\"",
        ' FROM (SELECT "rowid" AS c0, "age" AS c1,',
        ' bql_predict_confidence(1, NULL, NULL, _rowid_, 2, 87)',
        ' AS c2 FROM "t1");',
    ])
    assert bql2sql(query) == expected
def test_infer_auto():
    """Plain INFER wraps each modeled column in IFNULL over bql_predict."""
    expected = ''.join([
        'SELECT "rowid" AS "rowid",',
        ' "IFNULL"("age", bql_predict(1, NULL, NULL, _rowid_, 2, 0, NULL))',
        ' AS "age",',
        ' "IFNULL"("weight", bql_predict(1, NULL, NULL, _rowid_, 3, 0, NULL))',
        ' AS "weight"',
        ' FROM "t1";',
    ])
    assert bql2sql('infer rowid, age, weight from p1') == expected
def test_infer_auto_nsamples():
    """An expression after USING ... SAMPLES is compiled, not evaluated."""
    query = 'infer rowid, age, weight using (1+2) samples from p1'
    expected = ''.join([
        'SELECT "rowid" AS "rowid",',
        ' "IFNULL"("age",'
        ' bql_predict(1, NULL, NULL, _rowid_, 2, 0, (1 + 2)))',
        ' AS "age",',
        ' "IFNULL"("weight",'
        ' bql_predict(1, NULL, NULL, _rowid_, 3, 0, (1 + 2)))',
        ' AS "weight"',
        ' FROM "t1";',
    ])
    assert bql2sql(query) == expected
def test_infer_auto_with_confidence():
    """WITH CONFIDENCE sets the confidence threshold in bql_predict."""
    query = 'infer rowid, age, weight with confidence 0.9 from p1'
    expected = ''.join([
        'SELECT "rowid" AS "rowid",',
        ' "IFNULL"("age",'
        ' bql_predict(1, NULL, NULL, _rowid_, 2, 0.9, NULL))',
        ' AS "age",',
        ' "IFNULL"("weight",'
        ' bql_predict(1, NULL, NULL, _rowid_, 3, 0.9, NULL))',
        ' AS "weight"',
        ' FROM "t1";',
    ])
    assert bql2sql(query) == expected
def test_infer_auto_with_confidence_nsamples():
    """A function call is accepted as the USING ... SAMPLES expression."""
    query = ('infer rowid, age, weight with confidence 0.9'
             ' using sqrt(2) samples'
             ' from p1')
    expected = ''.join([
        'SELECT "rowid" AS "rowid",',
        ' "IFNULL"("age",'
        ' bql_predict(1, NULL, NULL, _rowid_, 2, 0.9, "sqrt"(2)))',
        ' AS "age",',
        ' "IFNULL"("weight",'
        ' bql_predict(1, NULL, NULL, _rowid_, 3, 0.9, "sqrt"(2)))',
        ' AS "weight"',
        ' FROM "t1";',
    ])
    assert bql2sql(query) == expected
def test_infer_auto_with_confidence_where():
    """A WHERE clause passes through unchanged alongside INFER rewriting."""
    query = ("infer rowid, age, weight with confidence 0.9 from p1"
             " where label = 'foo'")
    expected = ''.join([
        'SELECT "rowid" AS "rowid",',
        ' "IFNULL"("age",'
        ' bql_predict(1, NULL, NULL, _rowid_, 2, 0.9, NULL))',
        ' AS "age",',
        ' "IFNULL"("weight",'
        ' bql_predict(1, NULL, NULL, _rowid_, 3, 0.9, NULL))',
        ' AS "weight"',
        ' FROM "t1"',
        " WHERE (\"label\" = 'foo');",
    ])
    assert bql2sql(query) == expected
def test_infer_auto_with_confidence_nsamples_where():
    """CONFIDENCE, USING ... SAMPLES, and WHERE combine in one INFER."""
    query = ("infer rowid, age, weight with confidence 0.9"
             " using 42 samples"
             " from p1"
             " where label = 'foo'")
    expected = ''.join([
        'SELECT "rowid" AS "rowid",',
        ' "IFNULL"("age",'
        ' bql_predict(1, NULL, NULL, _rowid_, 2, 0.9, 42))',
        ' AS "age",',
        ' "IFNULL"("weight",'
        ' bql_predict(1, NULL, NULL, _rowid_, 3, 0.9, 42))',
        ' AS "weight"',
        ' FROM "t1"',
        " WHERE (\"label\" = 'foo');",
    ])
    assert bql2sql(query) == expected
def test_infer_auto_with_confidence_nsamples_where_predict():
    """PREDICT nested inside the WHERE clause of INFER also compiles.

    The WHERE-clause PREDICT gets its own bql_predict call with its own
    confidence (0.7), independent of the SELECT-list confidence (0.9).
    """
    assert bql2sql('infer rowid, age, weight with confidence 0.9 from p1'
            ' where ifnull(label, predict label with confidence 0.7)'
            ' = \'foo\'') \
        == \
        'SELECT "rowid" AS "rowid",' \
        ' "IFNULL"("age", bql_predict(1, NULL, NULL, _rowid_, 2, 0.9, NULL))' \
        ' AS "age",' \
        ' "IFNULL"("weight", bql_predict(1, NULL, NULL, _rowid_, 3, 0.9,' \
        ' NULL))' \
        ' AS "weight"' \
        ' FROM "t1"' \
        ' WHERE ("ifnull"("label",' \
        ' bql_predict(1, NULL, NULL, _rowid_, 1, 0.7, NULL))' \
        ' = \'foo\');'
def test_infer_auto_with_confidence_nsamples_where_predict_nsamples():
    """Nested WHERE-clause PREDICT keeps its own sample count.

    The SELECT-list predictions use 42 samples while the WHERE-clause
    PREDICT uses its locally specified 73 samples.
    """
    assert bql2sql('infer rowid, age, weight with confidence 0.9'
            ' using 42 samples'
            ' from p1'
            ' where ifnull(label, predict label with confidence 0.7'
            ' using 73 samples)'
            ' = \'foo\'') \
        == \
        'SELECT "rowid" AS "rowid",' \
        ' "IFNULL"("age", bql_predict(1, NULL, NULL, _rowid_, 2, 0.9, 42))' \
        ' AS "age",' \
        ' "IFNULL"("weight", bql_predict(1, NULL, NULL, _rowid_, 3, 0.9, 42))' \
        ' AS "weight"' \
        ' FROM "t1"' \
        ' WHERE ("ifnull"("label",' \
        ' bql_predict(1, NULL, NULL, _rowid_, 1, 0.7, 73))' \
        ' = \'foo\');'
def test_infer_auto_star():
    """INFER with * expands every modeled column; unmodeled ones pass through."""
    expected = ''.join([
        'SELECT "rowid" AS "rowid", "id" AS "id",',
        ' "IFNULL"("label",'
        ' bql_predict(1, NULL, NULL, _rowid_, 1, 0, NULL))',
        ' AS "label",',
        ' "IFNULL"("age", bql_predict(1, NULL, NULL, _rowid_, 2, 0, NULL))',
        ' AS "age",',
        ' "IFNULL"("weight",'
        ' bql_predict(1, NULL, NULL, _rowid_, 3, 0, NULL))',
        ' AS "weight"',
        ' FROM "t1";',
    ])
    assert bql2sql('infer rowid, * from p1') == expected
def test_infer_auto_star_nsamples():
    """INFER with * and USING n SAMPLES threads the count to every column."""
    expected = ''.join([
        'SELECT "rowid" AS "rowid", "id" AS "id",',
        ' "IFNULL"("label", bql_predict(1, NULL, NULL, _rowid_, 1, 0, 1))',
        ' AS "label",',
        ' "IFNULL"("age", bql_predict(1, NULL, NULL, _rowid_, 2, 0, 1))',
        ' AS "age",',
        ' "IFNULL"("weight", bql_predict(1, NULL, NULL, _rowid_, 3, 0, 1))',
        ' AS "weight"',
        ' FROM "t1";',
    ])
    assert bql2sql('infer rowid, * using 1 samples from p1') == expected
def test_estimate_columns_trivial():
    """ESTIMATE ... FROM COLUMNS/VARIABLES OF compiles to bayesdb_variable scans.

    Every 1-column query becomes a SELECT over the bayesdb_variable
    catalog with extra WHERE/ORDER BY terms appended; `prefix0`/`prefix1`
    hold the shared head and tail of that scaffolding.
    """
    prefix0 = 'SELECT v.name AS name'
    prefix1 = ' FROM bayesdb_variable AS v' \
        ' WHERE v.population_id = 1' \
        ' AND v.generator_id IS NULL'
    prefix = prefix0 + prefix1
    assert bql2sql('estimate * from columns of p1;') == \
        prefix + ';'
    assert bql2sql('estimate * from columns of p1 where' +
            ' (probability density of value 42) > 0.5') == \
        prefix + \
        ' AND (bql_column_value_probability(1, NULL, NULL, v.colno, 42) > 0.5);'
    assert bql2sql('estimate * from columns of p1'
            ' where (probability density of value 8)'
            ' > (probability density of age = 16)') == \
        prefix + \
        ' AND (bql_column_value_probability(1, NULL, NULL, v.colno, 8) >' \
        ' bql_pdf_joint(1, NULL, NULL, 2, 16));'
    assert bql2sql('estimate *, probability density of value 8 given (age = 8)'
            ' from columns of p1;') == \
        prefix0 + \
        ', bql_column_value_probability(1, NULL, NULL, v.colno, 8, 2, 8)' + \
        prefix1 + ';'
    with pytest.raises(bayeslite.BQLError):
        bql2sql('estimate probability density of value 8 given (agee = 8)'
            ' from columns of p1')
    with pytest.raises(bayeslite.BQLError):
        # PREDICTIVE PROBABILITY makes no sense without row.
        bql2sql('estimate * from columns of p1 where' +
            ' predictive probability of x > 0;')
    with pytest.raises(bayeslite.BQLError):
        # SIMILARITY makes no sense without row.
        bql2sql('estimate * from columns of p1 where' +
            ' similarity to (rowid = x) in the context of c > 0;')
    assert bql2sql('estimate * from columns of p1 where' +
            ' dependence probability with age > 0.5;') == \
        prefix + \
        ' AND (bql_column_dependence_probability(1, NULL, NULL, 2, v.colno)' \
        ' > 0.5);'
    with pytest.raises(bayeslite.BQLError):
        # Must omit exactly one column.
        bql2sql('estimate * from columns of p1 where' +
            ' dependence probability of age with weight > 0.5;')
    with pytest.raises(bayeslite.BQLError):
        # Must omit exactly one column.
        bql2sql('estimate * from columns of p1'
            ' where dependence probability > 0.5;')
    # The free column is spliced into the JSON list via SQL concatenation.
    assert bql2sql('estimate * from columns of p1 order by' +
            ' mutual information with age;') == \
        prefix + \
        ' ORDER BY bql_column_mutual_information(1, NULL, NULL, \'[2]\','\
        ' \'[\' || v.colno || \']\', NULL);'
    assert bql2sql('estimate * from columns of p1 order by' +
            ' mutual information with (age, label) using 42 samples;') == \
        prefix + \
        ' ORDER BY bql_column_mutual_information(1, NULL, NULL, \'[2, 1]\','\
        ' \'[\' || v.colno || \']\', 42);'
    assert bql2sql('estimate * from columns of p1 order by' +
            ' mutual information with (age, label)'
            ' given (weight=12) using 42 samples;') == \
        prefix + \
        ' ORDER BY bql_column_mutual_information(1, NULL, NULL, \'[2, 1]\','\
        ' \'[\' || v.colno || \']\', 42, 3, 12);'
    with pytest.raises(bayeslite.BQLError):
        # Must omit exactly one column.
        bql2sql('estimate * from columns of p1 order by' +
            ' mutual information of age with weight;')
    with pytest.raises(bayeslite.BQLError):
        # Must omit exactly one column.
        bql2sql('estimate * from columns of p1'
            ' where mutual information > 0.5;')
    with pytest.raises(bayeslite.BQLError):
        # Must omit exactly one column.
        bql2sql('estimate * from columns of p1 order by' +
            ' mutual information of age with weight using 42 samples;')
    with pytest.raises(bayeslite.BQLError):
        # Must omit exactly one column.
        bql2sql('estimate * from columns of p1 where' +
            ' mutual information using 42 samples > 0.5;')
    assert bql2sql('estimate * from columns of p1 order by' +
            ' correlation with age desc;') == \
        prefix + ' ORDER BY bql_column_correlation(1, NULL, NULL, 2, v.colno)' \
        ' DESC;'
    with pytest.raises(bayeslite.BQLError):
        # Must omit exactly one column.
        bql2sql('estimate * from columns of p1 order by' +
            ' correlation of age with weight;')
    with pytest.raises(bayeslite.BQLError):
        # Must omit exactly one column.
        bql2sql('estimate * from columns of p1 where correlation > 0.5;')
    with pytest.raises(bayeslite.BQLError):
        # Makes no sense.
        bql2sql('estimate * from columns of p1'
            ' where predict age with confidence 0.9 > 30;')
    # Column aliases can be referenced from WHERE and ORDER BY.
    assert bql2sql('estimate'
            ' *, dependence probability with weight as depprob,'
            ' mutual information with weight as mutinf'
            ' from columns of p1'
            ' where depprob > 0.5 order by mutinf desc') == \
        prefix0 + \
        ', bql_column_dependence_probability(1, NULL, NULL, 3, v.colno)' \
        ' AS "depprob"' \
        ', bql_column_mutual_information(1, NULL, NULL, \'[3]\',' \
        ' \'[\' || v.colno || \']\', NULL) AS "mutinf"' \
        + prefix1 + \
        ' AND ("depprob" > 0.5)' \
        ' ORDER BY "mutinf" DESC;'
    assert bql2sql('estimate'
            ' *, dependence probability with weight as depprob,'
            ' mutual information with (age, weight) as mutinf'
            ' from columns of p1'
            ' where depprob > 0.5 order by mutinf desc') == \
        prefix0 + \
        ', bql_column_dependence_probability(1, NULL, NULL, 3, v.colno)' \
        ' AS "depprob"' \
        ', bql_column_mutual_information(1, NULL, NULL, \'[2, 3]\',' \
        ' \'[\' || v.colno || \']\', NULL) AS "mutinf"' \
        + prefix1 + \
        ' AND ("depprob" > 0.5)' \
        ' ORDER BY "mutinf" DESC;'
    # XXX This mixes up target and reference variables, which is OK,
    # because MI is symmetric, but...oops.
    assert bql2sql('estimate * from variables of p1'
            ' where probability of (mutual information with age < 0.1)'
            ' > 0.8') == \
        prefix + \
        ' AND ((SELECT "AVG"("x") FROM (SELECT ("v0" < 0.1) AS "x"' \
        ' FROM (SELECT mi AS "v0" FROM bql_mutinf' \
        ' WHERE population_id = 1' \
        " AND target_vars = '[2]'" \
        " AND reference_vars = '[' || v.colno || ']'))) > 0.8);"
    assert bql2sql('estimate * from variables of p1'
            ' order by probability of (mutual information with age < 0.1)') ==\
        prefix + \
        ' ORDER BY (SELECT "AVG"("x") FROM (SELECT ("v0" < 0.1) AS "x"' \
        ' FROM (SELECT mi AS "v0" FROM bql_mutinf' \
        ' WHERE population_id = 1' \
        " AND target_vars = '[2]'" \
        " AND reference_vars = '[' || v.colno || ']')));"
def test_estimate_pairwise_trivial():
    """Compile ESTIMATE ... FROM PAIRWISE COLUMNS OF p1 to SQL.

    Checks dependence probability, mutual information (optionally with
    a sample count or GIVEN conditions), and correlation over all
    column pairs, plus aliased output columns.  Also checks that
    1-column functions, row functions, and 2-column forms that name
    their columns explicitly are rejected with BQLError in the
    pairwise-columns context.
    """
    # Shared fragments of the compiled SQL: select-list head, the
    # ' AS value' alias, and the FROM/WHERE skeleton joining the
    # population against both variable aliases v0 and v1.
    prefix = 'SELECT 1 AS population_id, v0.name AS name0, v1.name AS name1, '
    infix = ' AS value'
    infix0 = ' FROM bayesdb_population AS p,'
    infix0 += ' bayesdb_variable AS v0,'
    infix0 += ' bayesdb_variable AS v1'
    infix0 += ' WHERE p.id = 1'
    infix0 += ' AND v0.population_id = p.id AND v1.population_id = p.id'
    infix0 += ' AND v0.generator_id IS NULL'
    infix0 += ' AND v1.generator_id IS NULL'
    infix += infix0
    assert bql2sql('estimate dependence probability'
            ' from pairwise columns of p1;') == \
        prefix + \
        'bql_column_dependence_probability(1, NULL, NULL, v0.colno,'\
            ' v1.colno)' + \
        infix + ';'
    assert bql2sql('estimate mutual information'
            ' from pairwise columns of p1 where'
            ' (probability density of age = 0) > 0.5;') == \
        prefix + \
        'bql_column_mutual_information(1, NULL, NULL, '\
        '\'[\' || v0.colno || \']\', \'[\' || v1.colno || \']\', NULL)' + \
        infix + \
        ' AND (bql_pdf_joint(1, NULL, NULL, 2, 0) > 0.5);'
    assert bql2sql('estimate mutual information given (label=\'go\', weight)'
            ' from pairwise columns of p1 where'
            ' (probability density of age = 0) > 0.5;') == \
        prefix + \
        'bql_column_mutual_information(1, NULL, NULL,'\
        ' \'[\' || v0.colno || \']\', \'[\' || v1.colno || \']\', NULL,'\
        ' 1, \'go\', 3, NULL)' + \
        infix + \
        ' AND (bql_pdf_joint(1, NULL, NULL, 2, 0) > 0.5);'
    with pytest.raises(bayeslite.BQLError):
        # PROBABILITY DENSITY OF VALUE is 1-column.
        bql2sql('estimate correlation from pairwise columns of p1 where' +
            ' (probability density of value 0) > 0.5;')
    with pytest.raises(bayeslite.BQLError):
        # PREDICTIVE PROBABILITY OF is a row function.
        bql2sql('estimate dependence probability'
            ' from pairwise columns of p1' +
            ' where predictive probability of x > 0.5;')
    with pytest.raises(bayeslite.BQLError):
        # Must omit both columns.
        bql2sql('estimate dependence probability'
            ' from pairwise columns of p1'
            ' where dependence probability of age with weight > 0.5;')
    with pytest.raises(bayeslite.BQLError):
        # Must omit both columns.
        bql2sql('estimate mutual information from pairwise columns of p1'
            ' where dependence probability with weight > 0.5;')
    with pytest.raises(bayeslite.BQLError):
        # Must omit both columns.
        bql2sql('estimate mutual information using 42 samples'
            ' from pairwise columns of p1'
            ' where dependence probability with weight > 0.5;')
    assert bql2sql('estimate correlation from pairwise columns of p1'
            ' where dependence probability > 0.5;') == \
        prefix + 'bql_column_correlation(1, NULL, NULL, v0.colno, v1.colno)' + \
        infix + ' AND' \
        ' (bql_column_dependence_probability(1, NULL, NULL, v0.colno,' \
            ' v1.colno)' \
        ' > 0.5);'
    with pytest.raises(bayeslite.BQLError):
        # Must omit both columns.
        bql2sql('estimate dependence probability'
            ' from pairwise columns of p1'
            ' where mutual information of age with weight > 0.5;')
    with pytest.raises(bayeslite.BQLError):
        # Must omit both columns.
        bql2sql('estimate dependence probability'
            ' from pairwise columns of p1'
            ' where mutual information of age with weight using 42 samples'
            ' > 0.5;')
    with pytest.raises(bayeslite.BQLError):
        # Must omit both columns.
        bql2sql('estimate mutual information from pairwise columns of p1'
            ' where mutual information with weight > 0.5;')
    with pytest.raises(bayeslite.BQLError):
        # Must omit both columns.
        bql2sql('estimate mutual information using 42 samples'
            ' from pairwise columns of p1'
            ' where mutual information with weight using 42 samples > 0.5;')
    assert bql2sql('estimate correlation from pairwise columns of p1' +
            ' where mutual information > 0.5;') == \
        prefix + 'bql_column_correlation(1, NULL, NULL, v0.colno, v1.colno)' + \
        infix + ' AND' + \
        ' (bql_column_mutual_information(1, NULL, NULL,'\
        ' \'[\' || v0.colno || \']\', \'[\' || v1.colno || \']\', NULL) > 0.5);'
    assert bql2sql('estimate correlation from pairwise columns of p1' +
            ' where mutual information using 42 samples > 0.5;') == \
        prefix + 'bql_column_correlation(1, NULL, NULL, v0.colno, v1.colno)' + \
        infix + ' AND' + \
        ' (bql_column_mutual_information(1, NULL, NULL,'\
        ' \'[\' || v0.colno || \']\', \'[\' || v1.colno || \']\', 42) > 0.5);'
    with pytest.raises(bayeslite.BQLError):
        # Must omit both columns.
        bql2sql('estimate dependence probability'
            ' from pairwise columns of p1'
            ' where correlation of age with weight > 0.5;')
    with pytest.raises(bayeslite.BQLError):
        # Must omit both columns.
        bql2sql('estimate mutual information from pairwise columns of p1'
            ' where correlation with weight > 0.5;')
    with pytest.raises(bayeslite.BQLError):
        # Must omit both columns.
        bql2sql('estimate mutual information using 42 samples'
            ' from pairwise columns of p1'
            ' where correlation with weight > 0.5;')
    assert bql2sql('estimate correlation from pairwise columns of p1'
            ' where correlation > 0.5;') == \
        prefix + 'bql_column_correlation(1, NULL, NULL, v0.colno, v1.colno)' + \
        infix + ' AND' + \
        ' (bql_column_correlation(1, NULL, NULL, v0.colno, v1.colno) > 0.5);'
    with pytest.raises(bayeslite.BQLError):
        # Makes no sense: PREDICT is a 1-row function.
        bql2sql('estimate dependence probability'
            ' from pairwise columns of p1'
            ' where predict age with confidence 0.9 > 30;')
    # AS aliases are usable in WHERE and ORDER BY of the same query.
    assert bql2sql('estimate dependence probability as depprob,'
            ' mutual information as mutinf'
            ' from pairwise columns of p1'
            ' where depprob > 0.5 order by mutinf desc') == \
        prefix + \
        'bql_column_dependence_probability(1, NULL, NULL, v0.colno, v1.colno)' \
            ' AS "depprob",' \
        ' bql_column_mutual_information(1, NULL, NULL,'\
        ' \'[\' || v0.colno || \']\', \'[\' || v1.colno || \']\', NULL)'\
        ' AS "mutinf"' \
        + infix0 + \
        ' AND ("depprob" > 0.5)' \
        ' ORDER BY "mutinf" DESC;'
def test_estimate_pairwise_row():
    """Compile ESTIMATE ... FROM PAIRWISE <population> (row pairs) to SQL."""
    head = 'SELECT r0._rowid_ AS rowid0, r1._rowid_ AS rowid1'
    tail = ' AS value FROM "t1" AS r0, "t1" AS r1'
    expected = ''.join([
        head,
        ', bql_row_similarity(1, NULL, NULL, r0._rowid_, r1._rowid_, 2)',
        tail,
        ';',
    ])
    actual = bql2sql(
        'estimate similarity in the context of age from pairwise p1;')
    assert actual == expected
    # PREDICT is a 1-row function, so it is rejected in pairwise context.
    with pytest.raises(bayeslite.BQLError):
        bql2sql('estimate predict age with confidence 0.9 from pairwise t1;')
def test_estimate_pairwise_selected_columns():
    """FOR <columns> restricts pairwise estimates to the listed variables.

    The column list may be literal names or a column subquery; only the
    final colno IN (...) filter of the compiled SQL differs.
    """
    def expected(colnos):
        # Shared skeleton of the compiled pairwise-dependence query.
        return ''.join([
            'SELECT 1 AS population_id, v0.name AS name0, v1.name AS name1,',
            ' bql_column_dependence_probability(1, NULL, NULL,',
            ' v0.colno, v1.colno)',
            ' AS value',
            ' FROM bayesdb_population AS p,',
            ' bayesdb_variable AS v0,',
            ' bayesdb_variable AS v1',
            ' WHERE p.id = 1',
            ' AND v0.population_id = p.id AND v1.population_id = p.id',
            ' AND v0.generator_id IS NULL AND v1.generator_id IS NULL',
            ' AND v0.colno IN (%s) AND v1.colno IN (%s);' % (colnos, colnos),
        ])
    # Literal column names.
    assert bql2sql('estimate dependence probability'
            ' from pairwise columns of p1 for label, age') == \
        expected('1, 2')
    # Column subquery selecting the columns to pair.
    assert bql2sql('estimate dependence probability'
            ' from pairwise columns of p1'
            ' for (ESTIMATE * FROM COLUMNS OF p1'
            ' ORDER BY name DESC LIMIT 2)') == \
        expected('3, 1')
def test_select_columns_subquery():
    """A column subquery inside SELECT expands into the matching columns."""
    query = ('select id, t1.(estimate * from columns of p1'
        ' order by name asc limit 2) from t1')
    expected = 'SELECT "id", "t1"."age", "t1"."label" FROM "t1";'
    assert bql2sql(query) == expected
@pytest.mark.xfail(strict=True, reason='no simulate vars from models of')
def test_simulate_models_columns_subquery():
    """SIMULATE with a column subquery FROM MODELS OF (not yet supported)."""
    subq = (' t1.(estimate * from columns of p1'
        ' order by name asc limit 2) from models of p1')
    wrap = ' (SELECT * FROM "bayesdb_temp_0");'
    cases = [
        ('simulate weight,' + subq,
            'SELECT * FROM "bayesdb_temp_0";'),
        ('simulate 0, weight,' + subq,
            'SELECT 0, "v0" AS "weight", "v1" AS "age", "v2" AS "label" FROM'
                + wrap),
        ('simulate weight + 1,' + subq,
            'SELECT ("v0" + 1), "v1" AS "age", "v2" AS "label" FROM' + wrap),
        ('simulate weight + 1 AS wp1,' + subq,
            'SELECT ("v0" + 1) AS "wp1", "v1" AS "age", "v2" AS "label" FROM'
                + wrap),
    ]
    # Checked in order, matching the original assert sequence.
    for bql, sql in cases:
        assert bql2sql(bql) == sql
def test_simulate_columns_subquery():
    """SIMULATE with a column subquery compiles to a temp-table select."""
    # XXX This test is a little unsatisfactory -- we do not get to see
    # what the variables in the result are named...
    query = ('simulate weight, t1.(estimate * from columns of p1'
        ' order by name asc limit 2) from p1 limit 10')
    assert bql2sql(query) == 'SELECT * FROM "bayesdb_temp_0";'
    # Compound columns are not yet implemented for SIMULATE.
    bad = ('simulate weight + 1, t1.(estimate * from columns of p1'
        ' order by name asc limit 2) from p1 limit 10')
    with pytest.raises(parse.BQLParseError):
        bql2sql(bad)
def test_simulate_models():
    """Compile SIMULATE <mutual information> FROM MODELS OF to SQL.

    Each variant becomes a query against the bql_mutinf virtual table,
    with target/reference variables encoded as JSON colno lists and
    optional nsamples / conditions / generator_id filters.
    """
    # Base case.
    assert bql2sql('simulate mutual information of age with weight'
            ' from models of p1') == \
        'SELECT mi FROM bql_mutinf' \
        ' WHERE population_id = 1' \
        " AND target_vars = '[2]'" \
        " AND reference_vars = '[3]';"
    # Multiple target variables.
    assert bql2sql('simulate mutual information of (label, age) with weight'
            ' from models of p1') == \
        'SELECT mi FROM bql_mutinf' \
        ' WHERE population_id = 1' \
        " AND target_vars = '[1, 2]'" \
        " AND reference_vars = '[3]';"
    # Multiple reference variables.
    assert bql2sql('simulate mutual information of age with (label, weight)'
            ' from models of p1') == \
        'SELECT mi FROM bql_mutinf' \
        ' WHERE population_id = 1' \
        " AND target_vars = '[2]'" \
        " AND reference_vars = '[1, 3]';"
    # Specified number of samples.
    assert bql2sql('simulate mutual information of age with weight'
            ' using 42 samples from models of p1') == \
        'SELECT mi FROM bql_mutinf' \
        ' WHERE population_id = 1' \
        " AND target_vars = '[2]'" \
        " AND reference_vars = '[3]'" \
        ' AND nsamples = 42;'
    # Conditional.
    assert bql2sql('simulate mutual information of age with weight'
            " given (label = 'foo') from models of p1") == \
        'SELECT mi FROM bql_mutinf' \
        ' WHERE population_id = 1' \
        " AND target_vars = '[2]'" \
        " AND reference_vars = '[3]'" \
        " AND conditions = '{\"1\": \"foo\"}';"
    # Modeled by a specific generator.
    assert bql2sql('simulate mutual information of age with weight'
            ' from models of p1 modeled by g1',
            lambda bdb: bdb.execute('create generator g1 for p1')) == \
        'SELECT mi FROM bql_mutinf' \
        ' WHERE population_id = 1' \
        ' AND generator_id = 1' \
        " AND target_vars = '[2]'" \
        " AND reference_vars = '[3]';"
    # Two mutual informations: one subquery per estimand, joined on rowid.
    assert bql2sql('simulate mutual information of age with weight AS "mi(aw)",'
            ' mutual information of label with weight AS "mi(lw)"'
            ' from models of p1') == \
        'SELECT t0."mi(aw)" AS "mi(aw)", t1."mi(lw)" AS "mi(lw)"' \
        ' FROM (SELECT _rowid_, mi AS "mi(aw)" FROM bql_mutinf' \
        ' WHERE population_id = 1' \
        " AND target_vars = '[2]'" \
        " AND reference_vars = '[3]') AS t0," \
        ' (SELECT _rowid_, mi AS "mi(lw)" FROM bql_mutinf' \
        ' WHERE population_id = 1' \
        " AND target_vars = '[1]'" \
        " AND reference_vars = '[3]') AS t1" \
        ' WHERE t0._rowid_ = t1._rowid_;'
def test_probability_of_mutinf():
    """PROBABILITY OF (<mutinf> < x) averages an indicator over MI draws."""
    bql = ('estimate probability of'
        ' (mutual information of age with weight < 0.1) > 0.5'
        ' within p1')
    expected = ''.join([
        'SELECT ((SELECT "AVG"("x") FROM (SELECT ("v0" < 0.1) AS "x"',
        ' FROM (SELECT mi AS "v0" FROM bql_mutinf',
        ' WHERE population_id = 1',
        " AND target_vars = '[2]'",
        " AND reference_vars = '[3]'))) > 0.5);",
    ])
    assert bql2sql(bql) == expected
def test_modeledby_usingmodels_trival():
    """MODELED BY <generator> / USING MODELS <list> compile to SQL.

    The generator id and the expanded model-number list (ranges like
    1-3 become [1, 2, 3]) are threaded through as the second and third
    arguments of the compiled bql_* calls.
    """
    def setup(bdb):
        # Create the generator that MODELED BY m1 refers to.
        bdb.execute('create generator m1 for p1 using cgpm;')
    assert bql2sql('estimate predictive probability of weight + 1'
            ' from p1 modeled by m1 using models 1-3, 5;', setup=setup) == \
        'SELECT (bql_row_column_predictive_probability(1, 1, \'[1, 2, 3, 5]\','\
        ' _rowid_, \'[3]\', \'[]\') + 1)' \
        ' FROM "t1";'
    assert bql2sql(
            'infer rowid, age, weight from p1 modeled by m1 using model 7',
            setup=setup) == \
        'SELECT "rowid" AS "rowid",' \
        ' "IFNULL"("age", bql_predict(1, 1, \'[7]\', _rowid_, 2, 0, NULL))' \
        ' AS "age",' \
        ' "IFNULL"("weight", bql_predict(1, 1, \'[7]\', _rowid_, 3, 0, NULL))' \
        ' AS "weight"' \
        ' FROM "t1";'
    assert bql2sql('infer explicit predict age with confidence 0.9'
            ' from p1 using models 0, 3-5;',
            setup=setup) == \
        'SELECT bql_predict(1, NULL, \'[0, 3, 4, 5]\', _rowid_, 2, 0.9, NULL)'\
        ' FROM "t1";'
    assert bql2sql('''
        estimate predictive relevance
            of (label = 'Uganda')
            to existing rows (rowid < 4)
            and hypothetical rows with values (
                ("age" = 82, "weight" = 14),
                ("age" = 74, label = 'Europe', "weight" = 7)
            )
            in the context of "weight"
        by p1 modeled by m1 using models 8, 10-12
    ''', setup=setup) == \
        'SELECT bql_row_predictive_relevance(1, 1, \'[8, 10, 11, 12]\', ' \
        '(SELECT _rowid_ FROM "t1" WHERE ("label" = \'Uganda\')), '\
        '\'[1, 2, 3]\', 3, '\
        '2, 82, 3, 14, NULL, 2, 74, 1, \'Europe\', 3, 7, NULL);'
    assert bql2sql('''
        estimate dependence probability
        from pairwise columns of p1
        for label, age
        modeled by m1
        using models 1, 4, 12
    ''', setup=setup) == \
        'SELECT 1 AS population_id, v0.name AS name0, v1.name AS name1,' \
        ' bql_column_dependence_probability(1, 1, \'[1, 4, 12]\',' \
        ' v0.colno, v1.colno)' \
        ' AS value' \
        ' FROM bayesdb_population AS p,' \
        ' bayesdb_variable AS v0,' \
        ' bayesdb_variable AS v1' \
        ' WHERE p.id = 1' \
        ' AND v0.population_id = p.id AND v1.population_id = p.id' \
        ' AND (v0.generator_id IS NULL OR v0.generator_id = 1)' \
        ' AND (v1.generator_id IS NULL OR v1.generator_id = 1)' \
        ' AND v0.colno IN (1, 2) AND v1.colno IN (1, 2);'
    assert bql2sql('''
        estimate mutual information of age with weight
        from p1 modeled by m1 using model 1;
    ''', setup=setup) == \
        'SELECT bql_column_mutual_information('\
        '1, 1, \'[1]\', \'[2]\', \'[3]\', NULL)'\
        ' FROM "t1";'
def test_simulate_columns_all():
    """SIMULATE * is not valid BQL: variables must be named explicitly."""
    query = 'simulate * from p1 limit 1'
    with pytest.raises(parse.BQLParseError):
        bql2sql(query)
def test_trivial_commands():
    """End-to-end smoke test of table/population/generator commands.

    Exercises CSV import (with and without ifnotexists), guessed
    populations, generator create/rename, model initialize/analyze/
    drop, savepoint rollback of model state, case-insensitive name
    handling, and error cases (BQLError) against one bdb instance.
    """
    with test_csv.bayesdb_csv_file(test_csv.csv_data) as (bdb, fname):
        # XXX Query parameters!
        # NOTE(review): the 'rU' open mode was removed in Python 3.11;
        # presumably this suite targets Python 2 -- confirm before porting.
        with open(fname, 'rU') as f:
            bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True)
        with open(fname, 'rU') as f:
            # Re-creating an existing table fails without ifnotexists.
            with pytest.raises(ValueError):
                bayeslite.bayesdb_read_csv(bdb, 't', f, header=True,
                    create=True)
        with open(fname, 'rU') as f:
            bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True,
                ifnotexists=True)
        guess.bayesdb_guess_population(bdb, 'p', 't')
        with pytest.raises(ValueError):
            guess.bayesdb_guess_population(bdb, 'p', 't')
        guess.bayesdb_guess_population(bdb, 'p', 't', ifnotexists=True)
        bdb.execute('create generator p_cc for p;')
        bdb.execute('initialize 2 models for p_cc')
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('initialize 2 models for p_cc')
        # Dropping all models is idempotent.
        bdb.execute('drop models from p_cc')
        bdb.execute('drop models from p_cc')
        bdb.execute('initialize 2 models for p_cc')
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('initialize 2 models for p_cc')
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('drop models 0-2 from p_cc')
        bdb.execute('drop models 0-1 from p_cc')
        # Model changes inside a savepoint are visible within it...
        with bdb.savepoint():
            bdb.execute('initialize 2 models for p_cc')
            bdb.execute('drop models 0-1 from p_cc')
        # ...and the drop cannot be repeated afterward.
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('drop models 0-1 from p_cc')
        bdb.execute('initialize 2 models for p_cc')
        bdb.execute('initialize 1 model if not exists for p_cc')
        bdb.execute('initialize 2 models if not exists for p_cc')
        population_id = core.bayesdb_get_population(bdb, 'p')
        generator_id = core.bayesdb_get_generator(bdb, population_id, 'p_cc')
        assert core.bayesdb_generator_table(bdb, generator_id) == 't'
        # Renames (including case-only and no-op renames) track through
        # the metadata tables.
        bdb.execute('alter table t rename to t')
        assert core.bayesdb_generator_table(bdb, generator_id) == 't'
        bdb.execute('alter table t rename to T')
        assert core.bayesdb_generator_table(bdb, generator_id) == 'T'
        bdb.execute('alter population p rename to p')
        assert core.bayesdb_population_name(bdb, population_id) == 'p'
        bdb.execute('alter population p rename to p2')
        assert core.bayesdb_population_name(bdb, population_id) == 'p2'
        bdb.execute('alter population p2 rename to p')
        assert core.bayesdb_population_name(bdb, population_id) == 'p'
        bdb.execute('estimate count(*) from p').fetchall()
        bdb.execute('alter table t rename to t')
        assert core.bayesdb_generator_table(bdb, generator_id) == 't'
        bdb.execute('alter generator p_cc rename to p0_cc')
        assert core.bayesdb_generator_name(bdb, generator_id) == 'p0_cc'
        # Multiple renames in one statement apply left to right.
        bdb.execute('alter generator p0_cc rename to zot, rename to P0_CC')
        assert core.bayesdb_generator_name(bdb, generator_id) == 'P0_CC'
        bdb.execute('alter generator P0_cc rename to P0_cc')
        assert core.bayesdb_generator_name(bdb, generator_id) == 'P0_cc'
        bdb.execute('alter generator p0_CC rename to p0_cc')
        assert core.bayesdb_generator_name(bdb, generator_id) == 'p0_cc'
        bdb.execute('estimate count(*) from p').fetchall()
        with pytest.raises(bayeslite.BQLError):
            # The old generator name is gone.
            bdb.execute('estimate count(*) from p_cc')
        bdb.execute('alter generator p0_cc rename to P0_cc')
        bdb.execute('analyze p0_cc for 1 iteration')
        colno = core.bayesdb_variable_number(bdb, population_id, generator_id,
            'gender')
        with pytest.raises(parse.BQLParseError):
            # Rename the table's columns, not the generator's columns.
            bdb.execute('alter generator p0_cc rename gender to sex')
        with pytest.raises(NotImplementedError): # XXX
            bdb.execute('alter table t rename to t0, rename gender to sex')
            assert core.bayesdb_variable_number(
                    bdb, population_id, generator_id, 'sex') \
                == colno
            bdb.execute('analyze p0_cc model 0 for 1 iteration')
            bdb.execute('alter generator p0_cc rename to p_cc')
            assert core.bayesdb_variable_number(
                    bdb, population_id, generator_id, 'sex') \
                == colno
            bdb.execute('select sex from t0').fetchall()
            with pytest.raises(AssertionError): # XXX
                bdb.execute('select gender from t0')
                assert False, 'Need to fix quoting of unknown columns!'
            with pytest.raises(bayeslite.BQLError):
                bdb.execute('estimate predict sex with confidence 0.9'
                    ' from p').fetchall()
            bdb.execute('infer explicit predict sex with confidence 0.9'
                ' from p').fetchall()
            with pytest.raises(bayeslite.BQLError):
                bdb.execute('estimate predict gender with confidence 0.9'
                    ' from p')
            with pytest.raises(bayeslite.BQLError):
                bdb.execute('infer explicit predict gender with confidence 0.9'
                    ' from p')
            bdb.execute('alter table t0 rename sex to gender')
            assert core.bayesdb_variable_number(
                    bdb, population_id, generator_id, 'gender') \
                == colno
        bdb.execute('alter generator p0_cc rename to p_cc')    # XXX
        bdb.execute('alter table t rename to T0')   # XXX
        bdb.sql_execute('create table t0_temp(x)')
        bdb.execute('alter table T0 rename to t0')
        assert bdb.execute('select count(*) from t0_temp').fetchvalue() == 0
        assert bdb.execute('select count(*) from t0').fetchvalue() > 0
        with pytest.raises(bayeslite.BQLError):
            # Cannot specify models with rename.
            bdb.execute('alter generator p_cc models (1) rename to p_cc_fail')
        bdb.execute('drop table T0_TEMP')
        bdb.execute('analyze p_cc model 0 for 1 iteration')
        bdb.execute('analyze p_cc model 1 for 1 iteration')
        bdb.execute('analyze p_cc models 0-1 for 1 iteration')
        bdb.execute('analyze p_cc models 0,1 for 1 iteration')
        bdb.execute('analyze p_cc for 1 iteration')
        bdb.execute('select * from t0').fetchall()
        bdb.execute('select * from T0').fetchall()
        bdb.execute('estimate * from p').fetchall()
        bdb.execute('estimate * from P').fetchall()
        # SIMILARITY IN THE CONTEXT OF requires exactly 1 variable.
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('estimate similarity in the context of * '
                'from pairwise p').fetchall()
        bdb.execute('estimate similarity in the context of age '
            'from pairwise p').fetchall()
        bdb.execute('alter population p rename to p2')
        assert core.bayesdb_population_name(bdb, population_id) == 'p2'
        bdb.execute('estimate similarity to (rowid=1) in the context of rank '
            'from p2').fetchall()
        bdb.execute('select value from'
            ' (estimate correlation from pairwise columns of p2)').fetchall()
        bdb.execute('infer explicit predict age with confidence 0.9'
            ' from p2').fetchall()
        bdb.execute('infer explicit predict AGE with confidence 0.9'
            ' from P2').fetchall()
        bdb.execute('infer explicit predict aGe with confidence 0.9'
            ' from P2').fetchall()
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('estimate predict agee with confidence 0.9 from p2')
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('infer explicit predict agee with confidence 0.9'
                ' from p2')
        guess.bayesdb_guess_population(bdb, 'pe', 't0',
            overrides=[
                ('age', 'numerical'),
                ('rank', 'numerical'),
            ])
        bdb.execute('create generator pe_cc for pe;')
        with pytest.raises(bayeslite.BQLError):
            # No models to analyze.
            bdb.execute('analyze pe_cc for 1 iteration')
        bdb.execute('initialize 1 model if not exists for pe_cc')
        bdb.execute('analyze pe_cc for 1 iteration')
        bdb.execute('estimate correlation'
            ' from pairwise columns of pe').fetchall()
        # BQL estimators require a population or generator, not a table.
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('initialize 4 models if not exists for t')
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('analyze t0 for 1 iteration')
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('estimate * from t')
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('estimate * from columns of t')
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('estimate correlation from pairwise columns of t')
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('estimate similarity in the context of age '
                'from pairwise t')
        bdb.execute('initialize 6 models if not exists for p_cc')
        bdb.execute('analyze p_cc for 1 iteration')
def test_trivial_deadline():
    """Smoke-test ANALYZE with a wall-clock deadline instead of iterations."""
    with test_core.t1() as (bdb, _population_id, _generator_id):
        for command in (
                'initialize 1 model for p1_cc',
                'analyze p1_cc for 1 second'):
            bdb.execute(command)
def test_parametrized():
assert bql2sqlparam('select * from t where id = ?') == \
'SELECT * FROM "t" WHERE ("id" = ?1);'
assert bql2sqlparam('select * from t where id = :foo') == \
'SELECT * FROM "t" WHERE ("id" = ?1);'
assert bql2sqlparam('select * from t where id = $foo') == \
'SELECT * FROM "t" WHERE ("id" = ?1);'
assert bql2sqlparam('select * from t where id = @foo') == \
'SELECT * FROM "t" WHERE ("id" = ?1);'
assert bql2sqlparam('select * from t where id = ?123') == \
'SELECT * FROM "t" WHERE ("id" = ?1);'
assert bql2sqlparam('select * from t where a = $foo and b = ?1;') == \
'SELECT * FROM "t" WHERE (("a" = ?1) AND ("b" = ?1));'
assert bql2sqlparam('select * from t' +
' where a = ?123 and b = :foo and c = ?124') == \
'SELECT * FROM "t" WHERE' + \
' ((("a" = ?1) AND ("b" = ?2)) AND ("c" = ?2));'
with test_csv.bayesdb_csv_file(test_csv.csv_data) as (bdb, fname):
with open(fname, 'rU') as f:
bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True)
assert bql_execute(bdb, 'select count(*) from t') == [(7,)]
assert bql_execute(bdb, 'select count(distinct division) from t') == \
[(6,)]
assert bql_execute(bdb, 'select * from t where height > ?', (70,)) == \
[
(41, 'M', 65600, 72, 'marketing', 4),
(30, 'M', 70000, 73, 'sales', 4),
(30, 'F', 81000, 73, 'engineering', 3),
]
assert bql_execute(bdb, 'select * from t where height > ?123',
(0,)*122 + (70,)) == \
[
(41, 'M', 65600, 72, 'marketing', 4),
(30, 'M', 70000, 73, 'sales', 4),
(30, 'F', 81000, 73, 'engineering', 3),
]
assert bql_execute(bdb, 'select age from t where division = :division',
{':division': 'sales'}) == \
[(34,), (30,)]
assert bql_execute(bdb, 'select division from t' +
' where age < @age and rank > ?;',
(40, 4)) == \
[('accounting',)]
assert bql_execute(bdb, 'select division from t' +
' where age < @age and rank > :rank;',
{':RANK': 4, '@aGe': 40}) == \
[('accounting',)]
with pytest.raises(ValueError):
bdb.execute('select * from t where age < ? and rank > :r',
{':r': 4})
def traced_execute(query, *args):
bql = []
def trace(string, _bindings):
bql.append(' '.join(string.split()))
bdb.trace(trace)
with bdb.savepoint():
bdb.execute(query, *args)
bdb.untrace(trace)
return bql
def sqltraced_execute(query, *args):
sql = []
def trace(string, _bindings):
sql.append(' '.join(string.split()))
bdb.sql_trace(trace)
with bdb.savepoint():
bdb.execute(query, *args)
bdb.sql_untrace(trace)
return sql
guess.bayesdb_guess_population(bdb, 'p', 't')
bdb.execute('create generator p_cc for p;')
bdb.execute('initialize 1 model for p_cc;')
assert traced_execute('estimate similarity to (rowid = 1)'
' in the context of (estimate * from columns of p limit 1)'
' from p;') == [
'estimate similarity to (rowid = 1)' \
' in the context of (estimate * from columns of p limit 1)' \
' from p;',
]
assert sqltraced_execute('estimate similarity to (rowid = 1)'
' in the context of (estimate * from columns of p limit 1)'
' from p;') == [
'SELECT COUNT(*) FROM bayesdb_population WHERE name = ?',
'SELECT id FROM bayesdb_population WHERE name = ?',
'SELECT tabname FROM bayesdb_population WHERE id = ?',
'SELECT COUNT(*) FROM bayesdb_population WHERE name = ?',
'SELECT id FROM bayesdb_population WHERE name = ?',
'SELECT v.name AS name FROM bayesdb_variable AS v'
' WHERE v.population_id = 1'
' AND v.generator_id IS NULL'
' LIMIT 1',
'SELECT colno FROM bayesdb_variable'
' WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?)'
' AND name = ?',
'SELECT tabname FROM bayesdb_population'
' WHERE id = ?',
'SELECT bql_row_similarity(1, NULL, NULL, _rowid_,'
' (SELECT _rowid_ FROM "t" WHERE ("rowid" = 1)), 0) FROM "t"',
'SELECT id FROM bayesdb_generator WHERE population_id = ?',
'SELECT backend FROM bayesdb_generator WHERE id = ?',
'SELECT cgpm_rowid FROM bayesdb_cgpm_individual'
' WHERE generator_id = ? AND table_rowid = ?',
'SELECT cgpm_rowid FROM bayesdb_cgpm_individual '
'WHERE generator_id = ? AND table_rowid = ?',
'SELECT engine_stamp FROM bayesdb_cgpm_generator '
'WHERE generator_id = ?'
]
assert sqltraced_execute('estimate similarity to (rowid = 1)'
' in the context of (estimate * from columns of p limit ?)'
' from p;',
(1,)) == [
'SELECT COUNT(*) FROM bayesdb_population'
' WHERE name = ?',
'SELECT id FROM bayesdb_population'
' WHERE name = ?',
'SELECT tabname FROM bayesdb_population WHERE id = ?',
'SELECT COUNT(*) FROM bayesdb_population'
' WHERE name = ?',
'SELECT id FROM bayesdb_population'
' WHERE name = ?',
# ESTIMATE * FROM COLUMNS OF:
'SELECT v.name AS name'
' FROM bayesdb_variable AS v'
' WHERE v.population_id = 1'
' AND v.generator_id IS NULL'
' LIMIT ?1',
'SELECT colno FROM bayesdb_variable'
' WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?)'
' AND name = ?',
'SELECT tabname FROM bayesdb_population WHERE id = ?',
# ESTIMATE SIMILARITY TO (rowid=1):
'SELECT bql_row_similarity(1, NULL, NULL, _rowid_,'
' (SELECT _rowid_ FROM "t" WHERE ("rowid" = 1)), 0) FROM "t"',
'SELECT id FROM bayesdb_generator WHERE population_id = ?',
'SELECT backend FROM bayesdb_generator WHERE id = ?',
'SELECT cgpm_rowid FROM bayesdb_cgpm_individual'
' WHERE generator_id = ? AND table_rowid = ?',
'SELECT cgpm_rowid FROM bayesdb_cgpm_individual'
' WHERE generator_id = ? AND table_rowid = ?',
'SELECT engine_stamp FROM bayesdb_cgpm_generator'
' WHERE generator_id = ?'
]
assert sqltraced_execute(
'create temp table if not exists sim as '
'simulate age, RANK, division '
'from p given gender = \'F\' limit 4') == [
'PRAGMA table_info("sim")',
'PRAGMA table_info("bayesdb_temp_0")',
'SELECT COUNT(*) FROM bayesdb_population WHERE name = ?',
'SELECT id FROM bayesdb_population WHERE name = ?',
'SELECT COUNT(*) FROM bayesdb_variable'
' WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?)'
' AND name = ?',
'SELECT COUNT(*) FROM bayesdb_variable'
' WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?)'
' AND name = ?',
'SELECT COUNT(*) FROM bayesdb_variable'
' WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?)'
' AND name = ?',
'SELECT COUNT(*) FROM bayesdb_variable'
' WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?)'
' AND name = ?',
'SELECT CAST(4 AS INTEGER), \'F\'',
'SELECT token FROM bayesdb_rowid_tokens',
'SELECT COUNT(*) FROM bayesdb_variable'
' WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?)'
' AND name = ?',
'SELECT colno FROM bayesdb_variable'
' WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?)'
' AND name = ?',
'SELECT token FROM bayesdb_rowid_tokens',
'SELECT COUNT(*) FROM bayesdb_variable'
' WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?)'
' AND name = ?',
'SELECT colno FROM bayesdb_variable'
' WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?)'
' AND name = ?',
'SELECT token FROM bayesdb_rowid_tokens',
'SELECT COUNT(*) FROM bayesdb_variable'
' WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?)'
' AND name = ?',
'SELECT colno FROM bayesdb_variable'
' WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?)'
' AND name = ?',
'SELECT token FROM bayesdb_rowid_tokens',
'SELECT COUNT(*) FROM bayesdb_variable'
' WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?)'
' AND name = ?',
'SELECT colno FROM bayesdb_variable'
' WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?)'
' AND name = ?',
'SELECT tabname FROM bayesdb_population WHERE id = ?',
'SELECT MAX(_rowid_) FROM "t"',
'SELECT token FROM bayesdb_rowid_tokens',
'SELECT token FROM bayesdb_rowid_tokens',
'SELECT id FROM bayesdb_generator'
' WHERE population_id = ?',
'SELECT backend FROM bayesdb_generator WHERE id = ?',
'SELECT population_id FROM bayesdb_generator WHERE id = ?',
'SELECT tabname FROM bayesdb_population WHERE id = ?',
'SELECT 1 FROM "t" WHERE oid = ?',
'SELECT 1 FROM bayesdb_cgpm_individual'
' WHERE generator_id = ? AND table_rowid = ? LIMIT 1',
'SELECT cgpm_rowid FROM bayesdb_cgpm_individual'
' WHERE generator_id = ? AND table_rowid = ?',
'SELECT population_id FROM bayesdb_generator WHERE id = ?',
'SELECT stattype FROM bayesdb_variable WHERE population_id = ? AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
'SELECT code FROM bayesdb_cgpm_category'
' WHERE generator_id = ? AND colno = ? AND value = ?',
'SELECT engine_stamp FROM bayesdb_cgpm_generator'
' WHERE generator_id = ?',
'SELECT population_id FROM bayesdb_generator WHERE id = ?',
'SELECT stattype FROM bayesdb_variable WHERE population_id = ? AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
'SELECT value FROM bayesdb_cgpm_category'
' WHERE generator_id = ? AND colno = ? AND code = ?',
'SELECT population_id FROM bayesdb_generator WHERE id = ?',
'SELECT stattype FROM bayesdb_variable WHERE population_id = ? AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
'SELECT value FROM bayesdb_cgpm_category'
' WHERE generator_id = ? AND colno = ? AND code = ?',
'SELECT population_id FROM bayesdb_generator WHERE id = ?',
'SELECT stattype FROM bayesdb_variable WHERE population_id = ? AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
'SELECT value FROM bayesdb_cgpm_category'
' WHERE generator_id = ? AND colno = ? AND code = ?',
'SELECT population_id FROM bayesdb_generator WHERE id = ?',
'SELECT stattype FROM bayesdb_variable WHERE population_id = ? AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
'SELECT value FROM bayesdb_cgpm_category'
' WHERE generator_id = ? AND colno = ? AND code = ?',
'SELECT population_id FROM bayesdb_generator WHERE id = ?',
'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
'SELECT value FROM bayesdb_cgpm_category'
' WHERE generator_id = ? AND colno = ? AND code = ?',
'SELECT population_id FROM bayesdb_generator WHERE id = ?',
'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
'SELECT value FROM bayesdb_cgpm_category'
' WHERE generator_id = ? AND colno = ? AND code = ?',
'SELECT population_id FROM bayesdb_generator WHERE id = ?',
'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
'SELECT value FROM bayesdb_cgpm_category'
' WHERE generator_id = ? AND colno = ? AND code = ?',
'SELECT population_id FROM bayesdb_generator WHERE id = ?',
'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
'SELECT value FROM bayesdb_cgpm_category'
' WHERE generator_id = ? AND colno = ? AND code = ?',
'SELECT population_id FROM bayesdb_generator WHERE id = ?',
'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
'SELECT value FROM bayesdb_cgpm_category'
' WHERE generator_id = ? AND colno = ? AND code = ?',
'SELECT population_id FROM bayesdb_generator WHERE id = ?',
'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
'SELECT value FROM bayesdb_cgpm_category'
' WHERE generator_id = ? AND colno = ? AND code = ?',
'SELECT population_id FROM bayesdb_generator WHERE id = ?',
'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
'SELECT value FROM bayesdb_cgpm_category'
' WHERE generator_id = ? AND colno = ? AND code = ?',
'SELECT population_id FROM bayesdb_generator WHERE id = ?',
'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
'SELECT value FROM bayesdb_cgpm_category'
' WHERE generator_id = ? AND colno = ? AND code = ?',
'CREATE TEMP TABLE "bayesdb_temp_0"'
' ("age","RANK","division")',
'INSERT INTO "bayesdb_temp_0" ("age","RANK","division")'
' VALUES (?,?,?)',
'INSERT INTO "bayesdb_temp_0" ("age","RANK","division")'
' VALUES (?,?,?)',
'INSERT INTO "bayesdb_temp_0" ("age","RANK","division")'
' VALUES (?,?,?)',
'INSERT INTO "bayesdb_temp_0" ("age","RANK","division")'
' VALUES (?,?,?)',
'CREATE TEMP TABLE IF NOT EXISTS "sim" AS'
' SELECT * FROM "bayesdb_temp_0"',
'DROP TABLE "bayesdb_temp_0"'
]
assert sqltraced_execute(
'select * from (simulate age from p '
'given gender = \'F\' limit 4)') == [
'PRAGMA table_info("bayesdb_temp_1")',
'SELECT COUNT(*) FROM bayesdb_population WHERE name = ?',
'SELECT id FROM bayesdb_population WHERE name = ?',
'SELECT COUNT(*) FROM bayesdb_variable'
' WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?)'
' AND name = ?',
'SELECT COUNT(*) FROM bayesdb_variable'
' WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?)'
' AND name = ?',
'SELECT CAST(4 AS INTEGER), \'F\'',
'SELECT token FROM bayesdb_rowid_tokens',
'SELECT COUNT(*) FROM bayesdb_variable'
' WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?)'
' AND name = ?',
'SELECT colno FROM bayesdb_variable'
' WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?)'
' AND name = ?',
'SELECT token FROM bayesdb_rowid_tokens',
'SELECT COUNT(*) FROM bayesdb_variable'
' WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?)'
' AND name = ?',
'SELECT colno FROM bayesdb_variable'
' WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?)'
' AND name = ?',
'SELECT tabname FROM bayesdb_population WHERE id = ?',
'SELECT MAX(_rowid_) FROM "t"',
'SELECT token FROM bayesdb_rowid_tokens',
'SELECT token FROM bayesdb_rowid_tokens',
'SELECT id FROM bayesdb_generator WHERE population_id = ?',
'SELECT backend FROM bayesdb_generator WHERE id = ?',
'SELECT population_id FROM bayesdb_generator WHERE id = ?',
'SELECT tabname FROM bayesdb_population WHERE id = ?',
'SELECT 1 FROM "t" WHERE oid = ?',
'SELECT 1 FROM bayesdb_cgpm_individual'
' WHERE generator_id = ? AND table_rowid = ? LIMIT 1',
'SELECT cgpm_rowid FROM bayesdb_cgpm_individual'
' WHERE generator_id = ? AND table_rowid = ?',
'SELECT population_id FROM bayesdb_generator WHERE id = ?',
'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
'SELECT code FROM bayesdb_cgpm_category'
' WHERE generator_id = ? AND colno = ? AND value = ?',
'SELECT engine_stamp FROM bayesdb_cgpm_generator'
' WHERE generator_id = ?',
'SELECT population_id FROM bayesdb_generator WHERE id = ?',
'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
'SELECT value FROM bayesdb_cgpm_category'
' WHERE generator_id = ? AND colno = ? AND code = ?',
'SELECT population_id FROM bayesdb_generator WHERE id = ?',
'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
'SELECT value FROM bayesdb_cgpm_category'
' WHERE generator_id = ? AND colno = ? AND code = ?',
'SELECT population_id FROM bayesdb_generator WHERE id = ?',
'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
'SELECT value FROM bayesdb_cgpm_category'
' WHERE generator_id = ? AND colno = ? AND code = ?',
'SELECT population_id FROM bayesdb_generator WHERE id = ?',
'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
'SELECT value FROM bayesdb_cgpm_category'
' WHERE generator_id = ? AND colno = ? AND code = ?',
'CREATE TEMP TABLE "bayesdb_temp_1" ("age")',
'INSERT INTO "bayesdb_temp_1" ("age") VALUES (?)',
'INSERT INTO "bayesdb_temp_1" ("age") VALUES (?)',
'INSERT INTO "bayesdb_temp_1" ("age") VALUES (?)',
'INSERT INTO "bayesdb_temp_1" ("age") VALUES (?)',
'SELECT * FROM (SELECT * FROM "bayesdb_temp_1")',
'DROP TABLE "bayesdb_temp_1"',
]
bdb.execute('''
create population q for t (
age NUMERICAL;
gender NOMINAL; -- Not binary!
salary NUMERICAL;
height NUMERICAL;
division NOMINAL;
rank NOMINAL;
)
''')
bdb.execute('create generator q_cc for q;')
bdb.execute('initialize 1 model for q_cc;')
assert sqltraced_execute('analyze q_cc for 1 iteration;') == [
'SELECT COUNT(*) FROM bayesdb_generator WHERE name = ?',
'SELECT id FROM bayesdb_generator WHERE name = ?',
'SELECT backend FROM bayesdb_generator WHERE id = ?',
'SELECT engine_json, engine_stamp FROM bayesdb_cgpm_generator'
' WHERE generator_id = ?',
'SELECT population_id FROM bayesdb_generator WHERE id = ?',
'SELECT engine_stamp FROM bayesdb_cgpm_generator'
' WHERE generator_id = ?',
'UPDATE bayesdb_cgpm_generator'
' SET engine_json = :engine_json, engine_stamp = :engine_stamp'
' WHERE generator_id = :generator_id']
def test_create_table_ifnotexists_as_simulate():
    """CREATE TABLE IF NOT EXISTS ... AS SIMULATE creates the table when
    absent and is a no-op when the table already exists.
    """
    with test_csv.bayesdb_csv_file(test_csv.csv_data) as (bdb, fname):
        with open(fname, 'rU') as f:
            bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True)
        # If not exists table tests
        guess.bayesdb_guess_population(bdb, 'p', 't',
            overrides=[('age', 'numerical')])
        bdb.execute('create generator p_cc for p;')
        bdb.execute('initialize 1 model for p_cc')
        bdb.execute('analyze p_cc for 1 iteration')
        # Table does not exist yet: both forms (with and without a GIVEN
        # clause) must create it.
        bdb.execute('''
            create table if not exists u as
                simulate age from p limit 10
        ''')
        bdb.execute("drop table u")
        bdb.execute('''
            create table if not exists w as simulate age from p
                given division='sales' limit 10
        ''')
        bdb.execute("drop table w")
        bdb.execute("create table u as simulate age from p limit 10")
        x = bdb.execute("select count (*) from u").fetchvalue()
        # Table u now exists: IF NOT EXISTS must leave it untouched, so
        # the row count observed above must not change.
        bdb.execute('''
            create table if not exists u as simulate age from p limit 10
        ''')
        bdb.execute('''
            create table if not exists u as simulate age from p
                given division='sales' limit 10
        ''')
        assert x == bdb.execute("select count (*) from u").fetchvalue()
def test_createtab():
    """Lifecycle of tables, populations, and generators: DROP/CREATE,
    IF EXISTS variants, redefinition errors, and dependency ordering.
    """
    with test_csv.bayesdb_csv_file(test_csv.csv_data) as (bdb, fname):
        # Dropping nonexistent objects fails, unless IF EXISTS is given.
        with pytest.raises(apsw.SQLError):
            bdb.execute('drop table t')
        bdb.execute('drop table if exists t')
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('drop population p')
        bdb.execute('drop population if exists p')
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('drop generator p_cc')
        bdb.execute('drop generator if exists p_cc')
        with open(fname, 'rU') as f:
            bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True)
        with bdb.savepoint():
            # Savepoint because we don't actually want the new data to
            # be inserted.
            with open(fname, 'rU') as f:
                bayeslite.bayesdb_read_csv(bdb, 't', f, header=True,
                    create=True, ifnotexists=True)
        guess.bayesdb_guess_population(bdb, 'p', 't',
            overrides=[('age', 'numerical')])
        bdb.execute('create generator p_cc for p;')
        with pytest.raises(bayeslite.BQLError):
            # Redefining population.
            bdb.execute('create population p for t (age numerical)')
        with pytest.raises(bayeslite.BQLError):
            # Redefining generator.
            bdb.execute('create generator p_cc for p;')
        # Make sure ignore columns work.
        #
        # XXX Also check key columns.
        guess.bayesdb_guess_population(bdb, 'p0', 't',
            overrides=[('age', 'ignore')])
        bdb.execute('drop population p0')
        population_id = core.bayesdb_get_population(bdb, 'p')
        colno = core.bayesdb_variable_number(bdb, population_id, None, 'age')
        assert core.bayesdb_variable_stattype(
            bdb, population_id, None, colno) == 'numerical'
        bdb.execute('initialize 1 model for p_cc')
        # Objects with dependents cannot be dropped until the dependents
        # are dropped first: generator before population before table.
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('drop table t')
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('drop population p')
        bdb.execute('drop generator p_cc')
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('drop generator p_cc')
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('drop table t')
        bdb.execute('drop generator if exists p_cc')
        bdb.execute('drop population p')
        bdb.execute('drop population if exists p')
        bdb.execute('drop table t')
        bdb.execute('drop table if exists t')
        with open(fname, 'rU') as f:
            bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True)
        guess.bayesdb_guess_population(bdb, 'p', 't')
        bdb.execute("create table u as select * from t where gender = 'F'")
        assert bql_execute(bdb, 'select * from u') == [
            (23, 'F', 81000, 67, 'data science', 3),
            (36, 'F', 96000, 70, 'management', 2),
            (30, 'F', 81000, 73, 'engineering', 3),
        ]
        with pytest.raises(bayeslite.BQLError):
            # Table u already exists.
            bdb.execute("create table u as select * from t where gender = 'F'")
        bdb.execute('drop table u')
        with pytest.raises(apsw.SQLError):
            bql_execute(bdb, 'select * from u')
        bdb.execute("create temp table u as"
            " select * from t where gender = 'F'")
        assert bql_execute(bdb, 'select * from u') == [
            (23, 'F', 81000, 67, 'data science', 3),
            (36, 'F', 96000, 70, 'management', 2),
            (30, 'F', 81000, 73, 'engineering', 3),
        ]
        # XXX Test to make sure TEMP is passed through, and the table
        # doesn't persist on disk.
def test_alterpop_addvar():
    """ALTER POPULATION ... ADD VARIABLE: success paths with and without
    an explicit stattype, and the documented failure modes.
    """
    with bayeslite.bayesdb_open() as bdb:
        bayeslite.bayesdb_read_csv(
            bdb, 't', StringIO.StringIO(test_csv.csv_data),
            header=True, create=True)
        bdb.execute('''
            create population p for t with schema(
                age numerical;
                gender nominal;
                salary numerical;
                height ignore;
                division ignore;
                rank ignore;
            )
        ''')
        population_id = core.bayesdb_get_population(bdb, 'p')
        bdb.execute('create generator m for p;')
        # Fail when variable does not exist in base table.
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('alter population p add variable quux;')
        # Fail when variable already in population.
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('alter population p add variable age numerical;')
        # Fail when given invalid statistical type.
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('alter population p add variable heigh numr;')
        # Alter pop with stattype.
        assert not core.bayesdb_has_variable(bdb, population_id, None, 'height')
        bdb.execute('alter population p add variable height numerical;')
        assert core.bayesdb_has_variable(bdb, population_id, None, 'height')
        # Alter pop multiple without stattype (stattypes get guessed).
        assert not core.bayesdb_has_variable(bdb, population_id, None, 'rank')
        assert not core.bayesdb_has_variable(
            bdb, population_id, None, 'division')
        bdb.execute('''
            alter population p
                add variable rank,
                add variable division;
        ''')
        assert core.bayesdb_has_variable(bdb, population_id, None, 'rank')
        assert core.bayesdb_has_variable(bdb, population_id, None, 'division')
        # Add a new column weight to the base table.
        bdb.sql_execute('alter table t add column weight real;')
        # Fail when no values in new column.
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('alter population p add variable weight numerical;')
        assert not core.bayesdb_has_variable(bdb, population_id, None, 'weight')
        # Update a single value and update the population.
        bdb.sql_execute('update t set weight = 1 where oid = 1;')
        bdb.execute('alter population p add variable weight numerical;')
        assert core.bayesdb_has_variable(bdb, population_id, None, 'weight')
def test_txn():
    """BEGIN/COMMIT/ROLLBACK semantics: no nesting, effects of rollback
    vs commit on CREATE/DROP, and the bdb.transaction() context manager
    (including its interaction with savepoints).
    """
    with test_csv.bayesdb_csv_file(test_csv.csv_data) as (bdb, fname):
        # Make sure rollback and commit fail outside a transaction.
        with pytest.raises(bayeslite.BayesDBTxnError):
            bdb.execute('ROLLBACK')
        with pytest.raises(bayeslite.BayesDBTxnError):
            bdb.execute('COMMIT')
        # Open a transaction which we'll roll back.
        bdb.execute('BEGIN')
        try:
            # Make sure transactions don't nest.  (Use savepoints.)
            with pytest.raises(bayeslite.BayesDBTxnError):
                bdb.execute('BEGIN')
        finally:
            bdb.execute('ROLLBACK')
        # Make sure rollback and commit still fail outside a transaction.
        with pytest.raises(bayeslite.BayesDBTxnError):
            bdb.execute('ROLLBACK')
        with pytest.raises(bayeslite.BayesDBTxnError):
            bdb.execute('COMMIT')
        # Open a transaction which we'll commit.
        bdb.execute('BEGIN')
        try:
            with pytest.raises(bayeslite.BayesDBTxnError):
                bdb.execute('BEGIN')
        finally:
            bdb.execute('COMMIT')
        with pytest.raises(bayeslite.BayesDBTxnError):
            bdb.execute('ROLLBACK')
        with pytest.raises(bayeslite.BayesDBTxnError):
            bdb.execute('COMMIT')
        # Make sure ROLLBACK undoes the effects of the transaction.
        bdb.execute('BEGIN')
        try:
            with open(fname, 'rU') as f:
                bayeslite.bayesdb_read_csv(bdb, 't', f, header=True,
                    create=True)
            bdb.execute('SELECT * FROM t').fetchall()
            guess.bayesdb_guess_population(bdb, 'p', 't')
            bdb.execute('ESTIMATE * FROM p').fetchall()
        finally:
            bdb.execute('ROLLBACK')
        with pytest.raises(apsw.SQLError):
            bdb.execute('SELECT * FROM t')
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('ESTIMATE * FROM p')
        # Make sure CREATE and DROP both work in the transaction.
        bdb.execute('BEGIN')
        try:
            with open(fname, 'rU') as f:
                bayeslite.bayesdb_read_csv(bdb, 't', f, header=True,
                    create=True)
            bdb.execute('SELECT * FROM t').fetchall()
            guess.bayesdb_guess_population(bdb, 'p', 't')
            bdb.execute('ESTIMATE * FROM p').fetchall()
            with pytest.raises(bayeslite.BQLError):
                # Cannot drop t while population p depends on it.
                bdb.execute('DROP TABLE t')
            bdb.execute('DROP POPULATION p')
            with pytest.raises(bayeslite.BQLError):
                bdb.execute('ESTIMATE * FROM p')
            bdb.execute('DROP TABLE t')
            with pytest.raises(apsw.SQLError):
                bdb.execute('SELECT * FROM t')
        finally:
            bdb.execute('ROLLBACK')
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('ESTIMATE * FROM p')
        with pytest.raises(apsw.SQLError):
            bdb.execute('SELECT * FROM t')
        # Make sure CREATE and DROP work even if we commit.
        bdb.execute('BEGIN')
        try:
            with open(fname, 'rU') as f:
                bayeslite.bayesdb_read_csv(bdb, 't', f, header=True,
                    create=True)
            bdb.execute('SELECT * FROM t').fetchall()
            guess.bayesdb_guess_population(bdb, 'p', 't')
            bdb.execute('ESTIMATE * FROM p').fetchall()
            with pytest.raises(bayeslite.BQLError):
                bdb.execute('DROP TABLE t')
            bdb.execute('DROP POPULATION p')
            with pytest.raises(bayeslite.BQLError):
                bdb.execute('ESTIMATE * FROM p')
            bdb.execute('DROP TABLE t')
            with pytest.raises(apsw.SQLError):
                bdb.execute('SELECT * FROM t')
        finally:
            bdb.execute('COMMIT')
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('ESTIMATE * FROM p')
        with pytest.raises(apsw.SQLError):
            bdb.execute('SELECT * FROM t')
        # Make sure CREATE persists if we commit.
        bdb.execute('BEGIN')
        try:
            with open(fname, 'rU') as f:
                bayeslite.bayesdb_read_csv(bdb, 't', f, header=True,
                    create=True)
            bdb.execute('SELECT * FROM t').fetchall()
            guess.bayesdb_guess_population(bdb, 'p', 't')
            bdb.execute('ESTIMATE * FROM p').fetchall()
        finally:
            bdb.execute('COMMIT')
        bdb.execute('SELECT * FROM t').fetchall()
        bdb.execute('ESTIMATE * FROM p').fetchall()
        # Make sure bdb.transaction works, rolls back on exception,
        # and handles nesting correctly in the context of savepoints.
        try:
            with bdb.transaction():
                bdb.sql_execute('create table quagga(x)')
                raise StopIteration
        except StopIteration:
            pass
        with pytest.raises(apsw.SQLError):
            bdb.execute('select * from quagga')
        with bdb.transaction():
            with bdb.savepoint():
                with bdb.savepoint():
                    pass
        with bdb.savepoint():
            with pytest.raises(bayeslite.BayesDBTxnError):
                with bdb.transaction():
                    pass
        # XXX To do: Make sure other effects (e.g., analysis) get
        # rolled back by ROLLBACK.
def test_predprob_null():
    """PREDICTIVE PROBABILITY with NULL values: NULL targets yield NULL,
    and all-NULL GIVEN constraints do not change the result.
    """
    backend = CGPM_Backend({}, multiprocess=False)
    with test_core.bayesdb(backend=backend) as bdb:
        bdb.sql_execute('''
            create table foo (
                id integer primary key not null,
                x numeric,
                y numeric,
                z numeric
            )
        ''')
        # Rows 4 and 12 have NULL x; rows 7, 8 have NULL z; row 8 NULL y.
        bdb.sql_execute("insert into foo values (1, 1, 'strange', 3)")
        bdb.sql_execute("insert into foo values (2, 1.2, 'strange', 1)")
        bdb.sql_execute("insert into foo values (3, 0.8, 'strange', 3)")
        bdb.sql_execute("insert into foo values (4, NULL, 'strange', 9)")
        bdb.sql_execute("insert into foo values (5, 73, 'up', 11)")
        bdb.sql_execute("insert into foo values (6, 80, 'up', -1)")
        bdb.sql_execute("insert into foo values (7, 60, 'up', NULL)")
        bdb.sql_execute("insert into foo values (8, 67, NULL, NULL)")
        bdb.sql_execute("insert into foo values (9, 3.1415926, 'down', 1)")
        bdb.sql_execute("insert into foo values (10, 1.4142135, 'down', 0)")
        bdb.sql_execute("insert into foo values (11, 2.7182818, 'down', -1)")
        bdb.sql_execute("insert into foo values (12, NULL, 'down', 10)")
        bdb.execute('''
            create population pfoo for foo (
                id ignore;
                x numerical;
                y nominal;
                z numerical;
            )
        ''')
        bdb.execute('create generator pfoo_cc for pfoo using cgpm;')
        bdb.execute('initialize 1 model for pfoo_cc')
        bdb.execute('analyze pfoo_cc for 1 iteration')
        # Null value => null predictive probability.
        assert bdb.execute('estimate predictive probability of x'
                ' from pfoo where id = 4;').fetchall() == \
            [(None,)]
        # Nonnull value => nonnull predictive probability.
        x = bdb.execute('estimate predictive probability of x'
                ' from pfoo where id = 5').fetchall()
        assert len(x) == 1
        assert len(x[0]) == 1
        assert isinstance(x[0][0], (int, float))
        # All null values => null predictive probability.
        assert bdb.execute('estimate predictive probability of (y, z)'
                ' from pfoo where id = 8;').fetchall() == \
            [(None,)]
        # Some nonnull values => nonnull predictive probability.
        x = bdb.execute('estimate predictive probability of (x, z)'
                ' from pfoo where id = 8;').fetchall()
        assert len(x) == 1
        assert len(x[0]) == 1
        assert isinstance(x[0][0], (int, float))
        # All NULL constraints => same result regardless of given clause.
        c0 = bdb.execute('estimate predictive probability of x'
            ' from pfoo where id = 8;')
        v0 = cursor_value(c0)
        assert v0 is not None
        c1 = bdb.execute('estimate predictive probability of x given (y, z)'
            ' from pfoo where id = 8;')
        v1 = cursor_value(c1)
        # Compare within floating-point tolerance.
        assert relerr(v0, v1) < 0.0001
def test_guess_all():
    """Guessing a population over all columns of a table succeeds."""
    with test_core.bayesdb() as bdb:
        bdb.sql_execute('create table foo (x numeric, y numeric, z numeric)')
        bdb.sql_execute('insert into foo values (1, 2, 3)')
        bdb.sql_execute('insert into foo values (4, 5, 6)')
        # XXX GUESS(*)
        guess.bayesdb_guess_population(bdb, 'pfoo', 'foo')
def test_misc_errors():
    """Assorted BQL error conditions raise the expected exception types
    (BQLError, BQLParseError, NotImplementedError, ValueError, ...).
    """
    with test_core.t1() as (bdb, _population_id, _generator_id):
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('create table t1 as SELECT 1 FROM t1'
                # t1 already exists as a table.
                ' limit 1')
        with pytest.raises(bayeslite.BQLError):
            # t1 already exists as a table.
            bdb.execute('create table t1 as simulate weight from p1'
                ' limit 1')
        with pytest.raises(bayeslite.BQLError):
            # t1x does not exist as a population.
            bdb.execute('create table t1_sim as simulate weight from t1x'
                ' limit 1')
        with pytest.raises(bayeslite.BQLError):
            # p1 does not have a variable waught.
            bdb.execute('create table t1_sim as simulate waught from p1'
                ' limit 1')
        with pytest.raises(bayeslite.BQLError):
            # p1 does not have a variable agee.
            bdb.execute('create table t1_sim as simulate weight from p1'
                ' given agee = 42 limit 1')
        with bdb.savepoint():
            bdb.sql_execute('create table t2(x)')
            with pytest.raises(bayeslite.BQLError):
                # t1 already exists as a table.
                bdb.execute('alter table t2 rename to t1')
        with pytest.raises(NotImplementedError):
            # Renaming columns is not yet implemented.
            bdb.execute('alter table t1 rename weight to mass')
        with pytest.raises(bayeslite.BQLError):
            # xcat does not exist as a backend.
            bdb.execute('create generator p1_xc for p1 using xcat()')
        with pytest.raises(bayeslite.BQLError):
            # p1 already exists as a population.
            bdb.execute('create generator p1_cc for p1;')
        with pytest.raises(bayeslite.BQLError):
            # multinomial is not a known statistical type.
            bdb.execute('''
                create population q1 for t1(
                    ignore id, label, weight;
                    weight multinomial
                )
            ''')
        with pytest.raises(bayeslite.BQLError):
            # p1_xc does not exist as a generator.
            bdb.execute('alter generator p1_xc rename to p1_xcat')
        with bdb.savepoint():
            bdb.execute('create generator p1_xc for p1;')
            with pytest.raises(bayeslite.BQLError):
                # p1_xc already exists as a generator.
                bdb.execute('alter generator p1_cc rename to p1_xc')
        with pytest.raises(bayeslite.BQLParseError):
            # WAIT is not allowed.
            bdb.execute('analyze p1_cc for 1 iteration wait')
        with bdb.savepoint():
            bdb.execute('initialize 1 model for p1_cc')
            bdb.execute('analyze p1_cc for 1 iteration')
            bdb.execute('initialize 1 model for p1_xc')
            bdb.execute('analyze p1_xc for 1 iteration')
            with pytest.raises(apsw.SQLError):
                # nonexistent is not a SQL function.
                bdb.execute('select'
                    ' nonexistent((simulate age from p1 limit 1));')
        # Binding errors: wrong names, wrong counts, wrong container type.
        with pytest.raises(ValueError):
            bdb.execute('select :x', {'y': 42})
        with pytest.raises(ValueError):
            bdb.execute('select :x', {'x': 53, 'y': 42})
        with pytest.raises(ValueError):
            bdb.execute('select ?, ?', (1,))
        with pytest.raises(ValueError):
            bdb.execute('select ?', (1, 2))
        with pytest.raises(TypeError):
            bdb.execute('select ?', 42)
        with pytest.raises(NotImplementedError):
            bdb.execute('infer explicit predict age confidence ac, *'
                ' from p1')
        with pytest.raises(NotImplementedError):
            bdb.execute('infer explicit predict age confidence ac,'
                ' t1.(select age from t1 limit 1) from p1')
        with pytest.raises(bayeslite.BQLError):
            try:
                bdb.execute('estimate similarity to (rowid=1)'
                    ' in the context of agee from p1')
            except bayeslite.BQLError as e:
                # Verify the message too, then re-raise for pytest.raises.
                assert 'No such columns in population:' in str(e)
                raise
def test_nested_simulate():
    """SIMULATE may appear as a subquery; intermediate temp tables are
    dropped, and cursor teardown does not raise.
    """
    with test_core.t1() as (bdb, _population_id, _generator_id):
        bdb.execute('initialize 1 model for p1_cc')
        bdb.execute('analyze p1_cc for 1 iteration')
        bdb.execute('select (simulate age from p1 limit 1),'
            ' (simulate weight from p1 limit 1)').fetchall()
        # The two inner simulations consumed temp table names 0 and 1 and
        # dropped those tables afterward.
        assert bdb.temp_table_name() == 'bayesdb_temp_2'
        assert not core.bayesdb_has_table(bdb, 'bayesdb_temp_0')
        assert not core.bayesdb_has_table(bdb, 'bayesdb_temp_1')
        bdb.execute('simulate weight from p1'
            ' given age = (simulate age from p1 limit 1)'
            ' limit 1').fetchall()
        # Make sure unwinding doesn't raise an exception.  Calling
        # __del__ directly, rather than via del(), has two effects:
        #
        # (a) It actually raises any exceptions in the method, unlike
        # del(), which suppresses them.
        #
        # (b) It may cause a subsequent __del__ to fail and raise an
        # exception, so that a subsequent del(), including an implicit
        # one at the end of a scope, may print a message to stderr.
        #
        # Effect (a) is what we are actually trying to test.  Effect
        # (b) is a harmless consequence as far as pytest is concerned,
        # as long as the test otherwise passes.
        bdb.execute('simulate weight from p1'
            ' given age = (simulate age from p1 limit 1)'
            ' limit 1').__del__()
def test_checkpoint__ci_slow():
    """ANALYZE ... CHECKPOINT supports iteration counts; checkpointing
    by seconds is not implemented.
    """
    with test_core.t1() as (bdb, population_id, generator_id):
        bdb.execute('initialize 1 model for p1_cc')
        bdb.execute('analyze p1_cc for 10 iterations checkpoint 1 iteration')
        # No checkpoint by seconds.
        with pytest.raises(NotImplementedError):
            bdb.execute('analyze p1_cc for 5 seconds checkpoint 1 second')
        bdb.execute('drop models from p1_cc')
        bdb.execute('initialize 1 model for p1_cc')
        # No checkpoint by seconds.
        with pytest.raises(NotImplementedError):
            bdb.execute('analyze p1_cc for 5 iterations checkpoint 1 second')
        bdb.execute('drop models from p1_cc')
        bdb.execute('initialize 1 model for p1_cc')
        # Checkpoint interval larger than the iteration count is fine.
        bdb.execute('analyze p1_cc for 1 iteration checkpoint 2 iterations')
def test_infer_confidence__ci_slow():
    """INFER EXPLICIT with PREDICT ... CONFIDENCE runs end to end,
    mixed with plain columns, aliases, and a constant.
    """
    with test_core.t1() as (bdb, _population_id, _generator_id):
        bdb.execute('initialize 1 model for p1_cc')
        bdb.execute('analyze p1_cc for 1 iteration')
        bdb.execute('infer explicit rowid, rowid as another_rowid, 4,'
            ' age, predict age as age_inf confidence age_conf'
            ' from p1').fetchall()
def test_infer_as_estimate():
    """INFER EXPLICIT accepts ESTIMATE-style expressions such as
    PREDICTIVE PROBABILITY.
    """
    with test_core.t1() as (bdb, _population_id, _generator_id):
        bdb.execute('initialize 1 model for p1_cc')
        bdb.execute('analyze p1_cc for 1 iteration')
        bdb.execute('infer explicit predictive probability of age'
            ' from p1').fetchall()
def test_infer_error():
    """INFER works even without ANALYZE; an unknown variable raises
    BQLError.
    """
    with test_core.t1() as (bdb, _population_id, _generator_id):
        bdb.execute('initialize 1 model for p1_cc')
        bdb.execute('infer explicit predict age confidence age_conf'
            ' from p1').fetchall()
        with pytest.raises(bayeslite.BQLError):
            # 'agee' is not a variable of p1.
            bdb.execute('infer explicit predict agee confidence age_conf'
                ' from p1').fetchall()
def test_estimate_by():
    """ESTIMATE ... BY <population>: population-level estimands each
    yield exactly one row; row-level estimands are rejected.
    """
    with test_core.t1() as (bdb, _population_id, _generator_id):
        bdb.execute('initialize 1 model for p1_cc')
        bdb.execute('analyze p1_cc for 1 iteration')
        # Row-wise estimands make no sense with BY and must be rejected.
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('estimate predictive probability of age'
                ' by p1')
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('estimate similarity to (rowid=1) '
                'in the context of age by p1')

        def check(query, bindings=None):
            # Each population-level estimand produces exactly one row.
            rows = bdb.execute(query, bindings=bindings).fetchall()
            assert len(rows) == 1

        for query in (
                'estimate probability density of age = 42 by p1',
                'estimate dependence probability of age with weight by p1',
                'estimate mutual information of age with weight by p1',
                'estimate correlation of age with weight by p1',
                'estimate correlation pvalue of age with weight by p1',
        ):
            check(query)
        rowid = bdb.execute('select min(rowid) from t1').fetchall()[0][0]
        check('''
            estimate similarity of (rowid=?) to (rowid=?)
                in the context of weight by p1
        ''', (rowid, rowid,))
def test_empty_cursor():
    """Statements with no result set return empty-but-valid cursors."""
    with bayeslite.bayesdb_open() as bdb:
        assert bdb.execute('SELECT 0').connection == bdb
        empty(bdb.execute('BEGIN'))
        empty(bdb.execute('COMMIT'))
        # Plain SQL statements with no rows.
        for sql in ('CREATE TABLE t(x, y, z)',
                'INSERT INTO t VALUES(1,2,3)',
                'INSERT INTO t VALUES(4,5,6)',
                'INSERT INTO t VALUES(7,8,9)'):
            empty(bdb.sql_execute(sql))
        # BQL statements with no rows, in dependency order.
        for bql in ('CREATE POPULATION p FOR t '
                    '(IGNORE z,y; x NOMINAL)',
                'CREATE GENERATOR p_cc FOR p;',
                'INITIALIZE 1 MODEL FOR p_cc',
                'DROP GENERATOR p_cc',
                'DROP POPULATION p',
                'DROP TABLE t'):
            empty(bdb.execute(bql))
def test_create_generator_ifnotexists():
    """CREATE GENERATOR IF NOT EXISTS is idempotent, while plain CREATE
    of an existing generator raises BQLError.
    """
    # XXX Test other backends too, because they have a role in ensuring that
    # this works.  Their create_generator will still be called.
    #
    # [TRC 20160627: The above comment appears to be no longer true --
    # if it was ever true.]
    for using_clause in ('cgpm()',):
        with bayeslite.bayesdb_open() as bdb:
            bdb.sql_execute('CREATE TABLE t(x, y, z)')
            bdb.sql_execute('INSERT INTO t VALUES(1,2,3)')
            bdb.execute('''
                CREATE POPULATION p FOR t (
                    x NUMERICAL;
                    y NUMERICAL;
                    z NOMINAL;
                )
            ''')
            # Creating twice with IF NOT EXISTS must succeed both times.
            for _i in (0, 1):
                bdb.execute('CREATE GENERATOR IF NOT EXISTS p_cc FOR p USING '
                    + using_clause)
            # Without IF NOT EXISTS, re-creation must fail.  Use the
            # pytest.raises idiom for consistency with the rest of this
            # file (instead of try/except with `assert False`).
            with pytest.raises(bayeslite.BQLError):
                bdb.execute('CREATE GENERATOR p_cc FOR p USING ' + using_clause)
def test_bql_rand():
    """bql_rand() produces the expected deterministic stream under the
    default seed.
    """
    with bayeslite.bayesdb_open() as bdb:
        bdb.sql_execute('CREATE TABLE frobotz(x)')
        for _ in range(10):
            bdb.sql_execute('INSERT INTO frobotz VALUES(2)')
        cursor = bdb.execute('SELECT bql_rand() FROM frobotz LIMIT 10;')
        rands = cursor.fetchall()
        # These are "the" random numbers (internal PRNG is seeded to 0)
        ans = [(0.28348770982811367,), (0.4789774612650598,), (0.07824908989551316,),
            (0.6091223239372148,), (0.03906608409906187,), (0.3690599096081546,),
            (0.8223420512129717,), (0.7777771914916722,), (0.061856771629497986,),
            (0.6492586781908201,)]
        assert rands == ans
def test_bql_rand2():
    """bql_rand() produces a different, still deterministic, stream when
    the bayesdb is opened with an explicit seed.
    """
    # 32-byte seed: four little-endian 64-bit words (0, 0, 0, 3).
    seed = struct.pack('<QQQQ', 0, 0, 0, 3)
    with bayeslite.bayesdb_open(seed=seed) as bdb:
        bdb.sql_execute('CREATE TABLE frobotz(x)')
        for _ in range(10):
            bdb.sql_execute('INSERT INTO frobotz VALUES(2)')
        cursor = bdb.execute('SELECT bql_rand() FROM frobotz LIMIT 10;')
        rands = cursor.fetchall()
        ans = [(0.8351877951287725,), (0.9735099617243271,), (0.026142315910925418,),
            (0.09380653289687524,), (0.1097050387582088,), (0.33154896906379605,),
            (0.4579314980719317,), (0.09072802203491703,), (0.5276180968829105,),
            (0.9993280772797679,)]
        assert rands == ans
class MockTracerOneQuery(bayeslite.IBayesDBTracer):
    """Tracer that counts lifecycle callbacks for one expected query.

    Every callback asserts it was invoked for the expected query id (and,
    for `start`, the expected query text with empty bindings), then bumps
    the corresponding counter so tests can assert on call counts.
    """

    def __init__(self, q, qid):
        self.q = q
        self.qid = qid
        for counter in ('start_calls', 'ready_calls', 'error_calls',
                'finished_calls', 'abandoned_calls'):
            setattr(self, counter, 0)

    def _saw(self, qid, counter):
        # Common check-and-count for all lifecycle callbacks.
        assert qid == self.qid
        setattr(self, counter, getattr(self, counter) + 1)

    def start(self, qid, query, bindings):
        assert query == self.q
        assert bindings == ()
        self._saw(qid, 'start_calls')

    def ready(self, qid, _cursor):
        self._saw(qid, 'ready_calls')

    def error(self, qid, _e):
        self._saw(qid, 'error_calls')

    def finished(self, qid):
        self._saw(qid, 'finished_calls')

    def abandoned(self, qid):
        self._saw(qid, 'abandoned_calls')
def test_tracing_smoke():
    """Tracer callbacks fire at the right points in a query lifecycle:
    start/ready on execute, finished on fetch, abandoned on teardown.
    """
    def assert_calls(tracer, start, ready, error, finished, abandoned):
        # Single place for the five-counter check that the original
        # repeated six times inline.
        assert tracer.start_calls == start
        assert tracer.ready_calls == ready
        assert tracer.error_calls == error
        assert tracer.finished_calls == finished
        assert tracer.abandoned_calls == abandoned
    with test_core.t1() as (bdb, _population_id, _generator_id):
        q = 'SELECT * FROM t1'
        tracer = MockTracerOneQuery(q, 1)
        bdb.trace(tracer)
        cursor = bdb.execute(q)
        assert_calls(tracer, 1, 1, 0, 0, 0)
        cursor.fetchall()
        assert_calls(tracer, 1, 1, 0, 1, 0)
        del cursor
        assert_calls(tracer, 1, 1, 0, 1, 1)
        bdb.untrace(tracer)
        # XXX Make sure the whole cursor API works.
        q = 'SELECT 42'
        tracer = MockTracerOneQuery(q, 2)
        bdb.trace(tracer)
        cursor = bdb.execute(q)
        assert_calls(tracer, 1, 1, 0, 0, 0)
        assert cursor.fetchvalue() == 42
        assert_calls(tracer, 1, 1, 0, 1, 0)
        del cursor
        assert_calls(tracer, 1, 1, 0, 1, 1)
def test_tracing_error_smoke():
    """A query that fails at execute time reports error, never
    ready/finished/abandoned.
    """
    with test_core.t1() as (bdb, _population_id, _generator_id):
        q = 'SELECT * FROM wrong'
        tracer = MockTracerOneQuery(q, 1)
        bdb.trace(tracer)
        with pytest.raises(apsw.SQLError):
            bdb.execute(q)
        assert tracer.start_calls == 1
        assert tracer.ready_calls == 0
        assert tracer.error_calls == 1
        assert tracer.finished_calls == 0
        assert tracer.abandoned_calls == 0
class Boom(Exception):
    """Sentinel exception raised by ErroneousBackend to exercise the
    error-tracing path."""
class ErroneousBackend(troll.TrollBackend):
    """Troll backend whose logpdf_joint starts raising Boom after
    several successful calls."""

    def __init__(self):
        self.call_ct = 0

    def name(self):
        return 'erroneous'

    def logpdf_joint(self, *_args, **_kwargs):
        # Wait to avoid raising during sqlite's prefetch
        if self.call_ct <= 10:
            self.call_ct += 1
            return 0
        raise Boom()
def test_tracing_execution_error_smoke():
    """An error raised while fetching results (after a successful
    execute) is reported via the tracer's error callback.
    """
    with test_core.t1() as (bdb, _population_id, _generator_id):
        bayeslite.bayesdb_register_backend(bdb, ErroneousBackend())
        bdb.execute('DROP GENERATOR p1_cc')
        bdb.execute('CREATE GENERATOR p1_err FOR p1 USING erroneous()')
        q = 'ESTIMATE PREDICTIVE PROBABILITY OF age FROM p1'
        tracer = MockTracerOneQuery(q, 1)
        bdb.trace(tracer)
        cursor = bdb.execute(q)
        assert tracer.start_calls == 1
        assert tracer.ready_calls == 1
        assert tracer.error_calls == 0
        assert tracer.finished_calls == 0
        assert tracer.abandoned_calls == 0
        # The backend blows up mid-fetch; the tracer must see the error.
        with pytest.raises(Boom):
            cursor.fetchall()
        assert tracer.start_calls == 1
        assert tracer.ready_calls == 1
        assert tracer.error_calls == 1
        assert tracer.finished_calls == 0
        assert tracer.abandoned_calls == 0
def test_pdf_var():
    # Probability density of a column against itself must both execute and
    # compile to the expected bql_pdf_joint SQL.
    with test_core.t1() as (bdb, population_id, _generator_id):
        bdb.execute('initialize 6 models for p1_cc;')
        c = bdb.execute(
            'estimate probability density of label = label from p1')
        c.fetchall()
        assert bql2sql(
            'estimate probability density of label = label from p1') == \
            'SELECT bql_pdf_joint(1, NULL, NULL, 1, "label") FROM "t1";'
| 2.21875 | 2 |
tests/test_streaming_language_modeling_task.py | dedsecurity/gpt-ded | 3 | 12762101 | <filename>tests/test_streaming_language_modeling_task.py
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import os
import random
import string
import tempfile
import unittest
import torch
from tests.utils import train_language_model
# Optional dependency probe: HuggingFace `tokenizers` is needed to train the
# dummy BPE vocabulary below; tests are skipped gracefully when it is absent.
try:
    import tokenizers # noqa
    has_hf_tokenizers = True
except ImportError:
    has_hf_tokenizers = False
def write_one_jsonl_(jsonl_path, num_lines=5, text_len_min=5, text_len_max=50):
    """Write *num_lines* JSON lines of the form {"text": <random letters>}.

    Each line's text has a random length in [text_len_min, text_len_max).
    Returns None (the file on disk is the only output).
    """
    with open(jsonl_path, "w") as h:
        for _ in range(num_lines):
            # randrange avoids materializing the whole range the way
            # random.choice(range(...)) did; also drop the `data` list the
            # original accumulated but never used.
            text_len = random.randrange(text_len_min, text_len_max)
            record = {"text": "".join(random.choices(string.ascii_letters, k=text_len))}
            print(json.dumps(record), file=h)
def write_dummy_jsonl_data_dir_(data_dir, num_lines=500):
    """Populate *data_dir* with train/valid splits of two shards, each shard
    containing jsonl datasets 'a' and 'b' of *num_lines* lines."""
    for split in ("train", "valid"):
        for shard_idx in range(2):
            shard_path = os.path.join(data_dir, split, "%02d" % shard_idx)
            os.makedirs(shard_path)
            for tag in ("a", "b"):
                target = os.path.join(shard_path, "dataset_%s.jsonl" % tag)
                write_one_jsonl_(target, num_lines=num_lines)
def write_dummy_bpe_(data_dir):
    """Train an empty-corpus byte-level BPE tokenizer and save it to *data_dir*.

    Returns the (vocab_path, merges_path) pair produced by save_model().
    Requires the optional HuggingFace `tokenizers` package (see the
    has_hf_tokenizers guard at module level).
    """
    from tokenizers import ByteLevelBPETokenizer
    # Training on an empty corpus still yields the byte-level base vocabulary
    # plus the special tokens, which is enough for these smoke tests.
    tokenizer = ByteLevelBPETokenizer(add_prefix_space=True)
    tokenizer.train(
        [],
        vocab_size=500,
        special_tokens=["<s>", "<pad>", "</s>", "<unk>"],
        show_progress=False,
    )
    vocab, merges = tokenizer.save_model(data_dir)
    return vocab, merges
class TestReproducibility(unittest.TestCase):
    """Checks that resuming training from a checkpoint reproduces the
    statistics of an uninterrupted run."""

    @unittest.skipIf(not has_hf_tokenizers, "skip test if tokenizers is missing")
    def _test_reproducibility(
        self,
        name,
        extra_flags=None,
        delta=0.0001,
        resume_checkpoint="checkpoint1.pt",
        max_epoch=3,
    ):
        """Train for *max_epoch* epochs, then restart from *resume_checkpoint*
        and assert the final train/valid stats agree within *delta*."""

        def get_last_log_stats_containing_string(log_records, search_string):
            # Scan newest-first for the last JSON log line mentioning
            # search_string.  Fixed: the original iterated the closure
            # variable ``logs.records`` and silently ignored this parameter
            # (it only worked because callers happened to pass that same
            # object); iterate the parameter so the signature is honest.
            for log_record in log_records[::-1]:
                if isinstance(log_record.msg, str) and search_string in log_record.msg:
                    return json.loads(log_record.msg)

        if extra_flags is None:
            extra_flags = []

        with tempfile.TemporaryDirectory(name) as data_dir:
            write_dummy_jsonl_data_dir_(data_dir)
            vocab, merges = write_dummy_bpe_(data_dir)
            # train epochs 1 and 2 together
            with self.assertLogs() as logs:
                train_language_model(
                    data_dir=data_dir,
                    arch="transformer_lm_gpt2_tiny",
                    extra_flags=[
                        "--vocab-filename",
                        vocab,
                        "--merges-filename",
                        merges,
                        "--dropout",
                        "0.0",
                        "--log-format",
                        "json",
                        "--log-interval",
                        "1",
                        "--max-epoch",
                        str(max_epoch),
                        "--batch-size",
                        "2",
                    ]
                    + extra_flags,
                    task="streaming_language_modeling",
                    max_tokens=None,
                )
            train_log = get_last_log_stats_containing_string(logs.records, "train_loss")
            valid_log = get_last_log_stats_containing_string(logs.records, "valid_loss")
            # train epoch 2, resuming from previous checkpoint 1
            os.rename(
                os.path.join(data_dir, resume_checkpoint),
                os.path.join(data_dir, "checkpoint_last.pt"),
            )
            with self.assertLogs() as logs:
                train_language_model(
                    data_dir=data_dir,
                    arch="transformer_lm_gpt2_tiny",
                    extra_flags=[
                        "--vocab-filename",
                        vocab,
                        "--merges-filename",
                        merges,
                        "--dropout",
                        "0.0",
                        "--log-format",
                        "json",
                        "--log-interval",
                        "1",
                        "--max-epoch",
                        str(max_epoch),
                        "--batch-size",
                        "2",
                    ]
                    + extra_flags,
                    task="streaming_language_modeling",
                    max_tokens=None,
                )
            train_res_log = get_last_log_stats_containing_string(
                logs.records, "train_loss"
            )
            valid_res_log = get_last_log_stats_containing_string(
                logs.records, "valid_loss"
            )
            # The resumed run must reproduce the uninterrupted run's stats.
            for k in ["train_loss", "train_ppl", "train_num_updates", "train_gnorm"]:
                self.assertAlmostEqual(
                    float(train_log[k]), float(train_res_log[k]), delta=delta
                )
            for k in [
                "valid_loss",
                "valid_ppl",
                "valid_num_updates",
                "valid_best_loss",
            ]:
                self.assertAlmostEqual(
                    float(valid_log[k]), float(valid_res_log[k]), delta=delta
                )

    def test_reproducibility(self):
        self._test_reproducibility("test_reproducibility")

    @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
    def test_reproducibility_fp16(self):
        self._test_reproducibility(
            "test_reproducibility_fp16",
            [
                "--fp16",
                "--fp16-init-scale",
                "4096",
            ],
            delta=0.011,
        )

    @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
    def test_reproducibility_memory_efficient_fp16(self):
        self._test_reproducibility(
            "test_reproducibility_memory_efficient_fp16",
            [
                "--memory-efficient-fp16",
                "--fp16-init-scale",
                "4096",
            ],
        )

    def test_mid_epoch_reproducibility(self):
        self._test_reproducibility(
            "test_mid_epoch_reproducibility",
            ["--save-interval-updates", "3"],
            resume_checkpoint="checkpoint_1_3.pt",
            max_epoch=1,
        )
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
| 2.125 | 2 |
jaeger_client/throttler.py | jaegertracing/jaeger-client-python | 372 | 12762102 | <reponame>jaegertracing/jaeger-client-python
# Copyright (c) 2018 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import random
from threading import Lock
from typing import Any, Optional
from tornado.ioloop import PeriodicCallback
from .constants import DEFAULT_THROTTLER_REFRESH_INTERVAL
from .metrics import Metrics, MetricsFactory
from .utils import ErrorReporter
# Emitting one debug span costs one credit; below this balance the span is
# throttled (see RemoteThrottler.is_allowed).
MINIMUM_CREDITS = 1.0
default_logger = logging.getLogger('jaeger_tracing')
class Throttler(object):
    """Interface for throttlers that limit the rate of emitted debug spans."""
    def set_client_id(self, client_id: int) -> None:
        """
        Called by tracer to set client ID of throttler.
        """
        pass
    def is_allowed(self, operation: str) -> bool:
        """Return True if a debug span may be emitted for *operation*."""
        raise NotImplementedError()
    def close(self) -> None:
        """Release any resources held by the throttler. No-op by default."""
        pass
class RemoteThrottler(Throttler):
    """
    RemoteThrottler controls the flow of spans emitted from client to prevent
    flooding. RemoteThrottler requests credits from the throttling service
    periodically. These credits determine the amount of debug spans a client
    may emit for a particular operation without receiving more credits.
    :param channel: channel for communicating with jaeger-agent
    :param service_name: name of this application
    :param kwargs: optional parameters
    - refresh_interval: interval in seconds for requesting more credits
    - logger: Logger instance
    - metrics_factory: factory to create throttler-specific metrics
    - error_reporter: ErrorReporter instance
    """
    def __init__(self, channel: Any, service_name: str, **kwargs: Any) -> None:
        self.channel = channel
        self.service_name = service_name
        # Set once via set_client_id(); identifies this tracer to the agent.
        self.client_id: Optional[int] = None
        self.refresh_interval = \
            kwargs.get('refresh_interval', DEFAULT_THROTTLER_REFRESH_INTERVAL)
        self.logger = kwargs.get('logger', default_logger)
        metrics_factory = kwargs.get('metrics_factory', MetricsFactory())
        self.metrics = ThrottlerMetrics(metrics_factory)
        self.error_reporter = kwargs.get('error_reporter', ErrorReporter(Metrics()))
        # operation name -> remaining credits; all access guarded by self.lock.
        self.credits: dict = {}
        self.lock = Lock()
        self.running = True
        self.periodic = None
        if not self.channel.io_loop:
            self.logger.error(
                'Cannot acquire IOLoop, throttler will not be updated')
        else:
            self.channel.io_loop.add_callback(self._init_polling)
    def is_allowed(self, operation: str) -> bool:
        # First sighting of an operation registers it (so the next credit
        # poll will request credits for it) but throttles the current span.
        with self.lock:
            if operation not in self.credits:
                self.credits[operation] = 0.0
                self.metrics.throttled_debug_spans(1)
                return False
            value = self.credits[operation]
            if value < MINIMUM_CREDITS:
                self.metrics.throttled_debug_spans(1)
                return False
            # Spend one credit for the allowed span.
            self.credits[operation] = value - MINIMUM_CREDITS
            return True
    def set_client_id(self, client_id: int) -> None:
        # Only the first caller wins; subsequent calls are ignored.
        with self.lock:
            if self.client_id is None:
                self.client_id = client_id
    def _init_polling(self):
        """
        Bootstrap polling for throttler.
        To avoid spiky traffic from throttler clients, we use a random delay
        before the first poll.
        """
        with self.lock:
            if not self.running:
                return
            r = random.Random()
            # Jitter the first poll uniformly over one refresh interval.
            delay = r.random() * self.refresh_interval
            self.channel.io_loop.call_later(
                delay=delay, callback=self._delayed_polling)
            self.logger.info(
                'Delaying throttling credit polling by %d sec', delay)
    def _operations(self):
        # Snapshot of the operations we currently track credits for.
        with self.lock:
            return self.credits.keys()
    def _delayed_polling(self):
        def callback():
            self._fetch_credits(self._operations())
        periodic = PeriodicCallback(
            callback=callback,
            # convert interval to milliseconds
            callback_time=self.refresh_interval * 1000)
        # Fetch immediately, then keep polling on the periodic timer.
        self._fetch_credits(self._operations())
        with self.lock:
            if not self.running:
                return
            self.periodic = periodic
            self.periodic.start()
            self.logger.info(
                'Throttling client started with refresh interval %d sec',
                self.refresh_interval)
    def _fetch_credits(self, operations):
        # Nothing to request until at least one operation has been seen.
        if not operations:
            return
        self.logger.debug('Requesting throttling credits')
        fut = self.channel.request_throttling_credits(
            self.service_name, self.client_id, operations)
        fut.add_done_callback(self._request_callback)
    def _request_callback(self, future):
        exception = future.exception()
        if exception:
            self.metrics.throttler_update_failure(1)
            self.error_reporter.error(
                'Failed to get throttling credits from jaeger-agent: %s',
                exception)
            return
        response = future.result()
        # In Python 3.5 response.body is of type bytes and json.loads() does only support str
        # See: https://github.com/jaegertracing/jaeger-client-python/issues/180
        if hasattr(response.body, 'decode') and callable(response.body.decode):
            response_body = response.body.decode('utf-8')
        else:
            response_body = response.body
        try:
            throttling_response = json.loads(response_body)
            self.logger.debug('Received throttling response: %s',
                              throttling_response)
            self._update_credits(throttling_response)
            self.metrics.throttler_update_success(1)
        except Exception as e:
            self.metrics.throttler_update_failure(1)
            self.error_reporter.error(
                'Failed to parse throttling credits response '
                'from jaeger-agent: %s [%s]', e, response_body)
            return
    def _update_credits(self, response):
        # Credits are additive: each poll's balances top up the local pool.
        with self.lock:
            for op_balance in response['balances']:
                op = op_balance['operation']
                balance = op_balance['balance']
                if op not in self.credits:
                    self.credits[op] = 0
                self.credits[op] += balance
            self.logger.debug('credits = %s', self.credits)
    def close(self) -> None:
        # Stop future polls; safe to call before the periodic timer exists.
        with self.lock:
            self.running = False
            if self.periodic:
                self.periodic.stop()
class ThrottlerMetrics(object):
    """
    Metrics specific to throttler.
    """
    def __init__(self, metrics_factory: MetricsFactory) -> None:
        # Count of debug spans suppressed for lack of credits.
        self.throttled_debug_spans = \
            metrics_factory.create_counter(name='jaeger:throttled_debug_spans')
        # Credit-poll outcomes, split by result tag.
        self.throttler_update_success = \
            metrics_factory.create_counter(name='jaeger:throttler_update',
                                           tags={'result': 'ok'})
        self.throttler_update_failure = \
            metrics_factory.create_counter(name='jaeger:throttler_update',
                                           tags={'result': 'err'})
| 2.421875 | 2 |
NSLKDD/original/trainNSLKDD.py | zbs881314/Intrusion-detection | 0 | 12762103 | <reponame>zbs881314/Intrusion-detection
# Training script for a spiking neural network (SNN) intrusion detector on
# the NSL-KDD dataset, built on TensorFlow 1.x graph mode.
import sys
sys.path.append("..")
import tensorflow as tf
import numpy as np
import os
import SNN
# Graph inputs: raw features and one-hot labels (shapes left dynamic).
input = tf.placeholder(tf.float32)
input_exp = tf.exp(input)
groundtruth = tf.placeholder(tf.float32)
# Resume from saved weights when present, otherwise start from random init.
# NOTE(review): the bare `except:` masks *any* failure (corrupt file, shape
# mismatch, typo) as "no weight file found" — consider narrowing to OSError.
try:
    w1 = np.load('weight_nslkdd11.npy')
    w2 = np.load('weight_nslkdd12.npy')
    w3 = np.load('weight_nslkdd13.npy')
    layer_in = SNN.SNNLayer(122, 100, w1)
    layer_out1 = SNN.SNNLayer(100, 100, w2)
    layer_out2 = SNN.SNNLayer(100, 5, w3)
    print('Weight loaded!')
except:
    layer_in = SNN.SNNLayer(122, 100)
    layer_out1 = SNN.SNNLayer(100, 100)
    layer_out2 = SNN.SNNLayer(100, 5)
    print('No weight file found, use random weight')
# Forward pass: 122 -> 100 -> 100 -> 5 (5 NSL-KDD attack categories).
layerin_out = layer_in.forward(input_exp)
layerout_out1 = layer_out1.forward(layerin_out)
layerout_out2 = layer_out2.forward(layerout_out1)
nnout = tf.log(layerout_out2)
# Loss plus weight-sum and L2 regularizers weighted by K and K2.
layerout_groundtruth = tf.concat([layerout_out2,groundtruth],1)
loss = tf.reduce_mean(tf.map_fn(SNN.loss_func,layerout_groundtruth))
wsc = layer_in.w_sum_cost() + layer_out1.w_sum_cost() + layer_out2.w_sum_cost()
l2c = layer_in.l2_cost() + layer_out1.l2_cost() + layer_out2.l2_cost()
K = 100
K2 = 1e-3
learning_rate = 1e-4
TRAINING_BATCH = 128
SAVE_PATH = os.getcwd() + '/weight_nslkdd1'
cost = loss + K*wsc + K2*l2c
opt = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = opt.minimize(cost)
config = tf.ConfigProto(device_count={'GPU': 1})
config.gpu_options.allow_growth = True
sess = tf.Session()
sess.run(tf.global_variables_initializer())
scale = 3  # input scaling factor applied to each batch
mnist = SNN.Nslkdd(path=["dataset1/X_train.npy","dataset1/y_train.npy"])
print('training started')
step = 1
# NOTE(review): trains forever (no stopping criterion); interrupt manually.
# Weights are saved every step; the cost is only printed every 20 steps.
while(True):
    xs, ys = mnist.next_batch(TRAINING_BATCH, shuffle=True)
    xs = scale*xs
    [out,c,_] = sess.run([nnout,cost,train_op],{input:xs,groundtruth:ys})
    if step % 20 == 1:
        print('step '+repr(step) +', cost='+repr(c))
    w1 = sess.run(layer_in.weight)
    w2 = sess.run(layer_out1.weight)
    w3 = sess.run(layer_out2.weight)
    np.save(SAVE_PATH + '1', w1)
    np.save(SAVE_PATH + '2', w2)
    np.save(SAVE_PATH + '3', w3)
    step = step + 1
| 2.296875 | 2 |
esmvaltool/cmorizers/obs/cmorize_obs_ghcn_cams.py | cffbots/ESMValTool | 148 | 12762104 | <filename>esmvaltool/cmorizers/obs/cmorize_obs_ghcn_cams.py
"""ESMValTool CMORizer for GHCN-CAMS data.
Tier
Tier 2: other freely-available dataset.
Source
https://www.esrl.noaa.gov/psd/data/gridded/data.ghcncams.html
ftp://ftp.cdc.noaa.gov/Datasets/ghcncams/air.mon.mean.nc
Last access
20200304
"""
import logging
import os
import iris
from . import utilities as utils
logger = logging.getLogger(__name__)
def _extract_variable(short_name, var, cfg, filepath, out_dir):
    """Extract one variable from *filepath*, CMORize it and save to *out_dir*.

    *var* is the per-variable recipe dict (keys seen here: 'raw',
    'raw_units', 'mip'); *cfg* carries the CMOR table and global attributes.
    """
    raw_var = var.get('raw', short_name)
    cube = iris.load_cube(filepath, utils.var_name_constraint(raw_var))
    # Fix units: override broken source units before converting to CMOR units.
    if 'raw_units' in var:
        cube.units = var['raw_units']
    cmor_info = cfg['cmor_table'].get_variable(var['mip'], short_name)
    cube.convert_units(cmor_info.units)
    # Reference time axis to 1950 as the CMOR epoch.
    utils.convert_timeunits(cube, 1950)
    # Fix coordinates
    utils.fix_coords(cube)
    if 'height2m' in cmor_info.dimensions:
        utils.add_height2m(cube)
    # Fix metadata — note this mutates the shared cfg['attributes'] dict.
    attrs = cfg['attributes']
    attrs['mip'] = var['mip']
    utils.fix_var_metadata(cube, cmor_info)
    utils.set_global_atts(cube, attrs)
    # Save variable
    utils.save_variable(cube,
                        short_name,
                        out_dir,
                        attrs,
                        unlimited_dimensions=['time'])
def cmorization(in_dir, out_dir, cfg, _):
    """CMORize every variable listed in the recipe configuration *cfg*."""
    source_file = os.path.join(in_dir, cfg['filename'])
    for short_name, var in cfg['variables'].items():
        logger.info("CMORizing variable '%s'", short_name)
        _extract_variable(short_name, var, cfg, source_file, out_dir)
| 1.773438 | 2 |
observer/client.py | amitkc00/design_patterns | 0 | 12762105 | <filename>observer/client.py
from subject_Weather import weather
from observer_currentCondition import currentCondition
from observer_forecastDisplay import forecastDisplay
from observer_statisticsDisplay import statisticsDisplay

if __name__== "__main__":
    # Demo wiring of the observer pattern: three displays subscribe to one
    # weather subject.  Fixed: the original rebound the imported class name
    # ("weather = weather()"), shadowing the class; use a distinct instance
    # name instead.
    station = weather()
    current = currentCondition()
    station.subscribeElem(current)
    forecast = forecastDisplay()
    station.subscribeElem(forecast)
    stats = statisticsDisplay()
    station.subscribeElem(stats)
    # Design note (kept from the original author): the subject should not
    # decide by itself to notify — some event should trigger the notification.
    station.notify()
| 2.921875 | 3 |
gf_test.py | autolordz/gradient-descent-optimization | 5 | 12762106 | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 30 20:15:18 2019
@author: autol
"""
#%%
from plotxy import plot_gd_xy,iters_gd_plot,plot_gd_contour
from initdata import init_data,init_data1,data_b,init_data_house
from func import gradient_descent_f
from varclass import VarSetX
from sklearn.model_selection import ParameterGrid
import matplotlib.pyplot as plt
import numpy as np
#%% Example
n=20
w = np.ones(2);w
X,y=init_data1(n,45,w,b=0);X # eta = 1e-2
#X,y=init_data_house(n,45,w);X # 1e-7
X_b = data_b(X);X_b
y
#%%
B_b = np.linalg.inv(X_b.T.dot(X_b)) @ (X_b.T.dot(y));B_b
B = np.linalg.inv(X.T.dot(X)) @ (X.T.dot(y));B
#%%
#w = np.array([-2.5,-2.5]);w
#w = np.array([0.,0.]);w
A = 2./len(y)*X.T.dot(X) # ŋ=1 # 海森矩阵
J = lambda w: np.mean((X.dot(w)-y)**2) # 目标函数
gJ = lambda w: 2./len(y)*X.T.dot(X.dot(w)-y) # 梯度函数
#A = X.T@X # ŋ=1/n
#J = lambda w: w.dot(A).dot(w)
#gJ = lambda w: A.dot(w)
pgrid =list(ParameterGrid(dict(sgd=[0,1],
isStep=[0],
# ρ=[.5,5,10],
# n_b=[2,5],
# ŋ_a=[1], # ŋ_a 要大于1
method=['mm21','mm22','mm23','mm24','mm25'],
#method=['mm31','mm32','mm33','mm34','mm30'],
#method=['mm40','mm41','mm42','mm43','mm44','mm45','mm46'],
#method=['mm51','mm52','mm53','mm54','mm55'],
#method=['mm10'],
#method=['mm90','mm91','mm92','mm93','mm94',],
)))
skwargs = dict(A=A,ŋ=.1,ŋ_a=1,tol=0.05,
ε=.001,λ=.1,α=.5,γ=0.5,β1=.9,β2=.999)
wws=[];ess=[];rets=[]
for pg in pgrid:
w0 = w.copy()-np.random.uniform(1,3,2) #任意起点
kwargs=dict(X=X.copy(),y=y.copy(),
gJ=gJ,J=J,w=w0,)
kwargs.update(skwargs) ; kwargs.update(pg) ; var = VarSetX(kwargs)
ret = gradient_descent_f(var,n_iters=20,skipConv=0,
**kwargs)
ww = np.stack(ret['wh'][:,1])
es = ret['wh'][:,2]
wws.append(ww); ess.append(es); rets.append(ret)
print(ww,es)
#%%
x = np.zeros(len(w));x
x = np.vstack([x, np.amax(X,axis=0)]);x
x_b = data_b(x)
yh = x.dot(B); yh
fig, ax = plt.subplots(figsize = (8,8))
ax.plot(X[:,0],y,'o')
ax.plot(x[:,0],yh,color='b',linewidth=5)
ws = [ww[int(i)] for i in np.linspace(0,len(ww)-1,10)]
for wx in ws:
yh = x.dot(wx);yh # 画渐近的基准线
ax.plot(x[:,0],yh,color='r')
ax.set_xlabel('x')
ax.set_ylabel('y')
#%%
plot_gd_contour(J,wws,ess,pgrid,skwargs,B)
#%%
paras = skwargs.copy()
paras.pop('A')
iters_gd_plot(rets,var,pgrid,paras=paras,
**kwargs) | 2.34375 | 2 |
examples/ServiceSchema.py | msitt/blpapi-python | 228 | 12762107 | # ServiceSchema.py
from __future__ import print_function
from __future__ import absolute_import
from optparse import OptionParser, OptionValueError
import os
import platform as plat
import sys
# On Windows + Python 3.8+, DLL directories are no longer taken from PATH;
# register the blpapi library directory explicitly before importing.
if sys.version_info >= (3, 8) and plat.system().lower() == "windows":
    # pylint: disable=no-member
    with os.add_dll_directory(os.getenv('BLPAPI_LIBDIR')):
        import blpapi
else:
    import blpapi
REFERENCE_DATA_RESPONSE = blpapi.Name("ReferenceDataResponse")
# Human-readable names for blpapi schema datatype codes, used when printing.
ELEMENT_DATATYPE_NAMES = {
    blpapi.DataType.BOOL: "BOOL",
    blpapi.DataType.CHAR: "CHAR",
    blpapi.DataType.BYTE: "BYTE",
    blpapi.DataType.INT32: "INT32",
    blpapi.DataType.INT64: "INT64",
    blpapi.DataType.FLOAT32: "FLOAT32",
    blpapi.DataType.FLOAT64: "FLOAT64",
    blpapi.DataType.STRING: "STRING",
    blpapi.DataType.BYTEARRAY: "BYTEARRAY",
    blpapi.DataType.DATE: "DATE",
    blpapi.DataType.TIME: "TIME",
    blpapi.DataType.DECIMAL: "DECIMAL",
    blpapi.DataType.DATETIME: "DATETIME",
    blpapi.DataType.ENUMERATION: "ENUMERATION",
    blpapi.DataType.SEQUENCE: "SEQUENCE",
    blpapi.DataType.CHOICE: "CHOICE",
    blpapi.DataType.CORRELATION_ID: "CORRELATION_ID"
}
# Human-readable names for blpapi schema status codes.
SCHEMA_STATUS_NAMES = {
    blpapi.SchemaStatus.ACTIVE: "ACTIVE",
    blpapi.SchemaStatus.DEPRECATED: "DEPRECATED",
    blpapi.SchemaStatus.INACTIVE: "INACTIVE",
    blpapi.SchemaStatus.PENDING_DEPRECATION: "PENDING"
}
def authOptionCallback(_option, _opt, value, parser):
    """Parse authorization options from user input"""
    # Accepted forms: "user", "none", "app=<app>", "userapp=<app>",
    # "dir=<property>", "manual=<app>,<ip>,<user>".
    vals = value.split('=', 1)
    if value == "user":
        authUser = blpapi.AuthUser.createWithLogonName()
        authOptions = blpapi.AuthOptions.createWithUser(authUser)
    elif value == "none":
        authOptions = None
    elif vals[0] == "app" and len(vals) == 2:
        appName = vals[1]
        authOptions = blpapi.AuthOptions.createWithApp(appName)
    elif vals[0] == "userapp" and len(vals) == 2:
        appName = vals[1]
        authUser = blpapi.AuthUser.createWithLogonName()
        authOptions = blpapi.AuthOptions\
            .createWithUserAndApp(authUser, appName)
    elif vals[0] == "dir" and len(vals) == 2:
        activeDirectoryProperty = vals[1]
        authUser = blpapi.AuthUser\
            .createWithActiveDirectoryProperty(activeDirectoryProperty)
        authOptions = blpapi.AuthOptions.createWithUser(authUser)
    elif vals[0] == "manual":
        # "manual" needs exactly three comma-separated fields after '='.
        parts = []
        if len(vals) == 2:
            parts = vals[1].split(',')
        if len(parts) != 3:
            raise OptionValueError("Invalid auth option {}".format(value))
        appName, ip, userId = parts
        authUser = blpapi.AuthUser.createWithManualOptions(userId, ip)
        authOptions = blpapi.AuthOptions.createWithUserAndApp(authUser, appName)
    else:
        raise OptionValueError("Invalid auth option '{}'".format(value))
    # Stash the parsed AuthOptions on the optparse values object.
    parser.values.auth = {'option' : authOptions}
def parseCmdLine():
    """Build the optparse parser and return the parsed command-line options."""
    parser = OptionParser()
    parser.add_option("-a",
                      "--host",
                      dest="host",
                      help="HOST address to connect to",
                      metavar="HOST",
                      default="localhost")
    parser.add_option("-p",
                      "--port",
                      dest="port",
                      type="int",
                      help="PORT to connect to (%default)",
                      metavar="PORT",
                      default=8194)
    parser.add_option("-s",
                      "--service",
                      default="//blp/apiflds",
                      help="SERVICE to print the schema of "
                      "('//blp/apiflds' by default)")
    # --auth is parsed by authOptionCallback into a blpapi.AuthOptions;
    # the default is OS-logon-name user authentication.
    parser.add_option("--auth",
                      dest="auth",
                      help="authentication option: "
                      "user|none|app=<app>|userapp=<app>|dir=<property>"
                      "|manual=<app,ip,user>"
                      " (default: user)\n"
                      "'none' is applicable to Desktop API product "
                      "that requires Bloomberg Professional service "
                      "to be installed locally.",
                      metavar="option",
                      action="callback",
                      callback=authOptionCallback,
                      type="string",
                      default={"option" :
                               blpapi.AuthOptions.createWithUser(
                                   blpapi.AuthUser.createWithLogonName())})
    (options, _) = parser.parse_args()
    return options
def printMessage(msg):
    """Print a message prefixed by its comma-separated correlation ids."""
    cids = ", ".join(str(cid) for cid in msg.correlationIds())
    print("[{0}]: {1}".format(cids, msg))
def getIndent(level):
    """Return an indentation string of two spaces per nesting *level*."""
    return "  " * level
# Print enumeration (constant list)
def printEnumeration(cl, level):
    """Print a schema constant list and all of its constants, indented one
    level deeper than *level*."""
    indent = getIndent(level + 1)
    print(indent + " {0} {1} {2} \"{3}\" possible values:".format(
        cl.name(),
        SCHEMA_STATUS_NAMES[cl.status()],
        ELEMENT_DATATYPE_NAMES[cl.datatype()],
        cl.description()))
    # Enumerate and print all constant list's values (constants)
    for i in cl:
        print(indent + " {0} {1} {2} \"{3}\" = {4!s}".format(
            i.name(),
            SCHEMA_STATUS_NAMES[i.status()],
            ELEMENT_DATATYPE_NAMES[i.datatype()],
            i.description(),
            i.getValue()))
# Recursively print element definition
def printElementDefinition(ed, level=0):
    """Print an element definition, its type, enumeration values and any
    sub-elements, recursing one indent level per nesting depth."""
    indent = getIndent(level)
    # Render "[min, max]" or "[min, INF)" for unbounded cardinality.
    maxValues = ed.maxValues()
    if maxValues == blpapi.SchemaElementDefinition.UNBOUNDED:
        valuesRange = "[{0}, INF)".format(ed.minValues())
    else:
        valuesRange = "[{0}, {1}]".format(ed.minValues(), maxValues)
    # Get and print alternate element names
    alternateNames = ed.alternateNames()
    if alternateNames:
        alternateNames = "[{0}]".format(",".join(map(str, alternateNames)))
    else:
        alternateNames = ""
    print(indent + "* {0} {1} {2} {3} \"{4}\"".format(
        ed.name(),
        SCHEMA_STATUS_NAMES[ed.status()],
        valuesRange,
        alternateNames,
        ed.description()))
    # Get and print related type definition
    td = ed.typeDefinition()
    print(indent + " {0} {1} {2} {3}{4}{5}\"{6}\"".format(
        td.name(),
        SCHEMA_STATUS_NAMES[td.status()],
        ELEMENT_DATATYPE_NAMES[td.datatype()],
        "complex " if td.isComplexType() else "",
        "simple " if td.isSimpleType() else "",
        "enum " if td.isEnumerationType() else "",
        td.description()))
    # Get and print all possible values for enumeration type
    enumeration = td.enumeration()
    if not enumeration is None:
        printEnumeration(enumeration, level)
    if td.numElementDefinitions():
        print(indent + " Elements[{0}]:".format(
            td.numElementDefinitions()))
        # Enumerate and print all sub-element definitions
        for i in td.elementDefinitions():
            printElementDefinition(i, level + 1)
def printOperation(operation, _service):
    """Print one service operation: its request schema and every response
    schema (the *_service* parameter is unused)."""
    print("{0} \"{1}\" Request:".format(
        operation.name(),
        operation.description()))
    # Print operation's request definition
    printElementDefinition(operation.requestDefinition(), 1)
    print("Responses[{0}]:".format(operation.numResponseDefinitions()))
    # Enumerate and print all operation's response definitions
    for r in operation.responseDefinitions():
        printElementDefinition(r, 1)
    print()
def main():
    """Connect to the configured service and dump its full schema."""
    options = parseCmdLine()
    # Fill SessionOptions
    sessionOptions = blpapi.SessionOptions()
    sessionOptions.setServerHost(options.host)
    sessionOptions.setServerPort(options.port)
    sessionOptions.setSessionIdentityOptions(options.auth['option'])
    # Create a Session
    session = blpapi.Session(sessionOptions)
    # Start a Session
    if not session.start():
        raise Exception("Can't start session.")
    try:
        print("Session started.")
        # Open service to get reference data from
        if not session.openService(options.service):
            raise Exception("Can't open '{0}' service.".format(
                options.service))
        # Obtain previously opened service
        service = session.getService(options.service)
        print("Service {0}:".format(options.service))
        print("Service event definitions[{0}]:".format(
            service.numEventDefinitions()))
        # Enumerate and print all service's event definitions
        for ed in service.eventDefinitions():
            printElementDefinition(ed)
        print()
        print("Operations[{0}]:".format(service.numOperations()))
        # Enumerate and print all service's operations
        for operation in service.operations():
            printOperation(operation, service)
    finally:
        # Stop the session
        session.stop()
# Script entry point; Ctrl+C exits cleanly instead of dumping a traceback.
if __name__ == "__main__":
    print("ServiceSchema")
    try:
        main()
    except KeyboardInterrupt:
        print("Ctrl+C pressed. Stopping...")
__copyright__ = """
Copyright 2012. Bloomberg Finance L.P.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions: The above
copyright notice and this permission notice shall be included in all copies
or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
| 1.84375 | 2 |
fileutils.py | HighCWu/import_daz | 0 | 12762108 | <gh_stars>0
# Copyright (c) 2016-2021, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
import bpy
import os
from bpy_extras.io_utils import ImportHelper, ExportHelper
from .error import *
from .utils import *
#-------------------------------------------------------------
# Open and check for case change
#-------------------------------------------------------------
def safeOpen(filepath, rw, dirMustExist=False, fileMustExist=False, mustOpen=False):
    """Open *filepath* in mode *rw* with Blender-friendly error reporting.

    Returns the file object, or None when opening failed and *mustOpen* is
    False (the failure is then reported via reportError).  Raises DazError
    when a required directory/file is missing or *mustOpen* is set.
    """
    if dirMustExist:
        folder = os.path.dirname(filepath)
        if not os.path.exists(folder):
            msg = ("Directory does not exist: \n" +
                   "%s " % folder)
            raise DazError(msg)
    if fileMustExist:
        if not os.path.exists(filepath):
            msg = ("File does not exist: \n" +
                   "%s " % filepath)
            raise DazError(msg)
    # Read with utf_8_sig so a leading BOM is stripped; write plain utf_8.
    if rw == "w":
        encoding="utf_8"
    else:
        encoding="utf_8_sig"
    try:
        fp = open(filepath, rw, encoding=encoding)
    except FileNotFoundError:
        # NOTE(review): only FileNotFoundError is mapped to the soft-failure
        # path; other OSErrors (e.g. permissions) propagate to the caller.
        fp = None
    if fp is None:
        if rw[0] == "r":
            mode = "reading"
        else:
            mode = "writing"
        msg = ("Could not open file for %s: \n" % mode +
               "%s " % filepath)
        if mustOpen:
            raise DazError(msg)
        reportError(msg, warnPaths=True, trigger=(2,4))
    return fp
#-------------------------------------------------------------
# Open and check for case change
#-------------------------------------------------------------
def getFolders(ob, subdirs):
    """Return existing folders <dazpath>/<dir of ob.DazUrl>/<subdir>.

    *ob* is an object carrying a DazUrl ("path#fragment") property; *subdirs*
    is a list of subdirectory names to probe under every configured DAZ
    library path.  Returns [] when *ob* is None or its URL is empty.
    """
    if ob is None:
        return []
    # Strip the "#asset" fragment, keeping only the file reference.
    fileref = ob.DazUrl.split("#")[0]
    if len(fileref) < 2:
        return []
    reldir = os.path.dirname(fileref)
    folders = []
    for basedir in GS.getDazPaths():
        for subdir in subdirs:
            folder = "%s/%s/%s" % (basedir, reldir, subdir)
            folder = folder.replace("//", "/")
            if os.path.exists(folder):
                folders.append(folder)
    return folders
#-------------------------------------------------------------
# File extensions
#-------------------------------------------------------------
class DbzFile:
    """File-selector mixin for .dbz (and legacy .json) baked-geometry files."""
    filename_ext = ".dbz"
    filter_glob : StringProperty(default="*.dbz;*.json", options={'HIDDEN'})
class JsonFile:
    """File-selector mixin for .json files."""
    filename_ext = ".json"
    filter_glob : StringProperty(default="*.json", options={'HIDDEN'})
class JsonExportFile(ExportHelper):
    """Export-helper mixin preconfigured for writing .json files."""
    filename_ext = ".json"
    filter_glob : StringProperty(default="*.json", options={'HIDDEN'})
    filepath : StringProperty(
        name="File Path",
        description="Filepath used for exporting the .json file",
        maxlen=1024,
        default = "")
class ImageFile:
    """File-selector mixin for common image formats."""
    filename_ext = ".png;.jpeg;.jpg;.bmp;.tif;.tiff"
    filter_glob : StringProperty(default="*.png;*.jpeg;*.jpg;*.bmp;*.tif;*.tiff", options={'HIDDEN'})
class DazImageFile:
    """File-selector mixin for DAZ scene files plus image formats."""
    filename_ext = ".duf"
    filter_glob : StringProperty(default="*.duf;*.dsf;*.png;*.jpeg;*.jpg;*.bmp", options={'HIDDEN'})
class DazFile:
    """File-selector mixin for native DAZ files (.dsf, .duf, .dbz)."""
    # Fixed stray "*" in the extension list ("*.dbz" -> ".dbz") so the entry
    # follows the same ".ext;.ext" form as every sibling mixin in this file.
    filename_ext = ".dsf;.duf;.dbz"
    filter_glob : StringProperty(default="*.dsf;*.duf;*.dbz", options={'HIDDEN'})
class DufFile:
    """File-selector mixin for .duf scene files only."""
    filename_ext = ".duf"
    filter_glob : StringProperty(default="*.duf", options={'HIDDEN'})
class DatFile:
    """File-selector mixin for .dat files."""
    filename_ext = ".dat"
    filter_glob : StringProperty(default="*.dat", options={'HIDDEN'})
class TextFile:
    """File-selector mixin for .txt files."""
    filename_ext = ".txt"
    filter_glob : StringProperty(default="*.txt", options={'HIDDEN'})
class CsvFile:
    """File-selector mixin for .csv files."""
    filename_ext = ".csv"
    filter_glob : StringProperty(default="*.csv", options={'HIDDEN'})
#-------------------------------------------------------------
# SingleFile and MultiFile
#-------------------------------------------------------------
def getExistingFilePath(filepath, ext):
    """Normalize *filepath* (ensure extension *ext*, expand ~, forward
    slashes) and return it; raise DazError if it does not exist."""
    filepath = bpy.path.ensure_ext(bpy.path.abspath(filepath), ext)
    filepath = os.path.expanduser(filepath).replace("\\", "/")
    if os.path.exists(filepath):
        return filepath
    else:
        raise DazError('File does not exist:\n"%s"' % filepath)
class SingleFile(ImportHelper):
    """Import-helper mixin that opens the file browser for a single file."""
    filepath : StringProperty(
        name="File Path",
        description="Filepath used for importing the file",
        maxlen=1024,
        default="")
    def invoke(self, context, event):
        # Defer to Blender's modal file browser; execute() runs afterwards.
        context.window_manager.fileselect_add(self)
        return {'RUNNING_MODAL'}
class MultiFile(ImportHelper):
    """Import-helper mixin that lets the user pick multiple files at once."""
    files : CollectionProperty(
        name = "File Path",
        type = bpy.types.OperatorFileListElement)
    directory : StringProperty(
        subtype='DIR_PATH')
    def invoke(self, context, event):
        # Clear any scripted file list, then open the modal file browser.
        G.theFilePaths = []
        context.window_manager.fileselect_add(self)
        return {'RUNNING_MODAL'}
    def getMultiFiles(self, extensions):
        """Return the selected paths whose type matches *extensions*
        (a list of extension strings without the leading dot)."""
        def getTypedFilePath(filepath, exts):
            # Map a selected path to the actual asset path, or None.
            # Handles thumbnail selections: "<asset>.<ext>[.tip].<img>"
            # resolves back to the underlying asset file.
            words = os.path.splitext(filepath)
            if len(words) == 2:
                fname,ext = words
            else:
                return None
            if fname[-4:] == ".tip":
                fname = fname[:-4]
            if ext in [".png", ".jpeg", ".jpg", ".bmp"]:
                if os.path.exists(fname):
                    words = os.path.splitext(fname)
                    if (len(words) == 2 and
                        words[1][1:] in exts):
                        return fname
                for ext1 in exts:
                    path = fname+"."+ext1
                    if os.path.exists(path):
                        return path
                return None
            elif ext[1:].lower() in exts:
                return filepath
            else:
                return None
        filepaths = []
        # G.theFilePaths (when set) overrides the interactive selection,
        # e.g. for scripted/batch invocations.
        if G.theFilePaths:
            for path in G.theFilePaths:
                filepath = getTypedFilePath(path, extensions)
                if filepath:
                    filepaths.append(filepath)
        else:
            for file_elem in self.files:
                path = os.path.join(self.directory, file_elem.name)
                if os.path.isfile(path):
                    filepath = getTypedFilePath(path, extensions)
                    if filepath:
                        filepaths.append(filepath)
        return filepaths
#-------------------------------------------------------------
# Open settings file
#-------------------------------------------------------------
def openSettingsFile(filepath):
    """Load a JSON settings file.

    Returns the parsed JSON value, or None if the file cannot be opened
    or contains invalid JSON.  A diagnostic message is printed in either
    failure case (preserving the original best-effort behavior).
    """
    filepath = os.path.expanduser(filepath)
    try:
        # utf-8-sig transparently strips a leading BOM if present.
        # `with` guarantees the handle is closed on every path.
        with open(filepath, "r", encoding="utf-8-sig") as fp:
            try:
                return json.load(fp)
            except json.JSONDecodeError as err:
                print("File %s is corrupt" % filepath)
                print("Error: %s" % err)
                return None
    except OSError:
        # Narrowed from a bare except: only I/O errors mean "not openable".
        print("Could not open %s" % filepath)
        return None
| 0.882813 | 1 |
tests/unit/modules/test_profiles.py | renanzulian/oraclecxcommerce-sdk | 0 | 12762109 | from oraclecxcommerce.modules import ProfilesModule
import pytest
def test_instantiate_profile_module_class_should_return_not_implemented_error():
    # ProfilesModule is an abstract entry point: direct instantiation
    # must raise NotImplementedError.
    with pytest.raises(NotImplementedError):
        occ = ProfilesModule()
| 1.71875 | 2 |
src/integ_test_resources/ios/sdk/integration/cdk/cdk_integration_tests_ios/sqs_stack.py | jhockett/amplify-ci-support | 9 | 12762110 | from aws_cdk import aws_iam, aws_sqs, core
from common.common_stack import CommonStack
from common.region_aware_stack import RegionAwareStack
class SqsStack(RegionAwareStack):
    # CDK stack that provisions SQS resources used by iOS SDK integration
    # tests, and grants the shared test role the permissions it needs.
    def __init__(self, scope: core.Construct, id: str, common_stack: CommonStack, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        self._supported_in_region = self.is_service_supported_in_region()
        # Test simply asserts the existence of a queue
        aws_sqs.Queue(self, "integ_test_sqs_queue")
        # Allow the common test role to inspect any queue in this
        # account/region.
        queue_policy = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=["sqs:GetQueueAttributes"],
            resources=[f"arn:aws:sqs:{self.region}:{self.account}:*"],
        )
        common_stack.add_to_common_role_policies(self, policy_to_add=queue_policy)
        # ListQueues is not resource-scoped, so it requires a wildcard
        # resource.
        all_resources_policy = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=["sqs:ListQueues"],
            resources=["*"],
        )
        common_stack.add_to_common_role_policies(self, policy_to_add=all_resources_policy)
| 2.0625 | 2 |
nwb_conversion_tools/gui/utils/name_references.py | wuffi/nwb-conversion-tools | 0 | 12762111 | import nwb_conversion_tools.gui.classes as gui_modules
import pynwb
# This carries the reference mapping between pynwb group names and their
# respective GUI form-constructing classes.
# It can be updated by extensions at the __init__ of nwb_conversion_gui.py
name_to_gui_class = {
    'Device': gui_modules.forms_general.GroupDevice,
    # Base
    'TimeSeries': gui_modules.forms_base.GroupTimeSeries,
    # Ophys
    'OpticalChannel': gui_modules.forms_ophys.GroupOpticalChannel,
    'ImagingPlane': gui_modules.forms_ophys.GroupImagingPlane,
    'TwoPhotonSeries': gui_modules.forms_ophys.GroupTwoPhotonSeries,
    'CorrectedImageStack': gui_modules.forms_ophys.GroupCorrectedImageStack,
    'MotionCorrection': gui_modules.forms_ophys.GroupMotionCorrection,
    'PlaneSegmentation': gui_modules.forms_ophys.GroupPlaneSegmentation,
    'ImageSegmentation': gui_modules.forms_ophys.GroupImageSegmentation,
    'RoiResponseSeries': gui_modules.forms_ophys.GroupRoiResponseSeries,
    'DfOverF': gui_modules.forms_ophys.GroupDfOverF,
    'Fluorescence': gui_modules.forms_ophys.GroupFluorescence,
    'GrayscaleVolume': gui_modules.forms_ophys.GroupGrayscaleVolume,
    # Ecephys
    'ElectricalSeries': gui_modules.forms_ecephys.GroupElectricalSeries,
    'ElectrodeGroup': gui_modules.forms_ecephys.GroupElectrodeGroup,
    'SpikeEventSeries': gui_modules.forms_ecephys.GroupSpikeEventSeries,
    'EventDetection': gui_modules.forms_ecephys.GroupEventDetection,
    'EventWaveform': gui_modules.forms_ecephys.GroupEventWaveform,
    'LFP': gui_modules.forms_ecephys.GroupLFP,
    'FilteredEphys': gui_modules.forms_ecephys.GroupFilteredEphys,
    'FeatureExtraction': gui_modules.forms_ecephys.GroupFeatureExtraction,
    # Behavior
    'SpatialSeries': gui_modules.forms_behavior.GroupSpatialSeries,
    'BehavioralEpochs': gui_modules.forms_behavior.GroupBehavioralEpochs,
    'BehavioralEvents': gui_modules.forms_behavior.GroupBehavioralEvents,
    'BehavioralTimeSeries': gui_modules.forms_behavior.GroupBehavioralTimeSeries,
    'PupilTracking': gui_modules.forms_behavior.GroupPupilTracking,
    'EyeTracking': gui_modules.forms_behavior.GroupEyeTracking,
    'CompassDirection': gui_modules.forms_behavior.GroupCompassDirection,
    'Position': gui_modules.forms_behavior.GroupPosition,
    # Ogen
    'OptogeneticStimulusSite': gui_modules.forms_ogen.GroupOptogeneticStimulusSite,
    'OptogeneticSeries': gui_modules.forms_ogen.GroupOptogeneticSeries,
}
# This carries the reference mapping between pynwb group names and their
# respective pynwb classes.
# It can be updated by extensions at the __init__ of nwb_conversion_gui.py
# NOTE(review): this map covers a subset of name_to_gui_class (e.g. no
# TimeSeries / GrayscaleVolume entries) — confirm that is intentional.
name_to_pynwb_class = {
    'Device': pynwb.device.Device,
    # Ophys
    'OpticalChannel': pynwb.ophys.OpticalChannel,
    'ImagingPlane': pynwb.ophys.ImagingPlane,
    'TwoPhotonSeries': pynwb.ophys.TwoPhotonSeries,
    'CorrectedImageStack': pynwb.ophys.CorrectedImageStack,
    'MotionCorrection': pynwb.ophys.MotionCorrection,
    'PlaneSegmentation': pynwb.ophys.PlaneSegmentation,
    'ImageSegmentation': pynwb.ophys.ImageSegmentation,
    'RoiResponseSeries': pynwb.ophys.RoiResponseSeries,
    'DfOverF': pynwb.ophys.DfOverF,
    'Fluorescence': pynwb.ophys.Fluorescence,
    # Ecephys
    'ElectrodeGroup': pynwb.ecephys.ElectrodeGroup,
    'ElectricalSeries': pynwb.ecephys.ElectricalSeries,
    'SpikeEventSeries': pynwb.ecephys.SpikeEventSeries,
    'EventDetection': pynwb.ecephys.EventDetection,
    'EventWaveform': pynwb.ecephys.EventWaveform,
    'LFP': pynwb.ecephys.LFP,
    'FilteredEphys': pynwb.ecephys.FilteredEphys,
    'FeatureExtraction': pynwb.ecephys.FeatureExtraction,
    # Behavior
    'SpatialSeries': pynwb.behavior.SpatialSeries,
    'BehavioralEpochs': pynwb.behavior.BehavioralEpochs,
    'BehavioralEvents': pynwb.behavior.BehavioralEvents,
    'BehavioralTimeSeries': pynwb.behavior.BehavioralTimeSeries,
    'PupilTracking': pynwb.behavior.PupilTracking,
    'EyeTracking': pynwb.behavior.EyeTracking,
    'CompassDirection': pynwb.behavior.CompassDirection,
    'Position': pynwb.behavior.Position,
}
| 1.921875 | 2 |
01 - Expressions, variables and assignments/variables-assignment.py | PableraShow/python-exercises | 8 | 12762112 | <filename>01 - Expressions, variables and assignments/variables-assignment.py
# Variables - placeholders for important values
# NOTE: this is Python 2 tutorial code (print statements); under
# Python 3 the print lines are syntax errors.
# Assignment
# To store a value in a variable, the '=' sign is used. Any
# type of data can be stored in variables.
int_num = int(3)
float_num = 4.5
answer = 3 * 2
word = "hello"
phrase = 'How are you?'
boolean = True
# Do not use the '==' to assign a value to a variable
#num == 4
#string == "hi"
# The value stored in a variable can be changed by re-assigning
# the variable using the '=' again
num = 4
print "Ex. 1:", num
num = 6
print "Ex. 2:", num
# The type of value stored in a variable can change as well
thing = 9
print "Ex. 3:", thing
thing = "Hi!"
print "Ex. 4:", thing
# Setting two variables equal to each other copies the value
# stored in one and assigns it to the other. However, if
# one of the variables is changed later, the other one
# will not change.
a = 2
b = 3
print "Ex. 5:", a, b
a = b
print "Ex. 6:", a, b
b = 4
print "Ex. 7:", a, b
# Variables must be given a value, or "defined"
# Not valid:
#c =
#c
#c += 4
#c *= 2
# Valid:
c = 4
| 4.46875 | 4 |
main/settings.py | mcXrd/weatherforecast | 0 | 12762113 | from secrets import OPENWEATHER_API_KEY
# Re-export the API key from the local secrets module.
# NOTE(review): this self-assignment is a no-op; the import alone
# already exposes OPENWEATHER_API_KEY at module level.
OPENWEATHER_API_KEY = OPENWEATHER_API_KEY
| 1.125 | 1 |
hknweb/course_surveys/tests/test_index.py | jyxzhang/hknweb | 0 | 12762114 | <filename>hknweb/course_surveys/tests/test_index.py
from django.test import TestCase
from django.urls import reverse
from hknweb.course_surveys.tests.utils import (
create_user_with_course_surveys_edit_permission,
ModelFactory,
)
from hknweb.markdown_pages.models import MarkdownPage
class IndexViewTests(TestCase):
    """Smoke tests for the course_surveys index view.

    Each test issues a GET against course_surveys:index (optionally with
    query parameters) and asserts an HTTP 200 response.
    """
    def setUp(self):
        # Every test runs as a user allowed to edit course surveys.
        create_user_with_course_surveys_edit_permission(self)
    def test_returns_200(self):
        response = self.client.get(reverse("course_surveys:index"))
        self.assertEqual(response.status_code, 200)
    def test_cas_signed_in_returns_200(self):
        s = self.client.session
        s["signed_in"] = True
        s.save()
        ModelFactory.create_default_rating()
        response = self.client.get(reverse("course_surveys:index"))
        self.assertEqual(response.status_code, 200)
    def test_search_by_instructors_returns_200(self):
        s = self.client.session
        s["signed_in"] = True
        s.save()
        ModelFactory.create_default_rating()
        response = self.client.get(
            reverse("course_surveys:index") + "?search_by=instructors"
        )
        self.assertEqual(response.status_code, 200)
    def test_course_id_present_returns_200(self):
        s = self.client.session
        s["signed_in"] = True
        s.save()
        rating = ModelFactory.create_default_rating()
        response = self.client.get(
            reverse("course_surveys:index")
            + f"?course={rating.rating_survey.survey_icsr.icsr_course.id}"
        )
        self.assertEqual(response.status_code, 200)
    def test_instructor_id_present_returns_200(self):
        s = self.client.session
        s["signed_in"] = True
        s.save()
        rating = ModelFactory.create_default_rating()
        # The icsr must be public for the instructor filter to match.
        rating.rating_survey.survey_icsr.is_private = False
        rating.rating_survey.survey_icsr.save()
        response = self.client.get(
            reverse("course_surveys:index")
            + f"?instructor={rating.rating_survey.survey_icsr.icsr_instructor.instructor_id}"
        )
        self.assertEqual(response.status_code, 200)
    def test_authentication_markdown_page_returns_200(self):
        # Renamed: this method was originally a second
        # test_instructor_id_present_returns_200, which silently shadowed
        # the method above so the instructor-id test never ran.
        s = self.client.session
        s["signed_in"] = True
        s.save()
        MarkdownPage.objects.create(
            name="test_name",
            path="course_surveys_authentication",
            description="test_description",
            body="test_body",
        )
        response = self.client.get(reverse("course_surveys:index"))
        self.assertEqual(response.status_code, 200)
| 2.40625 | 2 |
listview.py | todd-x86/tkplus | 0 | 12762115 | <reponame>todd-x86/tkplus
from control import Control
from containers import ScrollContainer
from ttk import Treeview as TkTreeView
class ListViewColumn(object):
    # A single column of a ListView; setters write straight through to the
    # underlying ttk.Treeview via the owning listview's _ctrl.
    def __init__(self, listview, caption, width):
        self._listview = listview
        self._caption = caption
        self._width = width
        # -1 until ListViewColumns.refresh() assigns the real position.
        self._index = -1
    @property
    def index(self):
        # Position of this column in the listview (set by refresh()).
        return self._index
    @property
    def caption(self):
        return self._caption
    @caption.setter
    def caption(self, value):
        self._caption = value
        # Treeview heading ids are '#0', '#1', ...
        key = '#{}'.format(self.index)
        self._listview._ctrl.heading(key, text=self._caption)
    @property
    def width(self):
        return self._width
    @width.setter
    def width(self, value):
        self._width = value
        key = '#{}'.format(self.index)
        self._listview._ctrl.column(key, width=self._width)
class ListViewColumns(object):
    """Ordered collection of ListViewColumn objects for a ListView."""
    def __init__(self, listview):
        self._listview = listview
        self._items = []
    def append(self, item):
        # Alias for add() with the default width.
        return self.add(item)
    @property
    def items(self):
        # Fixed: the original mapped ``x[1]`` over the columns, but _items
        # stores ListViewColumn objects (not tuples), which raised
        # TypeError on first use.  Return the columns themselves.
        return list(self._items)
    def add(self, item, width=100):
        col = ListViewColumn(self._listview, item, width)
        self._items.append(col)
        self.refresh()
        return col
    def insert(self, index, item, width=100):
        col = ListViewColumn(self._listview, item, width)
        self._items.insert(index, col)
        self.refresh()
        return col
    def delete(self, index):
        # Fixed: Python lists have no .delete() method; the original
        # raised AttributeError here.
        del self._items[index]
        self.refresh()
    def __getitem__(self, index):
        return self._items[index]
    @property
    def count(self):
        return len(self._items)
    def refresh(self):
        # '#0' is the Treeview tree column; the extra data columns are
        # '#1' .. '#n-1', so only n-1 ids go into the 'columns' option.
        self._listview._control_set('columns', ['#{}'.format(j) for j in range(1, len(self._items))])
        for index, col in enumerate(self._items):
            col._index = index
            key = '#{}'.format(index)
            self._listview._ctrl.column(key, width=col.width)
            self._listview._ctrl.heading(key, text=col.caption)
class ListViewSubItems(object):
    """Per-row cell values of a ListView row, kept in sync with the
    underlying Treeview item identified by *iid*."""
    def __init__(self, listview, iid):
        self._listview = listview
        self._iid = iid
        self._values = []
    def add(self, value):
        self._values.append(value)
        self.refresh()
    def __getitem__(self, index):
        return self._values[index]
    def __setitem__(self, index, value):
        self._values[index] = value
        self.refresh()
    @property
    def count(self):
        return len(self._values)
    def insert(self, index, value):
        self._values.insert(index, value)
        self.refresh()
    def delete(self, index):
        # Fixed: Python lists have no .delete() method; the original
        # raised AttributeError here.
        del self._values[index]
        self.refresh()
    def refresh(self):
        # Push the full value list back to the widget row.
        self._listview._ctrl.item(self._iid, values=self._values)
class ListViewItem(object):
    # A single row of a ListView, backed by a Treeview item (iid).
    def __init__(self, listview, text, index='end'):
        self._listview = listview
        self._text = text
        self._index = -1
        # Insert at top level of the tree; 'end' appends.
        self._iid = self._listview._ctrl.insert('', index, None, text=text)
        self._strings = ListViewSubItems(listview, self._iid)
    @property
    def index(self):
        return self._index
    @property
    def text(self):
        return self._text
    @text.setter
    def text(self, value):
        self._text = value
        self._listview._ctrl.item(self._iid, text=value)
    @property
    def subitems(self):
        # Per-column cell values for this row.
        return self._strings
    def delete(self):
        # Remove the row from the underlying widget.
        self._listview._ctrl.delete(self._iid)
class ListViewItems(object):
    """Ordered collection of ListViewItem rows for a ListView."""
    def __init__(self, listview):
        self._listview = listview
        self._items = []
    def add(self, item):
        row = ListViewItem(self._listview, item)
        self._items.append(row)
        return row
    def __getitem__(self, index):
        return self._items[index]
    @property
    def count(self):
        return len(self._items)
    def delete(self, index):
        # Remove the row from the widget first, then drop our reference.
        # Fixed: Python lists have no .delete() method (AttributeError
        # in the original).
        self._items[index].delete()
        del self._items[index]
    def insert(self, index, value):
        # Fixed: the original referenced the undefined name ``item``
        # (NameError) and ignored ``index``; the new row is now created
        # at ``index`` in the widget as well as in the list.
        row = ListViewItem(self._listview, value, index)
        self._items.insert(index, row)
        return row
class BaseListView(Control):
    # The actual Treeview-backed control; wrapped by ListView for scrolling.
    def __init__(self, parent, **kwargs):
        Control.__init__(self, TkTreeView(parent._frame), **kwargs)
        self._columns = ListViewColumns(self)
        self._items = ListViewItems(self)
    @property
    def columns(self):
        return self._columns
    @property
    def items(self):
        return self._items
class ListView(ScrollContainer):
    # Public list-view widget: a BaseListView hosted in a scroll container;
    # columns/items are delegated to the inner control.
    def __init__(self, parent, **kwargs):
        ScrollContainer.__init__(self, parent, **kwargs)
        self._init_container(BaseListView(self, **kwargs))
        # TODO: fix scrollbar issue with ListView
    @property
    def columns(self):
        return self._container.columns
    @property
    def items(self):
        return self._container.items
| 2.90625 | 3 |
cafebabel/articles/tags/models.py | cafebabel/backlog | 4 | 12762116 | <reponame>cafebabel/backlog<filename>cafebabel/articles/tags/models.py
from http import HTTPStatus
from flask import abort, current_app, url_for
from flask_mongoengine import BaseQuerySet
from mongoengine import signals
from ... import db
from ...core.exceptions import ValidationError
from ...core.helpers import allowed_file, slugify
from ...core.mixins import UploadableImageMixin
class TagQuerySet(BaseQuerySet):
    # Custom queryset attached to Tag via Tag.meta['queryset_class'].
    def categories(self, language, **kwargs):
        # Tags whose slug is one of the configured category slugs,
        # restricted to the given language.
        return self.filter(slug__in=current_app.config['CATEGORIES_SLUGS'],
                           language=language)
    def get_or_create(self, **kwargs):
        # EAFP: fetch the tag, create it if it does not exist yet.
        try:
            return self.get(**kwargs)
        except Tag.DoesNotExist:
            return self.create(**kwargs)
class Tag(db.Document, UploadableImageMixin):
    """Article tag, unique per (slug, language), with an optional image."""
    name = db.StringField(required=True)
    # Derived from name by update_slug() when left empty.
    slug = db.StringField(required=True)
    # ISO 639-1 language code; (language, slug) pairs are unique.
    language = db.StringField(max_length=2, required=True, unique_with='slug')
    summary = db.StringField()
    meta = {
        'queryset_class': TagQuerySet
    }
    def __str__(self):
        return f'{self.name} ({self.language})'
    @classmethod
    def update_slug(cls, sender, document, **kwargs):
        # pre_save hook: fill in the slug from the name if missing.
        if not document.slug:
            document.slug = slugify(document.name)
    @property
    def detail_url(self):
        return url_for('tags.detail', slug=self.slug, lang=self.language)
    @property
    def edit_url(self):
        return url_for('tags.edit', slug=self.slug, lang=self.language)
    @property
    def upload_subpath(self):
        # Subdirectory used by UploadableImageMixin for stored images.
        return 'tags'
    @property
    def is_category(self):
        return self.name.lower() in current_app.config['CATEGORIES_SLUGS']
    def save_from_request(self, request):
        """Update this tag from an HTTP request and persist it.

        Rejects attempts to change the identity fields (name/language)
        with a 400; handles optional image deletion and upload.
        Raises ValidationError for an empty or disallowed image filename.
        """
        data = request.form.to_dict()
        files = request.files
        if 'name' in data or 'language' in data:
            abort(HTTPStatus.BAD_REQUEST)
        for field, value in data.items():
            setattr(self, field, value)
        if data.get('delete-image'):
            self.delete_image()
        image = files.get('image')
        if image:
            if image.filename == '':
                raise ValidationError('No selected file.')
            if not allowed_file(image.filename):
                raise ValidationError('Unallowed extension.')
            self.attach_image(image)
        return self.save()
    @classmethod
    def clean_name(cls, sender, document, **kwargs):
        # pre_save hook: strip surrounding whitespace from the name.
        document.name = document.name.strip()
# Wire the model lifecycle hooks: normalize name/slug before save,
# persist the uploaded image after save, and remove the image file
# when the tag is deleted.
signals.pre_save.connect(Tag.clean_name, sender=Tag)
signals.pre_save.connect(Tag.update_slug, sender=Tag)
signals.post_save.connect(Tag.store_image, sender=Tag)
signals.pre_delete.connect(Tag.delete_image_file, sender=Tag)
| 2.125 | 2 |
out.py | Arka7Z/NDL | 0 | 12762117 | <filename>out.py
from tensorflow.contrib.layers import fully_connected
import tensorflow as tf
import pandas as pd
import numpy as np
from sklearn.preprocessing import Imputer
from sklearn import cross_validation
'''Reading and preparing the data'''
# NOTE(review): this script targets TensorFlow 1.x / old scikit-learn
# (tf.contrib, sklearn.cross_validation, Imputer, DataFrame.as_matrix
# are all removed in current releases) — it will not run unmodified on
# modern versions.
data=pd.read_csv('Data4.csv')
species= list(data['Class'].unique())
# One-hot encode the class label via rows of an identity matrix.
data['One-hot'] = data['Class'].map(lambda x: np.eye(len(species))[species.index(x)] )
#shuffling the by default sorted data
data=data.iloc[np.random.permutation(len(data))]
data=data.reset_index(drop=True)
#train-test splitting :n_test=100 is taken
keys=data.columns.values.tolist()
# Drop the last two columns ('Class' and 'One-hot') from the feature keys.
keys.pop()
keys.pop()
data.drop('Class',axis=1,inplace=True)
iris_matrix = pd.DataFrame.as_matrix(data[keys])
X = iris_matrix
# Fill missing feature values (mean imputation by default).
X=Imputer().fit_transform(X)
Y = data['One-hot']
validation_size=0.20
seed=12
X_train, X_validation, Y_train, Y_validation = cross_validation.train_test_split(X, Y, test_size=validation_size, random_state=seed)
'''preprocessing completed'''
'''defining the RNN Network'''
n_steps = 8 #INTUITION: Since there are 8 threads,the whole of each thread individually may be considered as an input at each time step
n_inputs = 6 #The 6 characterestics present for each thread
n_neurons = 87 #Number of neurons in each layer
n_outputs = 4 # 4 way classification
n_layers=3 # Number of layers
# NOTE(review): learning_rate is defined but the optimizer below
# hard-codes 0.0001 instead of using it.
learning_rate = 0.0001
x= tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.int32, [None,n_outputs])
basic_cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=n_neurons) #creating a single LSTM Cell
multi_layer_cell=tf.nn.rnn_cell.MultiRNNCell([basic_cell]*n_layers) #creating 'n_layers' layers of the basic cell
outputs, states = tf.nn.dynamic_rnn(multi_layer_cell, x, dtype=tf.float32) #getting the output sequence for input X
outputs=tf.unstack(tf.transpose(outputs,perm=[1,0,2])) #this and the next line takes the last output of the output seq
in2full=outputs[-1]
logits = fully_connected(in2full, n_outputs, activation_fn=None) #getting the logits(shape=[batch_size,3]) based on input X
cross_entropy=tf.nn.softmax_cross_entropy_with_logits(logits=logits,labels=y) #cross entropy error is takes as the error
loss=tf.reduce_mean(cross_entropy)
optimizer=tf.train.GradientDescentOptimizer(0.0001).minimize(cross_entropy) #Gradient Descent to minimize the entropy,alpha=0.0001
correct=tf.equal(tf.argmax(logits,1),tf.argmax(y,1))
accuracy=tf.reduce_mean(tf.cast(correct,tf.float32))
'''Running the Session'''
init = tf.global_variables_initializer()
n_epochs = 5000
with tf.Session() as sess:
    init.run()
    # Full-batch training: the whole training set is fed every epoch.
    for epoch in range(n_epochs):
        x_batch=X_train
        x_batch=x_batch.reshape(-1,n_steps,n_inputs)
        x_test=X_validation
        x_test=x_test.reshape(-1,n_steps,n_inputs)
        y_batch=[t for t in Y_train]
        y_test=[t for t in Y_validation]
        sess.run(optimizer, feed_dict={x: x_batch, y: y_batch})
        acc_train = accuracy.eval(feed_dict={x: x_batch, y: y_batch})
        acc_test = accuracy.eval(feed_dict={x: x_test, y: y_test})
        print(epoch, "Train accuracy:", acc_train, "Test accuracy:", acc_test)
        # NOTE(review): the two lines below duplicate the evaluation and
        # print above — likely a leftover; confirm and remove.
        acc_test = accuracy.eval(feed_dict={x: x_test, y: y_test})
        print(epoch, "Train accuracy:", acc_train, "Test accuracy:", acc_test)
ex011.py | lhardt/PythonCourse | 0 | 12762118 | <reponame>lhardt/PythonCourse
# Exercise: write a program that reads the width and height of a wall
# (in metres), computes its area, and the amount of paint needed to
# cover it, assuming 1 L of paint covers 2 m^2 of wall.
# (User-facing prompts/output remain in Portuguese.)
altura = float(input('Altura da parede: '))  # wall height (m)
largura = float(input('Largura da parede: '))  # wall width (m)
area = altura * largura
tinta = area / 2  # litres of paint: 1 L per 2 m^2
print('A parede tem {} m^2, e usaria {} L de tinta'.format(
    area, tinta
))
| 4.03125 | 4 |
netgpibdata/R9211.py | daccordeon/summerSHG | 1 | 12762119 | <filename>netgpibdata/R9211.py
"""
Provides data access to Advantest R9211 servo analyzer
"""
import re
import netgpib
import numpy as np
import struct
import sys
import time
class R9211:
"""
A class to represent an R9211 servo analyzer
"""
def __init__(self, ip, gpibAddr=8):
self.dev = netgpib.netGPIB(ip, gpibAddr, auto=False)
self.dev.query('active?')
# self.dev.query('active?')
self.dev.command('hed0')
time.sleep(0.1)
def getdata(self, disp=[1], verbose=False, binary=False):
"""
Download data from R9211
"""
if binary:
# Set the format to 64bit float big-endian
self.dev.command('fmt2')
self.dev.command('hed0')
time.sleep(0.1)
stride = 8 # 64bit is 8 bytes
else:
self.dev.command('fmt0') # ASCII Mode
time.sleep(0.1)
data = []
hdr = []
for dispID in disp:
if verbose:
print(('Downloading data from display ' + str(dispID)))
# set the display to read
self.dev.command('sel' + str(dispID))
time.sleep(0.1)
# First, read from X axis
self.dev.command('selxy1')
time.sleep(0.1)
# Get data length
numPoint = int(self.dev.query('reqdtn')[:-2])
# Get data
if binary:
if verbose:
print('Downloading X axis data ')
self.dev.debug = 1
x = self.dev.query('reqdt', stride * numPoint)
else:
self.dev.command('hed1')
time.sleep(0.1)
if verbose:
print('Downloading X axis data ')
self.dev.debug = 1
x = self.dev.query('reqdt', 12 * numPoint + 10)
self.dev.debug = 0
# self.dev.query('selxy?');
self.dev.command('hed0')
time.sleep(0.1)
# Next, read from Y axis
self.dev.command('selxy0')
time.sleep(0.1)
# Get data
if binary:
if verbose:
print('Downloading Y axis data ')
self.dev.debug = 1
y = self.dev.query('reqdt', stride * numPoint)
else:
self.dev.command('hed1')
time.sleep(0.1)
if verbose:
print('Downloading Y axis data ')
self.dev.debug = 1
y = self.dev.query('reqdt', 12 * numPoint + 10)
self.dev.debug = 0
# self.dev.query('selxy?');
self.dev.command('hed0')
time.sleep(0.1)
if binary:
# Unpack the binary data
x = np.array(struct.unpack('>' + str(numPoint) + 'd', x))
y = np.array(struct.unpack('>' + str(numPoint) + 'd', y))
(Chan, xDataType, xUnit, yDataType, yUnit) = (
False, False, False, False, False)
else:
head = x[0:6] # Extract the header
# decode header
(xDataType, Chan, xUnit) = self.decodeHeader(head)
# Convert the string data into numpy array
x = [np.float(a) for a in x[7:-2].split(',')]
head = y[0:6] # Extract the header
# decode header
(yDataType, Chan, yUnit) = self.decodeHeader(head)
# Convert the string data into numpy array
y = [np.float(a) for a in y[7:-2].split(',')]
data.append((x, y))
hdr.append({'Chan': Chan, 'xDataType': xDataType, 'xUnit': xUnit,
'yDataType': yDataType, 'yUnit': yUnit})
return (data, hdr)
# {{{
def getparams(self, disp=[1], verbose=False):
"""
Download measurement parameters.
"""
self.dev.command('hed0')
time.sleep(0.1)
if verbose:
print('Reading parameters ', end=' ')
sys.stdout.flush()
# {{{ Common parameters
MEAS = {'0': 'Waveform',
'1': 'Specrum',
'2': 'Time-Frequency',
'3': 'FRF',
'4': 'Servo'}[self.dev.query('MEAS?')[:-2]]
if verbose:
print('.', end=' ')
sys.stdout.flush()
FUNC = {'0': 'Time',
'1': 'Auto Correlation',
'2': 'Cross Correlation',
'3': 'Auto Correlation',
'4': 'Power Spectrum',
'5': 'Cross Spectrum',
'6': 'Complex Spectrum',
'10': 'FRF'
}[self.dev.query('FUNC?')[:-2]]
if verbose:
print('.', end=' ')
sys.stdout.flush()
ACTIVE = {'0': 'ChA',
'1': 'ChB',
'3': 'ChA-ChB'
}[self.dev.query('ACTIVE?')[:-2]]
if verbose:
print('.', end=' ')
sys.stdout.flush()
HISTP = self.dev.query('HISTP?')[:-2]
if verbose:
print('.', end=' ')
sys.stdout.flush()
SENSA = {'0': 'MAN',
'1': 'AUTO',
}[self.dev.query('SENSA?')[:-2]]
if verbose:
print('.', end=' ')
sys.stdout.flush()
SENSB = {'0': 'MAN',
'1': 'AUTO',
}[self.dev.query('SENSB?')[:-2]]
if verbose:
print('.', end=' ')
sys.stdout.flush()
SENSADV = self.dev.query('SENSADV?')[:-2] + "dBV"
if verbose:
print('.', end=' ')
sys.stdout.flush()
SENSBDV = self.dev.query('SENSBDV?')[:-2] + "dBV"
if verbose:
print('.', end=' ')
sys.stdout.flush()
ACOUPLE = {'0': 'AC',
'1': 'DC',
}[self.dev.query('ACOUPLE?')[:-2]]
if verbose:
print('.', end=' ')
sys.stdout.flush()
BCOUPLE = {'0': 'AC',
'1': 'DC',
}[self.dev.query('BCOUPLE?')[:-2]]
if verbose:
print('.', end=' ')
sys.stdout.flush()
FRANGE = self.dev.query('FRANGE?')[:-2] + "Hz"
if verbose:
print('.', end=' ')
sys.stdout.flush()
FILTER = {'0': 'OFF',
'1': 'ON',
}[self.dev.query('FILTER?')[:-2]]
if verbose:
print('.', end=' ')
sys.stdout.flush()
ZOOM = {'0': 'Zero start',
'1': 'Zoom On',
}[self.dev.query('ZOOM?')[:-2]]
if verbose:
print('.', end=' ')
sys.stdout.flush()
LWBAND = self.dev.query('LWBAND?')[:-2] + "Hz"
if verbose:
print('.', end=' ')
sys.stdout.flush()
UPBAND = self.dev.query('UPBAND?')[:-2] + "Hz"
if verbose:
print('.', end=' ')
sys.stdout.flush()
WINDOWA = {'1': 'Rect',
'2': 'Hanning',
'3': 'Minimum',
'4': 'Flat-pass',
'5': 'Force',
'6': 'Respons'
}[self.dev.query('WINDOWA?')[:-2]]
if verbose:
print('.', end=' ')
sys.stdout.flush()
WINDOWB = {'1': 'Rect',
'2': 'Hanning',
'3': 'Minimum',
'4': 'Flat-pass',
'5': 'Force',
'6': 'Respons'
}[self.dev.query('WINDOWB?')[:-2]]
if verbose:
print('.', end=' ')
sys.stdout.flush()
WEIGHT = {'0': 'No-Weight',
'1': 'A-WGT',
'2': 'B-WGT',
'3': 'C-WGT',
'4': 'C-MES-WGT'
}[self.dev.query('WEIGHT?')[:-2]]
if verbose:
print('.', end=' ')
sys.stdout.flush()
AVGNO = self.dev.query('AVGNO?')[:-2]
if verbose:
print('.', end=' ')
sys.stdout.flush()
AVGLIMIT = self.dev.query('AVGLIMIT?')[:-2]
if verbose:
print('.', end=' ')
sys.stdout.flush()
AVGMODE = {'1': 'Sum',
'2': 'Exp',
'3': 'Peak',
'4': 'Sub'
}[self.dev.query('AVGMODE?')[:-2]]
if verbose:
print('.', end=' ')
sys.stdout.flush()
FREQRES = {'0': 'Lin f',
'1': 'Log f',
'2': '1/3 Oct f',
'3': '1/1 Oct f'
}[self.dev.query('FREQRES?')[:-2]]
if verbose:
print('.', end=' ')
sys.stdout.flush()
LINESPAN = self.dev.query('LINESPAN?')[:-2]
if verbose:
print('.', end=' ')
sys.stdout.flush()
# }}}
cparams = {'Measurement': MEAS, 'Function': FUNC, 'Active channels': ACTIVE,
'Histogram points': HISTP, 'ChA sensitivity': SENSADV, 'ChB sensitivity': SENSBDV,
'ChA Range': SENSA, 'ChB Range': SENSB, 'ChA Coupling': ACOUPLE, 'ChB Coupling': BCOUPLE,
'Freq Range': FRANGE, 'Input Filter': FILTER, 'Zoom': ZOOM, 'Start Frequency': LWBAND,
'Stop Frequency': UPBAND, 'ChA Window': WINDOWA, 'ChB Window': WINDOWB,
'Weight': WEIGHT, 'Average number': AVGNO, 'Average limit': AVGLIMIT,
'Averaging Mode': AVGMODE, 'Frequency Scale': FREQRES, 'Line Span': LINESPAN}
# {{{ Display specific parameters
dparams = {}
for i in disp:
# Select a display
self.dev.query('sel' + str(i))
if verbose:
print('.', end=' ')
sys.stdout.flush()
dparams['Disp' + str(i) + ' View defined in'] = {
'0': 'Normal',
'1': 'Memory',
'2': 'Math',
'3': 'T-F',
'4': 'Curve fit',
'5': 'Synsesis'
}[self.dev.query('VDEFIN?')[:-2]]
if verbose:
print('.', end=' ')
sys.stdout.flush()
dparams['Disp' + str(i) + ' View type'] = {
'2': 'Time Series',
'7': 'Auto Correlation',
'8': 'Cross Correlation',
'9': 'Impulse Response',
'10': 'Step Response',
'11': 'Cepstrum',
'12': 'Histogram',
'14': 'Complex Spectrum',
'15': 'Power Spectrum',
'24': 'Cross Spectrum',
'29': 'Hxy',
'32': 'Coherence',
'35': 'T-F Gxx(f) Sum(Gxx(f))',
'36': 'T-F f-Peak',
'37': 'T-F Real Imag Phase'
}[self.dev.query('VTYPE?')[:-2]]
if verbose:
print('.', end=' ')
sys.stdout.flush()
dparams['Disp' + str(i) + ' Channel'] = {
'0': 'A',
'1': 'B',
'65': 'A&B'
}[self.dev.query('VCHNL?')[:-2]]
if verbose:
print('.', end=' ')
sys.stdout.flush()
dparams['Disp' + str(i) + ' View Type'] = {
'0': 'Instant',
'1': 'Averaged'
}[self.dev.query('VDSW?')[:-2]]
if verbose:
print('.', end=' ')
sys.stdout.flush()
dparams['Disp' + str(i) + ' X coordinate'] = {
'0': 'Lin',
'1': 'Log',
'2': '1/3 Oct',
'3': '1/1 Oct'
}[self.dev.query('VXCORD?')[:-2]]
if verbose:
print('.', end=' ')
sys.stdout.flush()
dparams['Disp' + str(i) + ' X coordinate'] = {
'0': 'Lin',
'1': 'Log',
'2': '1/3 Oct',
'3': '1/1 Oct'
}[self.dev.query('VXCORD?')[:-2]]
if verbose:
print('.', end=' ')
sys.stdout.flush()
dparams['Disp' + str(i) + ' Y coordinate'] = {
'0': 'Real',
'1': 'Imag',
'2': 'Mag',
'3': 'Mag2',
'4': 'dBMag',
'5': 'Phase',
'6': '-Phase',
'7': 'Group Delay',
'8': 'Nyquest/Orbit',
'9': 'Cole-Cole',
'11': 'Nichols'
}[self.dev.query('VYCORD?')[:-2]]
if verbose:
print('.', end=' ')
sys.stdout.flush()
dparams['Disp' + str(i) + ' T-F data'] = {
'-1': 'None',
'0': 'Gxx(f)',
'1': 'Sum(Gxx(f))',
'2': 'Real',
'3': 'Imag',
'4': 'Phase',
'5': 'f Peak'
}[self.dev.query('TFDATA?')[:-2]]
if verbose:
print('.', end=' ')
sys.stdout.flush()
# }}}
return (cparams, dparams)
# }}}
def decodeHeader(self, hdr):
"""
Decode a data header
"""
dtypes = {'TIM': 'Time series',
'ACR': 'Auto correlation',
'CCR': 'Cross correlation',
'HST': 'Histogram',
'SPC': 'Spectrum',
'CSP': 'Cross spectrun',
'FRF': 'Transfer function',
'COH': 'Coherence',
'IMR': 'Impulse response',
'COP': 'COP',
'SNR': 'SNR',
'CEP': 'Cepstrum',
'OCT': '1/3 Octave',
'OCO': '1/1 Octave',
'CLK': 'Time',
'FRQ': 'Frequency',
'AMP': 'Amplitude',
'LAG': 'Time lag',
'CEF': 'Quefrency'}
units = {'__': '',
'_S': 'sec',
'HZ': 'Hz',
'_V': 'V',
'DG': 'deg',
'PC': '%',
'DB': 'dB',
'DV': 'dBV',
'VZ': 'V/rtHz',
'DH': 'dBV/rtHz',
'EU': 'EU',
'DE': 'dBEU'}
return (dtypes[hdr[0:3]], hdr[3:4], units[hdr[4:6]])
def saveData(self, dataFile, data):
# A list to hold the length of each display data
k = np.array([len(a[0]) for a in data])
# If all the displays have the same data length (true for most cases)
if (k == k[0]).prod():
# Multi column file
mcol = True
for i in range(k[0]):
for j in range(len(k)): # Loop through displays
dataFile.write(
np.str(
data[j][0][i]) +
"," +
np.str(
data[j][1][i]))
if j != (len(k) - 1):
dataFile.write(",")
else:
dataFile.write("\n")
else:
# Two column file
mcol = False
for j in range(len(k)): # Loop through displays
dataFile.write("Disp" + str(disp[j]) + "\n")
for i in range(k[j]):
dataFile.write(
np.str(
data[j][0][i]) +
"," +
np.str(
data[j][1][i]) +
"\n")
return mcol
def saveParam(self, paramFile, cparams, dparams, hdr, mcol, disp):
# Write to the parameter file
paramFile.write("------------------------------------------\n")
paramFile.write(
"R9211 servo analyzer parameter file" +
time.ctime() +
"\n")
paramFile.write("------------------------------------------\n")
paramFile.write("\n")
paramFile.write("Data file format: ")
if mcol:
msg =\
"""Multi-colum style
Each row has the following format
Disp1X, Disp1Y, Disp2X, Disp2Y, ...
"""
else:
msg =\
"""Two column format
Each row has the following format:
X,Y
Since the data lengths of the displays are different, data from
each display is written one after another.
A line with "Disp1", "Disp2", "Disp3" or "Disp4" appears to separate
each display's data.
"""
paramFile.write(msg)
for i in range(len(disp)):
paramFile.write("Display " + str(disp[i]) + ":\n")
paramFile.write("Channel: " + hdr[i]['Chan'] + "\n")
paramFile.write(
"X axis: " +
hdr[i]['xDataType'] +
"[" +
hdr[i]['xUnit'] +
"]\n")
paramFile.write(
"Y axis: " +
hdr[i]['yDataType'] +
"[" +
hdr[i]['yUnit'] +
"]\n")
paramFile.write("\n")
[paramFile.write(key + ": " + cparams[key] + "\n")
for key in sorted(cparams.keys())]
[paramFile.write(key + ": " + dparams[key] + "\n")
for key in sorted(dparams.keys())]
def query(self, string, buf=100, sleep=0):
return self.dev.query(string, buf, sleep)
def command(self, string, sleep=0):
self.dev.command(string, sleep)
def spoll(self):
return self.dev.spoll()
def isAveraging(self):
"""Returns True if the device is averaging"""
s = int(self.dev.spoll()) # Status byte
# The third bit of the status byte is 1 when averaging is complete
return not (s & 0b100)
def waitAvg(self, interval=0.2, timeOut=False):
"""Wait until the averaging completes"""
startTime = time.time()
while self.isAveraging():
time.sleep(interval)
if timeOut:
if time.time() - startTime > timeOut:
break
    def close(self):
        """
        Close the connection to R9211
        """
        # Release the underlying device handle.
        self.dev.close()
| 2.875 | 3 |
core/probe_node.py | seanrivera/rosploit | 7 | 12762120 | <reponame>seanrivera/rosploit<filename>core/probe_node.py
#!/usr/bin/python3
import argparse
import xmlrpc.client
from core.node import Node
def probe_node(node: Node):
    """
    This is an information gathering function. It calls the getBusInfo function and parses all the info
    into a more usable form

    On success returns a ``(node_name, topic_list, bus_info)`` tuple; on an
    XML-RPC error status it only prints a message (implicitly returning
    ``None``).
    """
    # Caller identity string passed to the remote node's XML-RPC API.
    node_id = '/rosnode'
    topic_list = []
    # Connect to the node's XML-RPC endpoint at ip_addr:port.
    with xmlrpc.client.ServerProxy("http://" + node.ip_addr + ":" + node.port) as proxy:
        topic_info = proxy.getBusInfo(node_id)
        node_name = proxy.getName(node_id)
        # Both calls return (status_code, status_message, payload);
        # a status code of 1 indicates success.
        if topic_info[0] == 1 and node_name[0] == 1:
            print("Successfully got the bus info")
            print(node_name[2])
            for topic in topic_info[2]:
                print(topic)
                # topic[4] appears to hold the topic name -- TODO confirm
                # against the ROS slave API bus-info record layout.
                topic_list.append(topic[4])
                print(topic[4])
            return node_name[2], topic_list, topic_info[2]
        else:
            print("Got an error message with the command. " + topic_info[1] + topic_info[2])
# Command-line entry point: probe a single node given its address and port.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='Get the information about a given node (Name/Topics publisher of/Topics subscriber to)')
    parser.add_argument('-a', '--address', help="Address of the ROS node you want info on", required=True)
    parser.add_argument('-p', '--port', help="Port of the ROS node you want info on", required=True)
    args = parser.parse_args()
    cur_node = Node(ip_addr=args.address, port=args.port)
    nodeInfo = probe_node(cur_node)
    print(nodeInfo)
| 3.15625 | 3 |
js2json.py | ppetoumenos/election_data_gr | 0 | 12762121 | <reponame>ppetoumenos/election_data_gr<gh_stars>0
#lifted from http://stackoverflow.com/questions/3601864/python-load-text-as-python-object/3602436#3602436
# Author: <NAME>
import json
import codecs
from pyparsing import (Suppress, Regex, quotedString, Word, alphas,
alphanums, oneOf, Forward, Optional, dictOf, delimitedList, Group, removeQuotes)
def transform(txt):
    """Trim ``txt`` to its first bracketed region (when both '[' and '{'
    occur past position 0) and parse it.

    If the trimmed text is valid JSON it is returned unchanged (as a
    string); otherwise it is parsed with a permissive pyparsing grammar
    that also accepts unquoted identifiers, and the parse result is
    returned instead.

    NOTE(review): ``print result`` below is Python 2 syntax, so this
    module targets Python 2 despite the python3-looking repository.
    """
    idx1 = txt.find('[')
    idx2 = txt.find('{')
    # Keep whichever bracketed span opens first; note a missing bracket
    # (find() == -1) or one at position 0 leaves the text untrimmed.
    if idx1 < idx2 and idx1 > 0:
        txt = txt[idx1:txt.rfind(']')+1]
    elif idx2 < idx1 and idx2 > 0:
        txt = txt[idx2:txt.rfind('}')+1]
    try:
        json.loads(txt)
    except:
        # Not strict JSON: fall back to a hand-built grammar for
        # JavaScript-style literals.
        # NOTE(review): the bare ``except`` also swallows unrelated
        # errors; catching ValueError would be safer.
        # parse dict-like syntax
        LBRACK,RBRACK,LBRACE,RBRACE,COLON,COMMA = map(Suppress,"[]{}:,")
        integer = Regex(r"[+-]?\d+").setParseAction(lambda t:int(t[0]))
        real = Regex(r"[+-]?\d+\.\d*").setParseAction(lambda t:float(t[0]))
        string_ = Word(alphas,alphanums+"_") | quotedString.setParseAction(removeQuotes)
        bool_ = oneOf("true false").setParseAction(lambda t: t[0]=="true")
        item = Forward()
        key = string_
        dict_ = LBRACE - Optional(dictOf(key+COLON, item+Optional(COMMA))) + RBRACE
        list_ = LBRACK - Optional(delimitedList(item)) + RBRACK
        item << (real | integer | string_ | bool_ | Group(list_ | dict_ ))
        result = item.parseString(txt,parseAll=True)[0]
        print result
        txt = result
    return txt
| 2.5 | 2 |
2-control-flow/sals-shipping/shipping.py | elenaidon/learn-python | 0 | 12762122 | # Sal's Shipping
# <NAME>.
# Example package weight used by the printouts below (units per exercise).
weight = 41.5
# Flat fee added on top of the weight-scaled cost for ground shipping.
GS_FLAT = 20
# Flat price of Ground Shipping Premium (independent of weight).
GSP_FLAT = 125
# Basic Scale Shipping
def basic_shipping(weight):
    """Return the weight-scaled shipping cost using tiered per-unit rates."""
    # (upper weight limit, price per unit weight) for each tier.
    for limit, rate in ((2, 1.50), (6, 3), (10, 4)):
        if weight <= limit:
            return weight * rate
    # Heaviest tier: anything over 10 units of weight.
    return weight * 4.75
# Ground Shipping
def ground_shipping(weight):
    """Return the ground-shipping price: weight-scaled cost plus a flat fee."""
    return basic_shipping(weight) + GS_FLAT


print("Ground Shipping:", ground_shipping(weight))
# Ground Shipping Premium
# Premium ground shipping is a single flat charge regardless of weight.
print("Ground Shipping Premium:", GSP_FLAT)
# Drone Shipping
def drone_shipping(weight):
    """Return the drone-delivery price: triple the weight-scaled base cost."""
    return 3 * basic_shipping(weight)


print("Drone Shipping:", drone_shipping(weight))
| 3.359375 | 3 |
tests/old_tests/classification.py | xaviermouy/ecosound | 3 | 12762123 | <filename>tests/old_tests/classification.py<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 30 12:24:25 2020
@author: xavier.mouy
"""
import sys
sys.path.append("..") # Adds higher directory to python modules path.
from ecosound.core.measurement import Measurement
from ecosound.classification.CrossValidation import StratifiedGroupKFold
from ecosound.classification.CrossValidation import RepeatedStratifiedGroupKFold
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import copy
import pickle
import os
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GroupKFold
from sklearn.dummy import DummyClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.svm import LinearSVC
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import f1_score
from sklearn.preprocessing import LabelEncoder
from sklearn import metrics
from sklearn.decomposition import PCA
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import RandomUnderSampler
from imblearn.combine import SMOTETomek
from imblearn.under_sampling import TomekLinks
from datetime import datetime
def add_class_ID(fulldataset, positive_class_label):
    """Add an integer 'class_ID' column, forcing the positive class to ID 1.

    Returns the (mutated) dataset and an encoder DataFrame mapping each
    class label to its ID.
    """
    class_labels = list(set(fulldataset['label_class']))
    # Force the positive class into position 1 so it always gets ID 1.
    class_labels.remove(positive_class_label)
    class_labels.insert(1, positive_class_label)
    class_ids = list(range(len(class_labels)))
    # New column, defaulted to -1, then filled label by label.
    fulldataset.insert(0, 'class_ID', -1)
    for class_id, class_label in zip(class_ids, class_labels):
        mask = fulldataset['label_class'] == class_label
        fulldataset.loc[mask, 'class_ID'] = class_id
    class_encoder = pd.DataFrame({'label': class_labels, 'ID': class_ids})
    return fulldataset, class_encoder
def add_subclass(fulldataset):
    """Add 'subclass_label' (label_class + '__' + deployment_ID) and a
    matching integer 'subclass_ID' column; return (dataset, encoder)."""
    fulldataset.insert(0, 'subclass_label',
                       fulldataset['label_class'] + '__' + fulldataset['deployment_ID'])
    unique_labels = list(set(fulldataset['subclass_label']))
    fulldataset.insert(0, 'subclass_ID', -1)
    for subclass_id, subclass_label in enumerate(unique_labels):
        mask = fulldataset['subclass_label'] == subclass_label
        fulldataset.loc[mask, 'subclass_ID'] = subclass_id
    class_encoder = pd.DataFrame({'label': unique_labels,
                                  'ID': list(range(len(unique_labels)))})
    return fulldataset, class_encoder
def subclass2class_conversion(fulldataset):
    """Build a table mapping each subclass label/ID to its parent class
    label/ID, taken from the first row carrying that subclass.

    NOTE: like the original, this mixes ``.index`` labels with ``.iloc``,
    so it assumes the default RangeIndex.
    """
    subclass_labels = list(set(fulldataset['subclass_label']))
    subclass_IDs = list(range(len(subclass_labels)))
    class_labels = []
    class_IDs = []
    for subclass_label in subclass_labels:
        matching = fulldataset.index[fulldataset['subclass_label'] == subclass_label].tolist()
        first_row = fulldataset.iloc[matching[0]]
        class_labels.append(first_row['label_class'])
        class_IDs.append(first_row['class_ID'])
    return pd.DataFrame({'subclass_labels': subclass_labels,
                         'subclass_ID': subclass_IDs,
                         'class_labels': class_labels,
                         'class_IDs': class_IDs})
def add_group(fulldataset):
    """Add 'group_label'/'group_ID' columns used to keep correlated
    detections together during dataset splits.

    A group is label_class + detection time rounded to the hour +
    deployment_ID, so detections from the same hour and deployment never
    end up on both sides of a train/test split. Returns the (mutated)
    dataset and an encoder DataFrame mapping group labels to IDs.

    Fix: the temporary 'TimeLabel' helper column is now actually removed.
    The original called ``fulldataset.drop(columns=['TimeLabel'])`` without
    using the returned copy, leaving the column in place.
    """
    # Detection time rounded to the nearest hour, as a compact string.
    fulldataset.insert(0, 'TimeLabel', fulldataset['time_min_date'].dt.round("H").apply(lambda x: x.strftime('%Y%m%d%H%M%S')))
    # group label = label_class + rounded time + deployment_ID
    fulldataset.insert(0, 'group_label', fulldataset['label_class'] + '_' + fulldataset['TimeLabel'] + '_' + fulldataset['deployment_ID'])
    labels = list(set(fulldataset['group_label']))
    IDs = [*range(0, len(labels))]
    fulldataset.insert(0, 'group_ID', -1)
    for n, label in enumerate(labels):
        fulldataset.loc[fulldataset['group_label'] == label, 'group_ID'] = IDs[n]
    encoder = pd.DataFrame({'label': labels, 'ID': IDs})
    # DataFrame.drop returns a copy; assign it back so the helper column
    # really disappears.
    fulldataset = fulldataset.drop(columns=['TimeLabel'])
    return fulldataset, encoder
def plot_dataset_distrib(dataset,attr_list=['subclass_label'], title=None):
    """Bar-plot, for each attribute in ``attr_list``, the percentage of
    dataset rows per attribute value (one subplot per attribute).

    NOTE(review): the mutable default for ``attr_list`` is shared between
    calls; it is only read here, but a ``None`` default would be safer.
    Also, ``title`` must be a string (it is concatenated below).
    """
    # Append the sample count to the figure title.
    title = title + ' (' + str(len(dataset)) + ' data points)'
    nb_plots=len(attr_list)
    fig, ax = plt.subplots(1, nb_plots,
                           sharey=False,
                           constrained_layout=True,)
    for i in range(0,nb_plots):
        # plt.subplots returns a bare Axes (not an array) when nb_plots == 1.
        if nb_plots == 1:
            current_ax = ax
        else:
            current_ax = ax[i]
        #distrib = data_train.groupby(attr_list[i])[attr_list[i]].count().to_frame()
        distrib = dataset.groupby(attr_list[i])[attr_list[i]].count().to_frame()
        # Convert counts to percentages of the whole dataset.
        distrib['pct']= distrib[attr_list[i]]/ sum(distrib[attr_list[i]])*100
        #current_ax.bar(distrib.index,distrib['pct'], color='bkrgymc')
        current_ax.bar(distrib.index,distrib['pct'])
        current_ax.set_ylabel('Distribution of data points (%)')
        current_ax.set_title(attr_list[i])
        current_ax.grid()
        current_ax.tick_params(labelrotation=90 )
        #plt.xticks()
    fig.suptitle(title, fontsize=12)
def plot_datasets_groups(data_train, data_test, show=True):
    """Return the list of group IDs shared by the train and test sets.

    When ``show`` is True, also bar-plot the number of unique groups in
    each set, with the overlap size in the title (should be zero for a
    leak-free split).
    """
    groups_train = set(data_train['group_ID'])
    groups_test = set(data_test['group_ID'])
    shared_groups = list(groups_train & groups_test)
    if show:
        plt.figure()
        plt.bar(['Train set', 'Test set'],
                [len(groups_train), len(groups_test)])
        plt.ylabel('Number of unique groups')
        plt.grid()
        plt.title('Number of shared groups: ' + str(len(shared_groups)))
    return shared_groups
def plot_datasets_distrib(data_train, data_test):
    """Bar-plot the train/test split as percentages of all data points."""
    ntrain = len(data_train)
    ntest = len(data_test)
    ntotal = ntrain + ntest
    # Convert row counts to percentages of the combined dataset.
    ntrain = (ntrain/ntotal)*100
    ntest = (ntest/ntotal)*100
    plt.figure()
    #plt.bar(['Train set','Test set'],[ntrain,ntest], color='bkrgymc')
    plt.bar(['Train set','Test set'],[ntrain,ntest])
    plt.ylabel('% of data points')
    plt.title('Train/test sets data repartition')
    plt.grid()
def calc_tp_fp_fn_tn(Y_true, Y_prob, threshold):
    """Return per-sample TP/FP/FN/TN indicator arrays for one threshold.

    Y_true -- array of 0/1 ground-truth labels
    Y_prob -- numpy array of predicted positive-class probabilities
    threshold -- probabilities >= threshold predict the positive class
    """
    n_samples = len(Y_prob)
    tp = np.zeros(n_samples)
    fp = np.zeros(n_samples)
    fn = np.zeros(n_samples)
    tn = np.zeros(n_samples)
    # Binarize the probabilities at the given threshold.
    Y_pred = np.zeros(n_samples)
    Y_pred[Y_prob >= threshold] = 1
    # Flag exactly one of the four outcomes for each sample.
    for idx, (true, pred) in enumerate(zip(Y_true, Y_pred)):
        if (true == 1) and (pred == 1):    # true positive
            tp[idx] = 1
        elif (true == 0) and (pred == 1):  # false positive
            fp[idx] = 1
        elif (true == 1) and (pred == 0):  # false negative
            fn[idx] = 1
        elif (true == 0) and (pred == 0):  # true negative
            tn[idx] = 1
    return tp, fp, fn, tn
def calc_performance_metrics(Y_true, Y_prob, thresholds=np.arange(0, 1.01, 0.01)):
    """Compute precision, recall and f1-score over a sweep of thresholds.

    Y_true -- array of 0/1 ground-truth labels
    Y_prob -- numpy array of predicted positive-class probabilities
    thresholds -- decision thresholds to evaluate (shared default is
                  read-only, so the mutable default is safe here)

    Returns a DataFrame with columns 'thresholds', 'precision', 'recall'
    and 'f1-score' (one row per threshold); undefined metrics are NaN.

    Fixes relative to the original:
    - precision is NaN only when no positives are predicted
      (tp + fp == 0); previously a perfect precision of 1.0
      (fp == 0 with tp > 0) was wrongly reported as NaN.
    - f1 is set to NaN explicitly when precision + recall is 0 or
      undefined, instead of relying on a 0/0 runtime warning.
    - removed the dead per-iteration AUC computations (AUC_PR / AUC_f1
      were recomputed every loop and never returned).
    """
    n = len(thresholds)
    precision = np.zeros(n)
    recall = np.zeros(n)
    f1_score = np.zeros(n)
    for idx, threshold in enumerate(thresholds):
        tp, fp, fn, tn = calc_tp_fp_fn_tn(Y_true, Y_prob, threshold=threshold)
        tp_tot = sum(tp)
        fp_tot = sum(fp)
        fn_tot = sum(fn)
        if tp_tot + fp_tot == 0:
            # No predicted positives: precision is undefined.
            precision[idx] = np.nan
        else:
            precision[idx] = tp_tot / (tp_tot + fp_tot)
        recall[idx] = tp_tot / (tp_tot + fn_tot)
        denom = precision[idx] + recall[idx]
        if np.isnan(denom) or denom == 0:
            f1_score[idx] = np.nan
        else:
            f1_score[idx] = (2 * precision[idx] * recall[idx]) / denom
    out = pd.DataFrame({'thresholds': thresholds, 'precision': precision,
                        'recall': recall, 'f1-score': f1_score})
    return out
def cross_validation(data_train, models, features, cv_splits=10,cv_repeats=10, rebalance=True):
    """Run repeated stratified group K-fold CV for every model in ``models``.

    data_train -- DataFrame with 'subclass_ID', 'group_ID', 'class_ID',
                  'label_class', 'uuid' and the feature columns
    models     -- list of (name, sklearn-style estimator) tuples
    features   -- list of feature column names
    rebalance  -- when True, resample each training fold with
                  SMOTE + Tomek links before fitting

    Returns (cv_predictions, cv_performance): per-sample predictions and
    per-threshold precision/recall/f1, each tagged with the CV iteration
    and classifier name.
    """
    cv_predictions = pd.DataFrame({'CV_iter':[],'classifier':[],'uuid':[],'Y_true':[],'Y_pred':[],'Y_prob':[]})
    cv_performance = pd.DataFrame({'CV_iter':[],'classifier':[],'precision':[],'recall':[],'f1-score':[],'thresholds':[]})
    skf = RepeatedStratifiedGroupKFold(n_splits=cv_splits, n_repeats=cv_repeats, random_state=None)
    it=-1
    for cv_train_index, cv_val_index in skf.split(data_train, data_train['subclass_ID'],groups=data_train['group_ID']):
        it+=1
        # Split data train vs validation
        cv_data_train, cv_data_val = data_train.iloc[cv_train_index], data_train.iloc[cv_val_index]
        # Groups shared between the folds would indicate leakage (should be none).
        groups_intersection = plot_datasets_groups(cv_data_train, cv_data_val, show=False)
        # CV summary counts
        distrib_train = cv_data_train.groupby('label_class')['label_class'].count().to_frame()
        distrib_train.rename(columns={'label_class':'train'}, inplace=True)
        distrib_val = cv_data_val.groupby('label_class')['label_class'].count().to_frame()
        distrib_val.rename(columns={'label_class':'Validation'}, inplace=True)
        cv_summary = pd.concat([distrib_train, distrib_val],axis=1)
        # display CV info
        print(' ')
        print(' ')
        print('Cross validation #', str(it) + ' ---------------------------------------')
        print(cv_summary)
        print('Intersecting groups:' + str(len(groups_intersection)))
        #plot_dataset_distrib(cv_data_train,attr_list=['subclass_label','label_class'],title='Training set (CV #' + str(it) +')' )
        #plot_dataset_distrib(cv_data_val,attr_list=['subclass_label','label_class'],title='Evaluation set (CV #' + str(it) +')' )
        # reformat data
        X_train = cv_data_train[features] # features
        Y_train = cv_data_train['class_ID'] #labels
        X_val = cv_data_val[features] # features
        Y_val = cv_data_val['class_ID'] #labels
        Y_uuid = cv_data_val['uuid']
        # feature normalization
        # Mean/std are computed on the training fold only, to avoid leakage.
        Norm_mean = X_train.mean()
        Norn_std = X_train.std()
        X_train = (X_train-Norm_mean)/Norn_std
        X_val = (X_val-Norm_mean)/Norn_std
        print(' Positive training samples:', sum(Y_train == 1))
        print(' Negative training samples:', sum(Y_train == 0))
        if rebalance:
            # Making balanced dataset by oversampling
            print('Resampling training set with SMOTE + TomekLinks')
            #oversample = SMOTE(sampling_strategy='minority')
            #X_train, Y_train = oversample.fit_resample(X_train, Y_train)
            #undersample = RandomUnderSampler(sampling_strategy=0.5)
            #X_train, Y_train = undersample.fit_resample(X_train, Y_train)
            resample = SMOTETomek(tomek=TomekLinks(sampling_strategy='majority'))
            X_train, Y_train = resample.fit_resample(X_train, Y_train)
            print(' Positive samples:', sum(Y_train == 1))
            print(' Negatice samples:', sum(Y_train == 0))
        # Train and predict
        print('Classifiers:')
        for model_name, model in models:
            print('-> ' + model_name)
            # train model
            model.fit(X_train, Y_train)
            # predict
            pred_class = model.predict(X_val)
            pred_prob = model.predict_proba(X_val)
            # stack prediction info
            tmp = pd.DataFrame({'CV_iter':[],'classifier':[],'uuid':[],'Y_true':[],'Y_pred':[],'Y_prob':[]})
            tmp['uuid']= cv_data_val['uuid']
            tmp['CV_iter'] = it
            tmp['classifier'] = model_name
            tmp['Y_true'] = Y_val
            tmp['Y_pred'] = pred_class
            tmp['Y_prob'] = pred_prob[:,1]
            cv_predictions = pd.concat([cv_predictions,tmp],ignore_index=True)
            # calculate performance metrics
            performance = calc_performance_metrics(Y_val.values,pred_prob[:,1])
            performance['classifier'] = model_name
            performance['CV_iter'] = it
            cv_performance = pd.concat([cv_performance,performance],ignore_index=True)
    return cv_predictions, cv_performance
def summarize_performance(cv_performance, threshold=0.5):
    """Summarize mean/std precision, recall and f1 per classifier at one
    decision threshold.

    Returns the transposed summary table (metric names as rows, one
    column per classifier).
    """
    column_order = ['Classifier', 'Precision (mean)', 'Precision (std)',
                    'Recall (mean)', 'Recall (std)', 'f1-score (mean)',
                    'f1-score (std)']
    rows = []
    for clf in set(cv_performance['classifier']):
        # Keep only this classifier's rows at the requested threshold.
        at_thr = cv_performance[(cv_performance['classifier'] == clf) &
                                (cv_performance['thresholds'] == threshold)]
        rows.append({
            'Classifier': clf,
            'Precision (mean)': round(at_thr['precision'].mean(), 3),
            'Precision (std)': round(at_thr['precision'].std(), 3),
            'Recall (mean)': round(at_thr['recall'].mean(), 3),
            'Recall (std)': round(at_thr['recall'].std(), 3),
            'f1-score (mean)': round(at_thr['f1-score'].mean(), 3),
            'f1-score (std)': round(at_thr['f1-score'].std(), 3),
        })
    summary = pd.DataFrame(rows, columns=column_order)
    return summary.T
def plot_PR_curves(cv_performance):
    """Plot the CV-averaged precision-vs-recall curve for each classifier."""
    # plot PR curves
    classifiers = list(set(cv_performance['classifier']))
    fig, ax = plt.subplots(1, 1,
                           sharey=False,
                           constrained_layout=True,)
    for classifier in classifiers:
        temp = cv_performance[cv_performance['classifier']==classifier]
        # Average the metrics over CV iterations at each threshold.
        # NOTE(review): groupby().mean() over frames containing the
        # non-numeric 'classifier' column relies on older pandas behavior.
        temp2 = temp.groupby(['thresholds']).mean()
        ax.plot(temp2['recall'],temp2['precision'], label=classifier)
    ax.set_ylabel('Precision')
    ax.set_xlabel('Recall')
    ax.set_title('Average Precision and Recall curve')
    ax.set_xlim([0, 1])
    ax.set_ylim([0, 1])
    ax.grid()
    ax.legend()
def plot_F_curves(cv_performance):
    """Plot the CV-averaged f1-score as a function of the decision threshold."""
    # plot PR curves
    classifiers = list(set(cv_performance['classifier']))
    fig, ax = plt.subplots(1, 1,
                           sharey=False,
                           constrained_layout=True,)
    for classifier in classifiers:
        temp = cv_performance[cv_performance['classifier']==classifier]
        # Average the metrics over CV iterations at each threshold.
        temp2 = temp.groupby(['thresholds']).mean()
        ax.plot(temp2.index,temp2['f1-score'], label=classifier)
    ax.set_ylabel('f1-score')
    ax.set_xlabel('Decision threshold')
    ax.set_title('Average f1-score curve')
    ax.set_xlim([0, 1])
    ax.set_ylim([0, 1])
    ax.grid()
    ax.legend()
def classification_train(X_train, Y_train, model, rebalance=True):
    """Fit ``model`` on the training data and return the fitted model.

    When ``rebalance`` is True, the training set is first resampled with
    SMOTE + Tomek links to balance the classes.
    """
    if rebalance:
        print('Resampling training set with SMOTE + TomekLinks')
        sampler = SMOTETomek(tomek=TomekLinks(sampling_strategy='majority'))
        X_train, Y_train = sampler.fit_resample(X_train, Y_train)
    # Report the class balance actually used for fitting.
    print(' Positive training samples:', sum(Y_train == 1))
    print(' Negative training samples:', sum(Y_train == 0))
    return model.fit(X_train, Y_train)
def classification_predict(X_test, model_trained):
    """Return (predicted classes, positive-class probabilities) for ``X_test``."""
    predicted_classes = model_trained.predict(X_test)
    # Column 1 of predict_proba holds the positive-class probability.
    positive_probabilities = model_trained.predict_proba(X_test)[:, 1]
    return predicted_classes, positive_probabilities
def plot_2d_space(X, y, label='Classes'):
    """Scatter-plot 2-D (e.g. PCA-reduced) features, one color/marker per class.

    Supports up to two classes (two colors/markers are defined).
    """
    plt.figure()
    colors = ['#1F77B4', '#FF7F0E']
    markers = ['o', '.']
    # One scatter series per unique class label.
    for l, c, m in zip(np.unique(y), colors, markers):
        plt.scatter(
            X[y==l, 0],
            X[y==l, 1],
            c=c, label=l, alpha = 0.5, marker=m
        )
    plt.title(label)
    plt.legend(loc='upper right')
    plt.show()
def main():
    """End-to-end classifier training pipeline.

    Loads the measurement dataset, cleans it, splits it into leak-free
    train/test sets (group-aware), optionally cross-validates a set of
    classifiers, and optionally trains and pickles the final model.
    All paths and options are hard-coded in ``input_args`` below.
    """
    # input arguments
    input_args = dict()
    input_args['positive_class_label'] ='FS'
    input_args['train_ratio'] = 0.75
    input_args['cv_splits'] = 10 #5
    input_args['cv_repeats'] = 1
    input_args['rebalance_classes'] = True
    #input_args['data_file']= r'C:\Users\xavier.mouy\Documents\PhD\Projects\Detector\results\dataset_FS-NN_modified_20201105145300.nc'
    input_args['data_file']= r'C:\Users\xavier.mouy\Documents\PhD\Projects\Detector\results\dataset_FS-NN_modified_20200902194334.nc'
    input_args['out_dir'] = r'C:\Users\xavier.mouy\Documents\PhD\Projects\Detector\results\Classification'
    input_args['run_CV'] = False
    input_args['train_final_model'] = True
    input_args['final_model_name'] = 'RF50'
    ## DEFINITION OF CLASSIFIERS -------------------------------------------------
    models = []
    models.append(('Dummy', DummyClassifier(strategy="constant",constant=1)))
    models.append(('LR', LogisticRegression(solver='liblinear', multi_class='ovr')))
    models.append(('LDA', LinearDiscriminantAnalysis()))
    #models.append(('KNN', KNeighborsClassifier()))
    #models.append(('KNN', KNeighborsClassifier(n_neighbors=4, metric='euclidean')))
    models.append(('CART', DecisionTreeClassifier()))
    #models.append(('NB', GaussianNB()))
    models.append(('XGBoost', XGBClassifier()))
    #models.append(('MLP', MLPClassifier(solver='sgd', alpha=1e-5, hidden_layer_sizes=(5, 2), random_state=0)))
    models.append(('RF5', RandomForestClassifier(n_estimators=5,min_samples_split= 100, min_samples_leaf=50,random_state=0)))
    models.append(('RF10', RandomForestClassifier(n_estimators=10,min_samples_split= 100, min_samples_leaf=50,random_state=0)))
    models.append(('RF30', RandomForestClassifier(n_estimators=30,min_samples_split= 100, min_samples_leaf=50, random_state=0)))
    models.append(('RF50', RandomForestClassifier(n_estimators=50,min_samples_split= 100, min_samples_leaf=50, random_state=0)))
    #models.append(('RF100', RandomForestClassifier(n_estimators=100,min_samples_split= 100, min_samples_leaf=50,random_state=0)))
    ## setup output folder
    # Each run writes into a fresh timestamped sub-folder.
    now = datetime.now()
    now_str = now.strftime("%Y%m%dT%H%M%S")
    out_dir = os.path.join(input_args['out_dir'],now_str)
    os.mkdir(out_dir)
    ## Save input args to txt file
    text_file = open(os.path.join(out_dir, 'input_args_' + now_str + '.txt'), "w")
    n = text_file.write(str(input_args))
    text_file.close()
    ## Checks that model name exists before running all the processing
    if input_args['train_final_model']:
        model_idx = [model[0] for model in models].index(input_args['final_model_name'] )
    ## LOAD DATSET ---------------------------------------------------------------
    dataset = Measurement()
    dataset.from_netcdf(input_args['data_file'])
    print(dataset.summary())
    ## DATA PREPARATION ----------------------------------------------------------
    # features
    features = dataset.metadata['measurements_name'][0] # list of features used for the classification
    # data
    data = dataset.data
    # drop FS observations at Mill Bay
    indexNames = data[(data['label_class'] == 'FS') & (data['location_name'] == 'Mill bay') ].index
    data.drop(indexNames, inplace=True)
    # add subclass + IDs
    data, class_encoder = add_class_ID(data, input_args['positive_class_label'])
    data, _ = add_subclass(data)
    #subclass2class_table = subclass2class_conversion(data)
    # add group ID
    data, group_encoder = add_group(data)
    ## DATA CLEAN-UP -------------------------------------------------------------
    # Basic stats on all features
    data_stats = data[features].describe()
    #print(data_stats)
    # how many NaNs and Infs per column
    data = data.replace([np.inf, -np.inf], np.nan)
    Nnan = data[features].isna().sum()
    ax = Nnan.plot(kind='bar',title='Number of NaN/Inf',grid=True)
    ax.set_ylabel('Number of observations with NaNs/Infs')
    # Drop some features with too many NaNs
    features.remove('freq_flatness')
    features.remove('snr')
    features.remove('uuid')
    # drop observations/rows with NaNs
    data.dropna(subset=features, axis=0, how='any', thresh=None, inplace=True)
    data_stats2 = data[features].describe()
    # ## VISUALIZATION -------------------------------------------------------------
    # # box and whisker plots
    # data[features].plot(kind='box', subplots=True, layout=(7,7), sharex=False, sharey=False)
    # # histograms
    # data[features].hist()
    # # scatter plot matrix
    # pd.plotting.scatter_matrix(data[features])
    # scatter plot PCA
    # pca = PCA(n_components=2)
    # X = pca.fit_transform(data[features])
    # y = data['class_ID']
    # plot_2d_space(X, y, 'Imbalanced dataset (2 PCA components)')
    ## SPLIT DATA INTO TRAIN & TEST SETS ------------------------------------------
    n_splits = round(1/(1-input_args['train_ratio']))
    skf = StratifiedGroupKFold(n_splits=n_splits, shuffle=True, random_state=None)
    # Only the first fold of the group-aware split is used for the
    # train/test partition (hence the break).
    for train_index, test_index in skf.split(data, data['subclass_ID'],groups=data['group_ID']):
        data_train, data_test = data.iloc[train_index], data.iloc[test_index]
        break
    # plot class repartition
    plot_datasets_distrib(data_train, data_test)
    plot_dataset_distrib(data,attr_list=['subclass_label','label_class'],title='Full dataset')
    plot_dataset_distrib(data_train,attr_list=['subclass_label','label_class'],title='Training set')
    plot_dataset_distrib(data_test,attr_list=['subclass_label','label_class'],title='Test set')
    # verify groups are not used in both datasets
    groups_intersection = plot_datasets_groups(data_train, data_test, show=True)
    ## CROSS VALIDATION ON TRAIN SET ----------------------------------------------
    if input_args['run_CV']:
        # run train/test experiments
        cv_predictions, cv_performance = cross_validation(data_train, models, features, cv_splits=input_args['cv_splits'],cv_repeats=input_args['cv_repeats'], rebalance=input_args['rebalance_classes'])
        # display summary results
        performance_report = summarize_performance(cv_performance, threshold=0.5)
        print(performance_report)
        # plot mean Precision and Recall curves
        plot_PR_curves(cv_performance)
        plot_F_curves(cv_performance)
        # save results
        CV_results ={'cv_predictions': cv_predictions,
                     'cv_performance': cv_performance,
                     'models': models,
                     'input_args': input_args,
                     }
        pickle.dump(CV_results, open(os.path.join(out_dir, 'CV_' + now_str + '.sav'), 'wb'))
    ## FINAL EVALUATION ON TEST SET -----------------------------------------------
    if input_args['train_final_model']:
        print(' ')
        print('Final evaluation on test set:')
        print(' ')
        model_name = models[model_idx][0]
        model = models[model_idx][1] # RF50
        print(model)
        X_train = data_train[features] # features
        Y_train = data_train['class_ID'] #labels
        X_test = data_test[features] # features
        Y_test = data_test['class_ID'] #labels
        # feature normalization
        # Normalization statistics come from the train set only (no leakage).
        Norm_mean = X_train.mean()
        Norm_std = X_train.std()
        X_train = (X_train-Norm_mean)/Norm_std
        X_test = (X_test-Norm_mean)/Norm_std
        # Train on entire train set
        final_model = classification_train(X_train, Y_train, model, rebalance=input_args['rebalance_classes'])
        # Evaluate on full test set
        pred_class, pred_prob = classification_predict(X_test, final_model)
        # Print evaluation report
        CR = classification_report(Y_test, pred_class)
        print(CR)
        # save the model to disk
        # The pickle bundles everything needed to run the model later:
        # features, normalization stats and the class encoder.
        model= {'name': model_name,
                'model':final_model,
                'features': features,
                'normalization_mean': Norm_mean,
                'normalization_std': Norm_std,
                'classes': class_encoder,
                'input_args': input_args,
                }
        pickle.dump(model, open(os.path.join(out_dir, model_name + '_model_' + now_str + '.sav'), 'wb'))
    # precision, recall, thresholds = precision_recall_curve(Y_val, pred_prob[:,0])
    # pr_auc = metrics.auc(recall, precision)
    # f1 = f1_score(Y_val, pred_class, average='binary')
    # CR = classification_report(Y_val, pred_class)
    # CM = confusion_matrix(Y_val, pred_class)
# Standard script entry point.
if __name__ == "__main__":
    main()
| 1.71875 | 2 |
lamby/src/uninit.py | lamby-ml/lamby-cli | 0 | 12762124 | <reponame>lamby-ml/lamby-cli<filename>lamby/src/uninit.py
import os
import shutil
import sys
import click
@click.command('uninit', short_help="un-initialize .lamby file in cwd")
def uninit():
    """Un-initializes the .lamby file in the repository"""
    # Project marker directory created by `lamby init`.
    target_dir = './.lamby'
    if not os.path.isdir(target_dir):
        # Nothing to remove: report and exit with a failure status.
        click.echo('Lamby project has not been initialized in ' + os.getcwd())
        sys.exit(1)
    click.echo('Removing Lamby project in ' + os.getcwd())
    shutil.rmtree(target_dir)
| 2.609375 | 3 |
scripts/practice/FB-reRun/ProductofArrayExceptSelf.py | bhimeshchauhan/competitive_programming | 0 | 12762125 | """
Product of Array Except Self
Given an integer array nums, return an array answer such that answer[i] is equal to the product of
all the elements of nums except nums[i].
The product of any prefix or suffix of nums is guaranteed to fit in a 32-bit integer.
You must write an algorithm that runs in O(n) time and without using the division operation.
Example 1:
Input: nums = [1,2,3,4]
Output: [24,12,8,6]
Example 2:
Input: nums = [-1,1,0,-3,3]
Output: [0,0,9,0,0]
Constraints:
2 <= nums.length <= 105
-30 <= nums[i] <= 30
The product of any prefix or suffix of nums is guaranteed to fit in a 32-bit integer.
Follow up: Can you solve the problem in O(1) extra space complexity? (The output array does not count as extra space for space complexity analysis.)
"""
class Solution:
    def productExceptSelf(self, nums: List[int]) -> List[int]:
        """Return a list where entry i is the product of all nums except nums[i].

        O(n) time without division, and O(1) extra space beyond the
        output array (addresses the problem's follow-up): the answer
        array first accumulates prefix products, then a single scalar
        carries the running suffix product on a reverse pass.
        """
        n = len(nums)
        answer = [1] * n
        # Forward pass: answer[i] = product of nums[0..i-1].
        for i in range(1, n):
            answer[i] = answer[i - 1] * nums[i - 1]
        # Reverse pass: fold in the product of nums[i+1..n-1].
        suffix = 1
        for i in range(n - 1, -1, -1):
            answer[i] *= suffix
            suffix *= nums[i]
        return answer
| 3.703125 | 4 |
save_raw_fea.py | insad/pytorch-kaldi | 2,248 | 12762126 | ##########################################################
# pytorch-kaldi v.0.1
# <NAME>, <NAME>
# Mila, University of Montreal
# October 2018
#
# Description: This script generates kaldi ark files containing raw features.
# The file list must be a file containing "snt_id file.wav".
# Note that only wav files are supported here (sphere or other format are not supported)
##########################################################
import scipy.io.wavfile
import math
import numpy as np
import os
from data_io import read_vec_int_ark, write_mat
# Run it for all the data chunks (e.g., train, dev, test) => uncomment
# --- Input/output configuration: edit per data chunk (train/dev/test) ---
lab_folder = "/users/parcollet/KALDI/kaldi-trunk/egs/timit/s5/exp/dnn4_pretrain-dbn_dnn_ali_test"
lab_opts = "ali-to-pdf"
out_folder = "/users/parcollet/KALDI/kaldi-trunk/egs/timit/s5/data/raw_TIMIT_200ms/test"
wav_lst = "/users/parcollet/KALDI/kaldi-trunk/egs/timit/s5/data/test/wav.lst"
scp_file_out = "/users/parcollet/KALDI/kaldi-trunk/egs/timit/s5/data/raw_TIMIT_200ms/test/feats_raw.scp"
# lab_folder='quick_test/dnn4_pretrain-dbn_dnn_ali_dev'
# lab_opts='ali-to-pdf'
# out_folder='raw_TIMIT_200ms/dev'
# wav_lst='/home/mirco/pytorch-kaldi-new/quick_test/data/dev/wav_lst.scp'
# scp_file_out='quick_test/data/dev/feats_raw.scp'
# lab_folder='quick_test/dnn4_pretrain-dbn_dnn_ali_test'
# lab_opts='ali-to-pdf'
# out_folder='raw_TIMIT_200ms/test'
# wav_lst='/home/mirco/pytorch-kaldi-new/quick_test/data/test/wav_lst.scp'
# scp_file_out='quick_test/data/test/feats_raw.scp'
# Window settings: a 200 ms raw-signal window is extracted for every
# label frame (25 ms window, 10 ms shift).
sig_fs = 16000  # Hz
sig_wlen = 200  # ms
lab_fs = 16000  # Hz
lab_wlen = 25  # ms
lab_wshift = 10  # ms
# Convert the durations above from milliseconds to samples.
sig_wlen_samp = int((sig_fs * sig_wlen) / 1000)
lab_wlen_samp = int((lab_fs * lab_wlen) / 1000)
lab_wshift_samp = int((lab_fs * lab_wshift) / 1000)
# Create the output folder
try:
    os.stat(out_folder)
except:
    os.makedirs(out_folder)
# Creare the scp file
scp_file = open(scp_file_out, "w")
# reading the labels
# Labels come from a Kaldi alignment directory; the alignments are
# streamed through ali-to-pdf via a shell pipe.
lab = {
    k: v
    for k, v in read_vec_int_ark(
        "gunzip -c " + lab_folder + "/ali*.gz | " + lab_opts + " " + lab_folder + "/final.mdl ark:- ark:-|", out_folder
    )
}
# reading the list file
with open(wav_lst) as f:
    sig_lst = f.readlines()
sig_lst = [x.strip() for x in sig_lst]
# Each list entry is "snt_id path/to/file.wav".
for sig_file in sig_lst:
    sig_id = sig_file.split(" ")[0]
    sig_path = sig_file.split(" ")[1]
    [fs, signal] = scipy.io.wavfile.read(sig_path)
    # Scale 16-bit PCM into [-1, 1], then peak-normalize.
    signal = signal.astype(float) / 32768
    signal = signal / np.max(np.abs(signal))
    cnt_fr = 0
    beg_samp = 0
    frame_all = []
    # For every label frame, cut a sig_wlen_samp-long raw window centered
    # on the label window; zero-pad where it overruns the signal edges.
    while beg_samp + lab_wlen_samp < signal.shape[0]:
        sample_fr = np.zeros(sig_wlen_samp)
        central_sample_lab = int(((beg_samp + lab_wlen_samp / 2) - 1))
        central_fr_index = int(((sig_wlen_samp / 2) - 1))
        beg_signal_fr = int(central_sample_lab - (sig_wlen_samp / 2))
        end_signal_fr = int(central_sample_lab + (sig_wlen_samp / 2))
        if beg_signal_fr >= 0 and end_signal_fr <= signal.shape[0]:
            sample_fr = signal[beg_signal_fr:end_signal_fr]
        else:
            if beg_signal_fr < 0:
                n_left_samples = central_sample_lab
                sample_fr[central_fr_index - n_left_samples + 1 :] = signal[0:end_signal_fr]
            if end_signal_fr > signal.shape[0]:
                n_right_samples = signal.shape[0] - central_sample_lab
                sample_fr[0 : central_fr_index + n_right_samples + 1] = signal[beg_signal_fr:]
        frame_all.append(sample_fr)
        cnt_fr = cnt_fr + 1
        beg_samp = beg_samp + lab_wshift_samp
    frame_all = np.asarray(frame_all)
    # Save the matrix into a kaldi ark
    out_file = out_folder + "/" + sig_id + ".ark"
    write_mat(out_folder, out_file, frame_all, key=sig_id)
    print(sig_id)
    # Register the new ark in the scp index (byte offset = len(id) + 1).
    scp_file.write(sig_id + " " + out_folder + "/" + sig_id + ".ark:" + str(len(sig_id) + 1) + "\n")
    N_fr_comp = 1 + math.floor((signal.shape[0] - 400) / 160)
    # print("%s %i %i "%(lab[sig_id].shape[0],N_fr_comp,cnt_fr))
scp_file.close()
| 2.03125 | 2 |
indi2.py | A1zak/Lab-7 | 0 | 12762127 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Вариант2
# В списке, состоящем из вещественных элементов, вычислить:
# 1) сумму положительных элементов списка;
# 2) произведение элементов списка, расположенных между максимальным по модулю и
# минимальным по модулю элементами.
# Упорядочить элементы списка по убыванию
if __name__ == '__main__':
A = tuple(map(float, input().split()))
D = list(A)
sum = 0
# Задание №1
for i in A:
if i > 0:
sum += i
print(sum)
# Задание №2
B = []
n_min = n_max = A[0]
i_min = i_max = 0
b = [abs(i) for i in A]
for i, item in enumerate(b):
if item < n_min:
i_min, n_min = i, item
if item >= n_max:
i_max, n_max = i, item
С = A[i_min:i_max+1]
sum = 1
for j in С:
sum *= j
print(sum)
D.sort(reverse=True)
print(f"{A} ") | 3.59375 | 4 |
django_basic_feedback/urls.py | PaulGregor/django-basic-feedback | 0 | 12762128 | <gh_stars>0
# -*- coding: utf-8 -*-
#Copyright (C) 2011 <NAME>
# URL configuration for the django_basic_feedback app.
# Uses the legacy (Django < 1.6) string-based view lookup via ``patterns``.
from django.conf.urls.defaults import *
urlpatterns = patterns('django_basic_feedback.views',
    # App root: the feedback form view; named so templates can reverse it.
    url(r'^$', 'add', name="feedback"),
)
| 1.210938 | 1 |
pymoji/__init__.py | KoffeinFlummi/pymoji | 7 | 12762129 | #!/usr/bin/env python3
import re
from .codes import codes
class Emoji:
    """One emoji, constructible from a single unicode character, a
    ``:alias:`` from the emoji cheat sheet, or a raw hex escape string
    (e.g. ``"1f604"``).  Raises ValueError for unrecognised input.
    """
    def __init__(self, const):
        # Dispatch on the shape of the constructor argument.
        if len(const) == 1:
            self.__fromUnicode(const)
        elif const[0] == ":":
            self.__fromAlias(const)
        else:
            self.__fromEscape(const)
        # All known cheat-sheet aliases; the first one is the canonical alias.
        self.aliases = codes[self.escape]
        self.alias = self.aliases[0]
        # Decode the hex escape back to a character.  NOTE(review): a "\\uXXXX"
        # escape only consumes four hex digits, so escapes longer than four
        # digits (astral-plane emoji) decode to the wrong character; the
        # is_supported round-trip check below detects that case -- TODO confirm
        # this is the intended behaviour.
        self.char = bytes("\\u"+self.escape, "ascii").decode("unicode-escape")[0]
        self.is_supported = hex(ord(self.char))[2:] == self.escape
    def __fromUnicode(self, char):
        """Initialise from a literal character; sets ``self.escape``."""
        escape = hex(ord(char))[2:]
        if escape in codes:
            self.escape = escape
        else:
            raise ValueError
    def __fromAlias(self, alias):
        """Initialise from a ``:alias:`` by searching every alias list."""
        for k, v in codes.items():
            if alias in v:
                self.escape = k
                break
        else:
            raise ValueError
    def __fromEscape(self, escape):
        """Initialise from a raw hex escape string; sets ``self.escape``."""
        if escape in codes.keys():
            self.escape = escape
        else:
            raise ValueError
def replaceAliases(text, trailingSpaces=0, force=False):
    """ Replaces all supported emoji-cheat-sheet aliases in a text with the corresponding emoji.

    :param text: input text possibly containing ``:alias:`` tokens
    :param trailingSpaces: number of spaces appended after each emoji
    :param force: replace even aliases whose emoji is not supported
    :return: the text with recognised aliases replaced
    """
    def replAlias(m):
        alias = ":"+m.group(1)+":"
        # BUG FIX: construct the Emoji inside the try so an unknown alias
        # falls through to returning the alias unchanged instead of raising
        # an uncaught ValueError out of re.sub.
        try:
            emoji = Emoji(alias)
        except ValueError:
            return alias
        if not emoji.is_supported and not force:
            return alias
        return emoji.char + trailingSpaces * " "
    # BUG FIX: the original pattern used [^s:] (excluding the letter 's');
    # the intent is [^\s:] -- no whitespace or colon as the first character.
    return re.sub(r":([^\s:]?[\w-]+):", replAlias, text)
def replaceEmoji(text, trailingSpaces=0):
    """ Replaces all emojis with their primary emoji-cheat-sheet alias. """
    i = 0
    while i < len(text):
        # Hex escape of the character under the cursor.
        escape = hex(ord(text[i]))[2:]
        if escape in codes.keys():
            # NOTE(review): str.replace substitutes *every* occurrence of this
            # character in the text, not only the one at position i, while the
            # cursor advances by just one alias length -- verify this is the
            # intended behaviour for texts with repeated emoji.
            text = text.replace(text[i] + trailingSpaces*" ", Emoji(escape).alias)
            i += len(Emoji(escape).alias)
        else:
            i += 1
    return text
| 2.8125 | 3 |
ansible/venv/lib/python2.7/site-packages/ansible/modules/windows/win_defrag.py | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 17 | 12762130 | <reponame>gvashchenkolineate/gvashchenkolineate_infra_trytravis<filename>ansible/venv/lib/python2.7/site-packages/ansible/modules/windows/win_defrag.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: 2017, <NAME> (@dagwieers) <<EMAIL>>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Module metadata consumed by Ansible tooling (ansible-doc, sanity checks).
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_defrag
version_added: '2.4'
short_description: Consolidate fragmented files on local volumes
description:
- Locates and consolidates fragmented files on local volumes to improve system performance.
- 'More information regarding C(win_defrag) is available from: U(https://technet.microsoft.com/en-us/library/cc731650(v=ws.11).aspx)'
requirements:
- defrag.exe
options:
include_volumes:
description:
- A list of drive letters or mount point paths of the volumes to be defragmented.
- If this parameter is omitted, all volumes (not excluded) will be fragmented.
type: list
exclude_volumes:
description:
- A list of drive letters or mount point paths to exclude from defragmentation.
type: list
freespace_consolidation:
description:
- Perform free space consolidation on the specified volumes.
type: bool
default: no
priority:
description:
- Run the operation at low or normal priority.
type: str
choices: [ low, normal ]
default: low
parallel:
description:
- Run the operation on each volume in parallel in the background.
type: bool
default: no
author:
- <NAME> (@dagwieers)
'''
EXAMPLES = r'''
- name: Defragment all local volumes (in parallel)
win_defrag:
parallel: yes
- name: 'Defragment all local volumes, except C: and D:'
win_defrag:
exclude_volumes: [ C, D ]
- name: 'Defragment volume D: with normal priority'
win_defrag:
include_volumes: D
priority: normal
- name: Consolidate free space (useful when reducing volumes)
win_defrag:
freespace_consolidation: yes
'''
RETURN = r'''
cmd:
description: The complete command line used by the module.
returned: always
type: str
sample: defrag.exe /C /V
rc:
description: The return code for the command.
returned: always
type: int
sample: 0
stdout:
description: The standard output from the command.
returned: always
type: str
sample: Success.
stderr:
description: The error output from the command.
returned: always
type: str
sample:
msg:
description: Possible error message on failure.
returned: failed
type: str
sample: Command 'defrag.exe' not found in $env:PATH.
changed:
description: Whether or not any changes were made.
returned: always
type: bool
sample: true
'''
| 1.6875 | 2 |
docs/tutorials/datasets/imagenet.py | PistonY/gluon-cv | 41 | 12762131 | <gh_stars>10-100
"""Prepare the ImageNet dataset
============================
The `ImageNet <http://www.image-net.org/>`_ project contains millions of images
and thousands of objects for image classification. It is widely used in the
research community for benchmarking state-of-the-art models.
.. image:: https://www.fanyeong.com/wp-content/uploads/2018/01/v2-718f95df083b2d715ee29b018d9eb5c2_r.jpg
:width: 500 px
The dataset has multiple versions. The one commonly used for image
classification is `ILSVRC 2012
<http://www.image-net.org/challenges/LSVRC/2012/>`_. This tutorial will go
through the steps of preparing this dataset for GluonCV.
.. note::
You need at least 300 GB disk space to download and extract the dataset. SSD
(Solid-state disks) is preferred over HDD because of faster speed.
Download
--------
First, go to the `download page <http://www.image-net.org/download-images>`_
(you may need to register an account), and find the page for
ILSVRC2012. Next, find and download the following two files:
======================== ======
Filename Size
======================== ======
ILSVRC2012_img_train.tar 138 GB
ILSVRC2012_img_val.tar 6.3 GB
======================== ======
Setup
-----
First, please download the helper script
:download:`imagenet.py<../../../scripts/datasets/imagenet.py>`
validation image info :download:`imagenet_val_maps.pklz<../../../scripts/datasets/imagenet_val_maps.pklz>`.
Make sure to put them in the same directory.
Assuming the tar files are saved in folder ``~/ILSVRC2012``. We can use the
following command to prepare the dataset automatically.
.. code-block:: bash
python imagenet.py --download-dir ~/ILSVRC2012
.. note::
Extracting the images may take a while. For example, it takes
about 30min on an AWS EC2 instance with EBS.
By default ``imagenet.py`` will extract the images into
``~/.mxnet/datasets/imagenet``. You
can specify a different target folder by setting ``--target-dir``.
Read with GluonCV
-----------------
The prepared dataset can be loaded with utility class :py:class:`gluoncv.data.ImageNet`
directly. Here is an example that randomly reads 128 images each time and
performs randomized resizing and cropping.
"""
from gluoncv.data import ImageNet
from mxnet.gluon.data import DataLoader
from mxnet.gluon.data.vision import transforms
# Training-time augmentation: random resized crop to 224x224 followed by
# conversion to a float tensor.
train_trans = transforms.Compose([
    transforms.RandomResizedCrop(224),
    transforms.ToTensor()
])
# You need to specify ``root`` for ImageNet if you extracted the images into
# a different folder
train_data = DataLoader(
    ImageNet(train=True).transform_first(train_trans),
    batch_size=128, shuffle=True)
#########################################################################
# Fetch a single batch to demonstrate the resulting shapes -- presumably
# x is (128, 3, 224, 224) and y is (128,); confirm against the transform.
for x, y in train_data:
    print(x.shape, y.shape)
    break
#########################################################################
# Plot some validation images
from gluoncv.utils import viz
val_dataset = ImageNet(train=False)
viz.plot_image(val_dataset[1234][0]) # index 0 is image, 1 is label
viz.plot_image(val_dataset[4567][0])
| 2.03125 | 2 |
manage_license_headers.py | ax-meyer/ProgressDialog | 2 | 12762132 | <filename>manage_license_headers.py
"""
____ Copyright Start ____
MIT License
Copyright (c) 2020 ax-meyer
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
____ Copyright End ____
"""
import os
import sys
import re
###### parameter section ######
# path where a file with the license header to use is found
header_path = "license_header.txt"
# dictionary with file types to process and in and out markers for block comments
file_types = {".cs": [r"/*", r"*/"], ".py": [r'"""', r'"""'], ".proto": [r"/*", r"*/"], ".xaml":[r"<!--", r"-->"]}
# list with files / directories to ignore. please use '/' as directory seperator, will be replaced with system standard later.
# matching is done via regex against the relative file path.
exclude_list = [
".*/bin/.*",
".*/obj/.*",
".*/submodules/.*",
".*/.git/.*",
".*/.vs/.*",
".*/AssemblyInfo.cs"
]
###### end parameter section ######
# Read the canonical license header; its first and last lines act as the
# start/end markers used to locate an existing header in a source file.
with open(header_path, "r") as header_handle:
    license_header = header_handle.read()
license_header = license_header.strip("\n\r")
header_start = license_header.split("\n",1)[0]
header_end = license_header.rsplit("\n",1)[-1]
# check if run in CI - no replacing, just break at bad header
ci_mode = False
if (len(sys.argv) > 1 and sys.argv[1].lower() == "ci"):
    ci_mode = True
success = True
# Pre-compile the exclusion patterns; '/' in the configured patterns is
# normalised to the (regex-escaped) OS path separator first.
reg_patterns = []
for i in range(len(exclude_list)):
    exclude_list[i] = exclude_list[i].replace("/", os.sep.replace("\\", "\\\\"))
    reg_patterns.append(re.compile(exclude_list[i]))
# Walk the tree rooted at the current directory and process each candidate.
for subdir, dirs, files in os.walk(r'.'):
    for filename in files:
        filepath = subdir + os.sep + filename
        # check if file should be excluded
        exclude = False
        for reg_pattern in reg_patterns:
            if reg_pattern.match(filepath):
                exclude = True
                break
        if exclude:
            continue
        # check if file is in the list of supported file types
        file_ext = "." + filepath.rsplit(".", 1)[-1]
        if file_ext in file_types:
            with open(filepath, "r") as source_handle:
                source_file = source_handle.read()
            # check if correct license header is already present
            if source_file.find(license_header) >= 0:
                continue
            # if run inside of the CI - record bad header and continue, no replacing in CI
            elif ci_mode:
                print("[" + filepath + "]: no or invalid copyright header.")
                success = False
                continue
            comment_marker_in = file_types[file_ext][0]
            comment_marker_out = file_types[file_ext][1]
            # check if header start / end mark is present
            # if yes, replace header
            # if not, insert new header at beginning of file
            start = source_file.find(header_start)
            end = source_file.find(header_end)
            if (start > end):
                # Markers out of order: the file is corrupt; abort rather than mangle it.
                sys.exit(-1)
            elif (start >= 0 and end > 0):
                print("[" + filepath + "]: replace header")
                source_start = source_file.split(header_start)[0]
                source_end = source_file.split(header_end, 1)[-1]
                new_source = source_start + license_header + source_end
            else:
                if (source_file.lower().find("copyright") >= 0):
                    print("[" + filepath + "]: found different copyright header - please remove")
                    sys.exit(1)
                print("[" + filepath + "]: insert new header")
                new_source = comment_marker_in + '\n' + license_header + '\n' + comment_marker_out +'\n\n' + source_file
            with open(filepath, "w") as source_handle:
                source_handle.write(new_source)
            continue
# In CI mode a non-zero exit code fails the build when any header was bad.
if ci_mode and not success:
    print("Invalid copyright headers found. Please run the " + sys.argv[0].rsplit(os.sep, 1)[-1] + " script locally to fix and commit again.")
    sys.exit(-1)
| 1.945313 | 2 |
flask_boilerplate/models/linkModel.py | wellls/flask_boilerplate | 0 | 12762133 | # -*- coding:utf-8 -*-
# __author__ = '<NAME>'
# Link Model
from flask_boilerplate.extensions import db
# 表前缀
prefix = 'flask_boilerplate'
class Link(db.Model):
    """A site link record ("friendly link") stored in the <prefix>_link table."""
    __tablename__ = '%s_link' % prefix
    # Primary key.
    id = db.Column(db.Integer, primary_key=True)
    # Display name of the linked site.
    sitename = db.Column(db.VARCHAR(30), nullable=False, default='')
    # Target URL of the linked site.
    siteurl = db.Column(db.VARCHAR(75), nullable=False, default='')
    # Short description shown alongside the link.
    description = db.Column(db.VARCHAR(255), nullable=False, default='')
    # 'y' hides the link from listings, 'n' shows it.
    hide = db.Column(db.Enum('n', 'y'), nullable=False, default='n')
    # Manual sort order -- presumably lower values sort first; TODO confirm.
    taxis = db.Column(db.Integer, nullable=False, default=0)
    def __repr__(self):
        return '<Link %r>' % (self.sitename)
Linux/Linux_with_ARM_A9/PyDE/include/LEDR.py | fpgacademy/Tutorials | 0 | 12762134 | <gh_stars>0
def open_dev( ):
    ''' Opens the red light LEDR device.
    NOTE: documentation stub -- this Python body is empty (returns None);
    the described behaviour is provided by the PyDE runtime.
    :return: 1 on success, else 0
    '''
# NOTE(review): this API name shadows the builtin set(); kept for API
# compatibility with the PyDE runtime.
def set(data):
    ''' Sets the red light LEDR device.
    NOTE: documentation stub -- this Python body is empty (returns None).
    :param data: the integer data to write to LEDR. If data = 0 all lights will be
    turned off. If data = 0b1111111111 all lights will be turned on
    :return: none
    '''
def close( ):
    ''' Closes the red light LEDR device.
    NOTE: documentation stub -- this Python body is empty (returns None).
    :return: none
    '''
| 2.609375 | 3 |
rawdata/ships/ship_complements.py | jrnold/acw_battle_data | 15 | 12762135 | """
Download civil war ships and their complements from dbpedia
"""
import csv
import json
from os import path

from SPARQLWrapper import SPARQLWrapper, JSON
# Query DBpedia's public SPARQL endpoint for Union and Confederate Navy
# ships that have an integer crew complement recorded.
sparql = SPARQLWrapper("http://dbpedia.org/sparql")
sparql.setQuery("""
select distinct ?ship, ?complement where {
{
{?ship dcterms:subject category:Ships_of_the_Union_Navy}
UNION
{?ship dcterms:subject [skos:broader category:Ships_of_the_Confederate_States_Navy]}
}
?ship dbpprop:shipComplement ?complement
FILTER (datatype(?complement) = xsd:integer)
}
""")
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
# Flatten the SPARQL JSON bindings into {ship, complement} rows.
data = []
for x in results['results']['bindings']:
    data.append({'ship': x['ship']['value'],
                 'complement': x['complement']['value']})
# Write the rows to CSV (uses the csv module imported at the top of the file).
with open("ships.csv", "w") as f:
    writer = csv.DictWriter(f, ('ship', 'complement'))
    writer.writeheader()
    writer.writerows(data)
| 3.328125 | 3 |
pyllusion/image/__init__.py | RealityBending/Pyllusion | 17 | 12762136 | <reponame>RealityBending/Pyllusion
"""
Pyllusion submodule.
"""
from .image_blob import image_blob, image_blobs
from .image_circle import image_circle, image_circles
from .image_line import image_line
from .image_mosaic import image_mosaic
from .image_noise import image_noise
from .image_rectangle import image_rectangle
from .image_text import image_text
from .rescale import rescale
__all__ = [
"image_noise",
"image_circle",
"image_circles",
"image_text",
"image_blobs",
"image_blob",
"image_line",
"image_rectangle",
"image_mosaic",
"rescale",
]
| 1.226563 | 1 |
quadratic.py | gd-zhang/Follow-the-Ridge | 20 | 12762137 | import autograd.numpy as np
import autograd
import os
from autograd import grad
from autograd import jacobian
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
from matplotlib import cm
from scipy.linalg import pinv
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--function", type=int, default=1, help="choose from three low dimensional example functions, 1-3")
opt = parser.parse_args()
function = opt.function
# GDA
def gda(z_0, alpha=0.05, num_iter=100):
    """Simultaneous gradient descent-ascent on the module-level ``target``:
    z[0] descends (min player) while z[1] ascends (max player).
    :param z_0: initial point, array of shape (2,)
    :param alpha: step size
    :param num_iter: number of iterations
    :return: array of shape (num_iter + 1, 2) with all iterates incl. z_0
    """
    z = [z_0]
    grad_fn = grad(target)
    for i in range(num_iter):
        g = grad_fn(z[-1])
        # [-1, 1] flips the x-component sign: descend in x, ascend in y.
        z1 = z[-1] + g*np.array([-1,1])*alpha
        z.append(z1)
    z = np.array(z)
    return z
# Extra Gradient
def eg(z_0, alpha=0.05, num_iter=100):
    """Extragradient method: take a probe step, then update the current
    iterate using the gradient evaluated at the probe point.
    :param z_0: initial point, array of shape (2,)
    :param alpha: step size
    :param num_iter: number of iterations
    :return: array of shape (num_iter + 1, 2) with all iterates incl. z_0
    """
    z = [z_0]
    grad_fn = grad(target)
    for i in range(num_iter):
        g = grad_fn(z[-1])
        # Extrapolation (probe) step.
        z1 = z[-1] + g*np.array([-1,1])*alpha
        g = grad_fn(z1)
        # The real update starts from z[-1] but uses the probe gradient.
        z2 = z[-1] + g*np.array([-1,1])*alpha
        z.append(z2)
    z = np.array(z)
    return z
# Optimistic Gradient
def ogda(z_0, alpha=0.05, num_iter=100):
    """Optimistic gradient descent-ascent: update with the extrapolated
    gradient 2*g_t - g_{t-1} (x descends, y ascends).
    :param z_0: initial point, array of shape (2,)
    :param alpha: step size
    :param num_iter: number of iterations
    :return: array of shape (num_iter + 2, 2); z_0 is stored twice so the
        first step has a "previous" gradient available.
    """
    # z_0 is duplicated to seed the look-back at z[-2] on the first step.
    z = [z_0, z_0]
    grad_fn = grad(target)
    for i in range(num_iter):
        g = grad_fn(z[-1])
        gg = grad_fn(z[-2])
        z1 = z[-1] + 2*g*np.array([-1,1])*alpha - gg*np.array([-1,1])*alpha
        z.append(z1)
    return np.array(z)
# Consensus Optimization
def co(z_0, alpha=0.01, gamma=0.1, num_iter=100):
    """Consensus optimization: signed game gradient plus a gamma-weighted
    correction H @ g computed from the full Jacobian of the gradient.
    :param z_0: initial point, array of shape (2,)
    :param alpha: step size
    :param gamma: weight of the consensus correction term
    :param num_iter: number of iterations
    :return: array of shape (num_iter + 1, 2) with all iterates incl. z_0
    """
    z = [z_0]
    grad_fn = grad(target)
    hessian = jacobian(grad_fn)
    for i in range(num_iter):
        g = grad_fn(z[-1])
        H = hessian(z[-1])
        # Signed game gradient plus the consensus correction gamma * H @ g.
        v = g*np.array([1,-1]) + gamma*np.matmul(H,g)
        z1 = z[-1] - alpha*v
        z.append(z1)
    return np.array(z)
# Symplectic gradient adjustment
def sga(z_0, alpha=0.05, lamb=0.1, num_iter = 100):
    """Symplectic gradient adjustment: multiply the signed game gradient by
    I + lamb * A, where A is antisymmetric and built from the off-diagonal
    Hessian entry H[0,1].
    :param z_0: initial point, array of shape (2,)
    :param alpha: step size
    :param lamb: adjustment strength
    :param num_iter: number of iterations
    :return: array of shape (num_iter + 1, 2) with all iterates incl. z_0
    """
    z = [z_0]
    grad_fn = grad(target)
    hessian = jacobian(grad_fn)
    for i in range(num_iter):
        g = grad_fn(z[-1])
        w = g * np.array([1,-1])
        H = hessian(z[-1])
        # 2x2 adjustment matrix I + lamb * [[0, -H01], [H01, 0]].
        HH = np.array([[1, -lamb*H[0,1]],[lamb*H[0,1],1]])
        v = HH @ w
        z1 = z[-1] - alpha*v
        z.append(z1)
    z = np.array(z)
    return z
# Follow the ridge
def follow(z_0, alpha=0.05, num_iter = 100):
    """Follow-the-Ridge: gradient descent-ascent with the y-update corrected
    by -H_xy * pinv(H_yy) * g_x so iterates stay on the ridge.
    :param z_0: initial point, array of shape (2,)
    :param alpha: step size
    :param num_iter: number of iterations
    :return: array of shape (num_iter + 1, 2) with all iterates incl. z_0
    """
    z = [z_0]
    grad_fn = grad(target)
    hessian = jacobian(grad_fn)
    for i in range(num_iter):
        g = grad_fn(z[-1])
        H = hessian(z[-1])
        # Descent in x; ascent in y plus the ridge-following correction term.
        v = np.array([g[0], -g[1]-H[0,1]*np.squeeze(pinv(H[1:,1:]))*g[0]])
        z1 = z[-1] - alpha*v
        z.append(z1)
    z = np.array(z)
    return z
def f1(z):
    """Quadratic game example; (0, 0) is both a local and global minimax."""
    x, y = z[0], z[1]
    return -3*x*x - y*y + 4*x*y
def f2(z):
    """Quadratic game example; (0, 0) is neither a local nor global minimax."""
    x, y = z[0], z[1]
    return 3*x*x + y*y + 4*x*y
def f3(z):
    """Nonconvex game example; (0, 0) is a local minimax."""
    x, y = z[0], z[1]
    polynomial = 0.4*x*x - 0.1*(y - 3*x + 0.05*x*x*x)**2 - 0.01*y*y*y*y
    return polynomial * np.exp(-0.01*(x*x + y*y))
# Select target function
if function==1:
    target = f1 # (0,0) is local minimax and global minimax
    z_0 = np.array([5., 7.]) # Set initial point
    plot_width = 12 # Set range of the plot
    root_dir = 'results/f1.pdf'
elif function==2:
    target = f2 # (0,0) is not local minimax and not global minimax
    z_0 = np.array([6., 5.])
    plot_width = 12
    root_dir = 'results/f2.pdf'
elif function==3:
    target = f3 # (0,0) is local minimax
    z_0 = np.array([7., 5.])
    plot_width = 8
    root_dir = 'results/f3.pdf'
# Run all algorithms on target
zfr=follow(z_0, num_iter = 1000, alpha = 0.05)
zgda=gda(z_0, num_iter = 1000, alpha = 0.05)
zogda=ogda(z_0, num_iter = 1000, alpha = 0.05)
zeg=eg(z_0, num_iter = 1000, alpha = 0.05)
zco=co(z_0, num_iter = 1000, alpha = 0.05, gamma=0.1)
zsga=sga(z_0, num_iter = 1000, alpha = 0.01, lamb=1.0)
# Plot trajectory with contour
plt.rcParams.update({'font.size': 14})
def_colors=(plt.rcParams['axes.prop_cycle'].by_key()['color'])
#plot_width=12
plt.figure(figsize=(5,5))
axes = plt.gca()
axes.set_xlim([-plot_width,plot_width])
axes.set_ylim([-plot_width,plot_width])
# Evaluate target on a grid for the filled-contour background.
x1 = np.arange(-plot_width,plot_width,0.1)
y1 = np.arange(-plot_width,plot_width,0.1)
X,Y = np.meshgrid(x1,y1)
Z = np.zeros_like(X)
for i in range(len(x1)):
    for j in range(len(y1)):
        Z[j][i] = target(np.array([x1[i] ,y1[j]]))
plt.contourf(X,Y,Z,30,cmap=plt.cm.gray)
lw = 2
hw = 0.7  # NOTE(review): unused variable
# One line per algorithm; FR drawn solid on top, the rest dashed.
line6,=plt.plot(zfr[:,0],zfr[:,1],'-',color='r',linewidth=lw,zorder=10)
line1,=plt.plot(zgda[:,0],zgda[:,1],'--',linewidth=lw,color=def_colors[9],zorder=2)
line2,=plt.plot(zogda[:,0],zogda[:,1],'--',linewidth=lw,color=def_colors[1])
line3,=plt.plot(zeg[:,0],zeg[:,1],'--',linewidth=lw,color=def_colors[2])
line4,=plt.plot(zsga[:,0],zsga[:,1],'--',color=def_colors[0],linewidth=lw)
line5,=plt.plot(zco[:,0],zco[:,1],'--',color='xkcd:violet',linewidth=lw)
# Mark the common initial point with a triangle.
init=plt.plot(zfr[0,0],zfr[0,1],'^',zorder=20,ms=12.0,color='r')
plt.legend((line6,line1, line2, line3, line4, line5), ('FR','GDA', 'OGDA', 'EG', 'SGA', 'CO'), loc=4)
os.makedirs('results/', exist_ok=True)
plt.savefig(root_dir, dpi=300)
#plt.show()
plugins/typo_squatter/komand_typo_squatter/util/utils.py | jakub-kaluza/insightconnect-plugins | 0 | 12762138 | import re
import math
from tld import get_tld
from Levenshtein import distance
from .suspicious import keywords, tlds
def entropy(string: str) -> float:
    """Shannon entropy (bits per symbol) of *string*.

    Symbols are weighted by their relative frequency; the iteration order
    follows each character's first occurrence, as in the original
    phishing_catcher implementation.
    """
    length = len(string)
    result = 0
    for symbol in dict.fromkeys(string):
        p = float(string.count(symbol)) / length
        result -= p * math.log(p) / math.log(2.0)
    return result
def score_domain(domain: str) -> int:
    """Score `domain`.
    The highest score, the most probable `domain` is a phishing site.
    Args:
        domain (str): the domain to check.
    Returns:
        int: the score of `domain`.
    #https://github.com/x0rz/phishing_catcher/blob/master/catch_phishing.py
    """
    score = 0
    # Suspicious TLDs (frequently abused free TLDs) add a flat penalty.
    for t in tlds:
        if domain.endswith(t):
            score += 20
    # Remove initial '*.' for wildcard certificates bug
    if domain.startswith("*."):
        domain = domain[2:]
    # Removing TLD to catch inner TLD in subdomain (ie. paypal.com.domain.com)
    try:
        res = get_tld(domain, as_object=True, fail_silently=True, fix_protocol=True)
        domain = ".".join([res.subdomain, res.domain])
    except: # noqa: B110
        pass
    # BUG FIX: raw string -- "\W" was an invalid escape sequence that raises
    # a DeprecationWarning/SyntaxWarning on modern Python.
    words_in_domain = re.split(r"\W+", domain)
    # Remove initial '*.' for wildcard certificates bug
    # (defensive repeat in case the TLD-stripping above re-introduced it)
    if domain.startswith("*."):
        domain = domain[2:]
    # ie. detect fake .com (ie. *.com-account-management.info)
    if words_in_domain[0] in ["com", "net", "org"]:
        score += 10
    # Testing keywords
    for word in keywords.items():
        if word[0] in domain:
            score += word[1]
    # Higher entropy is kind of suspicious
    score += int(round(entropy(domain) * 10))
    # Testing Levenshtein distance for strong keywords (>= 70 points) (ie. paypol)
    for key in [k for (k, s) in keywords.items() if s >= 70]:
        # Removing too generic keywords (ie. mail.domain.com)
        for word in [w for w in words_in_domain if w not in ["email", "mail", "cloud"]]:
            if distance(str(word), str(key)) == 1:
                score += 70
    # Lots of '-' (ie. www.paypal-datacenter.com-acccount-alert.com)
    if "xn--" not in domain and domain.count("-") >= 4:
        score += domain.count("-") * 3
    # Deeply nested subdomains (ie. www.paypal.com.security.accountupdate.gq)
    if domain.count(".") >= 3:
        score += domain.count(".") * 3
    return score
| 3.15625 | 3 |
muller/graphics/plottimeseries.py | andreashirley/Lolipop | 6 | 12762139 | <reponame>andreashirley/Lolipop
import matplotlib.pyplot as plt
plt.clf()
# plt.switch_backend('agg')
import pandas
from pathlib import Path
from typing import Dict, Optional, List, Tuple, Union
from muller import widgets
from muller.graphics.palettes import palette_distinctive, Palette
from loguru import logger
class TimeseriesPlot:
    """Renders genotype/trajectory frequency timeseries with matplotlib."""
    def __init__(self, render: bool = True, legend: bool = True, scale: int = 1, style: Optional[str] = None):
        # NOTE(review): the ``render`` parameter is accepted but never used.
        # Set the default aspect ratio
        self.length_x = 12
        self.length_y = 10
        self.scale = scale
        if self.scale < 1:
            self.scale = 1
        self.dpi = 250
        # Parameters concerning the overall plot
        self.default_color = "#333333"
        self.style = style
        if self.style == 'nature':
            self._set_style_nature()
        else:
            self._set_style_default()
        # Parameters concerning the plot legend.
        self.legend = legend
        self.legend_font_properties = {
            'size': 12 * self.scale
        }  # The size of the font used to label each series in the legend.
        self.legend_location = 'right'
        self.legend_title = 'Genotypes'
        # Set up the fontsizes for each labeltype
        self.label_size_axis, self.label_size_title, self.label_size_ticks = self.set_scale(scale)
    @staticmethod
    def set_scale(scale: int = 1) -> Tuple[int, int, int]:
        """Return (axis, title, tick) font sizes scaled by ``scale``."""
        # Made staticmethod so that pycharm doesn't complain about object properties being defined outside of __init__()
        label_size_axis = 24 * scale
        label_size_title = 42 * scale
        label_size_ticks = 18 * scale
        return label_size_axis, label_size_title, label_size_ticks
    def _set_style_default(self) -> None:
        """Configure the default marker/line/label style."""
        # Parameters concerning the overall plot
        self.xaxis_label = "Generation"
        self.yaxis_label = 'Frequency'
        self.background_color = 'white'
        self.markersize = 4 * self.scale  # The size of individual markers in the plot
        self.markertype = 'o'  # The type of marker to use.
        self.linestyle = 'solid'
        self.linewidth = 2 * self.scale
    def _set_style_nature(self):
        """ Configures `TimeseriesPlot` to use a style similar to that in the yeast nature paper. """
        self.xaxis_label = "Generation"
        self.yaxis_label = "Frequency"
        self.background_color = "white"
        self.markertype = 'o'
        self.markersize = 12 * self.scale
        self.linestyle = 'solid'
        self.linewidth = 3 * self.scale
    def _apply_style(self, axes: plt.Axes, plot_title, xmax: Optional[int] = None) -> plt.Axes:
        """Apply axis labels, limits, ticks and the title; returns the axes."""
        # Parameters concerning subfeatures of the plot.
        axes.set_ylabel(self.yaxis_label, fontsize = self.label_size_axis)
        axes.set_xlabel(self.xaxis_label, fontsize = self.label_size_axis)
        axes.set_facecolor(self.background_color)
        axes.set_title(plot_title, fontsize = self.label_size_title)
        axes.set_ylim(0,
            1.01)  # The maximum possible value for our data is 100% AKA 1. Leave a little room so the lines at 100% aren't obscured.
        if xmax:
            axes.set_xlim(0, xmax + 1)  # Small offset so the final point isn't cut off
        if self.style == 'nature':
            axes.set_yticks([0, 0.5, 1])
        axes.tick_params(axis = 'both', labelsize = self.label_size_ticks)
        return axes
    def _initialize_plot(self, ax: Optional[plt.axes]) -> plt.Axes:
        """Return ``ax`` unchanged, or create a new figure/axes if None."""
        if ax is None:
            fig, ax = plt.subplots(figsize = (self.length_x * self.scale, self.length_y * self.scale))
        return ax
    @staticmethod
    def get_palette(table: pandas.DataFrame) -> Dict[str, str]:
        """Map each index label of ``table`` to a distinctive colour."""
        return palette_distinctive.generate_distinctive_palette(table.index)
    def plot_multiple(self, timeseries: pandas.DataFrame, palettes: List[Palette], ax: Optional[plt.Axes] = None,
            filenames: Optional[List[Path]] = None):
        """Render one plot of ``timeseries`` per palette.
        NOTE(review): ``filenames`` is indexed by ``palette.name`` below, so
        at runtime it behaves like a mapping rather than the annotated
        List[Path] -- confirm the caller's actual type.
        """
        for palette in palettes:
            fnames = filenames[palette.name]
            self.plot(timeseries, palette, ax, fnames)
    def plot(self, timeseries: pandas.DataFrame, palette: Union[Dict[str, str], Palette] = None,
            ax: Optional[plt.Axes] = None,
            filename: Optional[Path] = None) -> plt.Axes:
        """ Plots a generic timeseries dataframe. The plot labels are inferred from how the index labels are formatted.
        Parameters
        ----------
        timeseries: pandas.DataFrame
            A dataframe where each column is a timepoint and each row is a specific series to plot.
        palette: Dict[str,str]
            Maps each series id to the proper color to use.
        ax: Optional[plt.Axes]
            Specifies the plt.Axes object to use.
        filename: Optional[Path]
            The resulting figure will be saved to this filename if it is provided.
        """
        # Set up the plotting area.
        if palette is None: palette = {}
        self.set_scale()  # NOTE(review): the returned sizes are discarded here.
        ax = self._initialize_plot(ax)
        try:
            plot_title = 'Genotypes' if 'genotype' in timeseries.index[0] else 'Trajectories'
        except TypeError:
            message = f"Could not iterate over the first element of the index. Is the table indexed by genotype?"
            logger.debug(message)
            plot_title = "Timeseries"
        # Keep only the numeric (timepoint) columns.
        numeric_columns = list(widgets.get_numeric_columns(timeseries.columns))
        timeseries = timeseries[numeric_columns]
        for series_id, series in timeseries.iterrows():
            color = palette.get(series_id, self.default_color)
            # Make sure that the timeseries is in the right order
            # Some datasets may be out of order.
            trajectory_timeseries = sorted((column, series[column]) for column in numeric_columns)
            x_values, y_values = zip(*trajectory_timeseries)
            ax.plot(
                x_values, y_values,
                self.markertype,
                color = color,
                label = series_id,
                marker = self.markertype,
                markersize = self.markersize,
                linewidth = self.linewidth,
                linestyle = self.linestyle
            )
        ax = self._apply_style(ax, plot_title, max(int(i) for i in timeseries.columns))
        ax.set_xlim(0, max(timeseries.columns))
        # NOTE(review): the `and False` makes this branch dead code, i.e.
        # legend drawing is currently disabled -- presumably intentional;
        # confirm before removing.
        if self.legend and False:
            legend = ax.legend(
                loc = self.legend_location,
                prop = self.legend_font_properties,
                title = self.legend_title
            )
            legend.get_title().set_fontsize(str(self.legend_font_properties['size']))
        if filename:
            self.save_figure(filename)
        return ax
    def save_figure(self, filename: Path):
        """ Saves the current figure to ``filename`` at ``self.dpi``. """
        plt.savefig(filename, dpi = self.dpi)
if __name__ == "__main__":
pass
| 2.4375 | 2 |
adfd/parse.py | ADFD/adfd | 0 | 12762140 | <filename>adfd/parse.py
import html
import itertools
import logging
import re
from collections import OrderedDict
from typing import Set, List
from adfd.cnf import NAME
from adfd.process import RE, slugify
log = logging.getLogger(__name__)
class TagOptions:
    """Per-tag configuration flags controlling how a recognised BBCode tag
    is parsed and rendered.  Any extra keyword passed to ``__init__`` is
    coerced to bool and overrides the matching class-level default."""
    tagName = None
    """name of the tag - all lowercase"""
    newlineCloses = False
    """a newline should automatically close this tag"""
    sameTagCloses = False
    """another start of the same tag should close this tag"""
    standalone = False
    """this tag does not have a closing tag"""
    renderEmbedded = True
    """tags should be rendered inside this tag"""
    transformNewlines = True
    """newlines should be converted to markup"""
    escapeHtml = True
    """HTML characters (<, >, and &) should be escaped inside this tag"""
    replaceLinks = True
    """URLs should be replaced with link markup inside this tag"""
    replaceCosmetic = True
    """perform cosmetic replacements (ellipses, dashes, etc.) in tag"""
    strip = False
    """leading and trailing whitespace should be stripped inside tag"""
    swallowTrailingNewline = False
    """tag should swallow first trailing newline (i.e. for block elements)"""
    def __init__(self, tagName, **kwargs):
        # Each keyword overrides the corresponding class-level flag (as bool).
        self.tagName = tagName
        for attr, value in list(kwargs.items()):
            setattr(self, attr, bool(value))
class Token:
    """
    type
        TAG_START, TAG_END, NEWLINE or DATA
    tag
        The name of the tag if token_type=TAG_*, otherwise None
    options
        dict of options specified for TAG_START, otherwise None
    text
        The original token text
    """
    TAG_START = "start"
    TAG_END = "end"
    NEWLINE = "newline"
    DATA = "data"
    def __init__(self, *args):
        self.type, self.tag, self.options, self.text = args
        # Pre-computed classification flags so downstream code can test token
        # kinds without repeating string comparisons everywhere.
        self.isOpener = self.type == Token.TAG_START
        self.isCloser = self.type == Token.TAG_END
        # Opening h1..h9 header tag (self.tag is None for non-tag tokens, but
        # the short-circuiting `and` guards the regex call).
        self.isHeaderStart = self.isOpener and re.match(r"h\d", self.tag)
        self.isQuoteStart = self.isOpener and self.tag == "quote"
        self.isQuoteEnd = self.isCloser and self.tag == "quote"
        self.isListStart = self.isOpener and self.tag == "list"
        self.isListEnd = self.isCloser and self.tag == "list"
        self.isMetaStart = self.isOpener and self.tag == "meta"
        self.isMetaEnd = self.isCloser and self.tag == "meta"
        self.isNewline = self.type == Token.NEWLINE
    def __str__(self):
        return self.__repr__()
    def __repr__(self):
        # e.g. <quote> / </quote> for tags; <text> for data/newline tokens.
        if self.tag:
            return "<{}{}>".format("/" if self.isCloser else "", self.tag)
        return "<%s>" % self.text
    @property
    def asTuple(self):
        # Same order as the constructor arguments: (type, tag, options, text).
        return self.type, self.tag, self.options, self.text
class Parser:
    def __init__(
        self,
        newline="\n",
        normalizeNewlines=True,
        escapeHtml=True,
        replaceLinks=True,
        replaceCosmetic=True,
        tagOpener="[",
        tagCloser="]",
        linker=None,
        linkerTakesContext=False,
        dropUnrecognized=False,
    ):
        """Create a BBCode parser.

        :param newline: string emitted for newline tokens
        :param normalizeNewlines: normalise newline variants before tokenizing
        :param escapeHtml: escape <, > and & in data by default
        :param replaceLinks: auto-link bare URLs by default
        :param replaceCosmetic: apply cosmetic replacements by default
        :param tagOpener: opening tag delimiter (default ``[``)
        :param tagCloser: closing tag delimiter (default ``]``)
        :param linker: optional callable used to render links
        :param linkerTakesContext: pass the format context to ``linker``
        :param dropUnrecognized: drop unknown tags instead of passing through
        """
        self.tagOpener = tagOpener
        self.tagCloser = tagCloser
        self.newline = newline
        self.normalizeNewlines = normalizeNewlines
        # Maps lowercase tag name -> (render_func, TagOptions).
        self.recognizedTags = {}
        self.dropUnrecognized = dropUnrecognized
        self.escapeHtml = escapeHtml
        self.replaceCosmetic = replaceCosmetic
        self.replaceLinks = replaceLinks
        self.linker = linker
        self.linkerTakesContext = linkerTakesContext
    def add_formatter(self, tagName, render_func, **kwargs):
        """Install render function for specified tag name.
        The render function should have the following signature:
            def render(tagName, value, options, parent, context)
        The arguments are as follows:
        tagName
            The name of the tag being rendered.
        value
            context between start and end tags (None for standalone tags).
            Depends on renderEmbedded tag option whether this has
            been rendered.
        options
            A dictionary of options specified on the opening tag.
        parent
            The parent TagOptions, if the tag is being rendered inside
            another tag, otherwise None.
        context
            The keyword argument dictionary passed into the format call.
        """
        # Keyword arguments become TagOptions flags; re-registering the same
        # tag name replaces the previous formatter.
        options = TagOptions(tagName.strip().lower(), **kwargs)
        self.recognizedTags[options.tagName] = (render_func, options)
    def add_simple(self, tagName, format_string, **kwargs):
        """Install a formatter.
        Takes the tag options dictionary, puts a value key in it
        and uses it as a format dictionary to the given format string.
        """
        # noinspection PyUnusedLocal
        def _render(name, value, options, parent, context):
            # %-format the string with the tag options plus the tag value.
            fmt = {}
            if options:
                fmt.update(options)
            fmt.update({"value": value})
            return format_string % fmt
        self.add_formatter(tagName, _render, **kwargs)
    def _newline_tokenize(self, data):
        """Create a list of NEWLINE and DATA tokens.
        If you concatenate their data, you will have the original string.
        :type data: str
        :returns: list of Token
        """
        parts = data.split("\n")
        tokens = []
        """:type: list of Token"""
        for num, part in enumerate(parts):
            # Empty parts (consecutive newlines) produce no DATA token.
            if part:
                tokens.append(Token(*(Token.DATA, None, None, part)))
            # Re-insert a NEWLINE token between every pair of split parts.
            if num < (len(parts) - 1):
                tokens.append(Token(*(Token.NEWLINE, None, None, "\n")))
        return tokens
def _parse_opts(self, data):
"""Parse options out of given a tag string.
This function will parse any options and return a tuple of
(tagName, options_dict).
Options may be quoted in order to preserve spaces, and free-standing
options are allowed. The tag name itself may also serve as an option
if it is immediately followed by an equal
sign. Here are some examples:
quote author="<NAME>"
tagName=quote, options={'author': '<NAME>'}
url="http://test.com/s.php?a=bcd efg" popup
tagName=url, options={
'url': 'http://test.com/s.php?a=bcd efg', 'popup': ''}
"""
name = None
opts = OrderedDict()
in_value = False
in_quote = False
attr = ""
value = ""
attr_done = False
for pos, ch in enumerate(data.strip()):
if in_value:
if in_quote:
if ch == in_quote:
in_quote = False
in_value = False
if attr:
opts[attr.lower()] = value.strip()
attr = ""
value = ""
else:
value += ch
else:
if ch in ('"', "'"):
in_quote = ch
elif ch == " " and data.find("=", pos + 1) > 0:
# If there is no = after this, value may accept spaces
opts[attr.lower()] = value.strip()
attr = ""
value = ""
in_value = False
else:
value += ch
else:
if ch == "=":
in_value = True
if name is None:
name = attr
elif ch == " ":
attr_done = True
else:
if attr_done:
if attr:
if name is None:
name = attr
else:
opts[attr.lower()] = ""
attr = ""
attr_done = False
attr += ch
if attr:
if name is None:
name = attr
opts[attr.lower()] = value.strip()
return name.lower(), opts
def _parse_tag(self, tag):
"""
Given a tag string (characters enclosed by []), this function will
parse any options and return a tuple of the form:
(valid, tagName, closer, options)
"""
if (
(not tag.startswith(self.tagOpener))
or (not tag.endswith(self.tagCloser))
or ("\n" in tag)
or ("\r" in tag)
):
return (False, tag, False, None)
tagName = tag[len(self.tagOpener) : -len(self.tagCloser)].strip()
if not tagName:
return (False, tag, False, None)
closer = False
opts = {}
if tagName[0] == "/":
tagName = tagName[1:]
closer = True
# Parse options inside the opening tag, if needed.
if (("=" in tagName) or (" " in tagName)) and not closer:
tagName, opts = self._parse_opts(tagName)
return (True, tagName.strip().lower(), closer, opts)
def _tag_extent(self, data, start):
"""Find extent of a tag.
Accounting for option quoting and new tags starting before the
current one closes.
Returns (found_close, end_pos) where valid is False if another tag
started before this one closed.
"""
in_quote = False
# noinspection PyTypeChecker
for i in range(start + 1, len(data)):
ch = data[i]
if ch in ('"', "'"):
if not in_quote:
in_quote = ch
elif in_quote == ch:
in_quote = False
if not in_quote and data[i : i + len(self.tagOpener)] == self.tagOpener:
return i, False
if not in_quote and data[i : i + len(self.tagCloser)] == self.tagCloser:
return i + len(self.tagCloser), True
return len(data), False
    def tokenize(self, data, get_unknowns=False):
        """Create list of tokens from original data
        :returns: list of Token

        When *get_unknowns* is true, returns the set of valid-but-
        unrecognized tag names seen instead of the token list.
        """
        if self.normalizeNewlines:
            data = data.replace("\r\n", "\n").replace("\r", "\n")
        pos = 0
        tokens: List[Token] = []
        unknown_tags: Set[str] = set()
        while pos < len(data):
            start = data.find(self.tagOpener, pos)
            if start >= pos:
                # Check if there was data between this start and the last end
                if start > pos:
                    tokens.extend(self._newline_tokenize(data[pos:start]))
                # noinspection PyUnusedLocal
                pos = start
                # Find the extent of this tag, if it's ever closed.
                end, found_close = self._tag_extent(data, start)
                if found_close:
                    tag = data[start:end]
                    valid, tagName, closer, opts = self._parse_tag(tag)
                    # Make sure this is a well-formed, recognized tag,
                    # otherwise it's just data
                    if valid and tagName in self.recognizedTags:
                        if closer:
                            args = (Token.TAG_END, tagName, None, tag)
                            tokens.append(Token(*args))
                        else:
                            args = (Token.TAG_START, tagName, opts, tag)
                            tokens.append(Token(*args))
                    elif valid and tagName not in self.recognizedTags:
                        # If we found a valid (but unrecognized) tag and
                        # self.dropUnrecognized is True, just drop it
                        unknown_tags.add(tagName)
                        if not self.dropUnrecognized:
                            tokens.extend(self._newline_tokenize(tag))
                else:
                    # We didn't find a closing tag, tack it on as text.
                    tokens.extend(self._newline_tokenize(data[start:end]))
                pos = end
            else:
                # No more tags left to parse.
                break
        # Trailing text after the last tag (or the whole input if no tags).
        if pos < len(data):
            tokens.extend(self._newline_tokenize(data[pos:]))
        if get_unknowns:
            return unknown_tags
        return tokens
    def _find_closer(self, tag, tokens, pos):
        """Find position of closing token.
        Given the current tag options, a list of tokens, and the current
        position in the token list, this function will find the position of the
        closing token associated with the specified tag. This may be a closing
        tag, a newline, or simply the end of the list (to ensure tags are
        closed). This function should return a tuple of the form (end_pos,
        consume), where consume should indicate whether the ending token
        should be consumed or not.
        """
        # embedCount: depth of same-named nested tags (renderEmbedded only).
        # blockCount: depth of nested tags that keep their newlines intact,
        # which suppresses newline-closing while > 0.
        embedCount = 0
        blockCount = 0
        while pos < len(tokens):
            token = tokens[pos]
            """:type: Token"""
            if tag.newlineCloses and token.type in (Token.TAG_START, Token.TAG_END):
                # If we're finding the closing token for a tag that is
                # closed by newlines, but there is an embedded tag that
                # doesn't transform newlines (i.e. a code tag that keeps
                # newlines intact), we need to skip over that.
                innerTag = self.recognizedTags[token.tag][1]
                if not innerTag.transformNewlines:
                    if token.type == Token.TAG_START:
                        blockCount += 1
                    else:
                        blockCount -= 1
            if token.type == Token.NEWLINE and tag.newlineCloses and blockCount == 0:
                # If for some crazy reason there are embedded tags that
                # both close on newline, the first newline will automatically
                # close all those nested tags.
                return pos, True
            elif token.type == Token.TAG_START and token.tag == tag.tagName:
                if tag.sameTagCloses:
                    return pos, False
                if tag.renderEmbedded:
                    embedCount += 1
            elif token.type == Token.TAG_END and token.tag == tag.tagName:
                if embedCount > 0:
                    embedCount -= 1
                else:
                    return pos, True
            pos += 1
        # Ran off the end of the token list: close implicitly there.
        return (pos, True)
def _link_replace(self, match, **context):
"""Callback for re.sub to replace link text with markup.
Turns out using a callback function is actually faster than using
backrefs, plus this lets us provide a hook for user customization.
linkerTakesContext=True means that the linker gets passed context
like a standard format function.
"""
url = match.group(0)
if self.linker:
if self.linkerTakesContext:
return self.linker(url, context)
else:
return self.linker(url)
else:
href = url
if "://" not in href:
href = "http://" + href
# Escape quotes to avoid XSS, let the browser escape the rest.
return '<a href="{}">{}</a>'.format(href.replace('"', "%22"), url)
    def _transform(self, tokens, escapeHtml, replaceLinks, replaceCosmetic, **context):
        """Transforms the input string based on the options specified.
        Takes into account if option is enabled globally for this parser.

        Each per-call flag is ANDed with the corresponding parser-level
        setting; link replacement must happen before escaping/cosmetic
        replacement so the generated markup is not mangled.
        """
        text = "".join([t.text for t in tokens])
        urlMatches = {}
        if self.replaceLinks and replaceLinks:
            # If we're replacing links in the text (i.e. not those in [url]
            # tags) then we need to be careful to pull them out before doing
            # any escaping or cosmetic replacement.
            pos = 0
            while True:
                match = RE.URL.search(text, pos)
                if not match:
                    break
                # Replace any link with a token that we can substitute back
                # in after replacements.
                token = "{{ bbcode-link-%s }}" % len(urlMatches)
                urlMatches[token] = self._link_replace(match, **context)
                # noinspection PyUnresolvedReferences
                start, end = match.span()
                text = text[:start] + token + text[end:]
                # To be perfectly accurate, this should probably be
                # len(text[:start] + token), but start will work, because the
                # token itself won't match as a URL.
                pos = start
        if self.escapeHtml and escapeHtml:
            text = Replacer.replace(text, Replacer.HTML_ESCAPE)
        if self.replaceCosmetic and replaceCosmetic:
            text = Replacer.replace(text, Replacer.COSMETIC)
        # Now put the replaced links back in the text.
        for token, replacement in urlMatches.items():
            text = text.replace(token, replacement)
        return text
    def _format_tokens(self, tokens, parent, **context):
        """Render a token list to output markup, recursing into tags.

        *parent* is the enclosing TagOptions (None at the top level); it
        decides how bare DATA and NEWLINE tokens are transformed.
        Returns the concatenated rendered output as a single string.
        """
        out = []
        idx = 0
        while idx < len(tokens):
            token = tokens[idx]
            """:type: Token"""
            if token.type == Token.TAG_START:
                fn, tag = self.recognizedTags[token.tag]
                if tag.standalone:
                    ret = fn(token.tag, None, token.options, parent, context)
                    out.append(ret)
                else:
                    # First, find the extent of this tag's tokens.
                    # noinspection PyTypeChecker
                    end, consume = self._find_closer(tag, tokens, idx + 1)
                    subtokens = tokens[idx + 1 : end]
                    # If the end tag should not be consumed, back up one
                    # (after grabbing the subtokens).
                    if not consume:
                        end -= 1
                    if tag.renderEmbedded:
                        # This tag renders embedded tags, simply recurse.
                        inner = self._format_tokens(subtokens, tag, **context)
                    else:
                        # Otherwise, just concatenate all the token text.
                        inner = self._transform(
                            subtokens,
                            tag.escapeHtml,
                            tag.replaceLinks,
                            tag.replaceCosmetic,
                            **context,
                        )
                    # Strip and replace newlines, if necessary.
                    if tag.strip:
                        inner = inner.strip()
                    if tag.transformNewlines:
                        inner = inner.replace("\n", self.newline)
                    # Append the rendered contents.
                    ret = fn(token.tag, inner, token.options, parent, context)
                    out.append(ret)
                    # If the tag should swallow the first trailing newline,
                    # check the token after the closing token.
                    if tag.swallowTrailingNewline:
                        nextPos = end + 1
                        if (
                            nextPos < len(tokens)
                            and tokens[nextPos].type == Token.NEWLINE
                        ):
                            end = nextPos
                    # Skip to the end tag.
                    idx = end
            elif token.type == Token.NEWLINE:
                # If this is a top-level newline, replace it. Otherwise,
                # it will be replaced (if necessary) by the code above.
                out.append(self.newline if parent is None else token.text)
            elif token.type == Token.DATA:
                # Per-tag transform settings apply inside a tag; the
                # parser-level defaults apply at the top level.
                escape = self.escapeHtml if parent is None else parent.escapeHtml
                links = self.replaceLinks if parent is None else parent.replaceLinks
                cosmetic = (
                    self.replaceCosmetic if parent is None else parent.replaceCosmetic
                )
                ret = self._transform([token], escape, links, cosmetic, **context)
                out.append(ret)
            idx += 1
        return "".join(out)
def strip(self, data, strip_newlines=False):
"""Strip out any tags from the input text.
Using the same tokenization as the formatter.
"""
text = []
for token in self.tokenize(data):
if token.type == Token.DATA:
text.append(token.text)
elif token.type == Token.NEWLINE and not strip_newlines:
text.append(token.text)
return "".join(text)
class Chunk:
    """Forms token groups to fix missing formatting in forum articles."""
    HEADER = "header"
    PARAGRAPH = "paragraph"
    QUOTE = "quote"
    LIST = "list"
    META = "meta"
    TYPES = [HEADER, PARAGRAPH, QUOTE, LIST, META]
    def __init__(self, tokens, chunkType):
        """
        :type tokens: list of Token
        :param chunkType: one of Chunk.TYPES
        """
        self.tokens = tokens
        self.chunkType = chunkType
        self.clean()
        self.modify()
    def __repr__(self):
        return " ".join([str(c) for c in self.tokens])
    def clean(self):
        """Remove newline tokens at the beginning and end of the chunk."""
        for idx in [0, -1]:
            try:
                while self.tokens[idx].isNewline:
                    self.tokens.pop(idx)
            except IndexError:
                # All tokens were newlines; the chunk is empty now.
                pass
    def modify(self):
        """Wrap paragraph chunks in [p]...[/p] tags.

        This innocent method is the reason why we have chunks.
        """
        if self.isEmpty:
            return
        if self.chunkType == self.PARAGRAPH:
            startToken = Token(Token.TAG_START, "p", None, "[p]")
            endToken = Token(Token.TAG_END, "p", None, "[/p]")
            self.tokens.insert(0, startToken)
            self.tokens.append(endToken)
    @property
    def isEmpty(self):
        """True when the chunk has no tokens or only newline tokens."""
        if not self.tokens:
            return True
        for token in self.tokens:
            if not token.isNewline:
                return False
        # BUGFIX: previously fell through and implicitly returned None;
        # be explicit when every remaining token is a newline.
        return True
class Chunkman:
    """Create chunks specific to forum articles for preparation."""
    def __init__(self, tokens):
        """
        :type tokens: list of Token
        """
        self.tokens = tokens
        self._chunks = []
    @property
    def flattened(self):
        """All chunk tokens as one flat token list."""
        return list(itertools.chain(*[chunk.tokens for chunk in self.chunks]))
    @property
    def chunks(self):
        """Article chunks which can be converted individually.

        :rtype: list of Chunk
        """
        # BUGFIX: rebuild from scratch on every access; previously each
        # access kept appending to the same list, duplicating chunks.
        self._chunks = []
        currentTokens = []
        idx = 0
        while idx < len(self.tokens):
            token = self.tokens[idx]
            if token.isHeaderStart:
                currentTokens = self.flush(currentTokens)
                # A header is exactly three tokens: start, text, end.
                newIdx = idx + 3
                self.flush(self.tokens[idx:newIdx], Chunk.HEADER)
                idx = newIdx
                continue
            if token.isQuoteStart:
                self.flush(currentTokens)
                sIdx = idx
                while not token.isQuoteEnd:
                    idx += 1
                    token = self.tokens[idx]
                idx += 1
                currentTokens = self.flush(self.tokens[sIdx:idx], Chunk.QUOTE)
                continue
            if token.isListStart:
                self.flush(currentTokens)
                sIdx = idx
                while not token.isListEnd:
                    idx += 1
                    token = self.tokens[idx]
                idx += 1
                currentTokens = self.flush(self.tokens[sIdx:idx], Chunk.LIST)
                continue
            if token.isMetaStart:
                self.flush(currentTokens)
                sIdx = idx
                while not token.isMetaEnd:
                    idx += 1
                    token = self.tokens[idx]
                idx += 1
                currentTokens = self.flush(self.tokens[sIdx:idx], Chunk.META)
                continue
            if self.is_block_change(self.tokens, idx):
                currentTokens = self.flush(currentTokens)
                idx += 1
                continue
            currentTokens.append(token)
            idx += 1
        self.flush(currentTokens)
        return self._chunks
    def flush(self, tokens, chunkType=Chunk.PARAGRAPH):
        """Append cleaned tokens as a chunk and return a fresh (empty) list."""
        chunk = Chunk(tokens, chunkType)
        if not chunk.isEmpty:
            self._chunks.append(chunk)
        return []
    def is_block_change(self, tokens, idx):
        """True when two consecutive newlines mark a paragraph break."""
        try:
            nextToken = tokens[idx + 1]
            return tokens[idx].isNewline and nextToken.isNewline
        except IndexError:
            # BUGFIX: return an explicit False instead of implicit None.
            return False
class AdfdParser(Parser):
    """BBCode parser preconfigured with the tag set used by forum articles."""
    # Matches paragraphs that ended up empty after rendering; dropped in cleanup().
    ORPHAN_MATCHER = re.compile(r"^<p></p>")
    # h1..h5 source tags; rendering demotes each one level (see _render_header).
    HEADER_TAGS = ["h%s" % i for i in range(1, 6)]
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._add_formatters()
    def to_html(self, data=None, tokens=None, **context):
        """context will be passed along to the render functions"""
        if data:
            assert not tokens, tokens
            rawTokens = self.tokenize(data)
            tokens = Chunkman(rawTokens).flattened
        assert tokens
        _html = self._format_tokens(tokens, parent=None, **context).strip()
        return self.cleanup(_html)
    def cleanup(self, text):
        """Drop blank lines and orphaned empty paragraphs from rendered HTML."""
        out = []
        for line in text.split("\n"):
            if not line.strip():
                continue
            if not re.match(self.ORPHAN_MATCHER, line):
                out.append(line)
        return "\n".join(out)
    def _add_formatters(self):
        """Register all tag formatters supported by forum articles."""
        self.add_simple(
            "code",
            "<pre><code>%(value)s</code></pre>\n",
            renderEmbedded=False,
            transformNewlines=False,
            swallowTrailingNewline=True,
        )
        self.add_simple("em", "<em>%(value)s</em>")
        self.add_simple("strong", "<strong>%(value)s</strong>")
        self._add_bbvideo_formatter()
        self._add_header_formatters()
        self._add_img_formatter()
        self._add_list_formatter()
        self._add_mod_formatter()
        self._add_attachment_formatter()
        self._add_quote_formatter()
        self._add_raw_formatter()
        self._add_meta_formatter()
        self._add_url_formatter()
        self.add_simple("p", "<p>%(value)s</p>\n")
        """intermittent helper for paragraphs"""
        # FIXME remove them, when articles are all updated to semantic formatting
        self._add_unsemantic_formatters()
    def _add_unsemantic_formatters(self):
        """Legacy presentational tags kept for not-yet-migrated articles."""
        self.add_simple("b", "<strong>%(value)s</strong>")
        self.add_simple("br", "<br>\n", standalone=True)
        self.add_simple("center", '<div style="text-align:center;">%(value)s</div>\n')
        self.add_simple("hr", "<hr>\n", standalone=True)
        self.add_simple("i", "<em>%(value)s</em>")
        self.add_simple("s", "<strike>%(value)s</strike>")
        self.add_simple(
            "u", '<span style="text-decoration: underline;">%(value)s</span>'
        )
        self.add_simple("sub", "<sub>%(value)s</sub>")
        self.add_simple("sup", "<sup>%(value)s</sup>")
        self._add_color_formatter()
    def _add_bbvideo_formatter(self):
        self.add_formatter(
            "BBvideo", self._render_bbvideo, replaceLinks=False, replaceCosmetic=False
        )
    # noinspection PyUnusedLocal
    @staticmethod
    def _render_bbvideo(name, value, options, parent, context):
        """Render [BBvideo=<width>,<height>]url[/BBvideo] as a tagged link."""
        width, height = options["bbvideo"].strip().split(",")
        dataMap = {"width": width, "height": height, "url": value}
        return (
            '<a href="%(url)s" class="bbvideo" '
            'data-bbvideo="%(width)s,%(height)s" '
            'target="_blank">%(url)s</a>' % dataMap
        )
    def _add_color_formatter(self):
        self.add_formatter("color", self._render_color)
    # noinspection PyUnusedLocal
    @staticmethod
    def _render_color(name, value, options, parent, context):
        """Render [color=...] as an inline style span.

        Only accepts a named color or a hex code; anything else degrades
        to 'inherit' so unvalidated input cannot inject style.
        """
        if "color" in options:
            color = options["color"].strip()
        elif options:
            color = list(options.keys())[0].strip()
        else:
            return value
        match = re.match(r"^([a-z]+)|^(#[a-f0-9]{3,6})", color, re.I)
        color = match.group() if match else "inherit"
        return f'<span style="color:{color};">{value}</span>'
    def _add_mod_formatter(self):
        self.add_formatter("mod", self._render_mod)
    # noinspection PyUnusedLocal
    @staticmethod
    def _render_mod(name, value, options, parent, context):
        """Render a moderator note as a highlighted block."""
        if "mod" in options:
            name = options["mod"].strip()
        elif options:
            name = list(options.keys())[0].strip()
        else:
            return value
        # Same sanitizing pattern as _render_color — only word/hex values pass.
        match = re.match(r"^([a-z]+)|^(#[a-f0-9]{3,6})", name, re.I)
        name = match.group() if match else "inherit"
        return f'<div style="background: orange;">[{name}] {value}</div>'
    def _add_img_formatter(self):
        self.add_formatter(
            "img", self._render_img, replaceLinks=False, replaceCosmetic=False
        )
    # noinspection PyUnusedLocal
    @staticmethod
    def _render_img(name, value, options, parent, context):
        href = value
        # Only add http:// if it looks like it starts with a domain name.
        if "://" not in href and RE.DOMAIN.match(href):
            href = "http://" + href
        return '<img src="%s">' % (href.replace('"', "%22"))
    def _add_attachment_formatter(self):
        self.add_formatter(
            "attachment",
            self._render_attachment,
            replaceLinks=False,
            replaceCosmetic=False,
        )
    # noinspection PyUnusedLocal
    @staticmethod
    def _render_attachment(name, value, options, parent, context):
        """Assumes that the file names are unique across complete website.
        Anything else would mean completely different handling for this.
        Possibilities would be putting the images in topic folders or
        pre-processing bbcode and replace real_filename with physical_filename.
        Currently also only images are supported.
        """
        return f'<img src="/{NAME.STATIC}/{NAME.ATTACHMENTS}/{value}">'
    def _add_list_formatter(self):
        self.add_formatter(
            "list",
            self._render_list,
            transformNewlines=False,
            strip=True,
            swallowTrailingNewline=True,
        )
        # Make sure transformNewlines = False for [*], so [code]
        # tags can be embedded without transformation.
        self.add_simple(
            "*",
            "<li>%(value)s</li>",
            newlineCloses=True,
            transformNewlines=False,
            sameTagCloses=True,
            strip=True,
        )
    # noinspection PyUnusedLocal
    @staticmethod
    def _render_list(name, value, options, parent, context):
        """Render [list] / [list=<type>] as <ul> or a styled <ol>."""
        listType = options["list"] if (options and "list" in options) else "*"
        # Maps the bbcode list type to a CSS list-style-type value.
        cssOpts = {
            "1": "decimal",
            "01": "decimal-leading-zero",
            "a": "lower-alpha",
            "A": "upper-alpha",
            "i": "lower-roman",
            "I": "upper-roman",
        }
        tag = "ol" if listType in cssOpts else "ul"
        css = (
            ' style="list-style-type:%s;"' % cssOpts[listType]
            if listType in cssOpts
            else ""
        )
        return f"<{tag}{css}>{value}</{tag}>\n"
    def _add_header_formatters(self):
        for tag in self.HEADER_TAGS:
            self.add_formatter(tag, self._render_header)
    @staticmethod
    def _render_header(tag, value, options, parent, context):
        """Render [hN] as an anchored heading, demoted by one level."""
        demotionLevel = 1  # number of levels header tags get demoted
        level = int(tag[1]) + demotionLevel
        slug = slugify(value)
        r = f'<h{level} id="{slug}">'
        r += f'<a class="header" href="#{slug}">{value}'
        # r += ' <i class="paragraph icon"></i>'
        r += "</a></h%s>" % level
        return r
    def _add_quote_formatter(self):
        self.add_formatter(
            "quote",
            self._render_quote,
            transformNewlines=False,
            strip=True,
            swallowTrailingNewline=True,
        )
    # noinspection PyUnusedLocal
    @staticmethod
    def _render_quote(name, value, options, parent, context):
        """Render [quote] / [quote=<author>] as a segment with optional cite."""
        author = options["quote"] if (options and "quote" in options) else ""
        if author:
            cite = (
                '<div class="ui inverted secondary segment">'
                '<i class="comment outline icon"></i>%s</div>' % author
            )
        else:
            cite = ""
        value = value.replace("\n", "<br>")
        return f'<div class="ui raised segment">{value}{cite}</div>\n'
    def _add_raw_formatter(self):
        self.add_formatter(
            "raw", self._render_raw, replaceLinks=False, replaceCosmetic=False
        )
    # noinspection PyUnusedLocal
    def _render_raw(self, name, value, options, parent, context):
        """Emit the content verbatim, undoing any HTML escaping."""
        return html.unescape(value)
    def _add_meta_formatter(self):
        self.add_formatter(
            "meta", self._render_meta, replaceLinks=False, replaceCosmetic=False
        )
    # noinspection PyUnusedLocal
    @staticmethod
    def _render_meta(name, value, options, parent, context):
        """Keep meta content in the document but hidden from display."""
        return f'<div style="display: none;">{value}</div>\n'
    def _add_url_formatter(self):
        self.add_formatter(
            "url", self._render_url, replaceLinks=False, replaceCosmetic=False
        )
    # noinspection PyUnusedLocal
    @staticmethod
    def _render_url(name, value, options, parent, context):
        """Render [url] / [url=<href>] as an anchor, rejecting unsafe schemes."""
        href = options["url"] if options and "url" in options else value
        if "://" not in href and RE.DOMAIN.match(href):
            href = "http://" + href
        # Completely ignore javascript: and data: "links".
        if re.sub(r"[^a-z0-9+]", "", href.lower().split(":", 1)[0]) in (
            "javascript",
            "data",
            "vbscript",
        ):
            return ""
        if "<" in href or ">" in href:
            return ""
        return '<a href="{}">{}</a>'.format(href.replace('"', "%22"), value)
class Replacer:
    """Plain find/replace tables plus a helper to apply them in order."""
    # Minimal HTML escaping. '&' must come first so it does not re-escape
    # the entities produced by the later rules.
    # BUGFIX: the replacement entities had been collapsed to identity
    # mappings by an HTML-entity decode (leaving a bare quote that broke
    # the syntax); restored the actual entity references.
    HTML_ESCAPE = (
        ("&", "&amp;"),
        ("<", "&lt;"),
        (">", "&gt;"),
        ('"', "&quot;"),
        ("'", "&#39;"),
    )
    # Typographic niceties applied to plain text (runs after HTML escaping).
    COSMETIC = (
        ("---", "&mdash;"),
        ("--", "&ndash;"),
        ("...", "&hellip;"),
        ("(c)", "&copy;"),
        ("(reg)", "&reg;"),
        ("(tm)", "&trade;"),
    )
    @staticmethod
    def replace(data, replacements):
        """
        Given a list of 2-tuples (find, repl) this function performs all
        replacements on the input and returns the result.
        """
        for find, repl in replacements:
            data = data.replace(find, repl)
        return data
# Module-level singleton parser shared by the rest of the application.
ADFD_PARSER = AdfdParser()
if __name__ == "__main__":
    print(ADFD_PARSER)
| 2.625 | 3 |
source/pkgsrc/devel/py-rlp/patches/patch-setup.py | Scottx86-64/dotfiles-1 | 1 | 12762141 | $NetBSD: patch-setup.py,v 1.1 2021/04/11 16:59:36 wiz Exp $
setuptools-markdown is deprecated for functionality included in setuptools.
--- setup.py.orig 2020-11-23 15:09:47.000000000 +0000
+++ setup.py
@@ -52,7 +52,7 @@ setup(
url='https://github.com/ethereum/pyrlp',
packages=find_packages(exclude=["tests", "tests.*"]),
include_package_data=True,
- setup_requires=['setuptools-markdown'],
+ setup_requires=[],
install_requires=[
"eth-utils>=1.0.2,<2",
],
| 0.832031 | 1 |
byceps/services/shop/order/actions/award_badge.py | GyBraLAN/byceps | 0 | 12762142 | <filename>byceps/services/shop/order/actions/award_badge.py
"""
byceps.services.shop.order.actions.award_badge
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2021 <NAME>
:License: Revised BSD (see `LICENSE` file for details)
"""
from .....typing import UserID
from ....user_badge import awarding_service, badge_service
from ....user_badge.transfer.models import BadgeAwarding
from ...article.transfer.models import ArticleNumber
from .. import log_service
from ..transfer.action import ActionParameters
from ..transfer.order import Order, OrderID
def award_badge(
    order: Order,
    article_number: ArticleNumber,
    quantity: int,
    initiator_id: UserID,
    parameters: ActionParameters,
) -> None:
    """Award the configured badge to the user who placed the order.

    One badge awarding (plus an order log entry) is created per unit
    ordered.
    """
    badge = badge_service.get_badge(parameters['badge_id'])
    recipient_id = order.placed_by_id
    for _ in range(quantity):
        awarding, _event = awarding_service.award_badge_to_user(
            badge.id, recipient_id
        )
        _create_order_log_entry(order.id, awarding)
def _create_order_log_entry(order_id: OrderID, awarding: BadgeAwarding) -> None:
    """Record the badge awarding in the order's log."""
    log_service.create_entry(
        'badge-awarded',
        order_id,
        {
            'awarding_id': str(awarding.id),
            'badge_id': str(awarding.badge_id),
            'recipient_id': str(awarding.user_id),
        },
    )
| 1.820313 | 2 |
src/gcj/__init__.py | shang-lin/gcj | 0 | 12762143 | from .codejam import CodeJam
from .utils import CodeJamUtils
__all__ = ['CodeJam', 'CodeJamUtils'] | 1.046875 | 1 |
psx/_dump_/6/_dump_ida_/overlay_c/set_funcs.py | maoa3/scalpel | 15 | 12762144 | <filename>psx/_dump_/6/_dump_ida_/overlay_c/set_funcs.py
# Apply recovered C prototypes to this overlay's functions.
#
# The generated script originally repeated del_items()/SetType() pairs
# inline.  The pairs are kept here as (address, prototype) data and replayed
# in the original order, which preserves behavior exactly while making the
# listing easier to scan, diff, and extend.
_OVERLAY_C_FUNC_TYPES = (
    (0x801234F4, "void GameOnlyTestRoutine__Fv()"),
    (0x801234FC, "int vecleny__Fii(int a, int b)"),
    (0x80123520, "int veclenx__Fii(int a, int b)"),
    (0x8012354C, "void GetDamageAmt__FiPiT1(int i, int *mind, int *maxd)"),
    (0x80123B44, "int CheckBlock__Fiiii(int fx, int fy, int tx, int ty)"),
    (0x80123C2C, "int FindClosest__Fiii(int sx, int sy, int rad)"),
    (0x80123DC8, "int GetSpellLevel__Fii(int id, int sn)"),
    (0x80123E3C, "int GetDirection8__Fiiii(int x1, int y1, int x2, int y2)"),
    (0x80124058, "void DeleteMissile__Fii(int mi, int i)"),
    (0x801240B0, "void GetMissileVel__Fiiiiii(int i, int sx, int sy, int dx, int dy, int v)"),
    (0x8012420C, "void PutMissile__Fi(int i)"),
    (0x80124310, "void GetMissilePos__Fi(int i)"),
    (0x80124438, "void MoveMissilePos__Fi(int i)"),
    (0x801245A0, "unsigned char MonsterTrapHit__FiiiiiUc(int m, int mindam, int maxdam, int dist, int t, int shift)"),
    (0x80124914, "unsigned char MonsterMHit__FiiiiiiUc(int pnum, int m, int mindam, int maxdam, int dist, int t, int shift)"),
    (0x80125074, "unsigned char PlayerMHit__FiiiiiiUcUc(int pnum, int m, int dist, int mind, int maxd, int mtype, int shift, int earflag)"),
    (0x80125AE0, "unsigned char Plr2PlrMHit__FiiiiiiUc(int pnum, int p, int mindam, int maxdam, int dist, int mtype, int shift)"),
    (0x801262BC, "void CheckMissileCol__FiiiUciiUc(int i, int mindam, int maxdam, unsigned char shift, int mx, int my, int nodel)"),
    (0x801269FC, "unsigned char GetTableValue__FUci(unsigned char code, int dir)"),
    (0x80126A90, "void SetMissAnim__Fii(int mi, int animtype)"),
    (0x80126B60, "void SetMissDir__Fii(int mi, int dir)"),
    (0x80126BA4, "void AddLArrow__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x80126D84, "void AddArrow__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x80126F40, "void GetVileMissPos__Fiii(int mi, int dx, int dy)"),
    (0x80127064, "void AddRndTeleport__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x801273D4, "void AddFirebolt__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int micaster, int id, int dam)"),
    (0x80127640, "void AddMagmaball__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x80127754, "void AddTeleport__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8012794C, "void AddLightball__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x80127AA0, "void AddFirewall__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x80127C88, "void AddFireball__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x80127EE4, "void AddLightctrl__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x80127FCC, "void AddLightning__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x80128194, "void AddMisexp__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x801283A0, "unsigned char CheckIfTrig__Fii(int x, int y)"),
    (0x80128484, "void AddTown__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x801288A8, "void AddFlash__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x80128AB8, "void AddFlash2__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x80128CAC, "void AddManashield__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x80128D74, "void AddFiremove__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x80128ED0, "void AddGuardian__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8012933C, "void AddChain__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x80129398, "void AddRhino__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x80129554, "void AddFlare__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8012984C, "void AddAcid__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x80129950, "void AddAcidpud__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x80129A28, "void AddStone__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x80129D20, "void AddGolem__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x80129ED8, "void AddBoom__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x80129F6C, "void AddHeal__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8012A194, "void AddHealOther__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8012A1FC, "void AddElement__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8012A428, "void AddIdentify__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8012A4D8, "void AddFirewallC__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8012A788, "void AddInfra__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8012A884, "void AddWave__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8012A908, "void AddNova__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8012AB20, "void AddRepair__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8012ABD0, "void AddRecharge__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8012AC80, "void AddDisarm__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8012ACE8, "void AddApoca__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8012AF24, "void AddFlame__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int seqno)"),
    (0x8012B140, "void AddFlamec__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8012B230, "void AddCbolt__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int micaster, int id, int dam)"),
    (0x8012B424, "void AddHbolt__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int micaster, int id, int dam)"),
    (0x8012B5E4, "void AddResurrect__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8012B658, "void AddResurrectBeam__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8012B6E0, "void AddTelekinesis__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8012B748, "void AddBoneSpirit__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8012B944, "void AddRportal__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8012B9E4, "void AddDiabApoca__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8012BB20, "int AddMissile__Fiiiiiiciii(int sx, int sy, int v1, int v2, int midir, int mitype, int micaster, int id, int v3, int spllvl)"),
    (0x8012BE6C, "int Sentfire__Fiii(int i, int sx, int sy)"),
    (0x8012C050, "void MI_Dummy__Fi(int i)"),
    (0x8012C058, "void MI_Golem__Fi(int i)"),
    (0x8012C2B4, "void MI_SetManashield__Fi(int i)"),
    (0x8012C2F0, "void MI_LArrow__Fi(int i)"),
    (0x8012CAAC, "void MI_Arrow__Fi(int i)"),
    (0x8012CCC8, "void MI_Firebolt__Fi(int i)"),
    (0x8012D394, "void MI_Lightball__Fi(int i)"),
    (0x8012D61C, "void MI_Acidpud__Fi(int i)"),
    (0x8012D72C, "void MI_Firewall__Fi(int i)"),
    (0x8012D9F0, "void MI_Fireball__Fi(int i)"),
    (0x8012E3B4, "void MI_Lightctrl__Fi(int i)"),
    (0x8012E920, "void MI_Lightning__Fi(int i)"),
    (0x8012EA9C, "void MI_Town__Fi(int i)"),
    (0x8012ED40, "void MI_Flash__Fi(int i)"),
    (0x8012F178, "void MI_Flash2__Fi(int i)"),
    (0x8012F3C0, "void MI_Manashield__Fi(int i)"),
    (0x8012F9C8, "void MI_Firemove__Fi(int i)"),
    (0x8012FE04, "void MI_Guardian__Fi(int i)"),
    (0x801301D0, "void MI_Chain__Fi(int i)"),
    (0x801304CC, "void MI_Misexp__Fi(int i)"),
    (0x801307CC, "void MI_Acidsplat__Fi(int i)"),
    (0x80130968, "void MI_Teleport__Fi(int i)"),
    (0x80130D30, "void MI_Stone__Fi(int i)"),
    (0x80130EDC, "void MI_Boom__Fi(int i)"),
    (0x80130FD4, "void MI_Rhino__Fi(int i)"),
    (0x80131380, "void MI_FirewallC__Fi(int i)"),
    (0x801316E8, "void MI_Infra__Fi(int i)"),
    (0x801317A0, "void MI_Apoca__Fi(int i)"),
    (0x80131A34, "void MI_Wave__Fi(int i)"),
    (0x80131F30, "void MI_Nova__Fi(int i)"),
    (0x801321F0, "void MI_Flame__Fi(int i)"),
    (0x801323E8, "void MI_Flamec__Fi(int i)"),
    (0x80132670, "void MI_Cbolt__Fi(int i)"),
    (0x80132974, "void MI_Hbolt__Fi(int i)"),
    (0x80132C80, "void MI_Element__Fi(int i)"),
    (0x80133338, "void MI_Bonespirit__Fi(int i)"),
    (0x80133740, "void MI_ResurrectBeam__Fi(int i)"),
    (0x801337B0, "void MI_Rportal__Fi(int i)"),
    (0x801339D4, "void ProcessMissiles__Fv()"),
    (0x80133DC8, "void ClearMissileSpot__Fi(int mi)"),
    (0x80133E80, "void MoveToScrollTarget__7CBlocks(struct CBlocks *this)"),
    (0x80133E94, "void MonstPartJump__Fi(int m)"),
    (0x80134028, "void DeleteMonster__Fi(int i)"),
    (0x80134060, "int M_GetDir__Fi(int i)"),
    (0x801340BC, "void M_StartDelay__Fii(int i, int len)"),
    (0x80134104, "void M_StartRAttack__Fiii(int i, int missile_type, int dam)"),
    (0x8013421C, "void M_StartRSpAttack__Fiii(int i, int missile_type, int dam)"),
    (0x80134340, "void M_StartSpAttack__Fi(int i)"),
    (0x80134428, "void M_StartEat__Fi(int i)"),
    (0x801344F8, "void M_GetKnockback__Fi(int i)"),
    (0x801346D0, "void M_StartHit__Fiii(int i, int pnum, int dam)"),
    (0x801349C8, "void M_DiabloDeath__FiUc(int i, unsigned char sendmsg)"),
    (0x80134CEC, "void M2MStartHit__Fiii(int mid, int i, int dam)"),
    (0x80134F98, "void MonstStartKill__FiiUc(int i, int pnum, unsigned char sendmsg)"),
    (0x8013526C, "void M2MStartKill__Fii(int i, int mid)"),
    (0x80135634, "void M_StartKill__Fii(int i, int pnum)"),
    (0x80135724, "void M_StartFadein__FiiUc(int i, int md, unsigned char backwards)"),
    (0x80135878, "void M_StartFadeout__FiiUc(int i, int md, unsigned char backwards)"),
    (0x801359C0, "void M_StartHeal__Fi(int i)"),
    (0x80135A40, "void M_ChangeLightOffset__Fi(int monst)"),
    (0x80135AE0, "int M_DoStand__Fi(int i)"),
    (0x80135B48, "int M_DoWalk__Fi(int i)"),
    (0x80135DCC, "int M_DoWalk2__Fi(int i)"),
    (0x80135FB8, "int M_DoWalk3__Fi(int i)"),
    (0x8013627C, "void M_TryM2MHit__Fiiiii(int i, int mid, int hper, int mind, int maxd)"),
    (0x80136444, "void M_TryH2HHit__Fiiiii(int i, int pnum, int Hit, int MinDam, int MaxDam)"),
    (0x80136A60, "int M_DoAttack__Fi(int i)"),
    (0x80136C04, "int M_DoRAttack__Fi(int i)"),
    (0x80136D7C, "int M_DoRSpAttack__Fi(int i)"),
    (0x80136F6C, "int M_DoSAttack__Fi(int i)"),
    (0x80137040, "int M_DoFadein__Fi(int i)"),
    (0x80137110, "int M_DoFadeout__Fi(int i)"),
    (0x80137224, "int M_DoHeal__Fi(int i)"),
    (0x801372D0, "int M_DoTalk__Fi(int i)"),
    (0x8013773C, "void M_Teleport__Fi(int i)"),
    (0x80137970, "int M_DoGotHit__Fi(int i)"),
    (0x801379D0, "void DoEnding__Fv()"),
    (0x80137A8C, "void PrepDoEnding__Fv()"),
    (0x80137BB0, "int M_DoDeath__Fi(int i)"),
    (0x80137D80, "int M_DoSpStand__Fi(int i)"),
    (0x80137E24, "int M_DoDelay__Fi(int i)"),
    (0x80137F14, "int M_DoStone__Fi(int i)"),
    (0x80137F98, "void M_WalkDir__Fii(int i, int md)"),
    (0x801381C0, "void GroupUnity__Fi(int i)"),
    (0x801385AC, "unsigned char M_CallWalk__Fii(int i, int md)"),
    (0x80138798, "unsigned char M_PathWalk__Fi(int i, char plr2monst[9], unsigned char (*Check)())"),
    (0x8013885C, "unsigned char M_CallWalk2__Fii(int i, int md)"),
    (0x80138970, "unsigned char M_DumbWalk__Fii(int i, int md)"),
    (0x801389C4, "unsigned char M_RoundWalk__FiiRi(int i, int md, int *dir)"),
    (0x80138B64, "void MAI_Zombie__Fi(int i)"),
    (0x80138D5C, "void MAI_SkelSd__Fi(int i)"),
    (0x80138EF4, "void MAI_Snake__Fi(int i)"),
    (0x801392D8, "void MAI_Bat__Fi(int i)"),
    (0x80139690, "void MAI_SkelBow__Fi(int i)"),
    (0x80139874, "void MAI_Fat__Fi(int i)"),
    (0x80139A24, "void MAI_Sneak__Fi(int i)"),
    (0x80139E10, "void MAI_Fireman__Fi(int i)"),
    (0x8013A108, "void MAI_Fallen__Fi(int i)"),
    (0x8013A424, "void MAI_Cleaver__Fi(int i)"),
    (0x8013A50C, "void MAI_Round__FiUc(int i, unsigned char special)"),
    (0x8013A978, "void MAI_GoatMc__Fi(int i)"),
    (0x8013A998, "void MAI_Ranged__FiiUc(int i, int missile_type, unsigned char special)"),
    (0x8013ABB8, "void MAI_GoatBow__Fi(int i)"),
    (0x8013ABDC, "void MAI_Succ__Fi(int i)"),
    (0x8013AC00, "void MAI_AcidUniq__Fi(int i)"),
    (0x8013AC24, "void MAI_Scav__Fi(int i)"),
    (0x8013B03C, "void MAI_Garg__Fi(int i)"),
    (0x8013B21C, "void MAI_RoundRanged__FiiUciUc(int i, int missile_type, unsigned char checkdoors, int dam, int lessmissiles)"),
    (0x8013B730, "void MAI_Magma__Fi(int i)"),
    (0x8013B75C, "void MAI_Storm__Fi(int i)"),
    (0x8013B788, "void MAI_Acid__Fi(int i)"),
    (0x8013B7B8, "void MAI_Diablo__Fi(int i)"),
    (0x8013B7E4, "void MAI_RR2__Fiii(int i, int mistype, int dam)"),
    (0x8013BCE4, "void MAI_Mega__Fi(int i)"),
    (0x8013BD08, "void MAI_SkelKing__Fi(int i)"),
    (0x8013C244, "void MAI_Rhino__Fi(int i)"),
    (0x8013C6EC, "void MAI_Counselor__Fi(int i, unsigned char counsmiss[4], int _mx, int _my)"),
    (0x8013CBB8, "void MAI_Garbud__Fi(int i)"),
    (0x8013CD68, "void MAI_Zhar__Fi(int i)"),
    (0x8013CF60, "void MAI_SnotSpil__Fi(int i)"),
    (0x8013D194, "void MAI_Lazurus__Fi(int i)"),
    (0x8013D40C, "void MAI_Lazhelp__Fi(int i)"),
    (0x8013D52C, "void MAI_Lachdanan__Fi(int i)"),
    (0x8013D6BC, "void MAI_Warlord__Fi(int i)"),
    (0x8013D808, "void DeleteMonsterList__Fv()"),
    (0x8013D924, "void ProcessMonsters__Fv()"),
    (0x8013DF00, "unsigned char DirOK__Fii(int i, int mdir)"),
    (0x8013E2E8, "unsigned char PosOkMissile__Fii(int x, int y)"),
    (0x8013E350, "unsigned char CheckNoSolid__Fii(int x, int y)"),
    (0x8013E394, "unsigned char LineClearF__FPFii_Uciiii(unsigned char (*Clear)(), int x1, int y1, int x2, int y2)"),
    (0x8013E61C, "unsigned char LineClear__Fiiii(int x1, int y1, int x2, int y2)"),
    (0x8013E65C, "unsigned char LineClearF1__FPFiii_Uciiiii(unsigned char (*Clear)(), int monst, int x1, int y1, int x2, int y2)"),
    (0x8013E8F0, "void M_FallenFear__Fii(int x, int y)"),
    (0x8013EAC0, "void PrintMonstHistory__Fi(int mt)"),
    (0x8013ECE8, "void PrintUniqueHistory__Fv()"),
    (0x8013EE0C, "void MissToMonst__Fiii(int i, int x, int y)"),
    (0x8013F288, "unsigned char PosOkMonst2__Fiii(int i, int x, int y)"),
    (0x8013F4A4, "unsigned char PosOkMonst3__Fiii(int i, int x, int y)"),
    (0x8013F798, "int M_SpawnSkel__Fiii(int x, int y, int dir)"),
    (0x8013F8F0, "void TalktoMonster__Fi(int i)"),
    (0x8013FA10, "void SpawnGolum__Fiiii(int i, int x, int y, int mi)"),
    (0x8013FC68, "unsigned char CanTalkToMonst__Fi(int m)"),
    (0x8013FCA0, "unsigned char CheckMonsterHit__FiRUc(int m, unsigned char *ret)"),
    (0x8013FD6C, "void MAI_Golum__Fi(int i)"),
    (0x801400E0, "unsigned char MAI_Path__Fi(int i)"),
    (0x80140244, "void M_StartAttack__Fi(int i)"),
    (0x8014032C, "void M_StartWalk__Fiiiiii(int i, int xvel, int yvel, int xadd, int yadd, int EndDir)"),
    (0x8014048C, "void AddWarpMissile__Fiii(int i, int x, int y)"),
    (0x80140594, "void SyncPortals__Fv()"),
    (0x8014069C, "void AddInTownPortal__Fi(int i)"),
    (0x801406D8, "void ActivatePortal__FiiiiiUc(int i, int x, int y, int lvl, int lvltype, int sp)"),
    (0x80140748, "void DeactivatePortal__Fi(int i)"),
    (0x80140768, "unsigned char PortalOnLevel__Fi(int i)"),
    (0x801407A0, "void RemovePortalMissile__Fi(int id)"),
    (0x8014093C, "void SetCurrentPortal__Fi(int p)"),
    (0x80140948, "void GetPortalLevel__Fv()"),
    (0x80140B14, "void GetPortalLvlPos__Fv()"),
    (0x80140BC8, "void FreeInvGFX__Fv()"),
    (0x80140BD0, "void InvDrawSlot__Fiii(int X, int Y, int Frame)"),
    (0x80140C54, "void InvDrawSlotBack__FiiiiUc(int X, int Y, int W, int H, int Flag)"),
    (0x80140EA8, "void InvDrawItem__FiiiUci(int ItemX, int ItemY, int ItemNo, unsigned char StatFlag, int TransFlag)"),
    (0x80140F78, "void InvDrawSlots__Fv()"),
    (0x8014128C, "void PrintStat__FiiPcUc(int Y, int Txt0, char *Txt1, unsigned char Col)"),
    (0x80141358, "void DrawInvStats__Fv()"),
    (0x80141EE4, "void DrawInvBack__Fv()"),
    (0x80141F6C, "void DrawInvCursor__Fv()"),
    (0x80142448, "void DrawInvMsg__Fv()"),
    (0x80142610, "void DrawInv__Fv()"),
    (0x80142640, "void DrawInvTSK__FP4TASK(struct TASK *T)"),
    (0x80142920, "void DoThatDrawInv__Fv()"),
    (0x80143174, "unsigned char AutoPlace__FiiiiUc(int pnum, int ii, int sx, int sy, int saveflag)"),
    (0x80143490, "unsigned char SpecialAutoPlace__FiiiiUc(int pnum, int ii, int sx, int sy, int saveflag)"),
    (0x80143828, "unsigned char GoldAutoPlace__Fi(int pnum)"),
    (0x80143CF4, "unsigned char WeaponAutoPlace__Fi(int pnum)"),
    (0x80143F7C, "int SwapItem__FP10ItemStructT0(struct ItemStruct *a, struct ItemStruct *b)"),
    (0x8014406C, "void CheckInvPaste__Fiii(int pnum, int mx, int my)"),
    (0x80145CF8, "void CheckInvCut__Fiii(int pnum, int mx, int my)"),
    (0x80146784, "void RemoveInvItem__Fii(int pnum, int iv)"),
    (0x80146A28, "void RemoveSpdBarItem__Fii(int pnum, int iv)"),
    (0x80146B28, "void CheckInvScrn__Fv()"),
    (0x80146BA0, "void CheckItemStats__Fi(int pnum)"),
    (0x80146C24, "void CheckBookLevel__Fi(int pnum)"),
    (0x80146D58, "void CheckQuestItem__Fi(int pnum)"),
    (0x80147180, "void InvGetItem__Fii(int pnum, int ii)"),
    (0x80147478, "void AutoGetItem__Fii(int pnum, int ii)"),
    (0x80147EDC, "int FindGetItem__FiUsi(int idx, unsigned short ci, int iseed)"),
    (0x80147F90, "void SyncGetItem__FiiiUsi(int x, int y, int idx, unsigned short ci, int iseed)"),
    (0x8014811C, "unsigned char TryInvPut__Fv()"),
    (0x801482E4, "int InvPutItem__Fiii(int pnum, int x, int y)"),
    (0x80148788, "int SyncPutItem__FiiiiUsiUciiiiiUl(int pnum, int x, int y, int idx, int icreateinfo, int iseed, int Id, int dur, int mdur, int ch, int mch, int ivalue, unsigned long ibuff)"),
    (0x80148CE4, "char CheckInvHLight__Fv()"),
    (0x80148FF8, "void RemoveScroll__Fi(int pnum)"),
    (0x801491DC, "unsigned char UseScroll__Fv()"),
    (0x80149444, "void UseStaffCharge__FP12PlayerStruct(struct PlayerStruct *ptrplr)"),
    (0x801494AC, "unsigned char UseStaff__Fv()"),
    (0x8014956C, "void StartGoldDrop__Fv()"),
    (0x80149670, "unsigned char UseInvItem__Fii(int pnum, int cii)"),
    (0x80149B98, "void DoTelekinesis__Fv()"),
    (0x80149CC0, "long CalculateGold__Fi(int pnum)"),
    (0x80149DF8, "unsigned char DropItemBeforeTrig__Fv()"),
    (0x80149E50, "void ControlInv__Fv()"),
    (0x8014A1D8, "void InvGetItemWH__Fi(int Pos)"),
    (0x8014A2D0, "void InvAlignObject__Fv()"),
    (0x8014A484, "void InvSetItemCurs__Fv()"),
    (0x8014A618, "void InvMoveCursLeft__Fv()"),
    (0x8014A7F4, "void InvMoveCursRight__Fv()"),
    (0x8014AB0C, "void InvMoveCursUp__Fv()"),
    (0x8014ACF4, "void InvMoveCursDown__Fv()"),
    (0x8014B00C, "void DumpMonsters__7CBlocks(struct CBlocks *this)"),
    (0x8014B034, "void Flush__4CPad(struct CPad *this)"),
    (0x8014B058, "void SetRGB__6DialogUcUcUc(struct Dialog *this, unsigned char R, unsigned char G, unsigned char B)"),
    (0x8014B078, "void SetBack__6Dialogi(struct Dialog *this, int Type)"),
    (0x8014B080, "void SetBorder__6Dialogi(struct Dialog *this, int Type)"),
    (0x8014B088, "int SetOTpos__6Dialogi(struct Dialog *this, int OT)"),
    (0x8014B094, "void ___6Dialog(struct Dialog *this, int __in_chrg)"),
    (0x8014B0BC, "struct Dialog *__6Dialog(struct Dialog *this)"),
    (0x8014B118, "void StartAutomap__Fv()"),
    (0x8014B130, "void AutomapUp__Fv()"),
    (0x8014B148, "void AutomapDown__Fv()"),
    (0x8014B160, "void AutomapLeft__Fv()"),
    (0x8014B178, "void AutomapRight__Fv()"),
    (0x8014B190, "struct LINE_F2 *AMGetLine__FUcUcUc(unsigned char R, unsigned char G, unsigned char B)"),
    (0x8014B23C, "void AmDrawLine__Fiiii(int x0, int y0, int x1, int y1)"),
    (0x8014B2A4, "void DrawAutomapPlr__Fv()"),
    (0x8014B61C, "void DrawAutoMapVertWall__Fiii(int X, int Y, int Length)"),
    (0x8014B6C4, "void DrawAutoMapHorzWall__Fiii(int X, int Y, int Length)"),
    (0x8014B76C, "void DrawAutoMapVertDoor__Fii(int X, int Y)"),
    (0x8014B8E4, "void DrawAutoMapHorzDoor__Fii(int X, int Y)"),
    (0x8014BA64, "void DrawAutoMapVertGrate__Fii(int X, int Y)"),
    (0x8014BAF8, "void DrawAutoMapHorzGrate__Fii(int X, int Y)"),
    (0x8014BB8C, "void DrawAutoMapSquare__Fii(int X, int Y)"),
    (0x8014BCA4, "void DrawAutoMapStairs__Fii(int X, int Y)"),
    (0x8014BE4C, "void DrawAutomap__Fv()"),
    (0x8014C1A8, "void PRIM_GetPrim__FPP7LINE_F2(struct LINE_F2 **Prim)"),
)

for _ea, _prototype in _OVERLAY_C_FUNC_TYPES:
    # Clear any stale item definitions at the address, then apply the
    # demangled prototype -- same call order as the generated original.
    del_items(_ea)
    SetType(_ea, _prototype)
| 1.3125 | 1 |
py/week1.py | k-circle/algo | 1 | 12762145 | def simple(arry, target):
# simple search
arry = [i for i in range(20)]
for i in arry:
print("%d steps" % i)
if i == target:
print("Found %d" % i)
break
def binary_search(arry, target):
    """Binary search for `target` in the sorted list `arry`.

    Prints the step count per probe and announces a match, mirroring the
    original behavior.  Returns the index of `target`, or None when it is
    absent (the original implicitly returned None in every case, so this
    is backward-compatible).
    """
    l = 0                 # left pointer
    r = len(arry) - 1     # right pointer
    step = 0
    while l <= r:
        step += 1
        # Overflow-safe midpoint form (idiomatic carry-over from C).
        mid = l + (r - l) // 2
        print("%d steps" % step)
        if target == arry[mid]:
            print("Found %d" % arry[mid])
            return mid
        elif arry[mid] < target:
            l = mid + 1
        else:
            r = mid - 1
    return None
if __name__ == '__main__':
    # Demo: run both search strategies over the same data set.
    data = [i for i in range(20)]
    wanted = 17
    simple(data, wanted)
    binary_search(data, wanted)
| 3.828125 | 4 |
d06p2.py | cahorn/aoc21 | 0 | 12762146 | <filename>d06p2.py
# Advent of Code day 6 part 2: reuse part 1's helpers (population/fish/stdin
# presumably come from this star-import -- confirm against d06p1).
from d06p1 import *

if __name__ == "__main__":
    # Same simulation as part 1 but over 256 days; initial fish ages are
    # read as comma-separated integers from stdin.
    print(population(256, fish(map(int, stdin.read().split(",")))))
| 2.28125 | 2 |
lm_eval/tasks/logiqa.py | techthiyanes/lm-evaluation-harness | 0 | 12762147 | <filename>lm_eval/tasks/logiqa.py
"""
LogiQA: A Challenge Dataset for Machine Reading Comprehension with Logical Reasoning
https://arxiv.org/pdf/2007.08124.pdf
LogiQA is a dataset for testing human logical reasoning. It consists of 8,678 QA
instances, covering multiple types of deductive reasoning. Results show that state-
of-the-art neural models perform by far worse than human ceiling. The dataset can
also serve as a benchmark for reinvestigating logical AI under the deep learning
NLP setting.
Homepage: https://github.com/lgw863/LogiQA-dataset
"""
import inspect
import lm_eval.datasets.logiqa.logiqa
from lm_eval.base import MultipleChoiceTask
_CITATION = """
@misc{liu2020logiqa,
title={LogiQA: A Challenge Dataset for Machine Reading Comprehension with Logical Reasoning},
author={<NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME>},
year={2020},
eprint={2007.08124},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
class LogiQA(MultipleChoiceTask):
    """Multiple-choice task over the LogiQA logical-reasoning dataset."""

    VERSION = 0
    DATASET_PATH = inspect.getfile(lm_eval.datasets.logiqa.logiqa)
    DATASET_NAME = None

    def has_training_docs(self):
        return True

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
        return True

    def training_docs(self):
        # Processed training docs are cached on first access.
        if self._training_docs is None:
            self._training_docs = [
                self._process_doc(doc) for doc in self.dataset["train"]
            ]
        return self._training_docs

    def validation_docs(self):
        return map(self._process_doc, self.dataset["validation"])

    def test_docs(self):
        return map(self._process_doc, self.dataset["test"])

    def _process_doc(self, doc):
        letters = ['a', 'b', 'c', 'd']

        def build_prompt(source):
            # Rendered layout:
            #   Passage: <passage>
            #   Question: <question>
            #   Choices:
            #   A. <choice1> ... D. <choice4>
            #   Answer:
            lines = ["Passage: " + source["context"]]
            lines.append("Question: " + source["question"])
            lines.append("Choices:")
            for letter, option in zip(letters, source["options"]):
                lines.append(f"{letter.upper()}. {option}")
            lines.append("Answer:")
            return "\n".join(lines)

        return {
            "query": build_prompt(doc),
            "choices": doc["options"],
            "gold": letters.index(doc["label"]),
        }

    def doc_to_text(self, doc):
        return doc["query"]
| 2.359375 | 2 |
Discrepancy Match ToolSQL.py | pjconnolly12/Discrepancy-Match | 1 | 12762148 | import csv
import sqlite3
from tkinter import *
from tkinter import filedialog
"""Tool to compare two reports and provide specific information from matching lines"""
class MatchTool:
    """Tkinter UI that loads traffic reports into SQLite for matching.

    The user picks a traffic system (Novar or Eclipse/XG) and a report
    category (Missing Copy, or Unplaced/Required Spots), then loads a
    discrepancy report plus the matching secondary report.  Each report is
    parsed from CSV and written into a table of ``DiscrepMatch.db``.
    """

    # Labels reused by the checkbox callbacks:
    # [0] Eclipse missing-copy report, [1] Novar missing-copy report,
    # [2] Eclipse unplaced report,     [3] Novar required-spots report.
    UNPLACED_RSL_TEXT = [
        "Copy Required Report",
        "Ad Copy Status Report",
        "Unplaced Spots",
        "Required Spots",
    ]

    def __init__(self, master):
        """Build the widget tree inside *master* (a ``Tk`` root window)."""
        self.master = master
        master.geometry("400x300")
        master.title("Discrepancy Match Tool")

        self.top_frame = Frame(master)
        self.bottom_frame = Frame(master, width=400)

        # System checkboxes (mutually exclusive via the enable* callbacks).
        self.novar_button_var = IntVar()
        self.novar_button_var.set(0)
        self.novar_button = Checkbutton(self.top_frame, variable=self.novar_button_var, command=self.enableNovar)
        self.eclipse_button_var = IntVar()
        self.eclipse_button_var.set(0)
        self.eclipse_button = Checkbutton(self.top_frame, variable=self.eclipse_button_var, command=self.enableEclipse)

        # Report-category checkboxes; disabled until a system is chosen.
        self.missing_button_var = IntVar()
        self.missing_button_var.set(0)
        self.missing_button = Checkbutton(self.top_frame, state=DISABLED, variable=self.missing_button_var, command=self.missingCopy)
        self.unplaced_button_var = IntVar()
        self.unplaced_button_var.set(0)
        self.unplaced_button = Checkbutton(self.top_frame, state=DISABLED, variable=self.unplaced_button_var, command=self.unplacedRSL)

        self.novar_label = Label(self.top_frame, text="Novar")
        self.eclipse_label = Label(self.top_frame, text="Eclipse/XG")
        self.missing_label = Label(self.top_frame, text="Missing Copy")
        self.unplaced_label_text = StringVar()
        self.unplaced_label_text.set("Unplaced or Required Spots")
        # FIX: use the star-imported ``W`` anchor constant directly; the
        # original referenced ``constants.W``, but ``from tkinter import *``
        # does not reliably expose the ``constants`` submodule by name.
        self.unplaced_label = Label(self.top_frame, textvariable=self.unplaced_label_text, width=22, anchor=W)

        self.load_discrep = Button(self.bottom_frame, text="Load Discrepancy Report", width=25, command=self.loadDiscrep)
        self.load_discrep_file_name_text = StringVar()
        self.load_discrep_file_name = Label(self.bottom_frame, textvariable=self.load_discrep_file_name_text)
        self.submit = Button(self.bottom_frame, text="Submit")  # TODO: wire a command that runs the match
        self.load_unplaced_text = StringVar()
        self.load_unplaced_text.set("Load Report")
        self.load_unplaced = Button(self.bottom_frame, textvariable=self.load_unplaced_text, width=25, command=self.loadReports)
        self.load_unplaced_file_name_text = StringVar()
        self.load_unplaced_file_name = Label(self.bottom_frame, textvariable=self.load_unplaced_file_name_text)

        # Layout
        self.top_frame.grid()
        self.bottom_frame.grid(row=1)
        self.novar_button.grid()
        self.eclipse_button.grid(row=1)
        self.missing_button.grid(row=2)
        self.unplaced_button.grid(row=3)
        self.novar_label.grid(row=0, column=1, sticky=W)
        self.eclipse_label.grid(row=1, column=1, sticky=W)
        self.missing_label.grid(row=2, column=1, sticky=W)
        self.unplaced_label.grid(row=3, column=1, sticky=W)
        self.load_discrep.grid(row=0, pady=3, ipadx=5)
        self.load_discrep_file_name.grid(row=1, pady=3, ipadx=5)
        self.load_unplaced.grid(row=2, pady=3, ipadx=5)
        self.load_unplaced_file_name.grid(row=3, pady=3, ipadx=5)

    # --- Checkbox callbacks -------------------------------------------------

    def enableNovar(self):
        """Activates the Missing Copy and Required Spots checkboxes, and disables the Eclipse checkbox."""
        if self.novar_button_var.get() == 1:
            self.eclipse_button["state"] = DISABLED
            self.missing_button["state"] = ACTIVE
            self.unplaced_button["state"] = ACTIVE
            self.unplaced_label_text.set(self.UNPLACED_RSL_TEXT[3])
        else:
            self.eclipse_button["state"] = ACTIVE
            self.missing_button["state"] = DISABLED
            self.unplaced_button["state"] = DISABLED
            self.unplaced_label_text.set("Unplaced or Required Spots")

    def enableEclipse(self):
        """Activates the Missing Copy and Unplaced Spots checkboxes, and disables the Novar checkbox."""
        if self.eclipse_button_var.get() == 1:
            self.novar_button["state"] = DISABLED
            self.missing_button["state"] = ACTIVE
            self.unplaced_button["state"] = ACTIVE
            self.unplaced_label_text.set(self.UNPLACED_RSL_TEXT[2])
        else:
            self.novar_button["state"] = ACTIVE
            self.missing_button["state"] = DISABLED
            self.unplaced_button["state"] = DISABLED
            self.unplaced_label_text.set("Unplaced or Required Spots")

    def missingCopy(self):
        """Lock the category choice to Missing Copy and show the Submit button."""
        if self.missing_button_var.get() == 1:
            self.unplaced_button["state"] = DISABLED
            self.submit.grid(row=4, pady=5)
            if self.novar_button_var.get() == 1:
                self.load_unplaced_text.set("Load " + self.UNPLACED_RSL_TEXT[1])
            elif self.eclipse_button_var.get() == 1:
                self.load_unplaced_text.set("Load " + self.UNPLACED_RSL_TEXT[0])
        else:
            self.unplaced_button["state"] = ACTIVE
            self.load_unplaced_text.set("Load Report")
            self.submit.grid_forget()

    def unplacedRSL(self):
        """Lock the category choice to Unplaced/Required Spots and show the Submit button."""
        if self.unplaced_button_var.get() == 1:
            self.missing_button["state"] = DISABLED
            self.submit.grid(row=4, pady=5)
            if self.novar_button_var.get() == 1:
                self.load_unplaced_text.set("Load " + self.UNPLACED_RSL_TEXT[3])
            elif self.eclipse_button_var.get() == 1:
                self.load_unplaced_text.set("Load " + self.UNPLACED_RSL_TEXT[2])
        else:
            self.missing_button["state"] = ACTIVE
            self.load_unplaced_text.set("Load Report")
            self.submit.grid_forget()

    # --- CSV pre-processing -------------------------------------------------

    @staticmethod
    def _to_12_hour(time_str):
        """Convert an ``H:MM`` / ``HH:MM`` 24-hour time string to 12-hour AM/PM.

        FIX: the original conversion tagged the noon hour (12:xx) as "AM" and
        could not handle hour 0; zero padding on the hour is dropped, matching
        the original RSL behaviour.
        """
        colon = time_str.index(":")
        hour = int(time_str[:colon])
        minutes = time_str[colon:]
        if hour == 0:
            return "12" + minutes + " AM"
        if hour < 12:
            return str(hour) + minutes + " AM"
        if hour == 12:
            return "12" + minutes + " PM"
        return str(hour - 12) + minutes + " PM"

    def unplacedEdit(self, loaded_file):
        """Read an Unplaced Spots CSV and append Date/Time columns.

        The first (title) row is dropped, the header row gains ``Date`` and
        ``Time`` columns, and every data row has its column-1 value
        (``<date> <24h-time>``) split into a date string and a 12-hour
        AM/PM time string.

        Returns:
            list[list[str]]: the edited rows, header first.
        """
        with open(loaded_file, newline="") as csv_file:
            unplaced_list = list(csv.reader(csv_file))
        unplaced_list.pop(0)  # discard the report title row
        unplaced_list[0].extend(['Date', 'Time'])
        for row in unplaced_list[1:]:
            date_time = row[1].split(' ')
            row.append(date_time[0])
            row.append(self._to_12_hour(date_time[1]))
        return unplaced_list

    def rslEdit(self, loaded_file):
        """Read an RSL CSV and normalise its date and time columns.

        Columns 16/17 hold ``<value>-...`` ranges; the part before the dash
        is appended as a new date (expanded to a four-digit year) and time.
        Column 31 is then converted from 24-hour to 12-hour AM/PM text.

        Returns:
            list[list[str]]: the edited rows, header first.
        """
        with open(loaded_file, newline="") as csv_file:
            rsl_list = list(csv.reader(csv_file))
        rsl_list[0].extend(['Date', 'Time'])
        for row in rsl_list[1:]:
            date = row[16]
            date = date[:date.index("-")]
            new_date = date.split('/')
            new_date[2] = "20" + new_date[2]  # expand two-digit year
            date = new_date[0] + '/' + new_date[1] + '/' + new_date[2]
            time = row[17]
            time = time[:time.index("-")]
            row.append(date)
            row.append(time)
        for row in rsl_list[1:]:
            # Column 31 holds the 24-hour start time; convert to AM/PM.
            row[31] = self._to_12_hour(row[31])
        return rsl_list

    def copyRequiredEdit(self, loaded_file):
        """Read a Copy Required CSV and drop the leading report-title row."""
        with open(loaded_file, newline="") as csv_file:
            cr_list = list(csv.reader(csv_file))
        cr_list.pop(0)
        return cr_list

    # --- SQLite loading -----------------------------------------------------
    # Note: ``with sqlite3.connect(...)`` commits the transaction on success;
    # it does not close the connection (same as the original code).

    def discrepancyDB(self, discrepancy):
        """Load the discrepancy CSV at path *discrepancy* into ``discrepancy1``.

        FIX: the original used ``open(path, "rU")`` — the ``U`` mode was
        removed in Python 3.11 — and never closed the file handle.
        NOTE(review): every CSV row is inserted, so a header row (if any)
        lands in the table as data — same as the original; confirm intended.
        """
        with sqlite3.connect("DiscrepMatch.db") as connection, \
                open(discrepancy, newline="") as csv_file:
            c = connection.cursor()
            discrep = csv.reader(csv_file)
            c.execute("DROP TABLE if exists discrepancy1")
            c.execute("""CREATE TABLE discrepancy1(Discrepancy TEXT, Reservation TEXT, Event TEXT, Episode TEXT, DateOf TEXT, Start TEXT,
                Market TEXT, Zone TEXT, Network TEXT, ClientID INT, ClientName TEXT, ContractID TEXT, Rate TEXT, AE TEXT, Modified TEXT,
                ModifiedBy TEXT)""")
            c.executemany("""INSERT INTO discrepancy1(Discrepancy, Reservation, Event, Episode, DateOf, Start, Market, Zone, Network,
                ClientID, ClientName, ContractID, Rate, AE, Modified, ModifiedBy) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)""", discrep)

    def adCopyDB(self, ad_copy):
        """Load the Ad Copy Status CSV at path *ad_copy* into ``AdCopyStatus``.

        FIX: same ``"rU"`` / unclosed-handle repair as :meth:`discrepancyDB`.
        """
        with sqlite3.connect("DiscrepMatch.db") as connection, \
                open(ad_copy, newline="") as csv_file:
            c = connection.cursor()
            adCopyStatus = csv.reader(csv_file)
            c.execute("DROP TABLE if exists AdCopyStatus")
            c.execute("""CREATE TABLE AdCopyStatus(ClientID INT, ClientName TEXT, AdCopyID INT, CutName TEXT, CutStart TEXT, CutStop TEXT, Reason TEXT)""")
            c.executemany("""INSERT INTO AdCopyStatus(ClientID, ClientName, AdCopyID, CutName, CutStart, CutStop, Reason) values (?, ?, ?, ?, ?, ?, ?)""", adCopyStatus)

    def copyRequiredDB(self, copy_required):
        """Load pre-edited Copy Required rows (list of lists) into ``copyrequired``."""
        with sqlite3.connect("DiscrepMatch.db") as connection:
            c = connection.cursor()
            c.execute("DROP TABLE if exists copyrequired")
            c.execute("""CREATE TABLE copyrequired(ClientID TEXT, ClientName TEXT, Rotation INT, RotDesc INT, SalesID INT, AE TEXT, SalOffID TEXT,
                SalOff TEXT, OrderNum TEXT, Networks TEXT, Regions TEXT, TotalRev TEXT, AvgPrty INT, DateNeed TEXT, Issue TEXT)""")
            c.executemany("""INSERT INTO copyrequired(ClientID, ClientName, Rotation, RotDesc, SalesID, AE, SalOffID, SalOff, OrderNum, Networks,
                Regions, TotalRev, AvgPrty, DateNeed, Issue) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)""", copy_required)

    def rslDB(self, rsl_report):
        """Load pre-edited RSL rows (list of lists) into ``RSL``."""
        with sqlite3.connect("DiscrepMatch.db") as connection:
            c = connection.cursor()
            c.execute("DROP TABLE if exists RSL")
            c.execute("""CREATE TABLE RSL(AE TEXT, Priority INT, ClientID INT, Client TEXT, ConID INT, LineNum INT, Zone TEXT, Network TEXT, DaysAuth TEXT,
                Mon INT, Tue INT, Wed INT, Thu INT, Fri INT, Sat INT, Sun INT, OldDates TEXT, Daypart TEXT, CGName TEXT, Total INT, Normal INT,
                Sched INT, Aired INT, ToDO INT, FinalWeek TEXT, Length INT, Program TEXT, Cost INT, LostRev INT, RD INT, NewDate TEXT, NewTime TEXT)""")
            c.executemany("""INSERT INTO RSL(AE, Priority, ClientID, Client, ConID, LineNum, Zone, Network, DaysAuth, Mon, Tue, Wed, Thu, Fri, Sat, Sun,
                OldDates, Daypart, CGName, Total, Normal, Sched, Aired, ToDO, FinalWeek, Length, Program, Cost, LostRev, RD, NewDate, NewTime)
                values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)""", rsl_report)

    def unplacedDB(self, unplaced_report):
        """Load pre-edited Unplaced Spots rows (list of lists) into ``unplacedSpots``."""
        with sqlite3.connect("DiscrepMatch.db") as connection:
            c = connection.cursor()
            c.execute("DROP TABLE if exists unplacedSpots")
            c.execute("""CREATE TABLE unplacedSpots(OrderNum INT, OldDate TEXT, SpotName TEXT, Length INT, Description TEXT, Network TEXT, ClientID INT,
                Client TEXT, Phone TEXT, Initials TEXT, Rotation INT, Active TEXT, UCType TEXT, Retail INT, InvType TEXT, Billing TEXT, Market TEXT,
                Zone TEXT, Priority INT, Buy1 INT, BuyType TEXT, SpotsWeek INT, SpotsLine INT, MonAct TEXT, MonQua INT, TueAct TEXT, TueQua INT,
                WedAct TEXT, WedQua INT, ThuAct TEXT, ThuQua INT, FriAct TEXT, FriQua INT, SatAct TEXT, SatQua INT, SunAct TEXT, SunQua INT, Buy2 INT,
                Exception TEXT, Daypart TEXT, Entity TEXT, LineType TEXT, LineNum INT, OfficeID TEXT, Description2 TEXT, Name TEXT, OfficeName TEXT,
                Exception2 TEXT, Uniform TEXT, LineNum2 INT, "Group" INT, EndDate TEXT, Orbits TEXT, NewDate TEXT, NewTime TEXT)""")
            c.executemany("""INSERT INTO unplacedSpots(OrderNum, OldDate, SpotName, Length, Description, Network, ClientID, Client, Phone, Initials, Rotation,
                Active, UCType, Retail, InvType, Billing, Market, Zone, Priority, Buy1, BuyType, SpotsWeek, Spotsline, MonAct, MonQua, TueAct, TueQua,
                WedAct, WedQua, ThuAct, ThuQua, FriAct, FriQua, SatAct, SatQua, SunAct, SunQua, Buy2, Exception, Daypart, Entity, LineType, LineNum, OfficeID,
                Description2, Name, OfficeName, Exception2, Uniform, LineNum2, "Group", EndDate, Orbits, NewDate, NewTime) values (?, ?, ?, ?,
                ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?,
                ?, ?, ?, ?, ?, ?, ?, ?)""", unplaced_report)

    # --- File pickers -------------------------------------------------------

    def loadDiscrep(self):
        """Prompt for the discrepancy CSV and load it into the database."""
        discrepReport = filedialog.askopenfilename(
            filetypes=[("CSV File", "*.csv"), ("All Files", "*.*")]
        )
        if not discrepReport:
            return
        self.load_discrep_file_name_text.set("Discrepancy Report loaded successfully")
        self.discrepancyDB(discrepReport)

    def _loadReport(self, success_message, edit_fn, db_fn):
        """Prompt for a CSV, optionally pre-process it, then load it into SQLite.

        Args:
            success_message (str): label text shown after a file is chosen.
            edit_fn: pre-processing method taking a path, or ``None`` to pass
                the raw path straight to *db_fn*.
            db_fn: database-loading method.
        """
        path = filedialog.askopenfilename(
            filetypes=[("CSV File", "*.csv"), ("All Files", "*.*")]
        )
        if not path:
            return
        self.load_unplaced_file_name_text.set(success_message)
        data = path if edit_fn is None else edit_fn(path)
        db_fn(data)

    def loadReports(self):
        """Dispatch the secondary-report load based on the checkbox state."""
        if self.eclipse_button_var.get() == 1 and self.missing_button_var.get() == 1:
            self._loadReport("Copy Required loaded successfully",
                             self.copyRequiredEdit, self.copyRequiredDB)
        elif self.novar_button_var.get() == 1 and self.missing_button_var.get() == 1:
            # AdCopyStatus needs no pre-processing; the DB loader opens the path.
            self._loadReport("AdCopyStatus Report loaded successfully",
                             None, self.adCopyDB)
        elif self.eclipse_button_var.get() == 1 and self.unplaced_button_var.get() == 1:
            self._loadReport("Unplaced Spots Report loaded successfully",
                             self.unplacedEdit, self.unplacedDB)
        elif self.novar_button_var.get() == 1 and self.unplaced_button_var.get() == 1:
            self._loadReport("Required Spots loaded successfully",
                             self.rslEdit, self.rslDB)
# Add functionality for the Submit button: finds the matches between the two db's opened up and returns them as CSV
# Should I use :memory: or actual db's?
# Will :memory: work once the function is over? Won't it close the db being used?
# How can I write back to a CSV?
# Format the tool better
#Remove checks if button gets disabled
# Launch the Tk main window, attach the tool, and enter the event loop.
root = Tk()
interface = MatchTool(root)
root.mainloop()
| 2.8125 | 3 |
main.py | matolszew/identification_p1 | 0 | 12762149 | import argparse
import numpy as np
from scipy.io import wavfile
from tqdm import trange
from ar_model import ARmodel
def correctSignal(signal, model, window_size, pred_size, step, treshold=3):
    """Detect and repair impulse disturbances in *signal* using an AR model.

    The signal is scanned with a sliding window: the model coefficients are
    re-estimated on ``window_size`` samples, the next ``pred_size`` samples
    are predicted, and any sample whose prediction error exceeds
    ``treshold`` standard deviations of the window's error is treated as
    disturbed.  Each burst of disturbed samples is replaced by linear
    interpolation between the surrounding original samples.

    Args:
        signal (np.array): signal to correct
        model (ARmodel): autoregressive model exposing ``updateParams``,
            ``estimateSignal`` and its order attribute ``r``
        window_size (int): length of the window for updating AR model coefs
        pred_size (int): number of samples to generate from AR model
        step (int): step interval between successive windows
        treshold (float): how many times error have to be bigger then standard
            deviation to classify sample as disturbed

    Returns:
        np.array: corrected copy of the input signal
    """
    # The progress bar is an optional nicety; fall back to a plain range
    # when tqdm is not installed.
    try:
        from tqdm import trange
    except ImportError:
        trange = range

    out = np.copy(signal)
    # BUG FIX: the original iterated over the module-level ``input`` array
    # (a global set in __main__, shadowing the builtin) instead of the
    # ``signal`` parameter, making the function unusable on its own.
    for i in trange(0, signal.shape[0] - window_size - pred_size, step):
        params_end = i + window_size
        pred_end = params_end + pred_size
        model.updateParams(out[i:params_end])
        estimated = model.estimateSignal(pred_size, out[params_end - model.r:params_end])
        err = np.abs(out[params_end:pred_end] - estimated)
        std = np.std(err)
        disturbed = err > std * treshold

        disturbance_length = 0
        for j in range(pred_size):
            if disturbed[j]:
                disturbance_length += 1
            elif disturbance_length > 0:
                # First clean sample after a burst: bridge the gap with a
                # straight line between the neighbouring original samples.
                k = j + params_end
                before = signal[k - disturbance_length - 1]
                after = signal[k]
                out[k - disturbance_length:k] = np.linspace(
                    before, after, disturbance_length + 2)[1:-1]
                disturbance_length = 0
    return out
if __name__ == '__main__':
    # CLI entry point: read a WAV file, run AR-model-based impulse-noise
    # correction, and write the corrected audio back out.
    parser = argparse.ArgumentParser(description="Removing impulse interference from music recordings")
    parser.add_argument('filename', metavar='filename', type=str, help='path to wave file')
    parser.add_argument('-r', '--order', type=int, default=4, help='order of AR model')
    parser.add_argument('-o', '--out_file', type=str, default='out.wav', help='name of the output file')
    parser.add_argument('-u', '--param_window', type=int, default=256, help='length of the window for updating AR model coefs')
    # NOTE(review): '--pred_widnow' misspells 'window'; kept as-is so the
    # published CLI flag does not change.
    parser.add_argument('-e', '--pred_widnow', type=int, default=8, help='number of samples to generate from AR model')
    parser.add_argument('-s', '--step', type=int, default=4, help='step interval')
    parser.add_argument('-d', '--decay', type=float, default=1.0, help='decay rate for exponential window')
    parser.add_argument('-m', '--max_std', type=float, default=3.0, help='how many times error have to be bigger then standard deviation to classify sample as disturbed')
    args = parser.parse_args()

    fs, input = wavfile.read(args.filename)
    # Normalise 16-bit PCM samples to floats in [-1, 1).
    # NOTE(review): 'input' shadows the builtin, and correctSignal's loop
    # reads this global — the name must stay until that coupling is removed.
    input = input / 2**15
    model = ARmodel(args.order, args.decay)
    output = correctSignal(input, model, args.param_window, args.pred_widnow, args.step, args.max_std)
    wavfile.write(args.out_file, fs, output)
| 2.8125 | 3 |
examples/views.py | infosmith/scripted | 0 | 12762150 | <reponame>infosmith/scripted
"""Github automation."""
import scripted
from .helpers import GithubPublicAPI
script = scripted.Script()
class TerminalView(script.View):
    """Render controller output on the terminal."""

    def options(self, resources):
        """Print a 1-based numbered list of resource names to stdout."""
        for number, resource in enumerate(resources, start=1):
            prefix = " " + str(number).ljust(2)
            self.fn.print(prefix, resource['name'])
@script.add_controller
class Github(script.Controller):
    """Github convenience."""

    git = GithubPublicAPI()
    view = TerminalView()

    @script.argument('-r', '--release', help='release to be download')
    @script.argument('repo', dest='repo', help='user/repo formatted repository')
    def releases(self):
        """Download a release of the given repository, prompting if none was passed."""
        release = self.args.release
        if not release:
            # No release on the command line: list what is available and
            # let the user pick one interactively.
            available = self.git.releases(self.args.repo)
            self.view.options(available)
            release = self.view.fn.prompt(' Select release > ')
        self.git.download(self.args.repo, release)
if __name__ == '__main__':
    # Dispatch command-line arguments through the scripted framework.
    script.execute()
| 2.828125 | 3 |