#
# Copyright 2020 XEBIALABS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import os.path
import sys
import logging
import json
from datetime import datetime, timedelta
import java.text.SimpleDateFormat as SDF
import com.xebialabs.xlrelease.api.v1.forms.CreateRelease as CreateRelease
from com.xebialabs.xlrelease.api.v1 import ReleaseApi
from com.xebialabs.xlrelease.api.v1 import TemplateApi
from com.xebialabs.xlrelease.api.v1 import ConfigurationApi
def getValueFromMap(theKey, theType, configurationApi):
logging.debug('in getValueFromMap: theKey = {}, theType = "{}"'.format(theKey,theType))
global_vars = configurationApi.globalVariables
theValue = None
tpl_map = None
for gv in global_vars:
if gv.key == u'global.releaseTriggerMap':
tpl_map = gv.value
break
if tpl_map is None:
raise Exception('You must define the global variable "releaseTriggerMap" to map keys to template IDs and/or folder IDs')
logging.debug('found the global variable releaseTriggerMap')
for k,v in tpl_map.iteritems():
if theKey == k:
theValue = v
break
logging.debug('returning value {}'.format(theValue))
return theValue
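# For reference, a hypothetical shape of the "global.releaseTriggerMap" variable
# this function reads (keys and IDs below are illustrative only):
#   {"payments-template": "Applications/FolderPayments/Release1234567",
#    "payments-folder": "Applications/FolderPayments"}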
def post_release(shouldStart, request, releaseApi, templateApi, configurationApi, response):
logging.debug("post_release: start")
logging.debug("request.entity = {}".format(request.entity))
logging.debug("shouldStart = {}".format(shouldStart))
# validate request
if not 'template_id' in request.entity and not 'template_key' in request.entity and not 'template_title' in request.entity:
raise Exception('Either "template_id" or "template_key" or "template_title" is required in the request body')
if not 'release_title' in request.entity:
raise Exception('"release_title" is required in the request body')
if 'autoStart' in request.entity and request.entity['autoStart'] and not 'scheduledStartDate' in request.entity:
raise Exception('if autoStart is set to true, "scheduledStartDate" is required in the request body')
# determine template to use
template_id = None
templateRefInRequest = None
if 'template_id' in request.entity:
templateRefInRequest = "template_id = %s" % request.entity['template_id']
template_id = request.entity['template_id']
elif 'template_key' in request.entity:
templateRefInRequest = "template_key = %s" % request.entity['template_key']
template_id = getValueFromMap(request.entity['template_key'], "template", configurationApi)
elif 'template_title' in request.entity:
# find template_id from template_title
templateRefInRequest = "template_title = %s" % request.entity['template_title']
tpl_list = templateApi.getTemplates(request.entity['template_title'], None, 0, 5, 5)
if len(tpl_list) > 0:
template_id = tpl_list[0].id
logging.debug("Template Reference in request = {}".format(templateRefInRequest))
if template_id is None:
raise Exception('No template "{}" was found'.format(templateRefInRequest))
logging.info('creating release from template "{}"'.format(template_id))
tpl = templateApi.getTemplate(template_id)
# get variables in template
tpl_vars = tpl.variables
rel_vars = {}
# create map of variables defined by post entity
logging.debug('template vars ----')
for v in tpl_vars:
logging.debug(' '+v.key)
if v.key in request.entity:
logging.debug(' value='+request.entity[v.key])
rel_vars[v.key] = request.entity[v.key]
# build the CreateRelease form
releaseProperties = CreateRelease()
# required properties
releaseProperties.releaseTitle = request.entity['release_title']
releaseProperties.variables = rel_vars
releaseProperties.autoStart = False
# optional properties
if 'scheduledStartDate' in request.entity:
releaseProperties.scheduledStartDate = SDF("MM/dd/yyyy").parse(request.entity['scheduledStartDate'])
if 'autoStart' in request.entity and request.entity['autoStart']:
releaseProperties.autoStart = request.entity['autoStart']
# determine folder to use
folder_id = None
if 'folder_id' in request.entity:
folder_id = request.entity['folder_id']
elif 'folder_key' in request.entity:
folder_id = getValueFromMap(request.entity['folder_key'], "folder", configurationApi)
if folder_id is not None:
releaseProperties.folderId = folder_id
logging.info('will put release in folder "{}"'.format(folder_id))
# create the release
logging.debug('creating release ----')
release = templateApi.create(tpl.id, releaseProperties)
logging.debug(' '+release.id)
# after release is created
if 'description' in request.entity:
release.description = request.entity['description']
if 'dueDate' in request.entity:
release.dueDate = SDF("MM/dd/yyyy").parse(request.entity['dueDate'])
if 'owner' in request.entity:
release.owner = request.entity['owner']
if 'tags' in request.entity:
release.tags = request.entity['tags']
releaseApi.updateRelease(release)
logging.debug("The newly created release id is {} ".format(release.id))
# determine whether or not to start release immediately
if shouldStart:
logging.info('starting release "{}"'.format(release.id))
releaseApi.start(release.id)
# form response
response.statusCode = 201
response.entity = {"release_id": release.id, "template_id": tpl.id}
logging.debug("post_release: end")
|
# coding: utf-8
import warnings
from traitlets.config.application import catch_config_error
from .releaseassignmentapp import ReleaseAssignmentApp
class ReleaseApp(ReleaseAssignmentApp):
@catch_config_error
def initialize(self, argv=None):
super(ReleaseApp, self).initialize(argv=argv)
msg = (
"`nbgrader release` is now deprecated, please use `nbgrader "
"release_assignment` instead. This command will be removed in "
"a future version of nbgrader.")
warnings.warn(msg, DeprecationWarning)
self.log.warning(msg)
|
def hw1p3b():
from sklearn.datasets import load_boston
import numpy as np
from numpy import linalg as linalg
boston = load_boston()
b_data = boston.data
b_target = boston.target
t0 = np.median(b_target)
divider = 406
x_train = b_data[:divider,:] # N*D
bDataTestS = b_data[divider:,:]
target = b_target[0:divider]
bTargetTestS = b_target[divider:]
class1 = target <= t0
class2 = target > t0
N1 = sum(class1)
N2 = sum(class2)
m1 = np.mean(x_train[class1],0)[None,:] #1*D
m2 = np.mean(x_train[class2],0)[None,:]
m = np.mean(x_train,0)[None,:]
#################################################
Sw = (x_train[class1]-m1).transpose().dot( (x_train[class1]-m1) ) + (x_train[class2]-m2).transpose().dot( (x_train[class2]-m2) )
# Sb1 = N1*(m1-m).transpose().dot( (m1-m) ) + N2*(m2-m).transpose().dot( (m2-m) )
Sb = N1*N2/(N1+N2) * np.dot(np.transpose(m1-m2),(m1-m2))
# print('allclose',np.allclose(Sb,Sb1))
print('Sw.shape = ',Sw.shape)
print('Sb.shape = ', Sb.shape)
A = np.dot(linalg.inv(Sw),Sb)
lam,vec = linalg.eig( A )
w = vec[:,0:2] # D*2
# test eigen vector vs Sw^-1(m1-m2):
w1 = np.dot(linalg.inv(Sw),(m1-m2).transpose())
w1 = w1/linalg.norm(w1)
print('check w ~ w1: ',np.allclose(w1,w[:,:1]))
print('w2:',w[:,1:])
print('eigen_values:',lam.real)
print('rank(Sw^-1.Sb)', linalg.matrix_rank(A))
# Histogram the projected points:
import matplotlib.pyplot as plt
classes = [class1,class2]
f1 = plt.figure()
for cl,labName in zip(classes,"01"):
proj_data = np.dot(x_train[cl],w)
x,y = np.transpose(proj_data)
plt.scatter(x, y, label=("class" + labName), cmap=plt.get_cmap('gist_rainbow'))
plt.legend()
# Check that when w is 1D, the projection reduces to the direct 1D solution, i.e. Sw^-1*(m2-m1):
print("Check whether the projection reduces to the direct 1D result, i.e. Sw^-1*(m2-m1):")
w = -vec[:,:1] # w = vec[:,:1]; w.shape = (13,1); w = vec[:,0]; w.shape = (13,)
# print('vec.shape',vec.shape)
# f2 = plt.figure()
# for cl,labName in zip(classes,"01"):
# proj_data = np.dot(x_train[cl],w)
# # x,y = np.transpose(proj_data)
# x = np.transpose(proj_data)
# y = np.zeros(x.shape)
# plt.scatter(x-np.dot(m,w),y, label = ("class"+labName), cmap = plt.get_cmap('gist_rainbow'))
# plt.legend()
# plt.vlines(0,0,1)
# # plt.show()
def classifier(x,win,mid):
# x is N*D data matrix
# x = x.reshape((D,1))
x = x.transpose()
D = len(win)
win = win.reshape((D,1))
mid = mid.reshape((D,1))
# return np.inner(w,x)>0
return (win.transpose().dot(x-mid))[0]>0 # D contracted, inner product on D, feature space.
x = bDataTestS
y = bTargetTestS
result = classifier(x,w,m)
err = np.mean((y>t0)!= result)
print('err rate is: ',err)
print('Linearly separable? ', np.mean(classifier(x_train,w,m) == (target > t0 )))
# print('wb:',w.real)
# plt.show()
if __name__ == '__main__':
hw1p3b()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `nameko_structlog` package."""
from unittest.mock import MagicMock, call
from nameko.testing.services import entrypoint_hook
import structlog
class TestingStructlogProcessor:
"""
Testing StructlogProcessor that makes use of `ReturnLogger`.
"""
def __init__(self, **kwargs):
pass
def get_logger(self):
structlog.configure(
processors=[],
context_class=structlog.threadlocal.wrap_dict(dict),
logger_factory=structlog.ReturnLoggerFactory(),
wrapper_class=structlog.stdlib.BoundLogger,
cache_logger_on_first_use=True,
)
return structlog.get_logger().new()
def test_structlog_dependency(greeting_service, greeting_rpc):
greeting_rpc.log = TestingStructlogProcessor().get_logger()
with entrypoint_hook(greeting_service.container, "greet") as greet:
assert greet() == "Hi"
assert greeting_rpc.log.info("bar") == ((), {"event": "bar"})
def test_structlog_logger(greeting_service, greeting_rpc):
greeting_rpc.log = MagicMock(name="log")
greeting_rpc.log.info = MagicMock(name="info")
greeting_rpc.log.info.return_value = TestingStructlogProcessor().get_logger()
greeting_rpc.log.info("bar")
assert greeting_rpc.log.info.call_args_list == [call("bar")]
greeting_rpc.log.info.assert_called_once()
|
from .clean import clean_json |
"Unit tests of array functions."
import unittest
import numpy as np
from numpy.testing import assert_almost_equal, assert_equal, assert_raises
nan = np.nan
from la import larry
from la.missing import nans, missing_marker, ismissing
class Test_nans(unittest.TestCase):
"Test nans"
def test_nans_1(self):
"afunc.nans_1"
shape = (2,)
actual = nans(shape)
desired = np.array([nan, nan])
assert_almost_equal(actual, desired)
def test_nans_2(self):
"afunc.nans_2"
shape = (2,)
dtype = float
actual = nans(shape, dtype)
desired = np.array([nan, nan])
assert_almost_equal(actual, desired)
def test_nans_3(self):
"afunc.nans_3"
shape = (2,)
dtype = str
actual = nans(shape, dtype)
desired = np.array(['', ''])
assert_equal(actual, desired)
def test_nans_4(self):
"afunc.nans_4"
shape = (2,)
dtype = object
actual = nans(shape, dtype)
desired = np.array([None, None])
assert_equal(actual, desired)
def test_nans_5(self):
"afunc.nans_5"
shape = (2, 4, 3)
dtype = object
actual = nans(shape, dtype)
desired = np.zeros(shape, dtype=dtype)
desired[:] = None
assert_equal(actual, desired)
def test_nans_6(self):
"afunc.nans_6"
shape = 0
dtype = str
actual = nans(shape, dtype)
desired = np.zeros(shape, dtype=dtype)
assert_equal(actual, desired)
def test_nans_7(self):
"afunc.nans_7"
shape = 0
dtype = int
assert_raises(TypeError, nans, shape, dtype)
def test_nans_8(self):
"afunc.nans_8"
shape = 0
dtype = bool
assert_raises(TypeError, nans, shape, dtype)
class Test_missing_marker(unittest.TestCase):
"Test missing_marker"
def test_missing_marker_1(self):
"afunc.missing_marker_1"
assert_equal(missing_marker(larry([1])), NotImplemented)
def test_missing_marker_2(self):
"afunc.missing_marker_2"
assert_equal(missing_marker(larry([1.0])), nan)
def test_missing_marker_3(self):
"afunc.missing_marker_3"
assert_equal(missing_marker(larry([True])), NotImplemented)
def test_missing_marker_4(self):
"afunc.missing_marker_4"
assert_equal(missing_marker(larry(['a'])), '')
def test_missing_marker_5(self):
"afunc.missing_marker_5"
import datetime
d = datetime.date(2011, 1, 1)
assert_equal(missing_marker(larry([d])), None)
def test_missing_marker_1a(self):
"afunc.missing_marker_1a"
assert_equal(missing_marker(np.array([1])), NotImplemented)
def test_missing_marker_2a(self):
"afunc.missing_marker_2a"
assert_equal(missing_marker(np.array([1.0])), nan)
def test_missing_marker_3a(self):
"afunc.missing_marker_3a"
assert_equal(missing_marker(np.array([True])), NotImplemented)
def test_missing_marker_4a(self):
"afunc.missing_marker_4a"
assert_equal(missing_marker(np.array(['a'])), '')
def test_missing_marker_5a(self):
"afunc.missing_marker_5a"
import datetime
d = datetime.date(2011, 1, 1)
assert_equal(missing_marker(np.array([d])), None)
class Test_ismissing(unittest.TestCase):
"Test ismissing"
def test_ismissing_1(self):
"afunc.ismissing_1"
assert_equal(ismissing(larry([1])), np.array([False]))
def test_ismissing_2(self):
"afunc.ismissing_2"
assert_equal(ismissing(larry([1.0])), np.array([False]))
def test_ismissing_3(self):
"afunc.ismissing_3"
assert_equal(ismissing(larry(['str'])), np.array([False]))
def test_ismissing_4(self):
"afunc.ismissing_4"
assert_equal(ismissing(larry([None])), np.array([True]))
def test_ismissing_5(self):
"afunc.ismissing_5"
import datetime
d = datetime.date(2011, 1, 1)
assert_equal(ismissing(larry([d])), np.array([False]))
def test_ismissing_6(self):
"afunc.ismissing_6"
assert_equal(ismissing(larry([nan])), np.array([True]))
def test_ismissing_7(self):
"afunc.ismissing_7"
assert_equal(ismissing(larry([nan, 1])), np.array([True, False]))
def test_ismissing_8(self):
"afunc.ismissing_8"
assert_equal(ismissing(larry([''])), np.array([True]))
def test_ismissing_9(self):
"afunc.ismissing_9"
assert_equal(ismissing(larry([True])), np.array([False]))
def test_ismissing_1a(self):
"afunc.ismissing_1a"
assert_equal(ismissing(np.array([1])), np.array([False]))
def test_ismissing_2a(self):
"afunc.ismissing_2a"
assert_equal(ismissing(np.array([1.0])), np.array([False]))
def test_ismissing_3a(self):
"afunc.ismissing_3a"
assert_equal(ismissing(np.array(['str'])), np.array([False]))
def test_ismissing_4a(self):
"afunc.ismissing_4a"
assert_equal(ismissing(np.array([None])), np.array([True]))
def test_ismissing_5a(self):
"afunc.ismissing_5a"
import datetime
d = datetime.date(2011, 1, 1)
assert_equal(ismissing(np.array([d])), np.array([False]))
def test_ismissing_6a(self):
"afunc.ismissing_6a"
assert_equal(ismissing(np.array([nan])), np.array([True]))
def test_ismissing_7a(self):
"afunc.ismissing_7a"
assert_equal(ismissing(np.array([nan, 1])), np.array([True, False]))
def test_ismissing_8a(self):
"afunc.ismissing_8a"
assert_equal(ismissing(np.array([''])), np.array([True]))
def test_ismissing_9a(self):
"afunc.ismissing_9a"
assert_equal(ismissing(np.array([True])), np.array([False]))
# Unit tests ----------------------------------------------------------------
def suite():
unit = unittest.TestLoader().loadTestsFromTestCase
s = []
s.append(unit(Test_nans))
s.append(unit(Test_missing_marker))
s.append(unit(Test_ismissing))
return unittest.TestSuite(s)
def run():
s = suite()
unittest.TextTestRunner(verbosity=2).run(s)
|
#! /usr/bin/python3
# (c) Copyright 2019-2022, James Stevens ... see LICENSE for details
# Alternative license arrangements possible, contact me for more information
""" module to resolve DNS queries into DoH JSON objects """
from syslog import syslog
import socket
import select
import argparse
import os
import json
import dns
import dns.name
import dns.message
import dns.rdatatype
import validation
DNS_MAX_RESP = 4096
MAX_TRIES = 10
DNS_FLAGS = {
"QR": 0x8000,
"AA": 0x0400,
"TC": 0x0200,
"RD": 0x0100,
"AD": 0x20,
"CD": 0x40,
"RA": 0x80
}
dohServers = ["8.8.8.8", "8.8.4.4"]
if "DOH_SERVERS" in os.environ:
dohServers = os.environ["DOH_SERVERS"].split(",")
def resolv_host(server):
""" resolve {host} to an IP if its a host name """
if validation.is_valid_ipv4(server):
return server
if validation.is_valid_host(server):
return socket.gethostbyname(server)
return None
class ResolvError(Exception):
""" custom error """
class Query: # pylint: disable=too-few-public-methods
""" build a DNS query & resolve it """
def __init__(self, name, rdtype):
if not validation.is_valid_host(name):
raise ResolvError(f"Hostname '{name}' failed validation")
self.name = name
self.rdtype = rdtype
self.with_dnssec = True
self.do = False
self.cd = False
self.servers = ["8.8.8.8", "1.1.1.1"]
def resolv(self):
""" resolve the query we hold """
res = Resolver(self)
return res.recv()
class Resolver:
""" resolve a DNS <Query> """
def __init__(self, qry):
self.qryid = None
self.reply = None
if not validation.is_valid_host(qry.name):
raise ResolvError(f"Hostname '{qry.name}' failed validation")
if isinstance(qry.rdtype, int):
rdtype = int(qry.rdtype)
else:
rdtype = dns.rdatatype.from_text(qry.rdtype)
if hasattr(qry, "servers"):
self.servers = qry.servers
else:
self.servers = dohServers
for each_svr in self.servers:
if not validation.is_valid_ipv4(each_svr):
raise ResolvError("Invalid IP v4 Address for a Server")
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
if self.sock is None:
raise ResolvError("Failed to open UDP client socket")
self.expiry = 1
self.tries = 0
msg = dns.message.make_query(qry.name,
rdtype,
want_dnssec=(qry.do or qry.cd))
self.question = bytearray(msg.to_wire())
def send_all(self):
""" send the query to all servers """
ret = False
for each_svr in self.servers:
try:
sent_len = self.sock.sendto(self.question, (each_svr, 53))
ret = ret or (sent_len == len(self.question))
# pylint: disable=unused-variable,broad-except
except Exception as err:
syslog(str(err))
return ret # True if at least one worked
def send(self):
""" send the DNS query out """
if self.question is None:
return None
self.question[0] = 0
self.question[1] = 0
while self.question[0] == 0 and self.question[1] == 0:
self.qryid = os.urandom(2)
self.question[0] = self.qryid[0]
self.question[1] = self.qryid[1]
return self.send_all()
def match_id(self):
""" cehck the DNS quiery Id field matches what we asked """
return (self.qryid is not None and self.reply[0] == self.qryid[0]
and self.reply[1] == self.qryid[1])
def recv(self, binary_format=False):
""" look for dns UDP response and read it """
while self.tries < MAX_TRIES:
if not self.send():
self.sock.close()
return None
while True:
rlist, _, _ = select.select([self.sock], [], [], self.expiry)
if len(rlist) <= 0:
break
self.reply, (addr, _) = self.sock.recvfrom(DNS_MAX_RESP)
if self.match_id():
if binary_format:
return self.reply
if (ret := self.decode_reply()) is None:
return None
ret["Responder"] = addr
self.sock.close()
return ret
self.expiry += int(self.expiry / 2) if self.expiry > 2 else 1
self.tries += 1
self.sock.close()
return None
def decode_reply(self):
""" decode binary {message} in DNS format to dictionary in DoH fmt """
msg = dns.message.from_wire(self.reply)
if (msg.flags & DNS_FLAGS["QR"]) == 0:
return None # REPLY flag not set
out = {}
for flag in DNS_FLAGS:
out[flag] = (msg.flags & DNS_FLAGS[flag]) != 0
out["Status"] = msg.rcode()
out["Question"] = [{
"name": rr.name.to_text(),
"type": rr.rdtype
} for rr in msg.question]
out["Answer"] = [{
"name": rr.name.to_text(),
"data": i.to_text(),
"type": rr.rdtype
} for rr in msg.answer for i in rr]
out["Authority"] = [{
"name": rr.name.to_text(),
"data": i.to_text(),
"type": rr.rdtype
} for rr in msg.authority for i in rr]
return out
def main():
""" main """
parser = argparse.ArgumentParser(
description='This is a wrapper to test the resolver code')
parser.add_argument("-s",
"--servers",
default="8.8.8.8,1.1.1.1",
help="Resolvers to query")
parser.add_argument("-n",
"--name",
default="jrcs.net",
help="Name to query for")
parser.add_argument("-t",
"--rdtype",
default="txt",
help="RR Type to query for")
args = parser.parse_args()
if not validation.is_valid_host(args.name):
print(f"ERROR: '{args.name}' is an invalid host name")
else:
qry = Query(args.name, args.rdtype)
qry.servers = args.servers.split(",")
qry.do = True
print(json.dumps(qry.resolv(), indent=2))
if __name__ == "__main__":
main()
|
import os
import re
import unicodedata
import csv
import argparse
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence
class Lang(object):
""" creates word-index mappings
"""
def __init__(self, name):
self.name = name
self.word2index = {}
self.word2count = {}
self.index2word = {0: "SOS", 1: "EOS"}
self.n_words = 2
def addSentence(self, sentence):
# need to split according to Chinese sentences or specific input format
for word in sentence.split():
self.addWord(word)
def addWord(self, word):
if word not in self.word2index:
self.word2index[word] = self.n_words
self.word2count[word] = 1
self.index2word[self.n_words] = word
self.n_words += 1
else:
self.word2count[word] += 1
def unicodeToAscii(s):
return ''.join(c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn')
def normalizeString(s):
s = unicodeToAscii(s.lower().strip())
s = re.sub(r"([.!?])", r" \1", s)
s = re.sub(r"[^a-zA-Z.!?]+", r" ", s)
return s
def readLangs(path):
print("Reading data...")
with open(path, 'r', newline='') as f:
reader = csv.reader(f)
data_list = list(reader)
# each item (sent1, sent2, label)
item_list = [[w for w in item[0].split('\t')] for item in data_list]
# TODO, apply filters here to remove unwanted lexical info
# word-index mappings
lang = Lang(path)
return item_list, lang
def prepareData(path):
items, lang = readLangs(path)
for item in items:
lang.addSentence(item[0])
lang.addSentence(item[1])
print("Counted words: ", lang.n_words)
return items, lang
def indexFromSentence(lang, sentence):
return [lang.word2index[word] for word in sentence.split(' ')]
EOS_token = 1  # index of "EOS" in Lang.index2word
def tensorFromSentence(lang, sentence):
index = indexFromSentence(lang, sentence)
index.append(EOS_token)
return torch.tensor(index, dtype=torch.long)
class AtecDataset(Dataset):
def __init__(self, config, mode="train"):
super(AtecDataset, self).__init__()
self.config = config
# self.build_dic()
self.load_embedded_data()
def __len__(self):
return len(self.items)
def __getitem__(self, index):
return self.items[index]
# def build_dic(self):
# items, lang = prepareData(config.data_path)
# self.items = items
# self.lang = lang
def load_embedded_data(self):
pass  # TODO: loading of pre-embedded data is not implemented yet
def get_dataloader(config, mode="train"):
atec_dataset = AtecDataset(config, mode)
atec_dataloader = DataLoader(dataset = atec_dataset,
batch_size = config.batch_size,
shuffle = True,
num_workers = config.num_workers)
return atec_dataloader
####################################################################################################
class TestDataset(Dataset):
def __init__(self, config, mode="train"):
super(TestDataset, self).__init__()
self.config = config
self.build_dataset()
def build_dataset(self):
self.lengths = np.random.randint(20, size=10) + 1
self.data = [np.random.rand(l, self.config.input_size) for l in self.lengths]
self.labels = [np.random.randint(2, size=l) for l in self.lengths]
def __len__(self):
return len(self.labels)
def __getitem__(self, index):
return self.data[index], self.labels[index]
def pad_collate(batch):
# print(batch)
batch.sort(key=lambda x: len(x[0]), reverse=True)
lengths = torch.LongTensor([len(item[1]) for item in batch])
# print(lengths)
data = pad_sequence([torch.FloatTensor(item[0]) for item in batch], batch_first=True)
labels = pad_sequence([torch.LongTensor(item[1]) for item in batch], batch_first=True)
data = pack_padded_sequence(data, lengths, batch_first=True)
return data, labels
def get_test_dataloader(config, mode="train"):
dataset = TestDataset(config, mode)
dataloader = DataLoader(dataset = dataset,
batch_size = config.batch_size,
shuffle = True,
collate_fn=pad_collate)
return dataloader
####################################################################################################
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--input_size', type=int, default=5, help='feature dimension of each timestep')
parser.add_argument('--batch_size', type=int, default=3, help='number of examples per batch')
args = parser.parse_args()
# print(args)
dl = get_test_dataloader(args)
for i, (data, labels) in enumerate(dl):
# pass
print("data", data)
# print("labels", labels)
break
|
import os, sys, string, re, time
import marshal
from log import *
import time,hashlib
import neo_cgi, neo_util
from clearsilver import odb, hdfhelp, odb_sqlite3
class WhichReadDB(odb.Database):
def __init__ (self, conn):
odb.Database.__init__(self, conn)
self.addTable("whichread", "wr_whichread", WhichReadTable)
def get(self, readerid):
row = self.whichread.lookup(readerid=readerid)
if not row: row = ''
return row
class WhichReadTable(odb.Table):
def _defineRows(self):
self.d_addColumn("readerid", odb.kVarString, primarykey=1)
self.d_addColumn("wrlist", odb.kVarString)
def createTables(path):
dbpath = "%s/whichread.db3" % path
# conn = odb_sqlite3.Connection(dbpath, autocommit=0)
conn = odb_sqlite3.Connection(dbpath)
db = WhichReadDB(conn)
db.createTables()
db.synchronizeSchema()
db.createIndices()
class WhichRead:
def __init__ (self, listname,path,ncgi):
self.listname = listname
self._path = path
self.ncgi = ncgi
self.__db = None
self._whichReadID = self.getWhichReadID()
def getWhichReadID(self):
wrid = self.ncgi.hdf.getValue("Cookie.WRID","")
if not wrid:
m = hashlib.md5()
m.update("%s-%s" % (self.ncgi.hdf.getValue("CGI.RemoteAddress","ADDR"),
time.time()))
wrid = m.hexdigest()
log("issued new WhichReadID: %s" % wrid)
self.ncgi.cookieSet("WRID",wrid,persist=1)
# self.ncgi.hdf.setValue("Cookie.WRID",wrid)
return wrid
def _db(self):
if self.__db is None:
dbpath = "%s/whichread.db3" % self._path
# conn = odb_sqlite3.Connection(dbpath, autocommit=0)
conn = odb_sqlite3.Connection(dbpath)
self.__db = WhichReadDB(conn)
return self.__db
def markMsgRead(self, message_num):
# unpack the seen cookie
seencookiename = "%s.WR" % self.listname
seencookie = self.ncgi.hdf.getValue("Cookie.%s" % seencookiename, "")
if seencookie:
c_parts = string.split(seencookie,",")
else:
c_parts = []
mnum_str = "%s" % message_num
try:
c_parts.remove(mnum_str)
log("already seen in cookie: %s" % message_num)
except ValueError:
log("markread: %s" % message_num)
# yes, it's new!
# make a new seen cookie! (only 200 entries)
c_parts.insert(0,mnum_str)
new_seencookie = string.join(c_parts[:200],",")
self.ncgi.cookieSet(seencookiename,new_seencookie,persist=1)
# add to whichread DB
self.addToDB(message_num)
# append to whichread log
fp = open("%s/whichreadchanges.log" % self._path,"ab+")
fp.write("%s %s\n" % (self._whichReadID,mnum_str))
fp.close()
def getWRList(self):
# read whichread from disk
wdb = self._db()
whichread = ""
whichread = wdb.whichread.lookup(readerid=self._whichReadID)
if whichread is None:
wrlist = ''
else:
wrlist = whichread.wrlist
wrl = WRList(wrlist)
return wrl
def addToDB(self,mnum):
wdb = self._db()
whichread = ""
whichread = wdb.whichread.lookup(readerid=self._whichReadID)
if whichread is None:
wrlist = ''
else:
wrlist = whichread.wrlist
wr_list = WRList(wrlist)
wr_list.markRead(mnum)
row = wdb.whichread.lookupCreate(readerid=self._whichReadID)
row.wrlist = wr_list.dump()
row.save()
def __del__ (self):
if self.__db:
self.__db.close()
class WRList:
def __init__(self,val):
self._val = val
self._parts = string.split(val,",")
self._dict = {}
dict = self._dict
for a_part in self._parts:
dict[a_part] = 1
def markRead(self,mnum):
mnum = "%s" % mnum
try:
index = self._parts.index(mnum)
except ValueError:
self._parts.insert(0,mnum)
def dump(self):
# log("WRLIST: %s" % self._parts)
return string.join(self._parts,",")
def isRead(self,mnum):
mnum = "%s" % mnum
# log("isRead %s = %s" % (mnum,self._dict.has_key(mnum)))
return self._dict.has_key(mnum)
|
# Copyright (c) 2015, the Fletch project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE.md file.
{
'target_defaults': {
'include_dirs': [
'../../',
],
},
'targets': [
{
'target_name': 'fletch_shared',
'type': 'static_library',
'toolsets': ['target', 'host'],
'sources': [
'asan_helper.h',
'assert.cc',
'assert.h',
'atomic.h',
'bytecodes.cc',
'bytecodes.h',
'connection.cc',
'connection.h',
'flags.cc',
'flags.h',
'fletch.h',
'globals.h',
'list.h',
'names.h',
'natives.h',
'native_socket.h',
'native_socket_linux.cc',
'native_socket_macos.cc',
'native_socket_posix.cc',
'platform.h',
'platform_linux.cc',
'platform_macos.cc',
'platform_posix.cc',
'platform_posix.h',
'platform_lk.cc',
'platform_lk.h',
'platform_mbed.cc',
'platform_mbed.h',
'random.h',
'selectors.h',
'utils.cc',
'utils.h',
],
'link_settings': {
'libraries': [
'-lpthread',
],
},
},
{
'target_name': 'cc_test_base',
'type': 'static_library',
'dependencies': [
'fletch_shared',
],
'sources': [
'test_case.h',
'test_case.cc',
'test_main.cc',
],
},
{
'target_name': 'shared_cc_tests',
'type': 'executable',
'dependencies': [
'cc_test_base',
],
'defines': [
'TESTING',
],
'sources': [
'assert_test.cc',
'flags_test.cc',
'globals_test.cc',
'random_test.cc',
'utils_test.cc',
'fletch.cc',
],
},
{
'target_name': 'natives_to_json',
'type': 'executable',
'toolsets': ['host'],
'dependencies': [
'fletch_shared',
],
'sources': [
'natives_to_json.cc',
],
'conditions': [
[ 'OS=="mac"', {
'dependencies': [
'copy_asan#host',
],
'sources': [
'<(PRODUCT_DIR)/libclang_rt.asan_osx_dynamic.dylib',
],
}],
],
},
{
'target_name': 'natives_json',
'type': 'none',
'toolsets': ['host'],
'dependencies': [
'natives_to_json',
],
'actions': [
{
'action_name': 'make_natives_json',
'inputs': [
'<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)'
'natives_to_json'
'<(EXECUTABLE_SUFFIX)',
],
'outputs': [
'<(PRODUCT_DIR)/natives.json',
],
'action': [
'<@(_inputs)',
'<@(_outputs)',
],
}
],
}
],
'conditions': [
[ 'OS=="mac"', {
'targets': [
{
'target_name': 'copy_asan',
'type': 'none',
'toolsets': ['host'],
'copies': [
{
# The asan dylib file sets its install name as
# @executable_path/..., and by copying to PRODUCT_DIR, we avoid
# having to set DYLD_LIBRARY_PATH.
'destination': '<(PRODUCT_DIR)',
'files': [
'../../third_party/clang/mac/lib/clang/3.8.0/'
'lib/darwin/libclang_rt.asan_osx_dynamic.dylib',
],
},
],
},
]
}]
],
}
|
Version = "3.56.0"
|
from slot import Slot
class Recipient:
def __init__(self, recipient_id:str, email:str, first_name:str, last_name:str, age:int):
self.__recipient_id = recipient_id
self.__email = email
self.__first_name = first_name
self.__last_name = last_name
self.__age = age
self.__slots = []
@property
def recipient_id(self):
return self.__recipient_id
@property
def email(self):
return self.__email
@property
def first_name(self):
return self.__first_name
@property
def last_name(self):
return self.__last_name
@property
def age(self):
return self.__age
@property
def slots(self):
return self.__slots
def are_slots_same_date(self, slot:Slot) -> bool:
for selfslot in self.__slots:
if selfslot.reservation_date == slot.reservation_date:
return True
return False
def is_slot_counts_equal_or_over_two(self) -> bool:
if len(self.__slots) >= 2:
return True
return False
def add_reserve_slot(self, slot:Slot) -> bool:
if self.are_slots_same_date(slot):
return False
if self.is_slot_counts_equal_or_over_two():
return False
self.__slots.append(slot)
slot.use_slot()
return True
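# Illustrative use of the reservation rules above (assumes Slot objects expose
# reservation_date and use_slot() as used in add_reserve_slot; names are hypothetical):
#   r = Recipient("r1", "ada@example.com", "Ada", "Lovelace", 36)
#   r.add_reserve_slot(slot_jan_1)    # True  - first slot
#   r.add_reserve_slot(slot_jan_1b)   # False - same reservation_date as an existing slot
#   r.add_reserve_slot(slot_feb_1)    # True  - second slot
#   r.add_reserve_slot(slot_mar_1)    # False - already holds two slots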
|
from bean import RuleBuilder
bot_host = "127.0.0.1" # bot host
bot_port = "8080" # bot port
qq_group = [123456, 987654] # QQ groups the bot is active in
compress_kb = 500 # image compression size, in KB
bot_img_file_dir = r"C:\酷Q Pro\data\image" # directory where the CoolQ bot stores received images
APP_ID = 123456 # API credentials obtained from Tencent Cloud
APP_KEY = 'jlKkjKOWtEogd'
# rules
rules = RuleBuilder()\
.add(RuleBuilder.TAG_NORMAL_HOT_PORN, 90, 100, True, RuleBuilder.Punishment.kick(True))\
.add(RuleBuilder.TAG_NORMAL_HOT_PORN, 80, 89, True, RuleBuilder.Punishment.ban(7*24*60*60))\
.build() |
import sys
import random
import datetime
import time
class utils:
ROBOT_LIBRARY_SCOPE = 'GLOBAL'
__version__ = '0.1'
@staticmethod
def num_to_short_alphanumeric(i):
chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXY'
length = len(chars)
result = ''
while i:
result = chars[i % length] + result
i = i // length
if not result:
result = chars[0]
return result
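# Worked example: num_to_short_alphanumeric(36) == '11', since the alphabet above
# has 35 symbols (0-9 plus A-Y) and 36 = 1*35 + 1.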
def get_unique_new_customer_mnemonic(self):
base = datetime.datetime(2016, 2, 1, 00, 00)
secondsPassed = int((datetime.datetime.now() - base).total_seconds())
code = self.num_to_short_alphanumeric(secondsPassed)
result = code.rjust(8, 'Z')
return result
def get_unique_date_string(self):
now = datetime.datetime.now()
return now.strftime("%m%d%H%M%S")
def get_todays_date(self):
now = datetime.datetime.now()
return now.strftime("%m/%d/%Y")
def get_id_from_transaction_confirmation_text(self, confirmTransactionText):
return confirmTransactionText.replace('Txn Complete:', '').strip().split(' ', 1)[0]
|
"""Preprocessing mainly cares about constructing a DAG before we start processing.
The shared layers and models are also duplicated during this procedure.
"""
import logging
import onnx as O
import keras as K
from keras.models import Sequential
from . import helper
from .exceptions import FeatureNotImplemented, OnnxNotSupport
from .tree_structure import TreeNode, TreeTensor
from . import optimizer
def is_same_tensor(tensor_a, tensor_b):
"""Compare tow tree tensor class and check if their inner tensor is the same
"""
return tensor_a.tensor == tensor_b.tensor
# Define the outside input and output as two special tree nodes
GRAND_INPUT = TreeTensor('INPUT')
def createTreeNode(layer_tree, tensor_dict, klayer, prefix=''):
"""The function used to create the tree node and tree tensors corresponding
to the given keras layer.
"""
total_use_num = len(klayer._inbound_nodes)
if total_use_num == 1:
# For non-shared layers
cur_node = TreeNode(klayer, prefix)
for i in range(len(cur_node.get_output_tensors())):
ktensor = cur_node.get_output_tensors()[i]
cur_tensor = TreeTensor(cur_node.name + '_o' + str(i),
tensor=ktensor,
creator=cur_node)
tensor_dict[ktensor] = cur_tensor
cur_node.outputs.append(cur_tensor)
layer_tree.append(cur_node)
else:
# For shared layers
for i in range(total_use_num):
postfix = '_p' + str(i)
cur_node = TreeNode(klayer, prefix, postfix, i)
for j in range(len(cur_node.get_output_tensors())):
ktensor = cur_node.get_output_tensors()[j]
cur_tensor = TreeTensor(cur_node.name + '_o' + str(j),
tensor=ktensor,
creator=cur_node)
tensor_dict[ktensor] = cur_tensor
cur_node.outputs.append(cur_tensor)
layer_tree.append(cur_node)
def check_rnn_start_point(layers):
RNN_LAYERS = ['GRU', 'LSTM']
START_LAYERS = ['Reshape', 'InputLayer']
# Check for RNN layers
the_first_rnn_layer = None
for layer in layers:
if layer.type in RNN_LAYERS:
the_first_rnn_layer = layer
break
if the_first_rnn_layer is None:
return
# Find the start layer before this RNN layer
layer = the_first_rnn_layer
while layer.type not in START_LAYERS:
layer = layer.inputs[0].input
helper.RNN_start_node = layer
if layer.type == 'InputLayer':
helper.RNN_start = True
helper.logger.debug("Found RNN starting from " + layer.name)
def preprocess(kmodel, prefix='', outer_node=None, optimize=False):
"""The main tree construction function
"""
output_tensors = kmodel.outputs
input_tensors = kmodel.inputs
layer_tree = []
input_node_list = []
output_tensor_list = []
submodel_list = []
useless_node_idx_list = []
helper.is_sequential = isinstance(kmodel, Sequential)
# Define a dictionary to map the output tensors to a single tree tensor
tensor_dict = dict()
# Dictionary for input tensor
input_dict = dict()
# 1. Set up tree nodes and their tensors for all the layers
for layer in kmodel.layers:
createTreeNode(layer_tree, tensor_dict, layer, prefix=prefix)
# 2.1 If it is sequential without InputLayer, construct one for it
if helper.is_sequential:
helper.logger.warning("Sequential model is not recommanded.")
if layer_tree[0].type != "InputLayer":
helper.logger.warning("Constructing an InputLayer for Sequential model.")
constructed_input = TreeNode()
constructed_input.name = "contructed_input"
constructed_input.type = "InputLayer"
actual_input_tensor = input_tensors[0]
input_tensor = TreeTensor(constructed_input.name + '_o0',
tensor=actual_input_tensor,
creator=constructed_input)
tensor_dict[actual_input_tensor] = input_tensor
constructed_input.outputs.append(input_tensor)
layer_tree.insert(0, constructed_input)
# 2.2 Set up tree nodes inputs and outputs
for tree_node in layer_tree:
if tree_node.type == "InputLayer":
# Input node has no regular input tensor
if outer_node is None:
tree_node.inputs.append(GRAND_INPUT)
GRAND_INPUT.outputs.append(tree_node)
else:
tree_node.type = "InnerInput"
keras_tensor = tree_node.get_input_tensors()[0]
input_dict[keras_tensor] = tree_node
else:
# Non-input nodes
try:
for tensor in tree_node.get_input_tensors():
src_tensor = tensor_dict[tensor]
src_tensor.outputs.append(tree_node)
tree_node.inputs.append(src_tensor)
except KeyError:
useless_node_idx_list.append(layer_tree.index(tree_node))
# Check for submodel
if tree_node.type == "Model":
submodel_list.append(tree_node)
# 3.1 Check for output nodes
for output_tensor in output_tensors:
output_tensor_list.append(tensor_dict[output_tensor])
# 3.2 Check for input nodes
try:
for input_tensor in input_tensors:
input_node_list.append(input_dict[input_tensor])
except KeyError:
raise FeatureNotImplemented("Keras model without Input layer")
# 4. Deal with submodels
for model in submodel_list:
if layer_tree.index(model) in useless_node_idx_list:
continue
# 4.1 Extract submodel
sub_layers, sub_inputs, sub_outputs = preprocess(model.klayer,
prefix=model.name + '_',
outer_node=model)
layer_tree += sub_layers
# 4.2 Reset inner input node
assert len(model.inputs) == len(sub_inputs), "Submodel input number error"
for i in range(len(sub_inputs)):
# for each inner input layer
input_out_tensor = sub_inputs[i].outputs[0]
for input_follower in input_out_tensor.outputs:
# For each use of the current input
input_follower.replace_input(model.inputs[i], input_out_tensor)
model.inputs[i].replace_output(input_out_tensor.outputs, model)
useless_node_idx_list.append(layer_tree.index(sub_inputs[i]))
# 4.3 Reset inner output node
assert len(model.outputs) == len(sub_outputs), "Submodel output number error"
for i in range(len(sub_outputs)):
# for each output tensor
sub_outputs[i].outputs += model.outputs[i].outputs
for follower in model.outputs[i].outputs:
# for each use of output tensor
follower.replace_input(sub_outputs[i], model.outputs[i])
if model.outputs[i] in output_tensor_list:
sub_outputs[i].tensor = model.outputs[i].tensor
output_tensor_list.remove(model.outputs[i])
output_tensor_list.append(sub_outputs[i])
useless_node_idx_list.append(layer_tree.index(model))
# 5. Remove useless nodes
for i in sorted(useless_node_idx_list, reverse=True):
helper.logger.debug("Remove %s", layer_tree[i].name)
del layer_tree[i]
# 6. Optimizations
for i in range(optimize):
for opt_func in optimizer.pass_list[i]:
opt_func(layer_tree)
# Check RNN
check_rnn_start_point(layer_tree)
# 7. Only the out most preprocess will print the debug message
if prefix != '':
return layer_tree, input_node_list, output_tensor_list
helper.logger.debug("Here goes the graph:")
for layer in layer_tree:
layer.print_info()
for tensor in output_tensor_list:
helper.logger.debug(tensor.name + '\t-> OUTPUT')
return layer_tree, input_node_list, output_tensor_list |
# ###########################################################################
#
# CLOUDERA APPLIED MACHINE LEARNING PROTOTYPE (AMP)
# (C) Cloudera, Inc. 2021
# All rights reserved.
#
# Applicable Open Source License: Apache 2.0
#
# NOTE: Cloudera open source products are modular software products
# made up of hundreds of individual components, each of which was
# individually copyrighted. Each Cloudera open source product is a
# collective work under U.S. Copyright Law. Your license to use the
# collective work is as provided in your written agreement with
# Cloudera. Used apart from the collective work, this file is
# licensed for your use pursuant to the open source license
# identified above.
#
# This code is provided to you pursuant a written agreement with
# (i) Cloudera, Inc. or (ii) a third-party authorized to distribute
# this code. If you do not have a written agreement with Cloudera nor
# with an authorized and properly licensed third party, you do not
# have any rights to access nor to use this code.
#
# Absent a written agreement with Cloudera, Inc. (“Cloudera”) to the
# contrary, A) CLOUDERA PROVIDES THIS CODE TO YOU WITHOUT WARRANTIES OF ANY
# KIND; (B) CLOUDERA DISCLAIMS ANY AND ALL EXPRESS AND IMPLIED
# WARRANTIES WITH RESPECT TO THIS CODE, INCLUDING BUT NOT LIMITED TO
# IMPLIED WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE; (C) CLOUDERA IS NOT LIABLE TO YOU,
# AND WILL NOT DEFEND, INDEMNIFY, NOR HOLD YOU HARMLESS FOR ANY CLAIMS
# ARISING FROM OR RELATED TO THE CODE; AND (D)WITH RESPECT TO YOUR EXERCISE
# OF ANY RIGHTS GRANTED TO YOU FOR THE CODE, CLOUDERA IS NOT LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, PUNITIVE OR
# CONSEQUENTIAL DAMAGES INCLUDING, BUT NOT LIMITED TO, DAMAGES
# RELATED TO LOST REVENUE, LOST PROFITS, LOSS OF INCOME, LOSS OF
# BUSINESS ADVANTAGE OR UNAVAILABILITY, OR LOSS OR CORRUPTION OF
# DATA.
#
# ###########################################################################
import os
import json
import numpy as np
import pandas as pd
from qa.utils import set_seed
DEFAULT_SIZES = [500, 1000, 1500, 2000, 2500, 3000]
def randomize_indices(data):
idx = np.arange(len(data))
return np.random.permutation(idx)
def partition_data(data, indices_or_sections=None, seed=42):
"""
data should be a ... what??? list?
partitions can be a number (as in, the number of partitions) or a list of data sizes?
"""
set_seed(seed)
dd = np.array(data)
idx = randomize_indices(data)
idx_chunks = np.array_split(idx, indices_or_sections)
partitions = [list(dd[chunk]) for chunk in idx_chunks]
return partitions
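# Illustrative call (values are hypothetical): split 10 examples into chunks of
# 3, 3 and 4 by passing the cut points [3, 6] through to np.array_split.
#   chunks = partition_data(list(range(10)), indices_or_sections=[3, 6])
#   [len(c) for c in chunks]  # -> [3, 3, 4]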
def create_increasing_sized_train_sets(json_data_file, sizes=DEFAULT_SIZES, **kwargs):
"""
json_data_file: filename of the original, full dataset from which to create subsets
sizes: list of dataset sizes to partition the original dataset into
these will translate into increasing sized datasets, with each
successive dataset consisting of the previous subset's examples plus
the number of additional examples identified in the splits
Takes filename of a json dataset and the desired sizes and creates
subsets of increasing size. These subsets are saved to the directory
associated with the json_data_file.
"""
outfile_prefix = os.path.splitext(json_data_file)[0]
json_data = json.load(open(json_data_file, "r"))
data_chunks = partition_data(json_data["data"][0]["paragraphs"], sizes, **kwargs)
new_json = {"data": [{"paragraphs": []}]}
num_examples = 0
for chunk in data_chunks:
num_examples += len(chunk)
new_json["data"][0]["paragraphs"] += chunk
json.dump(new_json, open(f"{outfile_prefix}_{num_examples}.json", "w"))
def load_results(data_dir):
data = json.load(open(data_dir + "/results_.json", "r"))
return pd.DataFrame(data, index=[0])
def load_predictions(data_dir):
preds = json.load(open(data_dir + "/predictions_.json", "r"))
return pd.Series(preds)
def load_nbest_predictions(data_dir):
"""
I have not tested this and I have no idea if it works.
"""
preds = json.load(open(data_dir + "/nbest_predictions_.json", "r"))
return pd.DataFrame(preds, index=[0])
|
class Game:
def __init__(self, players) -> None:
super().__init__()
self.players = list()
self.num_players = players
for i in range(players):
self.players.append(Player(i + 1))
self.circle = Circle()
def play_until(self, last_marble):
for i in range(last_marble + 1):
player = self.players[i % self.num_players]
score = self.circle.add_marble(i + 1)
player.score += score
print(self.circle)
return self.print_high_score()
def print_high_score(self):
high_score = 0
highest_scoring_player = None
for player in self.players:
if player.score > high_score:
high_score = player.score
highest_scoring_player = player
print(f"Player {highest_scoring_player.number} won with a high score of {high_score}")
return high_score
class Circle:
def __init__(self) -> None:
super().__init__()
self.current = Marble(0)
self.current.clockwise = self.current
self.current.counter_clockwise = self.current
self.zero = self.current
def add_marble(self, value):
new_marble = Marble(value)
if value % 23 == 0:
points = new_marble.value
marble_to_remove = self.current
for j in range(7):
marble_to_remove = marble_to_remove.counter_clockwise
points += marble_to_remove.value
marble_to_remove.clockwise.counter_clockwise = marble_to_remove.counter_clockwise
marble_to_remove.counter_clockwise.clockwise = marble_to_remove.clockwise
self.current = marble_to_remove.clockwise
return points
else:
clockwise1 = self.current.clockwise
clockwise2 = clockwise1.clockwise
new_marble.clockwise = clockwise2
new_marble.counter_clockwise = clockwise1
clockwise1.clockwise = new_marble
clockwise2.counter_clockwise = new_marble
self.current = new_marble
return 0
def __str__(self) -> str:
values = list()
values.append(self.zero.value)
next_marble = self.zero.clockwise
while next_marble is not self.zero:
values.append(next_marble.value)
next_marble = next_marble.clockwise
return values.__str__()
class Marble:
def __init__(self, value) -> None:
super().__init__()
self.value = value
self.counter_clockwise = None
self.clockwise = None
class Player:
def __init__(self, number) -> None:
super().__init__()
self.number = number
self.score = 0
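# A minimal usage sketch (assuming the classic marble-game rules this implements):
# with nine players and a last marble worth 25 points, the reported high score
# should be 32.
if __name__ == '__main__':
    Game(9).play_until(25)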
|
#xorshift method https://en.wikipedia.org/wiki/Xorshift
class fastrand:
def __init__(self, seed=0x956126898):
self.rctr = seed
def rand(self):
self.rctr %= 0xFFFFFFFF
self.rctr ^= (self.rctr << 13)
self.rctr %= 0xFFFFFFFF
self.rctr ^= (self.rctr >> 7)
self.rctr %= 0xFFFFFFFF
self.rctr ^= (self.rctr << 17)
self.rctr %= 0xFFFFFFFF
return float(self.rctr % 0xFFFFFFFF)/0xFFFFFFFF
def mc_pi(acc: int) -> float:
rng = fastrand()
hit_count: int = 0
count: int = 0
for _ in range(0, acc):
x: float = rng.rand()
y: float = rng.rand()
if x * x + y * y <= 1. :
hit_count += 1
count += 1
return float(hit_count) / float(count) * 4.0
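# Minimal usage sketch (not part of the original module): a Monte Carlo estimate
# of pi converges slowly, roughly O(1/sqrt(n)), so expect only a few correct digits here.
if __name__ == "__main__":
    print(mc_pi(1_000_000))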
|
from typing import List
class Solution:
def trap(self, height: List[int]) -> int:
before = 0
sum = 0
for i in range(1, len(height)):
if height[i] >= height[before]:
for j in range(before + 1, i):
sum += height[before] - height[j]
before = i
before = len(height) - 1
for i in reversed(range(len(height) - 1)):
if height[i] > height[before]:
for j in range(i + 1, before):
sum += height[before] - height[j]
before = i
return sum
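# Worked example: the classic elevation profile below traps 6 units of water.
#   Solution().trap([0, 1, 0, 2, 1, 0, 1, 3, 2, 1, 2, 1])  # -> 6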
|
"""Dialog State Tracker Interface"""
from abc import abstractmethod
from tatk.util.module import Module
class Tracker(Module):
"""Base class for dialog state tracker models."""
# @abstractmethod
def update(self, action):
""" Update the internal dialog state variable.
Args:
action (str or list of tuples):
The type is str when Tracker is word-level (such as NBT), and list of tuples when it is DA-level.
Returns:
new_state (dict):
Updated dialog state, with the same form of previous state.
"""
return self.update_batch([action])[0]
# @abstractmethod
def update_batch(self, batch_action):
""" Update the internal dialog state variable.
Args:
batch_action (list of str or list of list of tuples):
The type is list of str when Tracker is word-level (such as NBT), and list of list of tuples when it is DA-level.
Returns:
batch_new_state (list of dict):
Updated dialog states, with the same form of previous states.
"""
pass
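# A minimal illustrative subclass (hypothetical; assumes tatk.util.module.Module
# imposes no further abstract methods): a DA-level tracker that appends each
# action to a running history in the state dict.
#
#   class HistoryTracker(Tracker):
#       def __init__(self):
#           super().__init__()
#           self.state = {"history": []}
#       def update_batch(self, batch_action):
#           new_states = []
#           for action in batch_action:
#               self.state["history"].append(action)
#               new_states.append(dict(self.state))
#           return new_states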
|
#!/usr/bin/env python
import sys
import os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.rst')).read()
setup(name='treetagger',
version='1.1.1',
description='A Python module for interfacing with the Treetagger by Helmut Schmid.',
long_description=README,
author='Mirko Otto',
author_email='dropsy@gmail.com',
url='https://github.com/miotto/treetagger-python',
py_modules=['treetagger'],
install_requires=['nltk'],
license='GPL Version 3',
)
|
"""
PyDelphin contains modules for working with the (in)formalisms of the
DELPH-IN ecosystem.
"""
|
from typing import Any
import tensorflow as tf
import numpy as np
from teal import RandomGain
from tests.utils import get_audio_examples
from tests.common import TealTest
class TestRandomGain(TealTest.TealTestCase):
def setUp(self):
self.power = 2
self.setup_layer(
layer=RandomGain(1.0),
single_example=get_audio_examples(1),
batch_example=get_audio_examples(3),
)
def value_assertion(self, a: Any, b: Any):
return self.assertAllClose(a, b, atol=0.01)
def alternate_logic(self, inputs: tf.Tensor) -> np.ndarray:
expected = []
numpy_examples = inputs.numpy()
num_examples = numpy_examples.shape[0]
if num_examples == 1:
results = self.single_result
else:
results = self.batch_result
for i in range(0, num_examples):
factor = (results[i] / (numpy_examples[i] + 1e-10)).numpy()
factor = np.unique(factor.round(4)).max()
expected.append(np.expand_dims(numpy_examples[i] * factor, axis=0))
return np.concatenate(expected, axis=0)
if __name__ == "__main__":
tf.test.main()
|
import easyocr
import os
import pandas as pd
from PIL import Image
import numpy as np
import time
import spacy
from spacy_langdetect import LanguageDetector
import streamlit as st
import de_core_news_sm
import en_core_web_sm
from langdetect import detect, DetectorFactory
DetectorFactory.seed = 0
import torch
torch.cuda.is_available()
ROTATIONS = [0, 90, 180, 270]
reader = easyocr.Reader(['en', 'de'], gpu=True)
func = np.vectorize(lambda t: len(t) > 2)
def find_most_likely_words_array(all_words):
all_words_len = [0, 0, 0, 0]
for i, words in enumerate(all_words):
try:
long_words = np.array(words)[func(words)]
except:
long_words = np.array([])
try:
for word in long_words:
try:
word = word.strip()
lan = detect(word)
if lan == "de" or lan == "en" or lan == "fr":
all_words_len[i] += 1
except:
pass
except:
pass
return np.argmax(all_words_len)
@st.cache
def predict_text(np_img, filename):
all_words = []
img = Image.fromarray(np_img)
for angle in ROTATIONS:
out = img.rotate(angle)
out.save(filename)
result = reader.readtext(filename)
words = [r[1] for r in result]
all_words.append(words)
index = find_most_likely_words_array(all_words)
return all_words[index], ROTATIONS[index] |
from __future__ import annotations
from typing import Any, TYPE_CHECKING
from pathmagic.helper import PathLike
from .format import Format
if TYPE_CHECKING:
from pathmagic.file import File
class Serialized(Format):
extensions = {"pkl"}
def __init__(self, file: File) -> None:
from iotools import Serializer
super().__init__(file=file)
self.serializer = Serializer(file)
@classmethod
def initialize(cls) -> None:
import dill
cls.module = dill
cls.readfuncs.update({"pkl": cls.module.load})
cls.writefuncs.update({"pkl": cls.module.dump})
def read(self, **kwargs: Any) -> Any:
return self.serializer.deserialize(**kwargs)
def write(self, item: PathLike, **kwargs: Any) -> None:
self.serializer.serialize(item, **kwargs)
|
'''
Picks a subset of the validation set for grid_search
Script arguments:
1: directory containing validation ppm images
2: output directory
3: how many images will be picked
'''
import random
import os
import sys
from shutil import copyfile
all_files = os.listdir(sys.argv[1])
all_files = [x[0:-4] for x in all_files]
picked_files = random.sample(all_files, int(sys.argv[3]))
for file in picked_files:
print(file)
copyfile(os.path.join(sys.argv[1], "{}.ppm".format(file)),
os.path.join(sys.argv[2], "{}.ppm".format(file))) # input
|
# An element in linked list
class Node:
def __init__(self, value):
self.value = value
self.prev = None
self.next = None
class CircularDoublyLinkedList:
def __init__(self):
self.head = None
self.tail = None
self.size = 0
self.index = self.head
self.is_tail_iterated = False
''' Inserting elements in linked list '''
# Insert element before head
def insert_at_start(self, value):
node = Node(value)
if not self.head:
self.head = node
self.tail = node
else:
prev_head = self.head
self.head = node
self.head.next = prev_head
prev_head.prev = self.head
self.head.prev = self.tail
self.tail.next = self.head
self.increment_size()
# Insert element at end (append)
def insert_at_end(self, value):
node = Node(value)
if not self.head:
self.head = node
self.tail = node
else:
prev_tail = self.tail
self.tail = node
self.tail.prev = prev_tail
prev_tail.next = self.tail
self.tail.next = self.head
self.head.prev = self.tail
self.increment_size()
# Insert element after some element
def insert_after(self, after, value):
self.empty_check()
if self.tail == after:
# Insert element after tail
return self.insert_at_end(value)
else:
node = Node(value)
next_node = after.next
after.next = node
next_node.prev = node
node.next = next_node
node.prev = after
self.increment_size()
# Insert element before some element
def insert_before(self, before, value):
self.empty_check()
if self.head == before:
# Insert element before head
return self.insert_at_start(value)
else:
node = Node(value)
prev_node = before.prev
before.prev = node
prev_node.next = node
node.next = before
node.prev = prev_node
self.increment_size()
''' Removing elements in linked list '''
# Note - after removing a node it can be assigned to None
# to free up memory, but since here I'm returning those values
# I haven't done that
def remove_head(self):
self.empty_check()
node = self.head
next_node = self.head.next
self.head = next_node
self.head.prev = self.tail
self.tail.next = self.head
self.decrement_size()
if len(self) == 0:
self.head = None
self.tail = None
return node
def remove_tail(self):
self.empty_check()
node = self.tail
prev_node = self.tail.prev
self.tail = prev_node
self.tail.next = self.head
self.head.prev = self.tail
self.decrement_size()
# Check if linked list is empty or not
if len(self) == 0:
self.head = None
self.tail = None
return node
def remove_node(self, node):
self.empty_check()
if node == self.head:
return self.remove_head()
elif node == self.tail:
return self.remove_tail()
else:
next_node = node.next
prev_node = node.prev
next_node.prev = prev_node
prev_node.next = next_node
self.decrement_size()
return node
''' Iterating through linked list '''
def __iter__(self):
self.index = self.head
self.is_tail_iterated = False
return self
def __next__(self):
if self.is_tail_iterated:
raise StopIteration
current_node = self.index
self.index = self.index.next
if current_node == self.tail:
self.is_tail_iterated = True
return current_node
''' Reversing linked list '''
def reverse_linked_list(self):
head_node = self.head
tail_node = self.tail
current_node = self.head
stop_at_node = None
while current_node is not stop_at_node:
prev_node = current_node.prev
next_node = current_node.next
current_node.next = prev_node
current_node.prev = next_node
current_node = next_node
stop_at_node = self.head
self.head = tail_node
self.tail = head_node
self.head.prev = self.tail
self.tail.next = self.head
return self
''' Utils '''
def is_empty(self):
return self.size == 0
def get_node_object(self, value):
current_node = self.head
stop_at_node = None
while current_node is not stop_at_node:
if current_node.value == value:
return current_node
current_node = current_node.next
stop_at_node = self.head
return None
def exists(self, value):
current_node = self.head
stop_at_node = None
while current_node is not stop_at_node:
if current_node.value == value:
return True
current_node = current_node.next
stop_at_node = self.head
return False
''' Checks '''
def empty_check(self):
assert len(self) > 0, 'Doubly linked list is empty'
def increment_size(self):
self.size += 1
def decrement_size(self):
self.size -= 1
''' Dunders '''
def __len__(self):
return self.size
def __str__(self):
linked_list = ''
current_node = self.head
stop_at_node = None
while current_node is not stop_at_node:
linked_list += f'{current_node.value} <-> '
current_node = current_node.next
stop_at_node = self.head
linked_list += f'{current_node.value}(head)'
return linked_list
def __repr__(self):
return str(self)
d = CircularDoublyLinkedList()
d.insert_at_start(4)
d.insert_at_start(3)
d.insert_at_start(2)
d.insert_at_start(1)
d.insert_at_end(100)
print(d)
d.remove_head()
print(d)
print(d.reverse_linked_list())
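# A few more calls exercising the lookup, insertion, and removal helpers defined
# above (output reflects the state left by the statements above):
node = d.get_node_object(3)
d.insert_before(node, 2.5)
print(d)
print(d.exists(100), d.exists(42))
d.remove_node(node)
print(d, len(d))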
|
# Angus Dempster, Francois Petitjean, Geoff Webb
# Dempster A, Petitjean F, Webb GI (2019) ROCKET: Exceptionally fast and
# accurate time series classification using random convolutional kernels.
# arXiv:1910.13051
import argparse
import numpy as np
import pandas as pd
import time
from numba import njit, prange
from sklearn.linear_model import RidgeClassifierCV
from rocket_functions import generate_kernels, apply_kernels, apply_kernel
# == notes =====================================================================
# - This script is intended to allow for reproduction of the experiments on the
# additional 2018 datasets in the UCR archive, using the txt versions of those
# datasets from timeseriesclassification.com (Univariate2018_arff.zip).
# - This code has significant overlap with *reproduce_experiments_bakeoff.py*
# but, for convenience, is provided as a separate script.
# - The differences from *reproduce_experiments_bakeoff.py* relate to:
# - normalising input time series;
# - handling missing values (missing values are interpolated); and
# - handling variable length time series (time series are rescaled or used
# "as is", using a variation of *apply_kernels(...)*, as determined by
# 10-fold cross-validation).
# - The required arguments for this script are:
# - -i or --input_path, the parent directory for the datasets; and
# - -o or --output_path, to save "results_additional.csv".
# - Optional arguments allow you to set the number of runs, -n or --num_runs,
# and the number of kernels, -k or --num_kernels.
# - If input_path is ".../Univariate_arff/", then each dataset should be
# located at "{input_path}/{dataset_name}/{dataset_name}_TRAIN.txt", etc.
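# - A typical invocation (script name and paths here are illustrative) is:
#   python reproduce_experiments_additional.py -i ".../Univariate_arff" -o "./results" -n 10 -k 10000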
# == parse arguments ===========================================================
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input_path", required = True)
parser.add_argument("-o", "--output_path", required = True)
parser.add_argument("-n", "--num_runs", type = int, default = 10)
parser.add_argument("-k", "--num_kernels", type = int, default = 10_000)
arguments = parser.parse_args()
# == additional dataset names ==================================================
dataset_names_additional = \
(
"ACSF1",
"AllGestureWiimoteX",
"AllGestureWiimoteY",
"AllGestureWiimoteZ",
"BME",
"Chinatown",
"Crop",
"DodgerLoopDay",
"DodgerLoopGame",
"DodgerLoopWeekend",
"EOGHorizontalSignal",
"EOGVerticalSignal",
"EthanolLevel",
"FreezerRegularTrain",
"FreezerSmallTrain",
"Fungi",
"GestureMidAirD1",
"GestureMidAirD2",
"GestureMidAirD3",
"GesturePebbleZ1",
"GesturePebbleZ2",
"GunPointAgeSpan",
"GunPointMaleVersusFemale",
"GunPointOldVersusYoung",
"HouseTwenty",
"InsectEPGRegularTrain",
"InsectEPGSmallTrain",
"MelbournePedestrian",
"MixedShapesRegularTrain",
"MixedShapesSmallTrain",
"PLAID",
"PickupGestureWiimoteZ",
"PigAirwayPressure",
"PigArtPressure",
"PigCVP",
"PowerCons",
"Rock",
"SemgHandGenderCh2",
"SemgHandMovementCh2",
"SemgHandSubjectCh2",
"ShakeGestureWiimoteZ",
"SmoothSubspace",
"UMD"
)
# == apply kernels, variable input lengths =====================================
# if variable length time series are used "as is", the effective length of some
# kernels, including dilation (if set with reference to, e.g., the longest
# time series in a dataset, the default behaviour), may be larger than some
# input time series; this is irrelevant if padding is applied; even where
# padding is not applied, this should only affect a minority of kernels
# (dilation for most kernels is relatively small); the default behaviour of
# *apply_kernels_jagged(...)* is to "skip" incompatible kernels (i.e., where the
# effective size of the kernel including dilation is larger than the input time
# series including padding)
@njit(parallel = True, fastmath = True)
def apply_kernels_jagged(X, kernels, input_lengths):
weights, lengths, biases, dilations, paddings = kernels
num_examples = len(X)
num_kernels = len(weights)
# initialise output
_X = np.zeros((num_examples, num_kernels * 2)) # 2 features per kernel
for i in prange(num_examples):
for j in range(num_kernels):
# apply a kernel only if its effective length fits the (padded) input; incompatible kernels are skipped
if (input_lengths[i] + (2 * paddings[j])) > ((lengths[j] - 1) * dilations[j]):
_X[i, (j * 2):((j * 2) + 2)] = \
apply_kernel(X[i][:input_lengths[i]], weights[j][:lengths[j]], lengths[j], biases[j], dilations[j], paddings[j])
return _X
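# Worked example of the check above (numbers are illustrative): a kernel of length 9
# with dilation 32 spans (9 - 1) * 32 = 256 steps, so an unpadded input of length 200
# is skipped, while an input of length 300 (or a shorter one with padding) is transformed.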
# == additional convenience function ===========================================
def run_additional(training_data, test_data, num_runs = 10, num_kernels = 10_000):
# assumes variable length time series are padded with nan
get_input_lengths = lambda X : X.shape[1] - (~np.isnan(np.flip(X, 1))).argmax(1)
def rescale(X, reference_length):
_X = np.zeros([len(X), reference_length])
input_lengths = get_input_lengths(X)
for i in range(len(X)):
_X[i] = np.interp(np.linspace(0, 1, reference_length), np.linspace(0, 1, input_lengths[i]), X[i][:input_lengths[i]])
return _X
def interpolate_nan(X):
_X = X.copy()
good = ~np.isnan(X)
for i in np.where(np.any(~good, 1))[0]:
_X[i] = np.interp(np.arange(len(X[i])), np.where(good[i])[0], X[i][good[i]])
return _X
results = np.zeros(num_runs)
timings = np.zeros([4, num_runs]) # training transform, test transform, training, test
Y_training, X_training = training_data[:, 0].astype(int), training_data[:, 1:] # np.int is removed in newer NumPy
Y_test, X_test = test_data[:, 0].astype(int), test_data[:, 1:]
variable_lengths = False
# handle three cases: (1) same lengths, no missing values; (2) same lengths,
# missing values; and (3) variable lengths, no missing values
if np.any(np.isnan(X_training)):
input_lengths_training = get_input_lengths(X_training)
input_lengths_training_max = input_lengths_training.max()
input_lengths_test = get_input_lengths(X_test)
# missing values (same lengths)
if np.all(input_lengths_training == input_lengths_training_max):
X_training = interpolate_nan(X_training)
X_test = interpolate_nan(X_test)
# variable lengths (no missing values)
else:
variable_lengths = True
num_folds = 10
cross_validation_results = np.zeros([2, num_folds])
# normalise time series
X_training = (X_training - np.nanmean(X_training, axis = 1, keepdims = True)) / (np.nanstd(X_training, axis = 1, keepdims = True) + 1e-8)
X_test = (X_test - np.nanmean(X_test, axis = 1, keepdims = True)) / (np.nanstd(X_test, axis = 1, keepdims = True) + 1e-8)
for i in range(num_runs):
# -- variable lengths --------------------------------------------------
if variable_lengths:
kernels = generate_kernels(input_lengths_training_max, num_kernels)
time_a = time.perf_counter()
X_training_transform_rescale = apply_kernels(rescale(X_training, input_lengths_training_max), kernels)
X_training_transform_jagged = apply_kernels_jagged(X_training, kernels, input_lengths_training)
time_b = time.perf_counter()
timings[0, i] = time_b - time_a
# indices for cross-validation folds
I = np.random.permutation(len(X_training))
I = np.array_split(I, num_folds)
time_a = time.perf_counter()
# j = 0 -> rescale
# j = 1 -> "as is" ("jagged")
for j in range(2):
for k in range(num_folds):
VA, *TR = np.roll(I, k, axis = 0)
TR = np.concatenate(TR)
classifier = RidgeClassifierCV(alphas = 10 ** np.linspace(-3, 3, 10), normalize = True)
if j == 0: # rescale
classifier.fit(X_training_transform_rescale[TR], Y_training[TR])
cross_validation_results[j][k] = classifier.score(X_training_transform_rescale[VA], Y_training[VA])
elif j == 1: # jagged
classifier.fit(X_training_transform_jagged[TR], Y_training[TR])
cross_validation_results[j][k] = classifier.score(X_training_transform_jagged[VA], Y_training[VA])
best = cross_validation_results.sum(1).argmax()
time_b = time.perf_counter()
timings[2, i] = time_b - time_a
classifier = RidgeClassifierCV(alphas = 10 ** np.linspace(-3, 3, 10), normalize = True)
if best == 0: # rescale
time_a = time.perf_counter()
X_test_transform_rescale = apply_kernels(rescale(X_test, input_lengths_training_max), kernels)
time_b = time.perf_counter()
timings[1, i] = time_b - time_a
time_a = time.perf_counter()
classifier.fit(X_training_transform_rescale, Y_training)
time_b = time.perf_counter()
timings[2, i] += time_b - time_a
time_a = time.perf_counter()
results[i] = classifier.score(X_test_transform_rescale, Y_test)
time_b = time.perf_counter()
timings[3, i] = time_b - time_a
elif best == 1: # jagged
time_a = time.perf_counter()
X_test_transform_jagged = apply_kernels_jagged(X_test, kernels, input_lengths_test)
time_b = time.perf_counter()
timings[1, i] = time_b - time_a
time_a = time.perf_counter()
classifier.fit(X_training_transform_jagged, Y_training)
time_b = time.perf_counter()
timings[2, i] += time_b - time_a
time_a = time.perf_counter()
results[i] = classifier.score(X_test_transform_jagged, Y_test)
time_b = time.perf_counter()
timings[3, i] = time_b - time_a
# -- same lengths ------------------------------------------------------
else:
kernels = generate_kernels(X_training.shape[1], num_kernels)
# -- transform training --------------------------------------------
time_a = time.perf_counter()
X_training_transform = apply_kernels(X_training, kernels)
time_b = time.perf_counter()
timings[0, i] = time_b - time_a
# -- transform test ------------------------------------------------
time_a = time.perf_counter()
X_test_transform = apply_kernels(X_test, kernels)
time_b = time.perf_counter()
timings[1, i] = time_b - time_a
# -- training ------------------------------------------------------
time_a = time.perf_counter()
classifier = RidgeClassifierCV(alphas = 10 ** np.linspace(-3, 3, 10), normalize = True)
classifier.fit(X_training_transform, Y_training)
time_b = time.perf_counter()
timings[2, i] = time_b - time_a
# -- test ----------------------------------------------------------
time_a = time.perf_counter()
results[i] = classifier.score(X_test_transform, Y_test)
time_b = time.perf_counter()
timings[3, i] = time_b - time_a
return results, timings
# == run through the additional datasets =======================================
results_additional = pd.DataFrame(index = dataset_names_additional,
columns = ["accuracy_mean",
"accuracy_standard_deviation",
"time_training_seconds",
"time_test_seconds"],
data = 0)
results_additional.index.name = "dataset"
compiled = False
print(f"RUNNING".center(80, "="))
for dataset_name in dataset_names_additional:
print(f"{dataset_name}".center(80, "-"))
# -- read data -------------------------------------------------------------
print(f"Loading data".ljust(80 - 5, "."), end = "", flush = True)
if dataset_name != "PLAID":
training_data = np.loadtxt(f"{arguments.input_path}/{dataset_name}/{dataset_name}_TRAIN.txt")
test_data = np.loadtxt(f"{arguments.input_path}/{dataset_name}/{dataset_name}_TEST.txt")
else:
training_data = np.loadtxt(f"{arguments.input_path}/{dataset_name}/{dataset_name}_TRAIN.txt", delimiter = ",")
test_data = np.loadtxt(f"{arguments.input_path}/{dataset_name}/{dataset_name}_TEST.txt", delimiter = ",")
print("Done.")
# -- precompile ------------------------------------------------------------
if not compiled:
print(f"Compiling ROCKET functions (once only)".ljust(80 - 5, "."), end = "", flush = True)
_ = generate_kernels(100, 10)
apply_kernels(np.zeros_like(training_data)[:, 1:], _)
apply_kernels_jagged(np.zeros_like(training_data)[:, 1:], _, np.array([training_data.shape[1]] * len(training_data)))
compiled = True
print("Done.")
# -- run -------------------------------------------------------------------
print(f"Performing runs".ljust(80 - 5, "."), end = "", flush = True)
results, timings = run_additional(training_data, test_data,
num_runs = arguments.num_runs,
num_kernels = arguments.num_kernels)
timings_mean = timings.mean(1)
print("Done.")
# -- store results ---------------------------------------------------------
results_additional.loc[dataset_name, "accuracy_mean"] = results.mean()
results_additional.loc[dataset_name, "accuracy_standard_deviation"] = results.std()
results_additional.loc[dataset_name, "time_training_seconds"] = timings_mean[[0, 2]].sum()
results_additional.loc[dataset_name, "time_test_seconds"] = timings_mean[[1, 3]].sum()
print(f"FINISHED".center(80, "="))
results_additional.to_csv(f"{arguments.output_path}/results_additional.csv")
|
import argparse
import os
import numpy as np
import random
import imagegen
import matplotlib.pyplot as plt
import cv2
from PIL import Image
import matplotlib.colors as color
#import matplotlib.image as mpimg
import matplotlib
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-n", "--number", help="number of images",
type=str)
parser.add_argument("-o", "--output", help="output path")
args = parser.parse_args()
print(f"number of generated images: {args.number}")
print(f"ouput path is: {args.output}")
# The output path is expected to end with "/" (e.g. "out/"); create the
# output directory and its npy/ and image/ subdirectories if missing.
if not os.path.exists(args.output):
os.makedirs(args.output[:-1])
# display flag: when True, generated images are shown and re-loaded for inspection
display = False
if not os.path.exists(f"{args.output}npy/"):
os.makedirs(f"{args.output[:-1]}/npy")
if not os.path.exists(f"{args.output}image/"):
os.makedirs(f"{args.output[:-1]}/image")
min_shape = random.randint(10, 101)
max_shape = random.randint(min_shape+1, 201)
min_size = random.randint(2, 10)
boolOverlap = bool(random.getrandbits(1))
counter = 0
while counter < args.number:
randomVote = int(random.uniform(0, 100))
if randomVote > 50:
generated_Image = imagegen.generateRandShapes(
min_s=min_shape, max_s=max_shape, min_size=min_size, overlap=True)
else:
generated_Image = imagegen.GeneratePerlin()
#generated_Image = color.rgb_to_hsv(generated_Image)
if display:
print(f"shape is {generated_Image.shape}")
print(f"max val is {np.amax(generated_Image[:,:,1])}")
plt.imshow(generated_Image)
plt.show()
name = format(counter, "07d")
np.save(f"{args.output}npy/{name}.npy", generated_Image)
cv2.imwrite(f"{args.output}image/{name}.tiff", generated_Image)
#matplotlib.image.imsave(f"{args.output}{name}.npy", generated_Image)
if display:
loaded = np.load(f"{args.output}npy/{name}.npy")
plt.imshow(loaded)
plt.show()
counter += 1
|
from setuptools import setup
from os import path
here = path.abspath(path.dirname(__file__))
setup(
name='bokchoy',
version='1.0.0',
description='Simple distributed task queue using NSQ',
url='https://github.com/ulule/bokchoy',
author='Ulule',
author_email='tech@ulule.com',
license='MIT',
classifiers=[
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
]
)
|
from asyncpg.exceptions import PostgresError
from bot.exceptions import (NotEnoughPrefixes, PrefixExists,
PrefixDoesntExist)
class GuildCache:
def __init__(self, bot):
self._bot = bot
self.guilds = {}
@property
def bot(self):
return self._bot
def update_cached_guild(self, guild_id, **values):
"""
Updates a server's cached values. None values are ignored
"""
settings = self.get_settings(guild_id)
for k, v in values.items():
if v is None:
continue
settings[k] = v
# Reverse-sort prefixes so that longer prefixes take precedence over shorter ones.
# e.g. with prefixes 'a' and 'aa': if 'a' came first in the list it would always
# match first, and 'aa' would never get invoked.
self._set_internal_value(guild_id, 'prefixes', sorted(list(self.prefixes(guild_id, use_set=True)), reverse=True))
async def set_value(self, guild_id, name, value):
# WARNING: SQL injection is possible if user input is ever allowed in the name variable
sql = 'INSERT INTO guilds (guild, {0}) VALUES ($1, $2) ON CONFLICT (guild) DO UPDATE SET {0}=$2'.format(name)
try:
await self.bot.dbutil.execute(sql, (guild_id, value))
success = True
except PostgresError:
success = False
settings = self.get_settings(guild_id)
settings[name] = value
return success
# Used for caching values that are not present in the database, or that are cached
# in a different form (e.g. a set cached as a list). Currently used only for prefixes,
# which are kept as a reverse-sorted list when matching usable prefixes and as a set
# when adding new ones.
def _set_internal_value(self, guild_id, name, value):
settings = self.get_settings(guild_id)
internals = settings.get('_internals')
if internals is None:
internals = {}
settings['_internals'] = internals
internals[name] = value
def _get_internals(self, guild_id):
settings = self.get_settings(guild_id)
internals = settings.get('_internals')
if internals is None:
internals = {}
settings['_internals'] = internals
return internals
# utils
def prefixes(self, guild_id, use_set=False):
if use_set:
return self.get_settings(guild_id).get('prefixes', {self.bot.default_prefix})
prefixes = self._get_internals(guild_id).get('prefixes')
if prefixes is None:
prefixes = list(self.get_settings(guild_id).get('prefixes', [self.bot.default_prefix]))
self._set_internal_value(guild_id, 'prefixes', prefixes)
return prefixes
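# e.g. for a guild with prefixes {'!', '!!'}: the settings dict stores the set,
# while _internals stores the reverse-sorted list ['!!', '!'] used when matching,
# so the longer prefix '!!' is tried before '!'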
async def add_prefix(self, guild_id, prefix):
settings = self.get_settings(guild_id)
if 'prefixes' not in settings:
prefixes = {self.bot.default_prefix}
settings['prefixes'] = prefixes
else:
prefixes = self.prefixes(guild_id, use_set=True)
if prefix in prefixes:
raise PrefixExists('Prefix is already in use')
success = await self.bot.dbutil.add_prefix(guild_id, prefix)
if success:
prefixes_list = self.prefixes(guild_id)
prefixes_list.append(prefix)
prefixes_list.sort(reverse=True)
prefixes.add(prefix)
return success
async def remove_prefix(self, guild_id, prefix):
prefixes = self.prefixes(guild_id, use_set=True)
if prefix not in prefixes:
raise PrefixDoesntExist("Prefix doesn't exist")
if len(prefixes) == 1:
raise NotEnoughPrefixes('Must have at least one prefix')
success = await self.bot.dbutil.remove_prefix(guild_id, prefix)
if success:
prefixes.discard(prefix)
try:
self.prefixes(guild_id).remove(prefix)
except ValueError:
pass
return success
# moderation
def modlog(self, guild_id):
return self.get_settings(guild_id).get('modlog', None)
async def set_modlog(self, guild_id, channel_id):
return await self.set_value(guild_id, 'modlog', channel_id)
def mute_role(self, guild_id):
return self.get_settings(guild_id).get('mute_role', None)
async def set_mute_role(self, guild_id, role_id):
return await self.set_value(guild_id, 'mute_role', role_id)
def log_unmutes(self, guild_id):
return self.get_settings(guild_id).get('log_unmutes', False)
async def set_log_unmutes(self, guild_id, boolean):
return await self.set_value(guild_id, 'log_unmutes', boolean)
def keeproles(self, guild_id):
if self.get_settings(guild_id).get('keeproles', 0):
return True
else:
return False
async def set_keeproles(self, guild_id, value):
return await self.set_value(guild_id, 'keeproles', value)
# automod
def automute(self, guild_id):
return self.get_settings(guild_id).get('automute', False)
async def set_automute(self, guild_id, on: bool):
return await self.set_value(guild_id, 'automute', on)
def automute_limit(self, guild_id):
return self.get_settings(guild_id).get('automute_limit', 10)
async def set_automute_limit(self, guild_id, limit: int):
return await self.set_value(guild_id, 'automute_limit', limit)
def automute_time(self, guild_id):
return self.get_settings(guild_id).get('automute_time')
async def set_automute_time(self, guild_id, time):
return await self.set_value(guild_id, 'automute_time', time)
# join config
def join_message(self, guild_id, default_message=False):
message = self.get_settings(guild_id).get('on_join_message')
if message is None and default_message:
message = self.bot.config.join_message
return message
async def set_join_message(self, guild_id, message):
return await self.set_value(guild_id, 'on_join_message', message)
def join_channel(self, guild_id):
return self.get_settings(guild_id).get('on_join_channel')
async def set_join_channel(self, guild_id, channel):
return await self.set_value(guild_id, 'on_join_channel', channel)
# random color on join
def random_color(self, guild_id):
return self.get_settings(guild_id).get('color_on_join', False)
async def set_random_color(self, guild_id, value):
return await self.set_value(guild_id, 'color_on_join', value)
# leave config
def leave_message(self, guild_id, default_message=False):
message = self.get_settings(guild_id).get('on_leave_message')
if message is None and default_message:
message = self.bot.config.leave_message
return message
async def set_leave_message(self, guild_id, message):
return await self.set_value(guild_id, 'on_leave_message', message)
def leave_channel(self, guild_id):
return self.get_settings(guild_id).get('on_leave_channel')
async def set_leave_channel(self, guild_id, channel):
return await self.set_value(guild_id, 'on_leave_channel', channel)
# On message edit
def on_edit_message(self, guild_id, default_message=False):
message = self.get_settings(guild_id).get('on_edit_message')
if message is None and default_message:
message = self.bot.config.edit_message
return message
async def set_on_edit_message(self, guild_id, message):
return await self.set_value(guild_id, 'on_edit_message', message)
def on_edit_channel(self, guild_id):
return self.get_settings(guild_id).get('on_edit_channel')
async def set_on_edit_channel(self, guild_id, channel):
return await self.set_value(guild_id, 'on_edit_channel', channel)
def on_edit_embed(self, guild_id):
return self.get_settings(guild_id).get('on_edit_embed')
async def set_on_edit_embed(self, guild_id, boolean):
return await self.set_value(guild_id, 'on_edit_embed', boolean)
# On message delete
def on_delete_message(self, guild_id, default_message=False):
message = self.get_settings(guild_id).get('on_delete_message')
if message is None and default_message:
message = self.bot.config.delete_message
return message
async def set_on_delete_message(self, guild_id, message):
return await self.set_value(guild_id, 'on_delete_message', message)
def on_delete_channel(self, guild_id):
return self.get_settings(guild_id).get('on_delete_channel')
async def set_on_delete_channel(self, guild_id, channel):
return await self.set_value(guild_id, 'on_delete_channel', channel)
def on_delete_embed(self, guild_id):
return self.get_settings(guild_id).get('on_delete_embed')
async def set_on_delete_embed(self, guild_id, boolean):
return await self.set_value(guild_id, 'on_delete_embed', boolean)
def dailygachi(self, guild_id):
return self.get_settings(guild_id).get('dailygachi')
async def set_dailygachi(self, guild_id, channel):
return await self.set_value(guild_id, 'dailygachi', channel)
def get_settings(self, guild_id):
settings = self[guild_id]
if not settings:
settings = {}
self[guild_id] = settings
return settings
def __getitem__(self, item):
return self.guilds.get(item, None)
def __setitem__(self, key, value):
self.guilds[key] = value
def __delitem__(self, key):
try:
del self.guilds[key]
except KeyError:
pass
|
import tempfile
class FileHelper:
'''Context manager to use files in pulzar
The original methods were empty stubs; this minimal sketch assumes the helper
hands out a named temporary file and closes it on exit.
'''
def __init__(self) -> None:
self._tmp_file = None
def __enter__(self):
# Create a named temporary file and expose it to the caller
self._tmp_file = tempfile.NamedTemporaryFile(delete=False)
return self._tmp_file
def __exit__(self, *args):
# Close the handle; callers remove the file themselves when done
if self._tmp_file is not None:
self._tmp_file.close()
|
# file1=open('text.txt','w')
# print(file1.write('\n'+'test append'))
"""
ТЗ:
По данным введенным пользователем вычислить, сможет он купить выбранный им товар или нет.
Если товар в списке отсутствует - NOT OK
__________
Входные данные: название товара,кол-во товара, наличные
Реализовать 2+ функциями
Выходные данные: словарь состящий из:
{названия товара как ключ:кол-во, следующий элемент - потраченная сумма - ключ, значение сумма}
"""
data = {
'glock.20':2000,
'usp':2500,
'fs':3467,
'deagle':5000,
'p92':4000,
'colt':90000,
'magnum':6000,
'p90':10000,
'mp7':11000,
'uzi':12000,
'mp5':14000,
'm16':20000,
'ak-47':19000,
'm416':24000,
'famas':21000,
'AWM':30000,
'Dragunov':31000,
'Barett':50000,
'RPG':100000,
'Topol-M':2000000
}
# def order(name):
# if name in data.keys():
# return name
# else:
# print('No such item!')
# name=order(input("name: "))
# print(name,data[name])
#
# def order1(money):
# if money>=data[name]:
# print('success')
# return money
# else:
# print('Insufficient funds ')
# order1(int(input('money:')))
"""# def order(weapon_name):
# if weapon_name in data.keys():
# return weapon_name
# else:
# print('Такого товара нет!')
# weapon_name=order(input("weapon: "))
# print(weapon_name,data[weapon_name])
def count_money(price,quantity,cash):
if cash>=data[weapon_name]*quantity:
print('success')
return cash-price*quantity
else:
print('Недостаточно средств')
def shop(weapon_name,quantity,cash):
if weapon_name in data.keys():
print('success')
return weapon_name,quantity
else:
print('Такого товара нет!')
payback = count_money(data[weapon_name],1, cash)
shop(weapon_name,int(input('quantity'),int(input('Total'))))
print('ok')"""
""""# def order(weapon_name):
# if weapon_name in data.keys():
# return weapon_name
# else:
# print('Такого товара нет!')
# weapon_name=order(input("weapon: "))
# print(weapon_name,data[weapon_name])"""
def count_money(price, quantity, cash):
# Return the change if the user can afford the order, otherwise a message
if cash >= price * quantity:
print('success')
return cash - price * quantity
else:
return 'Insufficient funds'
def shop(weapon_name, quantity, cash):
if weapon_name in data.keys():
payback = count_money(data[weapon_name], quantity, cash)
data1 = {}
data1[weapon_name] = quantity
data1[quantity * data[weapon_name]] = payback
print(data1)
return data1
else:
print('No such item!')  # NOT OK
shop(input('weapon name: '), int(input('quantity: ')), int(input('cash: ')))
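# Example of a non-interactive call, given the definitions above:
# shop('ak-47', 2, 50000) prints 'success' and {'ak-47': 2, 38000: 12000}
# (2 rifles at 19000 each cost 38000, leaving 12000 in change)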
|
"""This module contains the GeneFlow DataManager class."""
import inspect
from geneflow.uri_parser import URIParser
from geneflow.log import Log
from geneflow.extend import data_manager_contexts
class DataManager:
"""
Copy/move, list, delete data for various contexts.
Currently, these contexts include: local, agave.
"""
@classmethod
def list(cls, uri=None, parsed_uri=None, **kwargs):
"""
List data in various contexts.
URIs are parsed to extract contexts, and the appropriate method is
called. Either uri or parsed_uri may be specified, but not both. If
both are specified, parsed_uri is used.
Args:
uri: URI to list.
parsed_uri: URI to list, already parsed.
**kwargs: Other arguments specific to context.
Returns:
On success: the result of the context-specific list method.
On failure: False.
"""
# parse and validate URI
if not parsed_uri:
parsed_uri = URIParser.parse(uri)
if not parsed_uri:
Log.an().error('invalid uri: %s', uri)
return False
# check if list method exists for context
try:
list_func = getattr(cls, '_list_{}'.format(parsed_uri['scheme']))
except AttributeError:
Log.an().error('_list_%s method not defined', parsed_uri['scheme'])
return False
return list_func(parsed_uri, **kwargs)
@classmethod
def exists(cls, uri=None, parsed_uri=None, **kwargs):
"""
Check if URI exists.
URIs are parsed to extract contexts, and the appropriate method is
called. Either uri or parsed_uri may be specified, but not both. If
both are specified, parsed_uri is used.
Args:
uri: URI to check.
parsed_uri: URI to check, already parsed.
**kwargs: Other arguments specific to context.
Returns:
True if the URI exists, False if it doesn't exist, None if
an exception occurs.
"""
# parse and validate URI
if not parsed_uri:
parsed_uri = URIParser.parse(uri)
if not parsed_uri:
Log.an().error('invalid uri: %s', uri)
return None
# check if the exists method exists for context
try:
exists_func = getattr(cls, '_exists_{}'\
.format(parsed_uri['scheme']))
except AttributeError:
Log.an().error(
'_exists_%s method not defined', parsed_uri['scheme']
)
return None
return exists_func(parsed_uri, **kwargs)
@classmethod
def delete(cls, uri=None, parsed_uri=None, **kwargs):
"""
Delete URI.
URIs are parsed to extract contexts, and the appropriate method is
called. Either uri or parsed_uri may be specified, but not both. If
both are specified, parsed_uri is used.
Args:
uri: URI to delete.
parsed_uri: URI to delete, already parsed.
**kwargs: Other arguments specific to context.
Returns:
On success: True.
On failure: False.
"""
# parse and validate URI
if not parsed_uri:
parsed_uri = URIParser.parse(uri)
if not parsed_uri:
Log.an().error('invalid uri: %s', uri)
return False
# check if the delete method exists for context
try:
delete_func = getattr(cls, '_delete_{}'\
.format(parsed_uri['scheme']))
except AttributeError:
Log.an().error(
'_delete_%s method not defined', parsed_uri['scheme']
)
return False
return delete_func(parsed_uri, **kwargs)
@classmethod
def mkdir(cls, uri=None, parsed_uri=None, recursive=False, **kwargs):
"""
Create directory at URI.
URIs are parsed to extract contexts, and the appropriate method is
called. Either uri or parsed_uri may be specified, but not both. If
both are specified, parsed_uri is used.
Args:
uri: URI to create.
parsed_uri: URI to create, already parsed.
recursive: If true, recursively create parent directories.
**kwargs: Other arguments specific to context.
Returns:
On success: True.
On failure: False.
"""
# parse and validate URI
if not parsed_uri:
parsed_uri = URIParser.parse(uri)
if not parsed_uri:
Log.an().error('invalid uri: %s', uri)
return False
# check if the mkdir method exists for context
if recursive:
try:
mkdir_func = getattr(cls, '_mkdir_recursive_{}'\
.format(parsed_uri['scheme']))
except AttributeError:
Log.an().error(
'_mkdir_recursive_%s method not defined',
parsed_uri['scheme']
)
return False
else:
try:
mkdir_func = getattr(cls, '_mkdir_{}'\
.format(parsed_uri['scheme']))
except AttributeError:
Log.an().error(
'_mkdir_%s method not defined', parsed_uri['scheme']
)
return False
# always remove final slash from URI before calling mkdir
return mkdir_func(URIParser.parse(parsed_uri['chopped_uri']), **kwargs)
@classmethod
def copy(
cls,
src_uri=None,
parsed_src_uri=None,
dest_uri=None,
parsed_dest_uri=None,
**kwargs
):
"""
Copy data to/from/within workflow contexts.
Source and destination URIs are parsed to extract contexts, and
the appropriate methods are called accordingly.
Args:
src_uri: Source URI (or parsed_src_uri if already parsed).
dest_uri: Destination URI (or parsed_dest_uri if already parsed).
**kwargs: Other arguments specific to context.
Returns:
On success: True.
On failure: False.
"""
# parse and validate src URI
if not parsed_src_uri:
parsed_src_uri = URIParser.parse(src_uri)
if not parsed_src_uri:
Log.an().error('invalid src uri: %s', src_uri)
return False
# parse and validate dest URI
if not parsed_dest_uri:
parsed_dest_uri = URIParser.parse(dest_uri)
if not parsed_dest_uri:
Log.an().error('invalid dest uri: %s', dest_uri)
return False
# check if copy method exists for contexts
try:
copy_func = getattr(cls, '_copy_{}_{}'.format(
parsed_src_uri['scheme'], parsed_dest_uri['scheme']
))
except AttributeError:
Log.an().error(
'_copy_%s_%s method not defined',
parsed_src_uri['scheme'],
parsed_dest_uri['scheme']
)
return False
return copy_func(
parsed_src_uri,
parsed_dest_uri,
**{
list_item: kwargs[list_item]
for list_item in set(
[parsed_src_uri['scheme'], parsed_dest_uri['scheme']]
)
}
)
def init():
"""Import methods in the data_manager_contexts module as static methods."""
all_funcs = inspect.getmembers(data_manager_contexts, inspect.isfunction)
for func in all_funcs:
setattr(DataManager, func[0], staticmethod(func[1]))
# initialize the module when imported
init()
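# Usage sketch (URIs here are hypothetical; the exact URI forms accepted by URIParser
# and the contexts available depend on geneflow.extend.data_manager_contexts):
# if DataManager.mkdir(uri='/tmp/geneflow-demo', recursive=True):
#     DataManager.copy(src_uri='/tmp/input.txt', dest_uri='/tmp/geneflow-demo/input.txt')
#     print(DataManager.list(uri='/tmp/geneflow-demo'))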
|
from rest_framework import serializers
from .models import ExternalLink
class ExternalLinkSerializer(serializers.ModelSerializer):
class Meta:
model = ExternalLink
fields = '__all__'
|
# Copyright (C) 2018 lukerm
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## Imports ##
import os
import pickle
import numpy as np
np.random.seed(2018)
import sys
sys.path.append(os.path.join(os.path.expanduser('~'), 'find-tune', 'train'))
import perf_utils as pu
from imblearn.over_sampling import SMOTE
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import StratifiedKFold
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from keras.layers import Input, Dense
from keras.models import Model, load_model
from keras.optimizers import Adam
from keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint
from definitions import DATA_DIR
## Main ##
print('=== Preparing data ===')
# Load data from file
# X = embedding features (output from VGGish model)
# y = binary target variable (1 = target track, 0 = other sound)
# c = sound categories (e.g. 'Jingle Bells', 'target')
# s = seconds in the original YouTube track where this clip begins
# ids = YouTube unique identifiers for the track
# L = log-mel attributes (input to VGGish model)
data = np.load(os.path.join(DATA_DIR, 'embedding_data.npz'))
X, y, c, s, ids, L = data['X'], data['y'], data['c'], data['s'], data['i'], data['L']
# Train (80%) / val (20%) split
p = np.mean(y)
N = int(len(y)*0.2)
print('p: %.4f' % p) # ~ 7.5/1000
print('N: %d' % N) # ~ 700
# Note that whilst the mean of Binomial(N, p) is ~5, there is a very real chance that
# we could have very few positive data points in the validation set if we're not careful
print('P(X < 3) = %.3f (where X ~ Bin(N, p))' % np.mean(np.random.binomial(N, p, size=100000) < 3))
# So, to ensure this doesn't happen, we will explicitly hold out 5 positive examples for testing
i_pos = np.where(y == 1)[0]
i_neg = np.where(y == 0)[0]
i_pos_va = np.random.choice(i_pos, size=5, replace=False) # 5 for validation
i_pos_tr = np.array(list(set(i_pos) - set(i_pos_va)))  # complement, used for training
i_neg_tr = np.random.choice(i_neg, size=len(X)-N, replace=False) # Training
i_neg_va = np.array(list(set(i_neg) - set(i_neg_tr))) # Validation
# Create split datasets
X_tr = X[np.append(i_neg_tr, i_pos_tr), :]
y_tr = y[np.append(i_neg_tr, i_pos_tr)]
c_tr = c[np.append(i_neg_tr, i_pos_tr)]
s_tr = s[np.append(i_neg_tr, i_pos_tr)]
ids_tr = ids[np.append(i_neg_tr, i_pos_tr)]
L_tr = L[np.append(i_neg_tr, i_pos_tr), :, :]
X_va = X[np.append(i_neg_va, i_pos_va), :]
y_va = y[np.append(i_neg_va, i_pos_va)]
c_va = c[np.append(i_neg_va, i_pos_va)]
s_va = s[np.append(i_neg_va, i_pos_va)]
ids_va = ids[np.append(i_neg_va, i_pos_va)]
L_va = L[np.append(i_neg_va, i_pos_va), :, :]
print('Positive labels in training set: %d' % y_tr.sum())
print('Positive labels in validation set: %d' % y_va.sum())
print('Negative labels in training set: %d' % (y_tr==0).sum())
print('Negative labels in validation set: %d' % (y_va==0).sum())
# As this is quite an imbalanced problem, we'll use SMOTE to over sample the positive class
sm = SMOTE(random_state=2018)
X_tr_bal, y_tr_bal = sm.fit_sample(X_tr, y_tr)
print('Positive labels in balanced training set: %d' % y_tr_bal.sum())
print('Negative labels in balanced training set: %d' % (y_tr_bal==0).sum())
# Centre and scale the features
ss = StandardScaler()
X_tr_bal = ss.fit_transform(X_tr_bal)
X_tr = ss.transform(X_tr)
X_va = ss.transform(X_va)
## Linear classifier ##
print('\n')
print('=== Linear classifier ===')
print()
# Yields accuracy and perfect recall on positive class
# Suffers from imperfect precision on positive class (gives false positives)
# Fit a classifier for each regularization strength in the grid below
# (currently a single value, alpha = 1e-4) and print a report card for each model
for alpha in np.logspace(-4, -4, 1):
print('alpha = %.e' % alpha)
lr = LogisticRegression(C=1/alpha, solver='liblinear')
lr.fit(X_tr_bal, y_tr_bal)
y_pred_tr_bal = lr.predict(X_tr_bal)
y_pred_tr = lr.predict(X_tr)
y_pred_va = lr.predict(X_va)
pu.print_scorecard(y_tr_bal, y_pred_tr_bal, title='TRAIN (BAL.)')
pu.print_scorecard(y_tr, y_pred_tr, title='TRAIN')
pu.print_scorecard(y_va, y_pred_va, title='VALIDATION')
print()
# An analysis of incorrect predictions
pu.print_negatives(y_va, y_pred_va, c_va, ytids=ids_va, num_secs=s_va)
## Random Forest ##
print('\n')
print('=== Random forest classifier ===')
print()
# Performs quite well, good accuracy
# Perfect precision on positive class (desirable: don't want false positives)
# Note: overfits without cap on depth
rf = RandomForestClassifier(n_estimators = 50, max_depth = 5)
#rf.fit(X_tr_bal, y_tr_bal)
rf.fit(X_tr, y_tr)
y_pred_tr_bal = rf.predict(X_tr_bal)
y_pred_tr = rf.predict(X_tr)
y_pred_va = rf.predict(X_va)
pu.print_scorecard(y_tr_bal, y_pred_tr_bal, title='TRAIN (BAL.)')
pu.print_scorecard(y_tr, y_pred_tr, title='TRAIN')
pu.print_scorecard(y_va, y_pred_va, title='VALIDATION')
print()
pu.print_negatives(y_va, y_pred_va, c_va, ytids=ids_va, num_secs=s_va)
## Dense Neural Network ##
print('\n')
print('=== Neural network classifier ===')
print()
# Without any hyperparameter tuning, the network gets a perfect score on all metrics!
def fit_nn_model(lr0, h1, bsz, verbose=0, cp_path=None):
"""
Generic model for fitting a 1-layer neural network
"""
# Callbacks
lr_red = ReduceLROnPlateau(monitor='val_loss', min_delta=0, factor=0.75, patience=2)
e_stop = EarlyStopping(monitor='val_loss', min_delta=0, patience=15)
cbacks = [lr_red, e_stop]
if cp_path is not None:
cbacks.append(ModelCheckpoint(cp_path, save_best_only=True))
# Model architecture
ix = Input((X_tr_bal.shape[1],), name='vggish_feat_input')
x = Dense(h1, activation='relu', name='fc_last')(ix)
x = Dense(1, activation='sigmoid', name='classify')(x)
# Compile the Model
model = Model(ix, x)
optzr = Adam(lr=lr0)
model.compile(optzr, loss='binary_crossentropy', metrics=['accuracy'])
# Pre-training metrics
ep0_tr = model.evaluate(X_tr_bal, y_tr_bal, verbose=verbose)
ep0_va = model.evaluate(X_va, y_va, verbose=verbose)
# Fit the model
history = model.fit(X_tr_bal, y_tr_bal,
epochs=200, batch_size=bsz,
callbacks=cbacks,
validation_data=(X_va, y_va),
verbose=verbose,
)
return (model, history, (ep0_tr, ep0_va))
# Hyperparameters
lr0 = 0.01
h1 = 128
bsz = 64
model, history, _ = fit_nn_model(lr0, h1, bsz, verbose=0)
# Make (probability) predictions
y_pred_tr_bal = model.predict(X_tr_bal)[:, 0]
y_pred_tr = model.predict(X_tr)[:, 0]
y_pred_va = model.predict(X_va)[:, 0]
p_thresh = 0.5
pu.print_scorecard(y_tr_bal, y_pred_tr_bal > p_thresh, title='TRAIN (BAL.)')
pu.print_scorecard(y_tr, y_pred_tr > p_thresh, title='TRAIN')
pu.print_scorecard(y_va, y_pred_va > p_thresh, title='VALIDATION')
# More rigorous test: use cross-validation, 5-folds
# Again, perfect scorecards for each fold - shuffling is important for diversity of training data!
print('\n')
print('=== 5-fold CV ===')
print()
histories = []
fold_cntr = 0
skf = StratifiedKFold(5, shuffle=True, random_state=2018)
for i_tr, i_va in skf.split(X, y):
# Storage for data / models relating to this fold
fold_dir = os.path.join(DATA_DIR, 'fold%d' % fold_cntr)
os.makedirs(fold_dir, exist_ok=True)
# Create split datasets
X_tr = X[i_tr, :]
y_tr = y[i_tr]
c_tr = c[i_tr]
s_tr = s[i_tr]
ids_tr = ids[i_tr]
L_tr = L[i_tr, :, :]
X_va = X[i_va, :]
y_va = y[i_va]
c_va = c[i_va]
s_va = s[i_va]
ids_va = ids[i_va]
L_va = L[i_va, :, :]
print('FOLD %d' % fold_cntr)
print('======')
print()
print('Positive labels in training set: %d' % y_tr.sum())
print('Positive labels in validation set: %d' % y_va.sum())
# over sample with SMOTE
sm = SMOTE(random_state=2018)
X_tr_bal, y_tr_bal = sm.fit_sample(X_tr, y_tr)
print('Positive labels in balanced set: %d' % y_tr_bal.sum())
print()
print('=== Linear classifier ===')
alpha = 1e-4
lr = LogisticRegression(C=1/alpha, solver='liblinear')
lr.fit(X_tr_bal, y_tr_bal)
y_pred_va = lr.predict(X_va)
pu.print_scorecard(y_va, y_pred_va, title='VALIDATION')
pu.print_negatives(y_va, y_pred_va > p_thresh, c_va, ytids=ids_va, num_secs=s_va)
# Centre and scale the features
ss = StandardScaler()
X_tr_bal = ss.fit_transform(X_tr_bal)
X_tr = ss.transform(X_tr)
X_va = ss.transform(X_va)
with open(os.path.join(fold_dir, 'sc_fold%d.pkl' % fold_cntr), 'wb') as f:
pickle.dump(ss, f)
# Save data (with input scaled features) for later reference
np.savez(os.path.join(fold_dir, 'foldwise_data_tr.npz'), X=X_tr, y=y_tr, c=c_tr, s=s_tr, i=ids_tr, L=L_tr)
np.savez(os.path.join(fold_dir, 'foldwise_data_va.npz'), X=X_va, y=y_va, c=c_va, s=s_va, i=ids_va, L=L_va)
np.savez(os.path.join(fold_dir, 'foldwise_data_bal.npz'), X=X_tr_bal, y=y_tr_bal)
# Fit the model
save_path = os.path.join(fold_dir, 'nn_fold%d.model' % fold_cntr)
_, history, _ = fit_nn_model(lr0, h1, bsz, verbose=0, cp_path=save_path)
histories.append(history)
# Load the best model and predict
print()
print('=== Dense neural network ===')
model = load_model(save_path)
y_pred_va = model.predict(X_va)[:, 0]
pu.print_scorecard(y_va, y_pred_va > p_thresh, title='VALIDATION')
pu.print_negatives(y_va, y_pred_va > p_thresh, c_va, ytids=ids_va, num_secs=s_va)
print()
fold_cntr += 1
|
import asyncio
import sys
import threading
import time
from concurrent.futures import CancelledError
from contextlib import suppress
import pytest
from anyio import (
create_blocking_portal, create_capacity_limiter, create_event, create_task_group,
run_async_from_thread, run_sync_in_worker_thread, sleep, start_blocking_portal,
wait_all_tasks_blocked)
if sys.version_info < (3, 9):
current_task = asyncio.Task.current_task
else:
current_task = asyncio.current_task
pytestmark = pytest.mark.anyio
async def test_run_async_from_thread():
async def add(a, b):
assert threading.get_ident() == event_loop_thread_id
return a + b
def worker(a, b):
assert threading.get_ident() != event_loop_thread_id
return run_async_from_thread(add, a, b)
event_loop_thread_id = threading.get_ident()
result = await run_sync_in_worker_thread(worker, 1, 2)
assert result == 3
async def test_run_anyio_async_func_from_thread():
def worker(*args):
run_async_from_thread(sleep, *args)
return True
assert await run_sync_in_worker_thread(worker, 0)
async def test_run_in_thread_cancelled():
def thread_worker():
nonlocal state
state = 2
async def worker():
nonlocal state
state = 1
await run_sync_in_worker_thread(thread_worker)
state = 3
state = 0
async with create_task_group() as tg:
await tg.spawn(worker)
await tg.cancel_scope.cancel()
assert state == 1
async def test_run_in_thread_exception():
def thread_worker():
raise ValueError('foo')
with pytest.raises(ValueError) as exc:
await run_sync_in_worker_thread(thread_worker)
exc.match('^foo$')
async def test_run_in_custom_limiter():
def thread_worker():
nonlocal num_active_threads, max_active_threads
num_active_threads += 1
max_active_threads = max(num_active_threads, max_active_threads)
event.wait(1)
num_active_threads -= 1
async def task_worker():
await run_sync_in_worker_thread(thread_worker, limiter=limiter)
event = threading.Event()
num_active_threads = max_active_threads = 0
limiter = create_capacity_limiter(3)
async with create_task_group() as tg:
for _ in range(4):
await tg.spawn(task_worker)
await sleep(0.1)
assert num_active_threads == 3
assert limiter.borrowed_tokens == 3
event.set()
assert num_active_threads == 0
assert max_active_threads == 3
def test_run_async_from_unclaimed_thread():
async def foo():
pass
exc = pytest.raises(RuntimeError, run_async_from_thread, foo)
exc.match('This function can only be run from an AnyIO worker thread')
@pytest.mark.parametrize('cancellable, expected_last_active', [
(False, 'task'),
(True, 'thread')
], ids=['uncancellable', 'cancellable'])
async def test_cancel_worker_thread(cancellable, expected_last_active):
"""
Test that when a task running a worker thread is cancelled, the cancellation is not acted on
until the thread finishes.
"""
def thread_worker():
nonlocal last_active
run_async_from_thread(sleep_event.set)
time.sleep(0.2)
last_active = 'thread'
run_async_from_thread(finish_event.set)
async def task_worker():
nonlocal last_active
try:
await run_sync_in_worker_thread(thread_worker, cancellable=cancellable)
finally:
last_active = 'task'
sleep_event = create_event()
finish_event = create_event()
last_active = None
async with create_task_group() as tg:
await tg.spawn(task_worker)
await sleep_event.wait()
await tg.cancel_scope.cancel()
await finish_event.wait()
assert last_active == expected_last_active
@pytest.mark.parametrize('anyio_backend', ['asyncio'])
async def test_cancel_asyncio_native_task():
async def run_in_thread():
nonlocal task
task = current_task()
await run_sync_in_worker_thread(time.sleep, 1, cancellable=True)
task = None
async with create_task_group() as tg:
await tg.spawn(run_in_thread)
await wait_all_tasks_blocked()
task.cancel()
class TestBlockingPortal:
async def test_successful_call(self):
async def async_get_thread_id():
return threading.get_ident()
def external_thread():
thread_ids.append(portal.call(threading.get_ident))
thread_ids.append(portal.call(async_get_thread_id))
thread_ids = []
async with create_blocking_portal() as portal:
thread = threading.Thread(target=external_thread)
thread.start()
await run_sync_in_worker_thread(thread.join)
for thread_id in thread_ids:
assert thread_id == threading.get_ident()
async def test_aexit_with_exception(self):
"""Test that when the portal exits with an exception, all tasks are cancelled."""
def external_thread():
try:
portal.call(sleep, 3)
except BaseException as exc:
results.append(exc)
else:
results.append(None)
results = []
with suppress(Exception):
async with create_blocking_portal() as portal:
thread1 = threading.Thread(target=external_thread)
thread1.start()
thread2 = threading.Thread(target=external_thread)
thread2.start()
await sleep(0.1)
assert not results
raise Exception
await run_sync_in_worker_thread(thread1.join)
await run_sync_in_worker_thread(thread2.join)
assert len(results) == 2
assert isinstance(results[0], CancelledError)
assert isinstance(results[1], CancelledError)
async def test_aexit_without_exception(self):
"""Test that when the portal exits, it waits for all tasks to finish."""
def external_thread():
try:
portal.call(sleep, 0.2)
except BaseException as exc:
results.append(exc)
else:
results.append(None)
results = []
async with create_blocking_portal() as portal:
thread1 = threading.Thread(target=external_thread)
thread1.start()
thread2 = threading.Thread(target=external_thread)
thread2.start()
await sleep(0.1)
assert not results
await run_sync_in_worker_thread(thread1.join)
await run_sync_in_worker_thread(thread2.join)
assert results == [None, None]
async def test_call_portal_from_event_loop_thread(self):
async with create_blocking_portal() as portal:
exc = pytest.raises(RuntimeError, portal.call, threading.get_ident)
exc.match('This method cannot be called from the event loop thread')
@pytest.mark.parametrize('use_contextmanager', [False, True],
ids=['contextmanager', 'startstop'])
def test_start_with_new_event_loop(self, anyio_backend_name, anyio_backend_options,
use_contextmanager):
async def async_get_thread_id():
return threading.get_ident()
if use_contextmanager:
with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
thread_id = portal.call(async_get_thread_id)
else:
portal = start_blocking_portal(anyio_backend_name, anyio_backend_options)
try:
thread_id = portal.call(async_get_thread_id)
finally:
portal.call(portal.stop)
assert isinstance(thread_id, int)
assert thread_id != threading.get_ident()
def test_call_stopped_portal(self, anyio_backend_name, anyio_backend_options):
portal = start_blocking_portal(anyio_backend_name, anyio_backend_options)
portal.call(portal.stop)
pytest.raises(RuntimeError, portal.call, threading.get_ident).\
match('This portal is not running')
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
import regex
from jsonpath_rw import parse
from .constants import EdgeConstants
class CreateOptionParser(object):
def __init__(self, create_option):
self.create_option = create_option
def parse_create_option(self):
ret = {}
for compose_key in COMPOSE_KEY_CREATE_OPTION_MAPPING:
create_option_value = self.get_create_option_value(compose_key)
if create_option_value:
parser_func = COMPOSE_KEY_CREATE_OPTION_MAPPING[compose_key]['parser_func']
ret[compose_key] = parser_func(create_option_value)
return ret
def get_create_option_value(self, compose_key):
create_option_value_dict = {}
for API_key, API_jsonpath in COMPOSE_KEY_CREATE_OPTION_MAPPING[compose_key]['API_Info'].items():
jsonpath_expr = parse(API_jsonpath)
value_list = jsonpath_expr.find(self.create_option)
if value_list:
create_option_value_dict[API_key] = value_list[0].value
return create_option_value_dict
def service_parser_naive(create_options_details):
return list(create_options_details.values())[0]
def service_parser_expose(create_options_details):
return list(create_options_details['ExposedPorts'].keys())
def service_parser_command(create_options_details):
cmd = create_options_details['Cmd']
if not isinstance(cmd, list):
return cmd
return ' '.join(cmd).strip()
def service_parser_healthcheck(create_options_details):
healthcheck_config = create_options_details['Healthcheck']
try:
return {
'test': healthcheck_config['Test'],
'interval': time_ns_ms(healthcheck_config['Interval']),
'timeout': time_ns_ms(healthcheck_config['Timeout']),
'retries': healthcheck_config['Retries'],
'start_period': time_ns_ms(healthcheck_config['StartPeriod'])
}
except KeyError as err:
raise KeyError('Missing key : {0} in Healthcheck'.format(err))
def service_parser_stop_timeout(create_options_details):
try:
return str(int(create_options_details['StopTimeout'])) + 's'
except TypeError:
raise TypeError('StopTimeout should be an integer.')
def service_parser_hostconfig_devices(create_options_details):
devices_list = []
for device in create_options_details['Devices']:
try:
devices_list.append("{0}:{1}:{2}".format(device['PathOnHost'],
device['PathInContainer'], device['CgroupPermissions']))
except KeyError as err:
raise KeyError('Missing key : {0} in HostConfig.Devices.'.format(err))
return devices_list
def service_parser_hostconfig_restart(create_options_details):
restart_config = create_options_details['RestartPolicy']
ret = ""
if restart_config['Name'] == "":
ret = "no"
elif restart_config['Name'] == "on-failure":
try:
ret = "on-failure:{0}".format(restart_config['MaximumRetryCount'])
except KeyError as err:
raise KeyError('Missing key : {0} in HostConfig.RestartPolicy.'.format(err))
elif restart_config['Name'] == "always" or restart_config['Name'] == "unless-stopped":
ret = restart_config['Name']
else:
raise ValueError("RestartPolicy Name should be one of '', 'always', 'unless-stopped', 'on-failure'")
return ret
def service_parser_hostconfig_ulimits(create_options_details):
ulimits_dict = {}
for ulimit in create_options_details['Ulimits']:
try:
ulimits_dict[ulimit['Name']] = {
'soft': ulimit['Soft'],
'hard': ulimit['Hard']
}
except KeyError as err:
raise KeyError('Missing key : {0} in HostConfig.Ulimits'.format(err))
return ulimits_dict
def service_parser_hostconfig_logging(create_options_details):
try:
logging_dict = {
'driver': create_options_details['LogConfig']['Type'],
'options': create_options_details['LogConfig']['Config']
}
except KeyError as err:
raise KeyError('Missing key : {0} in HostConfig.LogConfig'.format(err))
return logging_dict
def service_parser_hostconfig_ports(create_options_details):
ports_list = []
for container_port, host_ports in create_options_details['PortBindings'].items():
for host_port_info in host_ports:
host_port = ""
if 'HostIp' in host_port_info and 'HostPort' in host_port_info:
host_port = "{0}:{1}".format(host_port_info['HostIp'], host_port_info['HostPort'])
elif 'HostIp' in host_port_info:
host_port = host_port_info['HostIp']
elif 'HostPort' in host_port_info:
host_port = host_port_info['HostPort']
ports_list.append("{0}:{1}".format(host_port, container_port))
return ports_list
def service_parser_networks(create_options_details):
networks_dict = {}
for nw, nw_config in create_options_details['NetworkingConfig'].items():
networks_dict[nw] = {}
if 'Aliases' in nw_config:
networks_dict[nw]['aliases'] = nw_config['Aliases']
if 'IPAMConfig' in nw_config:
if 'IPv4Address' in nw_config['IPAMConfig']:
networks_dict[nw]['ipv4_address'] = nw_config['IPAMConfig']['IPv4Address']
if 'IPv6Address' in nw_config['IPAMConfig']:
networks_dict[nw]['ipv6_address'] = nw_config['IPAMConfig']['IPv6Address']
return networks_dict
def service_parser_volumes(create_options_details):
volumes_list = []
for mount in create_options_details.get('Mounts', []):
try:
volume_info = {
'target': mount['Target'],
'type': mount['Type']
}
if mount['Type'] == 'volume' or mount['Type'] == 'bind':
volume_info['source'] = mount['Source']
if 'ReadOnly' in mount:
volume_info['read_only'] = mount['ReadOnly']
if mount['Type'] == 'volume' and 'VolumeOptions' in mount:
if 'NoCopy' in mount['VolumeOptions']:
volume_info['volume'] = {
'nocopy': mount['VolumeOptions']['NoCopy']
}
if mount['Type'] == 'bind' and 'BindOptions' in mount:
if 'Propagation' in mount['BindOptions']:
volume_info['bind'] = {
'propagation': mount['BindOptions']['Propagation']
}
if mount['Type'] == 'tmpfs' and 'TmpfsOptions' in mount:
if 'SizeBytes' in mount['TmpfsOptions']:
volume_info['tmpfs'] = {
'size': mount['TmpfsOptions']['SizeBytes']
}
except KeyError as e:
raise KeyError('Missing key {0} in create option HostConfig Mounts.'.format(e))
volumes_list.append(volume_info)
for bind in create_options_details.get('Binds', []):
target = None
# Binds should be in the format [source:]destination[:mode]
# Windows format and LCOW format are more strict than Linux format due to colons in Windows paths,
# so match with them first
match = regex.match(EdgeConstants.MOUNT_WIN_REGEX, bind) or regex.match(EdgeConstants.MOUNT_LCOW_REGEX, bind)
if match is not None:
source = match.group('source') or ''
target = match.group('destination')
read_only = match.group('mode') == 'ro'
else:
# Port of Docker daemon
# https://github.com/docker/docker-ce/blob/1c27a55b6259743f35549e96d06334a53d0c0549/components/engine/volume/mounts/linux_parser.go#L18-L28
parts = bind.split(':')
if len(parts) == 2 or (len(parts) == 3 and parts[2] in ('ro', 'rw', '')):
if parts[0] != '':
source = parts[0]
target = parts[1]
read_only = len(parts) == 3 and parts[2] == 'ro'
if target is not None:
volume_info = {
'type': 'bind' if source and os.path.isabs(source) else 'volume',
'source': source,
'target': target
}
if read_only:
volume_info['read_only'] = True
volumes_list.append(volume_info)
else:
raise ValueError('Invalid create option Binds: {0}'.format(bind))
return volumes_list
def time_ns_ms(ns):
if ns != 0 and ns < 1000000:
raise ValueError('The time should be 0 or at least 1000000 (1 ms)')
return str(int(ns / 1000000)) + 'ms'
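# e.g. time_ns_ms(30000000000) -> '30000ms' (Docker health-check durations in create options are given in nanoseconds)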
'''
The mapping relationship between docker compose key and create option API key
'docker compose key': {'API_Info': {'API key':'API jsonpath'}, 'parser_func': parser_func},
'''
COMPOSE_KEY_CREATE_OPTION_MAPPING = {
'hostname': {'API_Info': {'Hostname': "$['Hostname']"}, 'parser_func': service_parser_naive},
'domainname': {'API_Info': {'Domainname': "$['Domainname']"}, 'parser_func': service_parser_naive},
'user': {'API_Info': {'User': "$['User']"}, 'parser_func': service_parser_naive},
'expose': {'API_Info': {'ExposedPorts': "$['ExposedPorts']"}, 'parser_func': service_parser_expose},
'tty': {'API_Info': {'Tty': "$['Tty']"}, 'parser_func': service_parser_naive},
'environment': {'API_Info': {'Env': "$['Env']"}, 'parser_func': service_parser_naive},
'command': {'API_Info': {'Cmd': "$['Cmd']"}, 'parser_func': service_parser_command},
'healthcheck': {'API_Info': {'Healthcheck': "$['Healthcheck']"}, 'parser_func': service_parser_healthcheck},
'image': {'API_Info': {'Image': "$['Image']"}, 'parser_func': service_parser_naive},
'working_dir': {'API_Info': {'WorkingDir': "$['WorkingDir']"}, 'parser_func': service_parser_naive},
'entrypoint': {'API_Info': {'Entrypoint': "$['Entrypoint']"}, 'parser_func': service_parser_naive},
'mac_address': {'API_Info': {'MacAddress': "$['MacAddress']"}, 'parser_func': service_parser_naive},
'labels': {'API_Info': {'Labels': "$['Labels']"}, 'parser_func': service_parser_naive},
'stop_signal': {'API_Info': {'StopSignal': "$['StopSignal']"}, 'parser_func': service_parser_naive},
'stop_grace_period': {'API_Info': {'StopTimeout': "$['StopTimeout']"}, 'parser_func': service_parser_stop_timeout},
# HostConfig
'ports': {'API_Info': {'PortBindings': "$['HostConfig']['PortBindings']"}, 'parser_func': service_parser_hostconfig_ports},
'privileged': {'API_Info': {'Privileged': "$['HostConfig']['Privileged']"}, 'parser_func': service_parser_naive},
'network_mode': {'API_Info': {'NetworkMode': "$['HostConfig']['NetworkMode']"}, 'parser_func': service_parser_naive},
'devices': {'API_Info': {'Devices': "$['HostConfig']['Devices']"}, 'parser_func': service_parser_hostconfig_devices},
'dns': {'API_Info': {'Dns': "$['HostConfig']['Dns']"}, 'parser_func': service_parser_naive},
'dns_search': {'API_Info': {'DnsSearch': "$['HostConfig']['DnsSearch']"}, 'parser_func': service_parser_naive},
'restart': {
'API_Info': {'RestartPolicy': "$['HostConfig']['RestartPolicy']"},
'parser_func': service_parser_hostconfig_restart
},
'cap_add': {'API_Info': {'CapAdd': "$['HostConfig']['CapAdd']"}, 'parser_func': service_parser_naive},
'cap_drop': {'API_Info': {'CapDrop': "$['HostConfig']['CapDrop']"}, 'parser_func': service_parser_naive},
'ulimits': {'API_Info': {'Ulimits': "$['HostConfig']['Ulimits']"}, 'parser_func': service_parser_hostconfig_ulimits},
'logging': {'API_Info': {'LogConfig': "$['HostConfig']['LogConfig']"}, 'parser_func': service_parser_hostconfig_logging},
'extra_hosts': {'API_Info': {'ExtraHosts': "$['HostConfig']['ExtraHosts']"}, 'parser_func': service_parser_naive},
'read_only': {'API_Info': {'ReadonlyRootfs': "$['HostConfig']['ReadonlyRootfs']"}, 'parser_func': service_parser_naive},
'pid': {'API_Info': {'PidMode': "$['HostConfig']['PidMode']"}, 'parser_func': service_parser_naive},
'security_opt': {'API_Info': {'SecurityOpt': "$['HostConfig']['SecurityOpt']"}, 'parser_func': service_parser_naive},
'ipc': {'API_Info': {'IpcMode': "$['HostConfig']['IpcMode']"}, 'parser_func': service_parser_naive},
'cgroup_parent': {'API_Info': {'CgroupParent': "$['HostConfig']['CgroupParent']"}, 'parser_func': service_parser_naive},
# 'shm_size:':{'API_Info':'ShmSize','parser_func':service_parser_naive},
'sysctls': {'API_Info': {'Sysctls': "$['HostConfig']['Sysctls']"}, 'parser_func': service_parser_naive},
# 'tmpfs:':{'API_Info':'Tmpfs','parser_func':service_parser_naive},
'userns_mode': {'API_Info': {'UsernsMode': "$['HostConfig']['UsernsMode']"}, 'parser_func': service_parser_naive},
'isolation': {'API_Info': {'Isolation': "$['HostConfig']['Isolation']"}, 'parser_func': service_parser_naive},
# Volumes
'volumes': {
'API_Info': {
'Mounts': "$['HostConfig']['Mounts']",
'Binds': "$['HostConfig']['Binds']"
},
'parser_func': service_parser_volumes
},
# NetworkingConfig
'networks': {
'API_Info': {'NetworkingConfig': "$['NetworkingConfig']['EndpointsConfig']"},
'parser_func': service_parser_networks
}
}
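# Sketch of how this table might be consulted (illustrative only; the actual
# dispatch code lives elsewhere in this module, and the exact parser_func call
# signature is an assumption):
#
#     entry = COMPOSE_KEY_CREATE_OPTION_MAPPING['ports']
#     entry['API_Info']      # {'PortBindings': "$['HostConfig']['PortBindings']"}
#     entry['parser_func']   # service_parser_hostconfig_ports, converts the compose value
#
# The jsonpath string tells the caller where in the create-options document the
# parsed value belongs.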
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from test.common import test_dict
from box import Box, ConfigBox
class TestConfigBox:
def test_config_box(self):
g = {
"b0": "no",
"b1": "yes",
"b2": "True",
"b3": "false",
"b4": True,
"i0": "34",
"f0": "5.5",
"f1": "3.333",
"l0": "4,5,6,7,8",
"l1": "[2 3 4 5 6]",
}
cns = ConfigBox(bb=g)
assert cns.bb.list("l1", spliter=" ") == ["2", "3", "4", "5", "6"]
assert cns.bb.list("l0", mod=lambda x: int(x)) == [4, 5, 6, 7, 8]
assert not cns.bb.bool("b0")
assert cns.bb.bool("b1")
assert cns.bb.bool("b2")
assert not cns.bb.bool("b3")
assert cns.bb.int("i0") == 34
assert cns.bb.float("f0") == 5.5
assert cns.bb.float("f1") == 3.333
assert cns.bb.getboolean("b4"), cns.bb.getboolean("b4")
assert cns.bb.getfloat("f0") == 5.5
assert cns.bb.getint("i0") == 34
assert cns.bb.getint("Hello!", 5) == 5
assert cns.bb.getfloat("Wooo", 4.4) == 4.4
assert cns.bb.getboolean("huh", True) is True
assert cns.bb.list("Waaaa", [1]) == [1]
assert repr(cns).startswith("<ConfigBox")
def test_dir(self):
b = ConfigBox(test_dict)
for item in ("to_yaml", "to_dict", "to_json", "int", "list", "float"):
assert item in dir(b)
def test_config_default(self):
bx4 = Box(default_box=True, default_box_attr=ConfigBox)
assert isinstance(bx4.bbbbb, ConfigBox)
|
def dev_only(func, *args, **kwargs):
def inner(*args, **kwargs):
request = kwargs.get("request", args[0])
# Check host
host = request.get_host()
if env_from_host(host) != "DEV":
raise Http404
else:
return func(*args, **kwargs)
return inner
def non_production(func, *args, **kwargs):
def inner(*args, **kwargs):
request = kwargs.get("request", args[0])
# Check host
host = request.get_host()
if env_from_host(host) not in ["DEV", "BETA"]:
raise Http404
else:
return func(*args, **kwargs)
return inner
def prod_only(func, *args, **kwargs):
def inner(*args, **kwargs):
request = kwargs.get("request", args[0])
# Check host
host = request.get_host()
if env_from_host(host) != "PROD":
raise Http404
else:
return func(*args, **kwargs)
return inner
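# Usage sketch (hypothetical view; assumes Http404/HttpResponse come from
# django.http and env_from_host() is defined alongside these decorators):
#
#     @dev_only
#     def debug_dashboard(request):
#         return HttpResponse("only reachable when the request host maps to DEV")
#
# Requests from hosts that do not map to the required environment get a 404
# instead of revealing that the view exists.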
|
#!/usr/bin/env python
import argparse
import os
import shutil
import sys
def validate(srcdir):
"""
Check if `srcdir` has an index.html in it.
"""
indexpath = os.path.join(srcdir, "index.html")
if not os.path.exists(indexpath):
print("Missing index.html file in", srcdir)
return False
return True
def main(args):
"""
Command line utility for previewing HTML5App content in Kolibri.
"""
if not os.path.exists(args.srcdir) or not os.path.isdir(args.srcdir):
print("Error:", args.srcdir, "is not a directory.")
sys.exit(1)
if not validate(args.srcdir):
print("Validation failed; exiting.")
sys.exit(2)
# Write the contents of `srcdir` to `destzip`
destzipbase, _ = os.path.splitext(args.destzip)
shutil.make_archive(destzipbase, "zip", args.srcdir)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=main.__doc__)
parser.add_argument(
"--srcdir", help="HTML5 webroot (source directory)", default="."
)
parser.add_argument(
"--destzip",
help="Path to an HTML5 zip file in the local Kolibri installation",
required=True,
)
args = parser.parse_args()
main(args)
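# Example invocation (paths are placeholders):
#
#     python <this-script> --srcdir ./my-webapp --destzip ~/.kolibri/content/storage/abc.zip
#
# The script zips the contents of --srcdir (which must contain an index.html)
# into the given HTML5 zip file so Kolibri can preview it.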
|
# Generated by Django 3.0.6 on 2020-06-18 08:59
from django.conf import settings
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
("posthog", "0060_auto_20200616_0746"),
]
operations = [
migrations.CreateModel(
name="FeatureFlag",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=400)),
("key", models.CharField(max_length=400)),
(
"filters",
django.contrib.postgres.fields.jsonb.JSONField(default=dict),
),
("rollout_percentage", models.IntegerField(blank=True, null=True)),
("created_at", models.DateTimeField(default=django.utils.timezone.now)),
("deleted", models.BooleanField(default=False)),
("active", models.BooleanField(default=True)),
(
"created_by",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
(
"team",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="posthog.Team"
),
),
],
),
migrations.AddConstraint(
model_name="featureflag",
constraint=models.UniqueConstraint(
fields=("team", "key"), name="unique key for team"
),
),
]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import signal
import logging
import urllib2
import json
import time
import ble_codes
import util
import multiprocessing as mp
from util import current_milli_time
# ----------------------------------------
# BLE state machine definitions
# ----------------------------------------
BLE_STATE_STANDBY = 0
BLE_STATE_SCANNING = 1
BLE_STATE_ADVERTISING = 2
BLE_STATE_CONNECTING = 3
BLE_STATE_CONNECTED_MASTER = 4
BLE_STATE_CONNECTED_SLAVE = 5
PROCESS_STATE_STOPPED = 0
PROCESS_STATE_RUNNING = 1
DEBUG = True
POWERBENCH = False
IBEACON = False
class Step(object):
def __init__(self, time='', ble_operation='advertising', adv_data='', short_name='',
sr_data='', long_name='', major='', minor='', adv_channels='', adv_interval_min='',
adv_interval_max='', gap_discoverable_mode='', connection_interval_min='',
connection_interval_max='', slave_latency='', supervision_timeout='', gap_connectable_mode=''):
self.time = time
self.ble_operation = ble_operation
self.adv_data = adv_data
self.short_name = short_name
# default BLE stack value: 140942474c69622055314131502033382e344e4657
self.sr_data = sr_data
self.long_name = long_name
self.major = major
self.minor = minor
self.adv_interval_min = adv_interval_min
self.adv_interval_max = adv_interval_max
self.adv_channels = adv_channels
self.gap_discoverable_mode = gap_discoverable_mode
self.gap_connectable_mode = gap_connectable_mode
self.connection_interval_min = connection_interval_min
self.connection_interval_max = connection_interval_max
self.slave_latency = slave_latency
self.supervision_timeout = supervision_timeout
class Peripheral(object):
def __init__(self, logger, steps, port_name, baud_rate="38400",
packet_mode=False, gap_role='broadcaster', gatt_role='server',
adv_data=[
0x08, # field length
# field type 0x08=shortname
0x08,
0x42, 0x4c, 0x45, 0x76, 0x61, 0x00, 0x00, # adv name
0x02, # field length
0x01, # BGLIB_GAP_AD_TYPE_FLAGS --> field type # data (0x02 | 0x04 = 0x06, # general discoverable + BLE only, no BR+EDR)
0x06,
0x03, # field length
0xFF, # BGLIB_GAP_AD_TYPE_SERVICES_128BIT_ALL, field type
0xDD, 0xDD],
short_name='BLEva',
sr_data=[
# 0x06, # field length
# 0x09, # BGLIB_GAP_AD_TYPE_LOCALNAME_COMPLETE
# 0x43, 0x4c, 0x45, 0x76, 0x61
],
long_name='',
major=0x200, minor=0x200, adv_channels=0x07,
adv_interval_min=0x200, adv_interval_max=0x200,
gap_discoverable_mode=ble_codes.gap_discoverable_mode[
'gap_user_data'],
gap_connectable_mode=ble_codes.gap_connectable_mode[
'gap_scannable_non_connectable'],
connection_interval_min=7.5, connection_interval_max=30,
slave_latency=0, supervision_timeout=250
):
self.logger = logger
self.port_name = port_name
self.baud_rate = baud_rate
self.packet_mode = packet_mode
self.gap_role = gap_role
self.gatt_role = gatt_role
self.steps = steps
self.adv_data = adv_data
self.short_name = short_name
self.sr_data = sr_data
self.long_name = long_name
self.major = major
self.minor = minor
self.adv_channels = adv_channels
self.adv_interval_min = adv_interval_min
self.adv_interval_max = adv_interval_max
self.ble_state = None
self.process_state = None
self.gap_discoverable_mode = gap_discoverable_mode
self.gap_connectable_mode = gap_connectable_mode
self.sr_data = sr_data
self.connection_interval_min = connection_interval_min
self.connection_interval_max = connection_interval_max
self.slave_latency = slave_latency
self.supervision_timeout = supervision_timeout
def start_benchmark(self):
'''
start benchmark
'''
import bglib
self.bg = bglib.BGLib()
import serial
print self.port_name
self.ser = serial.Serial(port=self.port_name, baudrate=38400, timeout=1)
self.register_handlers()
self.setup()
self.process_state = PROCESS_STATE_RUNNING
for step in self.steps:
self.set(step)
self.logger.info('Starting next Step')
self.run()
t0 = current_milli_time()
print "step time " + str(step.time)
print "t0 " + str(t0)
while current_milli_time() - t0 < step.time and self.process_state == PROCESS_STATE_RUNNING:
# catch all incoming data
self.bg.check_activity(self.ser)
# don't burden the CPU
time.sleep(0.001)
# if for some reason, we end up in standby, benchmark fails
if self.ble_state == BLE_STATE_STANDBY:
raise Exception("We are in standby, but we should not be!")
t1 = current_milli_time()
print "t1 " + str(t1)
print "diff " + str(t1 - t0)
self.standby()
self.stop() # close serial connection after benchmark
def setup(self):
'''
Sets up the device and puts it in standby at the beginning of the benchmark.
'''
self.logger.info('Setting up BLEva...')
self.bg.packet_mode = self.packet_mode
# self.ser = serial.Serial(port=self.port_name, baudrate=self.baud_rate, timeout=1)
self.logger.debug('flushing input')
self.ser.flushInput()
self.logger.debug('flushing output')
self.ser.flushOutput()
# disconnect if we are connected already
self.logger.debug('disconnecting in case we are connected')
self.bg.send_command(self.ser, self.bg.ble_cmd_connection_disconnect(0))
self.bg.check_activity(self.ser, 1)
# stop advertising if we are advertising already
self.logger.debug('stop advertising in case we are still')
# 0 gap_non_discoverable, 0 gap_non_connectable
self.bg.send_command(self.ser, self.bg.ble_cmd_gap_set_mode(0, 0))
self.bg.check_activity(self.ser, 1)
# stop scanning if we are scanning already
# This command ends the current GAP discovery procedure and stops the scanning
# of advertising devices
self.logger.debug('stop scanning in case we are still')
self.bg.send_command(self.ser, self.bg.ble_cmd_gap_end_procedure())
self.bg.check_activity(self.ser, 1)
# now we must be in STANDBY state
# TODO here notify phone that we are ready
self.ble_state = BLE_STATE_STANDBY
self.logger.debug('BLEva is in STANDBY now.')
def standby(self):
'''
puts device in standby between steps or at the end of benchmark
'''
# disconnect if we are connected already
self.logger.debug('disconnecting in case we are connected')
self.bg.send_command(self.ser, self.bg.ble_cmd_connection_disconnect(0))
self.bg.check_activity(self.ser, 1)
# stop advertising if we are advertising already
self.logger.debug('stop advertising in case we are still')
# 0 gap_non_discoverable, 0 gap_non_connectable
self.bg.send_command(self.ser, self.bg.ble_cmd_gap_set_mode(0, 0))
self.bg.check_activity(self.ser, 1)
# stop scanning if we are scanning already
# This command ends the current GAP discovery procedure and stops the scanning
# of advertising devices
self.logger.debug('stop scanning in case we are still')
self.bg.send_command(self.ser, self.bg.ble_cmd_gap_end_procedure())
self.bg.check_activity(self.ser, 1)
# now we must be in STANDBY state
# TODO here notify phone that we are ready
self.ble_state = BLE_STATE_STANDBY
self.logger.debug('BLEva is in STANDBY now.')
def set(self, step):
'''
Prepares the run of a new step by setting its parameters
'''
if step.ble_operation != "":
self.ble_operation = step.ble_operation
if step.adv_data != "":
self.adv_data = step.adv_data
if step.short_name != "":
self.short_name = step.short_name
if step.sr_data != "":
self.sr_data = step.sr_data
if step.long_name != "":
self.long_name = step.long_name
if step.major != "":
self.major = step.major
if step.minor != "":
self.minor = step.minor
if step.adv_interval_min != "":
self.adv_interval_min = step.adv_interval_min
if step.adv_interval_max != "":
self.adv_interval_max = step.adv_interval_max
if step.adv_channels != "":
self.adv_channels = step.adv_channels
if step.gap_discoverable_mode != "":
self.gap_discoverable_mode = step.gap_discoverable_mode
if step.gap_connectable_mode != "":
self.gap_connectable_mode = step.gap_connectable_mode
if step.connection_interval_min != "":
self.connection_interval_min = step.connection_interval_min
if step.connection_interval_max != "":
self.connection_interval_max = step.connection_interval_max
if step.slave_latency != "":
self.slave_latency = step.slave_latency
if step.supervision_timeout != "":
self.supervision_timeout = step.supervision_timeout
def run(self):
'''
set all parameters on the device (e.g., for a new step)
'''
if self.ble_state != BLE_STATE_STANDBY:
raise Exception("run() called while BLEva is not in BLE_STATE_STANDBY")
self.logger.debug('set advertisement parameters')
# This is just to tweak channels, radio...
self.bg.send_command(self.ser, self.bg.ble_cmd_gap_set_adv_parameters(
int(self.adv_interval_min * 0.625), int(self.adv_interval_max * 0.625), self.adv_channels))
self.bg.check_activity(self.ser, 1)
# 4 means custom user data in advertisement packet
if self.gap_discoverable_mode == ble_codes.gap_discoverable_mode['gap_user_data']: # TODO, use always user_data
self.logger.debug('Setting user defined advertising data.')
if self.short_name != "":
print "setting advertisement"
self.adv_data[2:9] = util.get_char_array(self.short_name)[0:7]
logging.debug('Advertising Data: %s', self.adv_data)
print self.adv_data
self.bg.send_command(self.ser,
self.bg.ble_cmd_gap_set_adv_data(0, self.adv_data))
self.bg.check_activity(self.ser, 1)
if self.long_name != "":
self.sr_data = [
0x0F, # field length
0x09, # BGLIB_GAP_AD_TYPE_LOCALNAME_COMPLETE
0x42, 0x4c, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00
]
print "setting scan response"
print self.long_name
self.sr_data[2:16] = util.get_char_array(self.long_name)[0:14]
logging.debug('Advertising Data: %s', self.sr_data)
print self.sr_data
else:
self.sr_data = []
# NOTE SR is not necessary
if self.sr_data != "":
self.bg.send_command(self.ser,
self.bg.ble_cmd_gap_set_adv_data(1, self.sr_data))
self.bg.check_activity(self.ser, 1)
# start advertising as discoverable
# ibeacon was 0x84, 0x03
self.logger.debug('Entering advertising mode.')
self.bg.send_command(self.ser, self.bg.ble_cmd_gap_set_mode(
self.gap_discoverable_mode, self.gap_connectable_mode))
self.bg.check_activity(self.ser, 1)
# set state to advertising
self.ble_state = BLE_STATE_ADVERTISING
self.logger.debug('BLEva is in BLE_STATE_ADVERTISING now.')
def stop(self):
'''
stop thread
'''
print "stop"
self.standby()
print "standby"
self.ser.close()
print "closed"
# --------------------------------------------------------------------------
# Event Handlers
# --------------------------------------------------------------------------
def handler_on_timeout(self, sender, args):
'''
Gets called when we send a command but do not get a response back (e.g. a timeout).
'''
self.logger.debug('handler_on_timeout: %s', args)
# might want to try the following lines to reset, though it probably
# wouldn't work at this point if it's already timed out:
self.bg.send_command(self.ser, self.bg.ble_cmd_system_reset(0))
self.ble_state = BLE_STATE_STANDBY
self.logger.debug('BLEva is in STANDBY now.')
# NOTE not needed as we are not in enhanced broadcasting mode,
# This will never get called
def handler_ble_evt_gap_scan_response(self, sender, args):
"""
Handler to print scan responses with a timestamp (this only works when
discoverable mode is set to enhanced broadcasting 0x80/0x84)
"""
def handler_ble_evt_connection_status(self, sender, args):
'''
This gets called when a client has just been connected to us.
We need to check if we already know it, potentially check if it is blacklisted etc...
Then we set it as our active client.
'''
self.logger.debug('handler_ble_evt_connection_status: %s', args)
self.logger.info('Connection Status has changed')
# self.connection_interval_min=640
# self.connection_interval_max=1280
# self.slave_latency=0
# self.supervision_timeout=3600
# {'latency': 0, 'connection': 0, 'conn_interval': 39, 'flags': 5, 'timeout': 2000, 'address': [216, 96, 156, 121, 224, 248], 'address_type': 0, 'bonding': 255}
# {'latency': 0, 'connection': 0, 'conn_interval': 6, 'flags': 9, 'timeout': 2000, 'address': [216, 96, 156, 121, 224, 248], 'address_type': 0, 'bonding': 255}
# check if connection status flags are 5 (bit0=connection_connected + bit2=connection completed)
if (args['flags'] & 0x05) == 0x05:
self.ble_state = BLE_STATE_CONNECTED_SLAVE
self.logger.debug('BLEva is in BLE_STATE_CONNECTED_SLAVE now.')
if self.connection_interval_min != "":
print self.connection_interval_min
print self.connection_interval_max
print self.supervision_timeout
connection = args['connection']
print "Requesting to upgrade connection"
self.logger.debug('Requesting to upgrade connection')
# self.bg.send_command(self.ser, self.bg.ble_cmd_connection_update(
# connection, 6, 24, 0, 25))
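# The BG API expects the connection interval fields in units of 1.25 ms and the
# supervision timeout in units of 10 ms, which is why the millisecond values
# below are divided by 1.25 and 10 before being sent.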
self.bg.send_command(self.ser, self.bg.ble_cmd_connection_update(
connection, self.connection_interval_min / 1.25,
self.connection_interval_max / 1.25, self.slave_latency / 1.25,
self.supervision_timeout / 10))
elif (args['flags'] & 0x09) == 0x09:
self.logger.debug('BLEva connection parameters have been changed by master.')
if self.connection_interval_min != "":
if not self.connection_interval_min * 1.25 <= args['conn_interval'] <= self.connection_interval_max * 1.25:
self.logger.warning('Master set intervals outside our desired region :(')
if not self.supervision_timeout * 10 == args['timeout']:
self.logger.warning('Master set timeout outside our desired region :(')
if not self.slave_latency * 1.25 == args['latency']:
self.logger.warning('Master set slave latency outside our desired region :(')
else:
self.logger.warning('Connection was not correctly established!')
def handler_ble_evt_connection_disconnected(self, sender, args):
'''
A client has just disconnected from us
'''
self.logger.debug('handler_ble_evt_connection_disconnected: %s', args)
if args['reason'] == 0x213:
self.logger.debug('User on the remote device terminated the connection')
# We need to advertise ourselves again as a slave
self.bg.send_command(self.ser, self.bg.ble_cmd_gap_set_mode(
self.gap_discoverable_mode, self.gap_connectable_mode))
# we can now set state to advertise again
self.ble_state = BLE_STATE_ADVERTISING
self.logger.debug('BLEva is in BLE_STATE_ADVERTISING now.')
self.process_state = PROCESS_STATE_STOPPED
def handler_ble_rsp_gap_set_mode(self, senser, args):
'''
GAP mode has been set in response to
self.bg.ble_cmd_gap_set_mode
'''
self.logger.debug('handler_ble_rsp_gap_set_mode: %s', args)
if args["result"] != 0:
self.logger.warning('ble_rsp_gap_set_mode FAILED\n Re-running setup()')
self.setup()
else:
self.logger.debug('GAP mode successfully set')
def handler_ble_evt_attributes_value(self, sender, args):
'''
Gets called when an attribute has been written by a client
{'connection': 0, 'handle': 25, 'reason': 2, 'value': [1], 'offset': 0}
'''
self.logger.debug('handler_ble_evt_attributes_value: %s', args)
client_con = args['connection']
self.bg.send_command(self.ser, self.bg.ble_cmd_attributes_user_write_response(client_con, 0))
def handler_ble_evt_attributes_user_read_request(self, sender, args):
'''
This is called whenever a client reads an attribute that has the user type
enabled. We then serve the data dynamically. Each packet payload is 22 Bytes.
Whenever a client is receiving 22 bytes, the client needs to issue another
read request on the same attribute until all the data has been received
(this can be concluded when we receive fewer than 22 bytes)
'connection': connection, 'handle': handle, 'offset': offset, 'maxsize': maxsize
'''
self.logger.debug('handler_ble_evt_attributes_user_read_request: %s', args)
client_con = args['connection'] # --> we should not care about connection No for identification
# as we only can have one single client connected at the same time
# for feedback on whether the previous write succeeded
value = [0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01]
self.bg.send_command(self.ser, self.bg.ble_cmd_attributes_user_read_response(client_con, 0, value))
def handler_ble_rsp_connection_update(self, sender, args):
'''
Gets called as a result of us trying to upgrade an existing connection.
'''
self.logger.debug('handler_ble_rsp_connection_update: %s', args)
if args['result'] != 0:
self.logger.warning('Upgrading of connection failed, resetting BLEva')
self.bg.send_command(self.ser, self.bg.ble_cmd_system_reset(0))
self.ble_state = BLE_STATE_STANDBY
self.logger.debug('BLEva is in STANDBY now.')
def handler_ble_evt_attributes_status(self, sender, args):
'''
Gets called when a client enables notification or indication
'''
self.logger.debug('handler_ble_evt_attributes_status: %s', args)
def handler_ble_rsp_attributes_read(self, sender, args):
self.logger.debug('handler_ble_rsp_attributes_read: %s', args)
def handler_ble_rsp_attributes_user_read_response(self, sender, args):
self.logger.debug('handler_ble_rsp_attributes_user_read_response: %s', args)
def handler_ble_rsp_attributes_write(self, sender, args):
self.logger.debug('handler_ble_rsp_attributes_write: %s', args)
def handler_ble_rsp_attributes_user_write_response(self, sender, args):
self.logger.debug('handler_ble_rsp_attributes_user_write_response: %s', args)
def handler_ble_rsp_attributes_read_type(self, sender, args):
self.logger.debug('handler_ble_rsp_attributes_read_type: %s', args)
def handler_ble_evt_attclient_indicated(self, sender, args):
self.logger.debug('handler_ble_evt_attclient_indicated: %s', args)
# gracefully exit without a big exception message if possible
# FIXME should flush our buffers here
def handler_ctrl_c(self, signal, frame):
self.standby()
print "BLEva shut down"
exit(0)
def register_handlers(self):
self.logger.debug('registering handlers...')
self.bg.on_timeout += self.handler_on_timeout
self.bg.ble_evt_gap_scan_response += self.handler_ble_evt_gap_scan_response
self.bg.ble_evt_connection_disconnected += self.handler_ble_evt_connection_disconnected
self.bg.ble_evt_connection_status += self.handler_ble_evt_connection_status
self.bg.ble_rsp_attributes_read += self.handler_ble_rsp_attributes_read
self.bg.ble_rsp_attributes_user_read_response += self.handler_ble_rsp_attributes_user_read_response
self.bg.ble_rsp_attributes_write += self.handler_ble_rsp_attributes_write
self.bg.ble_rsp_attributes_user_write_response += self.handler_ble_rsp_attributes_user_write_response
self.bg.ble_rsp_attributes_read_type += self.handler_ble_rsp_attributes_read_type
self.bg.ble_evt_attclient_indicated += self.handler_ble_evt_attclient_indicated
self.bg.ble_evt_attributes_value += self.handler_ble_evt_attributes_value
self.bg.ble_evt_attributes_user_read_request += self.handler_ble_evt_attributes_user_read_request
self.bg.ble_rsp_gap_set_mode += self.handler_ble_rsp_gap_set_mode
self.bg.ble_rsp_connection_update += self.handler_ble_rsp_connection_update
signal.signal(signal.SIGINT, self.handler_ctrl_c)
def getBenchmark(url):
try:
r = urllib2.urlopen(url).read()
except Exception:
return ""
return r
def main():
parser = argparse.ArgumentParser(description='''Starts a BLEva Gateway
service on the device.''', epilog='''Note: This requires a BLED112
dongle from Bluegiga.''')
parser.add_argument('-u', '--url', help='''URL of BLEva server''', required=True)
parser.add_argument('-d', '--debug', help='Debug level (0-4)', type=int,
default=20, choices=[10, 20, 30, 40, 50])
args = parser.parse_args()
url = args.url
print url
tty_paths = util.get_tty_paths()
FORMAT = '%(asctime)s - %(name)s - %(processName)s - %(levelname)s - %(message)s'
logging.basicConfig(format=FORMAT, filename='bled112.log')
logger = logging.getLogger('BLEva')
logger.setLevel(args.debug)
import multiprocessing_logging
multiprocessing_logging.install_mp_handler()
logger.info('\n--------------------')
logger.info('BLEva has started')
logger.info('\n--------------------')
while True:
logger.info('\n--------------------')
logger.info('BLEva is waiting for new benchmark')
print "BLEva is waiting for new benchmarks"
b = getBenchmark(url + '/benchmark')
print b
if b != '':
logger.info('BLEva received new benchmark')
print "got new benchmark"
j = json.loads(b)
instances = []
for dongle in j['dongles']:
gap_role = dongle['gap_role']
gatt_role = dongle['gatt_role']
replicas = dongle['replicas']
print replicas
logger.debug("Replicas: " + str(replicas))
if replicas > len(tty_paths):
raise Exception("Too few dongles connected.")
for replica in xrange(0, replicas):
if gap_role in ['broadcaster', 'peripheral']:
a = dongle['steps']
steps = []
for v in a:
s = Step()
s.time = v['time']
print "json time " + str(v['time'])
s.ble_operation = v['ble_operation']
# s.adv_data = map(ord, v['adv_data'][2:].decode("hex"))
# s.short_name = util.pad_truncate(s.short_name, 5)
# s.long_name = util.pad_truncate(s.long_name, 12)
s.long_name = v['long_name']
if replica < 10:
s.short_name = v['short_name'] + str(0) + str(replica)
if s.long_name != "":
s.long_name = v['long_name'] + str(0) + str(replica)
else:
s.short_name = v['short_name'] + str(replica)
if s.long_name != "":
s.long_name = v['long_name'] + str(replica)
s.short_name = util.pad_truncate(s.short_name, 7)
if s.long_name != "":
s.long_name = util.pad_truncate(s.long_name, 14)
logger.debug("Replica Short Name: " + s.short_name)
logger.debug("Replica Long Name: " + s.long_name)
s.major = int(v['major'], 0) # NOTE base=0 guesses base from string
s.minor = int(v['minor'], 0)
s.adv_interval_min = int(v['adv_interval_min'], 0)
s.adv_interval_max = int(v['adv_interval_max'], 0)
s.adv_channels = int(v['adv_channels'], 0)
s.gap_discoverable_mode = ble_codes.gap_discoverable_mode[v['gap_discoverable_mode']]
s.gap_connectable_mode = ble_codes.gap_connectable_mode[v['gap_connectable_mode']]
if "connection_interval_min" in v:
s.connection_interval_min = v["connection_interval_min"]
if "connection_interval_max" in v:
s.connection_interval_max = v["connection_interval_max"]
if "slave_latency" in v:
s.slave_latency = v["slave_latency"]
if "supervision_timeout" in v:
s.supervision_timeout = v["supervision_timeout"]
steps.append(s)
peripheral = Peripheral(logger=logger, steps=steps, port_name=tty_paths[replica], gap_role=gap_role, gatt_role=gatt_role)
instances.append(peripheral)
logger.info('BLEva is starting benchmark now')
print "BLEva is starting benchmark now"
processes = []
logger.debug('Telling Phone to start')
print "notifying phone"
urllib2.urlopen(url + '/benchmark/sync/dongle').read()
print "done notified"
if not IBEACON:
for i in instances:
print i
p = mp.Process(target=i.start_benchmark, name=i.steps[0].short_name)
p.start()
processes.append(p)
for p in processes:
p.join()
else:
time.sleep(40)
print "finished one benchmark"
logger.info('BLEva finished one benchmark') # FIXME fix logger to also log spawned processes
if b == '':
print "BLEva server not available, sleeping a while and try again."
logger.info('BLEva server not available, sleeping a while and try again.') # FIXME fix logger to also log spawned processes
time.sleep(10) # sleep and then try again until server is available
if __name__ == '__main__':
main()
|
from bs4 import BeautifulSoup
from ..occurrences.occurrences import Occurrences
from ..occurrences.occurrence_interface import OccurrenceInterface
class Recommendation16:
"""
Recommendation 16 – Identify the main language of the page
"""
def __init__(self, sourcecode):
self.rec = 16
self.sourcecode = sourcecode
self.occurrences = Occurrences()
def avaliacao(self):
soap = BeautifulSoup(self.sourcecode, 'html.parser')
html = soap.find("html")
if html:
lang = html.get('lang')
if lang:
if lang.lower() == 'pt-br':
self.occurrences.add(OccurrenceInterface(self.rec, 0, html, 2))
else:
self.occurrences.add(OccurrenceInterface(self.rec, 1, html, 2))
return self.occurrences.list_of_occurrences
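# Usage sketch (hypothetical HTML snippet):
#
#     source = '<html lang="pt-br"><head></head><body></body></html>'
#     occurrences = Recommendation16(source).avaliacao()
#
# A lang attribute equal to 'pt-br' is recorded with code 0 and any other value
# with code 1 (presumably pass/fail); a missing <html> tag or lang attribute
# produces no occurrence at all.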
|
from . import device
class USF5P(device.Device):
def __init__(self, site, data):
super(USF5P, self).__init__(site, data)
self.port = {}
self.parse_stat(data['stat'])
self.parse_uplink(data.get('uplink'))
self.parse_port_table(data['port_table'])
self.general_temperature = data.get('general_temperature')
self.parse_sysstat(data.get('sys_stats'))
def parse_port_table(self, data):
for port in data:
self.port[port['port_idx']] = port
|
import random
import numpy as np
from numpy import array, mod, floor, ceil, sin, cos, dot
from OpenGL.GL import *
from PyEngine3D.Utilities import normalize
from PyEngine3D.OpenGLContext import CreateTexture, Texture2D, Texture2DArray, Texture3D, TextureCube
def generate_3d_data(size):
value = 255.0 / float(size)
data = array([0, 0, 0, 255] * size * size * size, dtype=np.uint8)
for z in range(size):
for y in range(size):
for x in range(size):
index = (x + y * size + z * size * size) * 4
data[index] = x * value
data[index + 1] = y * value
data[index + 2] = z * value
return data
def generate_random_data(texture_size, data_type):
texture_data = np.zeros((texture_size * texture_size, 4), dtype=data_type)
for i in range(texture_size * texture_size):
texture_data[i][0] = random.random()
texture_data[i][1] = random.random()
texture_data[i][2] = random.random()
texture_data[i][3] = random.random()
return texture_data
def generate_random_normal(texture_size, data_type):
texture_data = np.zeros((texture_size * texture_size, 3), dtype=data_type)
for i in range(texture_size * texture_size):
texture_data[i][0] = random.uniform(-1.0, 1.0)
texture_data[i][1] = 0.0
texture_data[i][2] = random.uniform(-1.0, 1.0)
texture_data[i][:] = normalize(texture_data[i])
return texture_data
def generate_common_textures(texture_loader):
resource_name = "common.default_3d"
if not texture_loader.hasResource(resource_name):
size = 64
data = generate_3d_data(size)
texture = CreateTexture(
name=resource_name,
texture_type=Texture3D,
width=size,
height=size,
depth=size,
internal_format=GL_RGBA8,
texture_format=GL_RGBA,
min_filter=GL_NEAREST,
mag_filter=GL_NEAREST,
data_type=GL_UNSIGNED_BYTE,
wrap=GL_REPEAT,
data=data,
)
texture_loader.create_resource(resource_name, texture)
texture_loader.save_resource(resource_name)
resource_name = "common.default_2d_array"
if not texture_loader.hasResource(resource_name):
size = 64
data = generate_3d_data(size)
texture = CreateTexture(
name=resource_name,
texture_type=Texture2DArray,
width=size,
height=size,
depth=size,
internal_format=GL_RGBA8,
texture_format=GL_RGBA,
min_filter=GL_NEAREST,
mag_filter=GL_NEAREST,
data_type=GL_UNSIGNED_BYTE,
wrap=GL_REPEAT,
data=data,
)
texture_loader.create_resource(resource_name, texture)
texture_loader.save_resource(resource_name)
resource_name = "common.random"
if not texture_loader.hasResource(resource_name):
size = 512
data = generate_random_data(size, np.float16)
texture = CreateTexture(
name=resource_name,
texture_type=Texture2D,
width=size,
height=size,
internal_format=GL_RGBA16F,
texture_format=GL_RGBA,
min_filter=GL_LINEAR,
mag_filter=GL_LINEAR,
data_type=GL_FLOAT,
wrap=GL_REPEAT,
data=data,
)
texture_loader.create_resource(resource_name, texture)
texture_loader.save_resource(resource_name)
resource_name = "common.random_normal"
if not texture_loader.hasResource(resource_name):
size = 4
data = generate_random_normal(size, np.float16)
texture = CreateTexture(
name=resource_name,
texture_type=Texture2D,
width=size,
height=size,
internal_format=GL_RGB16F,
texture_format=GL_RGB,
data_type=GL_FLOAT,
min_filter=GL_LINEAR,
mag_filter=GL_LINEAR,
wrap=GL_REPEAT,
data=data
)
texture_loader.create_resource(resource_name, texture)
texture_loader.save_resource(resource_name)
def generate_color_texture(resource_name, size, color):
if not texture_loader.hasResource(resource_name):
data = array([color for i in range(size * size)], dtype=np.uint8)
component_count = len(color)
texture = CreateTexture(
name=resource_name,
texture_type=Texture2D,
width=size,
height=size,
internal_format=GL_RGBA8 if component_count == 4 else GL_RGB8,
texture_format=GL_RGBA if component_count == 4 else GL_RGB,
min_filter=GL_NEAREST,
mag_filter=GL_NEAREST,
data_type=GL_UNSIGNED_BYTE,
wrap=GL_CLAMP_TO_EDGE,
data=data,
)
texture_loader.create_resource(resource_name, texture)
texture_loader.save_resource(resource_name)
generate_color_texture("common.flat_red", 2, [255, 0, 0, 255])
generate_color_texture("common.flat_green", 2, [0, 255, 0, 255])
generate_color_texture("common.flat_blue", 2, [0, 0, 255, 255])
generate_color_texture("common.flat_black", 2, [0, 0, 0, 255])
generate_color_texture("common.flat_gray", 2, [128, 128, 128, 255])
generate_color_texture("common.flat_white", 2, [255, 255, 255, 255])
generate_color_texture("common.flat_normal", 2, [128, 128, 255, 255])
generate_color_texture("common.flat_black_no_alpha", 2, [0, 0, 0, 0])
generate_color_texture("common.flat_white_no_alpha", 2, [255, 255, 255, 0])
generate_color_texture("common.flat_normal_no_alpha", 2, [128, 128, 255, 0])
|
#!/usr/bin/env python3
"""
A simple caching layer on top of a TANGO database.
"""
from collections import OrderedDict
from tango import Database, DeviceProxy, GreenMode
from tangogql.ttldict import TTLDict
class CachedMethod(object):
"""A cached wrapper for a DB method."""
def __init__(self, method, ttl=10):
self.cache = TTLDict(default_ttl=ttl)
self.method = method
def __call__(self, *args):
if args in self.cache:
return self.cache[args]
value = self.method(*args)
self.cache[args] = value
return value
class CachedDatabase(object):
"""A TANGO database wrapper that caches 'get' methods."""
_db = Database()
_methods = {}
def __init__(self, ttl):
self._ttl = ttl
def __getattr__(self, method):
if not method.startswith("get_"):
# caching 'set' methods doesn't make any sense anyway
# TODO: check that this really catches the right methods
return getattr(self._db, method)
if method not in self._methods:
self._methods[method] = CachedMethod(getattr(self._db, method),
ttl=self._ttl)
return self._methods[method]
class DeviceProxyCache(object):
"""Keep a limited cache of device proxies that are reused."""
# TODO: does this actually work? Are the proxies really cleaned up
# by PyTango after they are deleted?
def __init__(self, max_proxies=100):
self.max_proxies = max_proxies
self._device_proxies = OrderedDict()
def get(self, devname):
if devname in self._device_proxies:
# Proxy to this device already exists
proxy = self._device_proxies.pop(devname)
self._device_proxies[devname] = proxy # re-insert so it becomes the most recently used entry
return proxy
# Unknown device; let's create a new proxy
proxy = DeviceProxy(devname, green_mode=GreenMode.Asyncio)
if len(self._device_proxies) == self.max_proxies:
# delete the oldest proxy (last=False pops items in FIFO order)
self._device_proxies.popitem(last=False)
self._device_proxies[devname] = proxy
return proxy
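# Minimal sketch of CachedMethod on an ordinary function (illustrative only;
# note that importing this module already creates a Database() connection at
# class-definition time, so a reachable TANGO database is needed anyway):
#
#     def slow_lookup(name):
#         return expensive_call(name)   # hypothetical expensive operation
#
#     cached_lookup = CachedMethod(slow_lookup, ttl=30)
#     cached_lookup("sys/tg_test/1")    # computed and stored in the TTL cache
#     cached_lookup("sys/tg_test/1")    # served from the cache for the next 30 s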
|
""" This scripts uploads location database to sdd-db """
import os
import re
import pandas as pd
from datetime import datetime
# compatibility with ipython
try:
__IPYTHON__
os.chdir(os.path.dirname(__file__))
except: pass
import json
import boto3
from pathlib import Path
from shapely.geometry import Point
import pymysql
from sqlalchemy import create_engine
config = json.load(open("../../credentials/credentials-aws-db.json", "r"))
engine = create_engine(
("mysql+pymysql://" +
config["user"] + ":" +
config["password"] + "@" +
config["host"] + ":" +
str(config["port"]) + "/" +
config["database"]),
pool_recycle=3600 # handles timeouts better, I think...
)
# load shapefiles (previously downloaded and stored as locations.pickle)
locations = pd.read_pickle("locations.pickle")
locations = locations[["id", "name", "districtType", "state", "geometry"]]
locations.columns = ["district_id", "district", "district_type", "state", "geometry"]
locations["lat"] = locations.geometry.apply(lambda x: x.centroid.y)
locations["lon"] = locations.geometry.apply(lambda x: x.centroid.x)
df_state_ids = pd.DataFrame([
["Brandenburg", "BB"],
["Berlin", "B"],
["Baden-Württemberg", "BW"],
["Bayern", "BY"],
["Bremen", "HB"],
["Hessen", "HE"],
["Hamburg", "HH"],
["Mecklenburg-Vorpommern", "MV"],
["Niedersachsen", "NI"],
["Nordrhein-Westfalen", "NRW"],
["Rheinland-Pfalz", "RP"],
["Schleswig-Holstein", "SH"],
["Saarland", "SL"],
["Sachsen", "SN"],
["Sachsen-Anhalt", "SA"],
["Thüringen", "TH"]
], columns=["state", "state_id"])
locations["country_id"] = "DE"
locations["country"] = "Deutschland"
locations = locations.merge(
df_state_ids,
on="state",
how="left",
suffixes=(False, False))
locations["geometry"] = locations.geometry.astype(str)
query = """
INSERT INTO locations
(
district_id,
district,
district_type,
state,
geometry,
lat,
lon,
country_id,
country,
state_id
)
VALUES (%s, %s, %s, %s, polygonfromtext(%s), %s, %s, %s, %s, %s)
"""
with engine.connect() as cnx:
cnx.execute(query, locations.values.tolist(), multi=True)
|
def tensor_out_in_axis(tensor):
nndct_layouts = {2: 'OI', 4: 'OHWI'}
data_format = nndct_layouts[tensor.ndim]
out_axis = data_format.index('O')
in_axis = data_format.index('I')
return out_axis, in_axis
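# Example: a 4-D weight tensor uses the 'OHWI' layout, so out_axis is 0 and
# in_axis is 3; a 2-D tensor uses 'OI', giving out_axis 0 and in_axis 1.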
|
import sys
from . import system_tools
TEST_CON = 'test_constants'
test_constants = {'tri_a_T':1, 'tri_b_T': 1,
'tim_a_T':1,'fil_a_T':1.,'cons_T':1,
'tri_a_S':1,'cons_S':1}
# These constants come from the Major Variables script that ran on
# tornado in serial
system_constants = {'tornado.agso.gov.au':{'tri_a_T':0.0000395,
'tri_b_T': 0.29575152,
'tim_a_T':0.03804736,'fil_a_T':0.005928693,
'cons_T':-135.0661178,
'tri_a_S':0.00369572,'cons_S':331.7128095},
TEST_CON:test_constants}
DEFAULT_HOST = 'tornado.agso.gov.au'
def estimate_time_mem(domain, yieldstep, finaltime, halt=False,
log_results=True, use_test_constants=False):
"""
Predict the time in seconds and memory in MB that the simulation will need.
will need.
params:
domain: a Domain instance, used to get number of triangles
yieldstep: the yieldstep of the simulation
finaltime: The final time used in the simulation.
halt: Set to True if you want ANUGA to stop after the prediction
log_results: Add the predictions to the log file.
use_test_constants: Use artificial test constants.
Example use:
anuga.estimate_time_mem(domain, yieldstep=yieldstep, finaltime=finaltime,
halt=True)
"""
time, memory = whole_equation(num_tri=len(domain),
yieldstep=yieldstep,
finaltime=finaltime,
use_test_constants=use_test_constants)
if log_results: #FIXME, not logging results yet
print("This program will run for: " + str(time) + " (s)")
print("This program will use: " + str(memory) + " (MB)")
if halt:
sys.exit()
return time, memory
def whole_equation(halt = False, **kwargs):
"""
num_tri = None,
tri_area = None,
time_length = None,
time_step = None,
water_depth = None,
velocity = None,
per_water_cover = None,
cpus = None,
cpu_speed = None,
halt = False
"""
if not kwargs['use_test_constants']:
host_name = system_tools.get_host_name()
else:
host_name = TEST_CON
constants = system_constants.get(host_name, system_constants[DEFAULT_HOST])
kwargs['constants'] = constants
time = time_equation(**kwargs)
memory = space_equation(**kwargs)
result = (time, memory)
return result
# Using the constants from the memory and time experiments,
# the Time and Memory requirements are estimated
def time_equation(**kwargs):
time = kwargs['constants']['tri_a_T'] * (kwargs['num_tri']) ** 2 + \
kwargs['constants']['tri_b_T'] * kwargs['num_tri'] + \
kwargs['constants']['tim_a_T'] * kwargs['finaltime'] + \
kwargs['constants']['fil_a_T'] * \
((kwargs['finaltime'] / kwargs['yieldstep'])) + \
kwargs['constants']['cons_T']
return time
def space_equation(**kwargs):
memory = kwargs['constants']['tri_a_S'] * kwargs['num_tri'] + \
kwargs['constants']['cons_S']
return memory
################################################################################
if __name__ == "__main__":
whole_equation(num_tri=7, yieldstep=1, finaltime=10, use_test_constants=True)  # supply all kwargs the equations read so the example actually runs
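# With the artificial test constants (all 1) and the call above, time_equation
# gives 7**2 + 7 + 10 + 10/1 + 1 = 77 s and space_equation gives 7 + 1 = 8 MB.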
|
from . import transformation
import copy
import collections
import operator
import logging
logger = logging.getLogger(__name__)
class Grid:
def __init__( self):
"""
grid is a list of pairs: the grid coord and associated attributes (e.g., width, color)
"""
self.grid = []
self.legalIndices = set()
def semantic( self):
assert self.n > 0
def addGridLine( self, value, isLegal, attrs=None):
self.grid.append( (value, attrs))
if isLegal:
self.legalIndices.add( len(self.grid)-1)
def copyShift( self, shift=None):
result = copy.copy( self)
if shift is not None:
result.grid = []
for (c,attrs) in self.grid:
result.grid.append( (c+shift,attrs))
return result
@property
def n( self):
return len(self.grid)-1
@property
def period( self):
return self.grid[-1][0] - self.grid[0][0]
def inverseBounds( self, physical):
(q,r) = divmod(physical - self.grid[0][0], self.period)
last_lt = None
ge = None
for (idx,(c,_)) in enumerate(self.grid):
if c - self.grid[0][0] < r:
last_lt = idx
else:
ge = idx
break
assert ge is not None
if physical < self.value( (q,ge), check=False)[0]:
assert last_lt is not None
return ((q,last_lt), (q,ge))
else:
return ((q,ge), (q,ge))
def snapToLegal(self, idx, direction):
assert len(idx) == 2
assert len(self.legalIndices) > 0
assert direction == 1 or direction == -1
if direction == -1:
op, func = operator.le, max
else:
op, func = operator.ge, min
legal = { x for x in self.legalIndices if op(x, idx[1]) }
if len(legal) > 0:
return (idx[0], func(legal))
else:
return (idx[0] + direction, func(self.legalIndices))
return idx
def value( self, idx, check=True):
assert self.n > 0
v = idx[0]*self.n + idx[1] if type(idx) is tuple else idx
(whole, fract) = divmod(v, self.n)
if check:
assert fract in self.legalIndices, (v, self.n, whole, fract, self.legalIndices)
(c,attrs) = self.grid[fract]
c += whole*self.period
return (c,attrs)
class CenteredGrid(Grid):
def __init__( self, *, pitch, offset=0):
super().__init__()
self.addGridLine( offset, False)
self.addGridLine( offset + pitch//2, True)
self.addGridLine( offset + pitch, False)
class CenterLineGrid(Grid):
def addCenterLine( self, value, width, isLegal=True, *, color=None):
assert width % 2 == 0
self.addGridLine( value, isLegal, (width, color))
def semantic( self):
assert self.n > 0
# width and color both need to be the same
assert self.grid[0][1] == self.grid[-1][1]
class UncoloredCenterLineGrid(CenterLineGrid):
def __init__( self, *, pitch, width, offset=0, repeat=1):
super().__init__()
for i in range(repeat+1):
self.addCenterLine( offset+i*pitch, width)
self.semantic()
class ColoredCenterLineGrid(CenterLineGrid):
def __init__( self, *, colors, pitch, width, offset=0, repeat=None):
assert len(colors) % 2 == 0
super().__init__()
if repeat is not None:
c = (colors * repeat)[:repeat]
else:
c = colors
for (idx, color) in enumerate(c + [c[0]]):
self.addCenterLine( offset+idx*pitch, width, color=color)
self.semantic()
class EnclosureGrid(Grid):
def __init__( self, *, clg=None, pitch, offset=0, stoppoint, check=False):
if check and 2*stoppoint > pitch:
logger.debug( f"Enclosure grid stop point ({stoppoint}) is more than half the pitch ({pitch}) causing the physical coordinate to be non-monotonic with the grid ordering")
super().__init__()
self.addGridLine( offset, False)
self.addGridLine( offset + stoppoint, True)
self.addGridLine( offset + pitch//2, False)
self.addGridLine( offset + pitch - stoppoint, True)
self.addGridLine( offset + pitch, False)
self.semantic()
class SingleGrid(Grid):
def __init__( self, *, clg=None, pitch, offset=0, repeat=1):
super().__init__()
for i in range(repeat+1):
self.addGridLine( offset + i*pitch, True)
self.semantic()
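# Worked sketch of the (period, line) indexing used by Grid.value():
#
#     g = CenteredGrid(pitch=10)   # grid lines at 0, 5, 10; only index 1 is legal
#     g.value((0, 1))              # -> (5, None): centre line of the first period
#     g.value((1, 1))              # -> (15, None): same line, shifted by one period
#
# The first element of the index tuple selects the period, the second selects a
# grid line within that period; value() asserts that the line index is legal.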
|
from paddle.trainer_config_helpers import *
settings(
batch_size=1000,
learning_rate=1e-5
)
din = data_layer(name='data', size=30)
data_seq = data_layer(name='data_seq', size=30)
outputs(expand_layer(input=din, expand_as=data_seq,
expand_level=ExpandLevel.FROM_SEQUENCE),
expand_layer(input=din, expand_as=data_seq,
expand_level=ExpandLevel.FROM_TIMESTEP))
|
# -*- coding: UTF-8 -*-
from collections import *
import heapq
class Priority_Queue(object):
"""
by XRH
date: 2020-05-01
A priority queue implemented on top of a heap.
Features:
1. Direct access to elements in the heap by primary key, for both reads and updates
2. A configurable comparison key that drives heap ordering and re-adjustment
3. Popping the element with the smallest comparison key off the heap
4. Inserting elements and restoring the heap invariant
"""
def __init__(self, initial=None, key_func=lambda x: x, compare_func=lambda x: x):
"""
:param initial: initial list of key-value tuples, e.g. [('a', 1), ('b', 2), ...]
:param key_func: lambda that extracts the primary key, used for direct access to elements in the heap
:param compare_func: lambda that extracts the comparison key the heap uses to order elements
"""
self.key_func = key_func
self.compare_func = compare_func
self.hash_table = {}
self._data = []
if initial:
self.length = len(initial)
for item in initial: # [(key1,value),(key2,value)]
p_index = [compare_func(item), item] # p_index references the same list object that is stored in the heap
self.hash_table[key_func(item)] = p_index
self._data.append(p_index)
heapq.heapify(self._data)
else:
self.length =0
self._data = []
def __len__(self):
return self.length
def has_Key(self, key):
"""
Check whether the key exists.
:param key:
:return:
"""
return key in self.hash_table
def get_byKey(self, key):
"""
Read the tuple associated with the given key.
self.hash_table= {'a': [0, ('a', 0)], 'b': [2, ('b', 2)], 'c': [3, ('c', 3)]}
:param key: 'a'
:return: ('a', 0)
"""
if key in self.hash_table:
return self.hash_table[key][1]
else:
return None
def push(self, item):
"""
Insert a tuple, then restore the heap invariant.
:param item: (key,value)
:return:
"""
self.length += 1
p_index = [self.compare_func(item), item]
self.hash_table[self.key_func(item)] = p_index
heapq.heappush(self._data, p_index)
def pop(self):
"""
Pop the top (smallest) element off the heap.
:return:
"""
self.length -= 1
ele = heapq.heappop(self._data)
self.hash_table.pop(self.key_func(ele[1]))
return ele[1]
def update(self, new_tuple):
"""
Extract the key and the heap-ordering value from the tuple,
then update the tuple stored under that key and re-heapify.
eg.
self.hash_table= {'a': [0, ('a', 0, 'text')], 'b': [2, ('b', 2, 'text')], 'c': [3, ('c', 3, 'text')]}
new_tuple= ('a', 0, 'text2')
the key is 'a' and the value used for heap ordering is 0
:param new_tuple:
:return:
"""
key= self.key_func(new_tuple)
compare_value=self.compare_func(new_tuple)
self.hash_table[key][0] =compare_value
self.hash_table[key][1] = new_tuple
heapq.heapify(self._data)
def update_byKey(self, key, value):
"""
Find the tuple for the given key, update its value and re-heapify.
self.hash_table= {'a': [0, ('a', 0)], 'b': [2, ('b', 2)], 'c': [3, ('c', 3)]}
:param key: 'b'
:param value: 4
:return:
"""
# self.hash_table[key]=[value,(key,value)] # this would NOT update the list object already referenced by the heap
self.hash_table[key][0] = value # self.hash_table[key] is the same list object referenced inside the heap, so mutating it in place also updates the heap entry
self.hash_table[key][1] = (key, value)
heapq.heapify(self._data)
if __name__ == '__main__':
# heap = Priority_Queue(key_func=lambda x: x[0], compare_func=lambda x: x[1]) #
#
# heap.push(('a', 0))
# heap.push(('b', 2))
# heap.push(('c', 3))
#
# print(heap.get_byKey('a')) # fetch the key-value pair whose key is 'a': ('a', 0)
#
# print(heap.pop())
#
# heap.update_byKey('b', 4)
# print(heap.pop())
heap = Priority_Queue( key_func=lambda x: x[0], compare_func=lambda x: x[1])
heap.push(('a', 0,'text' )) # ('a', 0,'node1') key='a' compare_value=0
heap.push(('b',2,'text'))
heap.push(('c', 3,'text'))
print(heap.get_byKey('a')) # fetch the tuple whose key is 'a'
print(heap.pop())
heap.update(('b',4,'hhh'))
print(heap.pop())
print(heap.pop())
|
#!/usr/bin/env python3
'''
This is a simple control application for the servos on the box.
The application itself writes the control commands for the servoblaster on stdout so it
can be directly forwarded by netcat.
The servo IDs (servoblaster channel IDs) are hardcoded here
'''
import sys
from time import sleep
import tkinter as tk
# Channel IDs for the servos
channel_upper = 0
channel_lower = 2
channel_side = 4
idle_timers = {}
# Tuples with (last, now)
servo_values = {}
# Helper function to print on stderr
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def idle_timer_func(id):
# The idea is to nudge the servo back slightly (by 2 units here) to stop it from making noise.
l, c = servo_values[id]
n = c
if l > c:
n = c + 2
elif l < c:
n = c - 2
set_channel(id, n, False)
eprint("IDLE ", id)
def end_idle_timer(id):
if not id in idle_timers:
return
t = idle_timers[id]
if t is not None:
root.after_cancel(t)
def start_idle_timer(id):
end_idle_timer(id)
idle_timers[id] = root.after(500, idle_timer_func, id)
def set_channel(id, percent, startIdleTimer=True):
last_val = 0
if id in servo_values:
_, last_val = servo_values[id]
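# Map the 0-100 % value onto the servoblaster pulse range 80-218
# (with ServoBlaster's default 10 us step that is roughly 0.8-2.18 ms),
# then emit the "<channel>=<value>" command on stdout.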
d = ((float(percent) * (218 - 80)) / 100 + 80)
print("%d=%d" % (id, d))
sys.stdout.flush()
eprint("set %d to %d" %(id, d))
servo_values[id] = (last_val, percent)
if (startIdleTimer):
start_idle_timer(id)
class App:
def __init__(self, master):
# Add a delay here to avoid all servos starting at the same time
self.update_upper(0)
sleep(0.5)
self.update_lower(0)
sleep(0.5)
self.update_smoke(100)
sleep(0.5)
frame = tk.Frame(master)
frame.pack()
tk.Label(frame, text="<---- CLOSE ---------------------------------------- OPEN ---->").grid(row=0, column=1)
upper = tk.Scale(frame, from_=0, to=100, length=300,
orient=tk.HORIZONTAL, command=self.update_upper)
upper.grid(row=1, column=1)
upper.set(50)
tk.Label(frame, text="\nTop").grid(row=1)
lower = tk.Scale(frame, from_=0, to=100, length=300,
orient=tk.HORIZONTAL, command=self.update_lower)
lower.grid(row=2, column=1)
lower.set(50)
tk.Label(frame, text="\nBottom").grid(row=2)
smoke = tk.Scale(frame, from_=0, to=100, length=300,
orient=tk.HORIZONTAL, command=self.update_smoke)
smoke.grid(row=3, column=1)
smoke.set(50)
tk.Label(frame, text="\nSide").grid(row=3)
def update_upper(self, percent):
set_channel(channel_upper, int(percent) * 0.92 + 8)
def update_lower(self, percent):
set_channel(channel_lower, int(percent))
def update_smoke(self, percent):
set_channel(channel_side, (100 - int(percent) * 0.8), False)
eprint("Start servo client")
root = tk.Tk()
root.wm_title('Servo Control')
app = App(root)
root.geometry("380x155+0+0")
#root.mainloop()
|
"""
"Genyal" (c) by Ignacio Slater M.
"Genyal" is licensed under a
Creative Commons Attribution 4.0 International License.
You should have received a copy of the license along with this
work. If not, see <http://creativecommons.org/licenses/by/4.0/>.
"""
from random import Random
from typing import Any, Callable, List, Optional, Tuple
from genyal.core import GenyalCore
from genyal.genotype import GeneFactory
from genyal.individuals import Individual
from genyal.operations.evolution import default_terminating_function, tournament_selection
class GenyalEngine(GenyalCore):
"""
The engine is the main component of Genyal.
This class is in charge of creating, maintaining and evolving a population.
"""
__fitness_function_args: Tuple
__factory_generator_args: Tuple
__crossover_args: Tuple
__fitness_function: Callable[[List[Any]], float]
__fittest: Optional[Individual]
__generations: int
__mutation_args: List[Any]
__population: List[Individual]
__selection_args: List[Any]
__selection_strategy: Callable[..., Individual]
__terminating_function: Callable[..., bool]
def __init__(self, random_generator: Random = Random(),
fitness_function: Callable[..., float] = lambda _: 0,
selection_strategy=tournament_selection,
terminating_function=default_terminating_function):
"""
Initializes the values of the engine.
Args:
random_generator:
The random number generator used by the engine.
fitness_function:
The function to calculate the fitness of the population's individuals.
If none given, the default function returns 0 for any individual.
selection_strategy:
The strategy to select the individuals that will participate in the crossover.
terminating_function:
The function that will decide when to stop the evolution.
"""
super(GenyalEngine, self).__init__(random_generator)
self.__population = []
self.__fitness_function = fitness_function
self.__fitness_function_args = ()
self.__fittest = None
self.__selection_strategy = selection_strategy
self.__selection_args = []
self.__crossover_args = ()
self.__mutation_args = []
self.__terminating_function = terminating_function
self.__generations = 0
self.__factory_generator_args = ()
def create_population(self, population_size: int, individual_size: int,
gene_factory: GeneFactory, mutation_rate=0.01):
"""
Creates a new population for the engine.
The new population is then sorted according to the individual's fitness
Args:
mutation_rate:
population_size:
The number of individuals of the population.
individual_size:
The number of genes of each member.
gene_factory:
The factory to create the genes of each individual
"""
self.__population = Individual.create(population_size, individual_size, gene_factory,
mutation_rate, *self.__factory_generator_args)
for member in self.__population:
member.compute_fitness_using(self.__fitness_function, *self.__fitness_function_args)
self.__population.sort()
self.__fittest = self.__population[-1]
def evolve(self, *args):
"""
Evolves the population until the condition given by the terminating function is met.
By default, the population will evolve until it reaches 100 generations.
Args:
*args:
The arguments passed to the terminating function.
"""
while not self.__terminating_function(self, *args):
new_population = []
for _ in range(0, len(self.__population)):
child = self.__create_offspring()
new_population.append(child)
new_population.sort()
self.__population = new_population
self.__fittest = new_population[-1]
self.__generations += 1
def crossover(self, partner_a: Individual, partner_b: Individual, *args) -> Individual:
"""Performs a crossover between two individuals and returns the offspring."""
partner_a.random_generator = self._random_generator
return partner_a.crossover(partner_b, *args)
def mutate(self, individual: Individual, *args) -> Individual:
"""Mutates an individual and returns the result of the mutation."""
individual.random_generator = self.random_generator
return individual.mutate(*args)
def __create_offspring(self):
"""
Creates an offspring from a couple.
The partners are selected from the population and the offspring is obtained via crossover
and mutation.
"""
partner_a = self.__selection_strategy(self.__population, self._random_generator,
*self.__selection_args)
partner_b = self.__selection_strategy(self.__population, self._random_generator,
*self.__selection_args)
child = self.mutate(self.crossover(partner_a, partner_b, *self.__crossover_args),
*self.__mutation_args)
child.compute_fitness_using(self.__fitness_function, *self.__fitness_function_args)
return child
@property
def population(self) -> List[Individual]:
"""The individuals of the current generation."""
return self.__population
@property
def generation(self) -> int:
"""The number of generations the population has evolved."""
return self.__generations
@property
def fittest(self):
"""The individual with the greatest fitness from the population"""
return self.__fittest
@property
def crossover_args(self) -> Tuple:
"""A tuple with extra arguments to be passed to the crossover operation."""
return self.__crossover_args
@crossover_args.setter
def crossover_args(self, args):
"""Sets the arguments needed by the crossover operation."""
self.__crossover_args = args
@property
def factory_generator_args(self) -> Tuple:
return self.__factory_generator_args
@factory_generator_args.setter
def factory_generator_args(self, args: Tuple) -> None:
self.__factory_generator_args = args
@property
def fitness_function_args(self) -> Tuple:
return self.__fitness_function_args
@fitness_function_args.setter
def fitness_function_args(self, args: Tuple) -> None:
self.__fitness_function_args = args
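# Rough usage sketch (hypothetical fitness function; configuring a GeneFactory
# is done via genyal.genotype and is not shown here):
#
#     engine = GenyalEngine(fitness_function=lambda genes: sum(genes))
#     engine.create_population(population_size=20, individual_size=5,
#                              gene_factory=my_gene_factory)
#     engine.evolve()      # per the docstring, the default terminating function stops at 100 generations
#     best = engine.fittest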
|
__version_info__ = (1, 1, 3)
__version__ = ".".join("{0}".format(x) for x in __version_info__)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
__title__ = 'yorktown'
__author__ = 'Jonathan Kelley'
__version__ = '0.0.1'
__license__ = 'FreeBSD'
__copyright__ = 'Copyright 2018 BoomTown LLC'
MODULES = [
'demo',
'gitexec',
'jenkins',
]
|
import os
import jax
from jax import numpy as jnp
def run_cmd(x):
os.system(x)
def test_platform_allocator():
os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
#os.environ["XLA_PYTHON_CLIENT_PREALLOCATE"] = "false"
a = jnp.ones(1 << 30)
run_cmd("nvidia-smi")
a = None
run_cmd("nvidia-smi")
if __name__ == "__main__":
test_platform_allocator()
|
'''
A simple accident-mapping web app,
similar to the First News App
'''
# app.py >> server configuration & routes live here
import csv
from flask import Flask
from flask import abort
from flask import render_template
app = Flask(__name__)
app.config['PROPAGATE_EXCEPTIONS'] = True
def get_csv():
csv_path = './static/data-bencana-kecelakaan-transportasi-2011-2014.csv'
csv_file = open(csv_path, 'r', encoding='unicode_escape')
csv_obj = csv.DictReader(csv_file)
csv_list = list(csv_obj)
return csv_list
@app.route('/')
def index():
template = 'index.html'
object_list = get_csv()
return render_template(template, object_list = object_list)
@app.route('/<row_id>/')
def detail(row_id):
template = 'detail.html'
object_list = get_csv()
for row in object_list:
if row['no'] == row_id:
return render_template(template, object = row)
abort(404)
if __name__ == '__main__':
app.run(debug=True, use_reloader=True, host='0.0.0.0') |
#!/usr/bin/env python3
import argparse
from blockchain_fundamentals import *
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Perform the SHA256 function on input string. Can be hex or ascii.')
parser.add_argument('value', metavar='value')
args = parser.parse_args()
result = sha256(args.value)
print(result.hex())
|
import logging
import redis
import os
from pylons import request, response, session, tmpl_context as c, url
from pylons.controllers.util import abort, redirect
from mapofinnovation.lib.base import BaseController, render
log = logging.getLogger(__name__)
class WikiapiController(BaseController):
def getWikiLink(self):
#return a wikilink given an id
return {'id':'','internal_wiki_link':''}
def addWikiLink(self):
#add a wiki page for a space
return {'Success':'True'}
|
# -*- python -*-
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis <michael.aivazis@para-sim.com>
#
# (c) 2013-2018 parasim inc
# (c) 2010-2018 california institute of technology
# all rights reserved
#
# the package
import altar
# my protocol
from .Monitor import Monitor as monitor
# an implementation of the monitor protocol
class Reporter(altar.component, family="altar.simulations.monitors.reporter", implements=monitor):
"""
Reporter reports simulation progress by using application journal channels
"""
# protocol obligations
@altar.export
def initialize(self, application):
"""
Initialize me given an {application} context
"""
# nothing to do
return self
# implementation details
def simulationStart(self, controller, **kwds):
"""
Handler invoked when the simulation is about to start
"""
# grab a channel
channel = controller.info
# say something
channel.log(f"{self.pyre_name}: start")
# all done
return
def samplePosteriorStart(self, controller, **kwds):
"""
Handler invoked at the beginning of sampling the posterior
"""
# grab a channel
channel = controller.info
# say something
channel.log(f"{self.pyre_name}: samplePosteriorStart")
# all done
return
def prepareSamplingPDFStart(self, controller, **kwds):
"""
Handler invoked at the beginning of the preparation of the sampling PDF
"""
# grab a channel
channel = controller.info
# say something
channel.log(f"{self.pyre_name}: prepareSamplingPDFStart")
# all done
return
def prepareSamplingPDFFinish(self, controller, **kwds):
"""
Handler invoked at the end of the preparation of the sampling PDF
"""
# grab a channel
channel = controller.info
# say something
channel.log(f"{self.pyre_name}: prepareSamplingPDFFinish")
# all done
return
def betaStart(self, controller, **kwds):
"""
Handler invoked at the beginning of the beta step
"""
# grab a channel
channel = controller.info
# say something
channel.log(f"{self.pyre_name}: betaStart")
# all done
return
def walkChainsStart(self, controller, **kwds):
"""
Handler invoked at the beginning of the chain walk
"""
# grab a channel
channel = controller.info
# say something
channel.log(f"{self.pyre_name}: walkChainsStart")
# all done
return
def chainAdvanceStart(self, controller, **kwds):
"""
Handler invoked at the beginning of a single step of chain walking
"""
# grab a channel
channel = controller.info
# say something
channel.log(f"{self.pyre_name}: chainAdvanceStart")
# all done
return
def chainAdvanceFinish(self, controller, **kwds):
"""
Handler invoked at the end of a single step of chain walking
"""
# grab a channel
channel = controller.info
# say something
channel.log(f"{self.pyre_name}: chainAdvanceFinish")
# all done
return
def verifyStart(self, controller, **kwds):
"""
Handler invoked before we start verifying the generated sample
"""
# grab a channel
channel = controller.info
# say something
channel.log(f"{self.pyre_name}: verifyStart")
# all done
return
def verifyFinish(self, controller, **kwds):
"""
Handler invoked after we are done verifying the generated sample
"""
# grab a channel
channel = controller.info
# say something
channel.log(f"{self.pyre_name}: verifyFinish")
# all done
return
def priorStart(self, controller, **kwds):
"""
Handler invoked before we compute the prior
"""
# grab a channel
channel = controller.info
# say something
channel.log(f"{self.pyre_name}: priorStart")
# all done
return
def priorFinish(self, controller, **kwds):
"""
Handler invoked after we compute the prior
"""
# grab a channel
channel = controller.info
# say something
channel.log(f"{self.pyre_name}: priorFinish")
# all done
return
def dataStart(self, controller, **kwds):
"""
Handler invoked before we compute the data likelihood
"""
# grab a channel
channel = controller.info
# say something
channel.log(f"{self.pyre_name}: dataStart")
# all done
return
def dataFinish(self, controller, **kwds):
"""
Handler invoked after we compute the data likelihood
"""
# grab a channel
channel = controller.info
# say something
channel.log(f"{self.pyre_name}: dataFinish")
# all done
return
def posteriorStart(self, controller, **kwds):
"""
Handler invoked before we assemble the posterior
"""
# grab a channel
channel = controller.info
# say something
channel.log(f"{self.pyre_name}: posteriorStart")
# all done
return
def posteriorFinish(self, controller, **kwds):
"""
Handler invoked after we assemble the posterior
"""
# grab a channel
channel = controller.info
# say something
channel.log(f"{self.pyre_name}: posteriorFinish")
# all done
return
def acceptStart(self, controller, **kwds):
"""
Handler invoked at the beginning of sample accept/reject
"""
# grab a channel
channel = controller.info
# say something
channel.log(f"{self.pyre_name}: acceptStart")
# all done
return
def acceptFinish(self, controller, **kwds):
"""
Handler invoked at the end of sample accept/reject
"""
# grab a channel
channel = controller.info
# say something
channel.log(f"{self.pyre_name}: acceptFinish")
# all done
return
def walkChainsFinish(self, controller, **kwds):
"""
Handler invoked at the end of the chain walk
"""
# grab a channel
channel = controller.info
# say something
channel.log(f"{self.pyre_name}: walkChainsFinish")
# all done
return
def resampleStart(self, controller, **kwds):
"""
Handler invoked before we start resampling
"""
# grab a channel
channel = controller.info
# say something
channel.log(f"{self.pyre_name}: resampleStart")
# all done
return
def resampleFinish(self, controller, **kwds):
"""
Handler invoked after we are done resampling
"""
# grab a channel
channel = controller.info
# say something
channel.log(f"{self.pyre_name}: resampleFinish")
# all done
return
def betaFinish(self, controller, **kwds):
"""
Handler invoked at the end of the beta step
"""
# grab a channel
channel = controller.info
# say something
channel.log(f"{self.pyre_name}: betaFinish")
# all done
return
def samplePosteriorFinish(self, controller, **kwds):
"""
Handler invoked at the end of sampling the posterior
"""
# grab a channel
channel = controller.info
# say something
channel.log(f"{self.pyre_name}: samplePosteriorFinish")
# all done
return
def simulationFinish(self, controller, **kwds):
"""
Handler invoked when the simulation is about to finish
"""
# grab a channel
channel = controller.info
# say something
channel.log(f"{self.pyre_name}: finish")
# all done
return
# end of file
|
# -*- coding: utf-8 -*-
"""save_princess_peach.__init__.py
Initialize file.
"""
|
from fastapi import APIRouter
from .allocations import router as allocation_router
from .portfolios import router as portfolio_router
from .assets import router as asset_router
from .transactions import router as transaction_router
from .incomes import router as income_router
from .stats import router as dashboard_router
router = APIRouter()
router.include_router(allocation_router)
router.include_router(portfolio_router)
router.include_router(transaction_router)
router.include_router(income_router)
router.include_router(asset_router)
router.include_router(dashboard_router)
|
import os
# Configure Test Environment
def get_env(variable_name: str) -> str:
"""Returns a environment variable"""
try:
var = os.environ[variable_name]
if not var:
raise RuntimeError(f"Variable is null, Check {variable_name}.")
return var
except KeyError:
raise RuntimeError(f"Variable is not set, Check {variable_name}.")
def get_env_file(variable_name: str) -> str:
"""Returns a environment variable as path"""
try:
path = os.path.abspath(os.environ[variable_name])
if not path:
raise RuntimeError(f"Variable is null, Check {variable_name}.")
with open(path, "r") as f:
contents = f.read()
if not contents:
raise RuntimeError(f"Contents of file empty. Check {variable_name}.")
return contents
except KeyError:
raise RuntimeError(f"Variable is not set, Check {variable_name}.")
ENV = {
"signing_key": get_env_file("APPLICATION_RESTRICTED_SIGNING_KEY_PATH"),
"signing_key_with_asid": get_env_file("APPLICATION_RESTRICTED_WITH_ASID_SIGNING_KEY_PATH"),
"application_restricted_api_key": get_env("APPLICATION_RESTRICTED_API_KEY"),
"application_restricted_with_asid_api_key": get_env("APPLICATION_RESTRICTED_WITH_ASID_API_KEY"),
"pds_base_path": get_env("PDS_BASE_PATH"),
"environment": get_env("APIGEE_ENVIRONMENT"),
"key_id": get_env("KEY_ID"),
'client_id': get_env('CLIENT_ID'),
'client_secret': get_env('CLIENT_SECRET'),
'nhs_login_private_key': get_env('ID_TOKEN_NHS_LOGIN_PRIVATE_KEY_ABSOLUTE_PATH'),
'jwt_private_key': get_env('JWT_PRIVATE_KEY_ABSOLUTE_PATH'),
'test_patient_id': get_env('TEST_PATIENT_ID'),
'auth_token_expiry_ms': get_env('AUTH_TOKEN_EXPIRY_MS'),
'auth_token_expiry_ms_int': get_env('AUTH_TOKEN_EXPIRY_MS_INT')
}
|
import time
import multiprocessing as mp
from tqdm import trange
import torch.nn as nn
from torch.utils.data import DataLoader
from torchio import ImagesDataset, Queue, DATA
from torchio.data.sampler import ImageSampler
from torchio.utils import create_dummy_dataset
from torchio.transforms import (
ZNormalization,
RandomNoise,
RandomFlip,
RandomAffine,
Compose,
)
# Define training and patches sampling parameters
num_epochs = 4
patch_size = 128
queue_length = 400
samples_per_volume = 10
batch_size = 4
class Network(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv3d(
in_channels=1,
out_channels=3,
kernel_size=3,
)
def forward(self, x):
return self.conv(x)
model = Network()
# Create a dummy dataset in the temporary directory, for this example
subjects_list = create_dummy_dataset(
num_images=100,
size_range=(193, 229),
force=False,
)
# Each element of subjects_list is an instance of torchio.Subject:
# subject = Subject(
# one_image=torchio.Image(path_to_one_image, torchio.INTENSITY),
# another_image=torchio.Image(path_to_another_image, torchio.INTENSITY),
# a_label=torchio.Image(path_to_a_label, torchio.LABEL),
# )
# Define transforms for data normalization and augmentation
transforms = (
ZNormalization(),
RandomNoise(std=(0, 0.25)),
RandomAffine(scales=(0.9, 1.1), degrees=10),
RandomFlip(axes=(0,)),
)
transform = Compose(transforms)
subjects_dataset = ImagesDataset(subjects_list, transform)
# Run a benchmark for different numbers of workers
workers = range(mp.cpu_count() + 1)
for num_workers in workers:
print('Number of workers:', num_workers)
# Define the dataset as a queue of patches
queue_dataset = Queue(
subjects_dataset,
queue_length,
samples_per_volume,
patch_size,
ImageSampler,
num_workers=num_workers,
shuffle_subjects=False,
shuffle_patches=False,
verbose=True
)
batch_loader = DataLoader(queue_dataset, batch_size=batch_size)
start = time.time()
for epoch_index in trange(num_epochs, leave=False):
for batch in batch_loader:
# The keys of batch have been defined in create_dummy_dataset()
inputs = batch['one_modality'][DATA]
targets = batch['segmentation'][DATA]
logits = model(inputs)
print('Time:', int(time.time() - start), 'seconds')
print()
|
import unittest
from malcolm.modules.builtin.vmetas import BooleanArrayMeta
class TestValidate(unittest.TestCase):
def setUp(self):
self.meta = BooleanArrayMeta("test description")
def test_init(self):
assert "test description" == self.meta.description
assert self.meta.label == ""
assert self.meta.typeid == "malcolm:core/BooleanArrayMeta:1.0"
def test_validate_none(self):
assert list(self.meta.validate(None)) == []
def test_validate_array(self):
array = ["True", "", True, False, 1, 0]
assert (
[True, False, True, False, True, False]) == (
list(self.meta.validate(array)))
def test_not_iterable_raises(self):
value = True
with self.assertRaises(TypeError):
self.meta.validate(value)
def test_null_element_validates_to_false(self):
array = ["test", None]
assert (
[True, False]) == list(self.meta.validate(array))
|
"""empty message
Revision ID: 0148_add_letters_as_pdf_svc_perm
Revises: 0147_drop_mapping_tables
Create Date: 2017-12-01 13:33:18.581320
"""
# revision identifiers, used by Alembic.
revision = "0148_add_letters_as_pdf_svc_perm"
down_revision = "0147_drop_mapping_tables"
from alembic import op
def upgrade():
op.get_bind()
op.execute("insert into service_permission_types values('letters_as_pdf')")
def downgrade():
op.get_bind()
op.execute("delete from service_permissions where permission = 'letters_as_pdf'")
op.execute("delete from service_permission_types where name = 'letters_as_pdf'")
|
# html helpers
from lxml import html
def make_source_link(kind: str, stage: str, name: str) -> html.Element:
d = html.Element("span")
if kind != stage and kind != "source":
a = html.Element("a")
# "http://covid19-api.exemplartech.com/github-data/raw/AZ.html
a.attrib["href"] = f"../{stage}/{name}"
a.text = stage
d.append(a)
else:
d.text = stage
d.tail = " < "
return d
def make_source_links(kind: str, name: str, source: str):
div = html.Element("div")
div.attrib["class"] = "source"
kind = kind.lower()
if kind not in ["extract", "clean", "raw", "source"]:
raise Exception("Invalid kind: " + kind)
d = make_source_link(kind, "extract", name)
div.append(d)
d = make_source_link(kind, "clean", name)
div.append(d)
d = make_source_link(kind, "raw", name)
div.append(d)
d = make_source_link(kind, source, name)
d.tail = ""
div.append(d)
return div
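# Hedged usage sketch (the page name and source URL below are illustrative):
# build the "extract < clean < raw < source" navigation links for a cleaned page.
if __name__ == "__main__":
    links = make_source_links("clean", "AZ.html",
                              "http://covid19-api.exemplartech.com/github-data/raw/AZ.html")
    print(html.tostring(links, pretty_print=True).decode())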
|
from aiohttp import web
from src.combinators import AmountCombinator, CurrencyCombinator, ReferenceDateCombinator, validate_payload
from src.prices_service import PricesServiceImpl
from src.utils import catch_exceptions
from src.vo_service import MicroCurrencyConverterVOServiceImpl
routes = web.RouteTableDef()
prices_service = PricesServiceImpl()
vo_service = MicroCurrencyConverterVOServiceImpl(prices_service)
@routes.get('/convert')
@catch_exceptions
@validate_payload(
amount=AmountCombinator,
src_currency=CurrencyCombinator,
dest_currency=CurrencyCombinator,
reference_date=ReferenceDateCombinator
)
async def convert(request):
response = await vo_service.get_price_for_pair(
request.query['amount'],
request.query['reference_date'],
request.query['src_currency'],
request.query['dest_currency']
)
return web.json_response(data=response)
|
# -*- coding: utf-8 -*-
"""
@author: dong.lu
@contact: ludong@cetccity.com
@software: PyCharm
@file: phrase.py
@time: 2018/11/15 14:18
@desc: Statistically mine candidate phrases from a large corpus
"""
import math
from collections import Counter
import numpy as np
from src.linefile import persistence
class PhraseMine(object):
def __init__(self, alpha=0., unk_num=0, unk_idf=0, min_token=5, sample_list=True):
self.sample_list = sample_list
# count of each char across the whole corpus
self.chars = Counter()
# count of each candidate phrase across the whole corpus
self.phrases = Counter()
# document frequency of each char
self.df = Counter()
# total number of documents
self.docs = 0
self.min_token = min_token
self.alpha = alpha
self.unk_num = unk_num
self.unk_idf = unk_idf
self.pmi = {}
self.plk = {}
self.char_idf = {}
self.phrase_idf = {}
self.phrases_ls = {}
def seg_sentence(self, sentence):
self.chars.update(sentence)
phrases = []
for i, cw in enumerate(sentence):
if i < len(sentence) and cw != '|':
substring = sentence[i+1:]
for j, sw in enumerate(substring, 1):
if sw != '|':
tmp = cw + substring[:j]
phrases.append(tmp)
else:
break
self.phrases.update(phrases)
def statistics(self, sentences):
for sentence in sentences:
self.df.update(set(sentence))
self.docs += 1
self.seg_sentence(sentence)
def _get_token(self, chars_phrases, min_token=None, top=None):
if top:
return dict(chars_phrases.most_common(top))
elif min_token:
return {
word: num for word, num in chars_phrases.items() if num >= min_token
}
else:
return {
word: num for word, num in chars_phrases.items() if num >= self.min_token
}
def pick_chars(self, min_char=None, top=None):
self.chars = self._get_token(self.chars, min_token=min_char, top=top)
def pick_phrases(self, min_phrase=None, top=None):
self.phrases = self._get_token(self.phrases, min_token=min_phrase, top=top)
def _prob_u(self, phrase):
accumulate = 0
for char in phrase:
accumulate += self.chars.get(char, self.unk_num)
if len(phrase) > 1:
return self.phrases[phrase] * 1. / accumulate
else:
return 1
def _phrase_pmi_plk(self, phrase):
prob_whole = self._prob_u(phrase)
if len(phrase) > 2:
min_mutual_info = float('inf')
best_left = None
best_right = None
for i in range(len(phrase)):
u_left = phrase[:i]
u_right = phrase[i:]
info = math.log(prob_whole * 1. / (self._prob_u(u_left) * self._prob_u(u_right)))
if info < min_mutual_info:
min_mutual_info = info
best_left = u_left
best_right = u_right
else:
best_left = phrase[0]
best_right = phrase[1]
phrase_pmi = math.log(prob_whole * 1. / (self._prob_u(best_left) * self._prob_u(best_right)))
phrase_plk = prob_whole * math.log(prob_whole * 1. / (self._prob_u(best_left) * self._prob_u(best_right)))
if self.sample_list:
self.phrases_ls[phrase] = [phrase_pmi]
self.phrases_ls[phrase].append(phrase_plk)
else:
self.pmi[phrase] = phrase_pmi
self.plk[phrase] = phrase_plk
def calculate_char_idf(self):
for char, count in self.df.items():
self.char_idf[char] = math.log(self.docs + self.alpha) - math.log(count + self.alpha)
# kv = list(zip(*self.df.items()))
# key, value = kv[0], np.array(kv[1])
# value = np.log((self.docs + self.alpha)) - np.log(value + self.alpha)
# self.df = dict(zip(key, value))
def average_phrase_idf(self, phrase):
idf = 0
for char in phrase:
idf += self.char_idf.get(char, self.unk_idf)
idf = idf * 1.0 / len(phrase)
if self.sample_list:
self.phrases_ls[phrase].append(idf)
else:
self.phrase_idf[phrase] = idf
def calculate_phrase(self):
self.calculate_char_idf()
for phrase in self.phrases:
self._phrase_pmi_plk(phrase)
self.average_phrase_idf(phrase)
def get_sample(self, phrase):
if not self.sample_list:
return [self.pmi[phrase], self.plk[phrase], self.phrase_idf[phrase]]
else:
return self.phrases_ls[phrase]
def save_model(self, path):
persistence(self, path, 'phrase_model', 'save')
@classmethod
def load_model(cls, path):
return persistence(path, 'load')
class QualityPhrase(object):
def __init__(self):
self._model = None
@property
def model(self):
if not self._model:
raise AttributeError('QualityPhrase object has no model!')
return self._model
@model.setter
def model(self, value):
self._model = value
@classmethod
def mapping_data(cls, phrases, phd):
return list(map(lambda x: phd.get(x[0], [0] * 3), phrases))
def obtain_train_phrase(self, train_phrases, phd):
x = self.mapping_data(train_phrases, phd)
y = list(map(lambda label: int(label[1]), train_phrases))
shuffle = np.random.permutation(len(train_phrases))
shuffle_x = np.array(x)[shuffle]
shuffle_y = np.array(y)[shuffle]
return shuffle_x, shuffle_y
@classmethod
def separate_data(cls, x, y, ratio=0):
assert len(x) == len(y)
if not ratio:
return x, y
split_idx = int(len(x) * ratio)
train_x, test_x = x[:split_idx], x[split_idx:]
train_y, test_y = y[:split_idx], y[split_idx:]
return train_x, test_x, train_y, test_y
def predict_prob(self, test_phrases, phd):
test_x = list(map(lambda x: phd[x], test_phrases))
predict_data = self._model.predict_proba(test_x)
good_phrase, bad_phrase = [], []
for ind, phrase in enumerate(test_phrases):
if predict_data[ind][1] >= 0.7:
good_phrase.append(phrase)
else:
bad_phrase.append(phrase)
return good_phrase, bad_phrase
def save_model(self, path):
persistence(self.model, path, 'classifier_model', 'save')
@classmethod
def load_model(cls, path):
return persistence(path, 'load')
|
# note: qt and kivy use different i18n methods
# FIXME all these messages *cannot* be localized currently!
def to_rtf(msg):
return '\n'.join(['<p>' + x + '</p>' for x in msg.split('\n\n')])
MSG_CAPITAL_GAINS = """
This summary covers only on-chain transactions (no lightning!). Capital gains are computed by attaching an acquisition price to each UTXO in the wallet, using the order of blockchain events (not FIFO).
"""
|
import os
from instaloader import Instaloader, Post, Profile
from zipfile import ZipFile
class IGDownloader:
"""IGDownloader Class - takes login username and password as argument"""
def __init__(self, login_username, login_password):
self.loader = Instaloader()
self.loader.login(login_username, login_password)
self.loader.post_metadata_txt_pattern = ''
self.loader.save_metadata = False
self.loader.download_comments = False
def download_profile_picture(self, username: str) -> str:
"""Download profile picture for any Instagram profile
Keyword arguments:
username -- str
Return: image filename
"""
self.loader.download_pictures = True
self.loader.download_videos = False
self.profile = Profile.from_username(self.loader.context, username)
self.loader.download_profilepic(self.profile)
img_file_name = os.listdir(username)[0]
return img_file_name
def download_image(self, post_url: str):
"""Download post image from public accounts
Keyword arguments:
post_url -- str
Return: image filename or None
"""
self.loader.download_pictures = True
self.loader.download_videos = False
post_id = post_url.split('/')[4]
try:
post = Post.from_shortcode(self.loader.context, post_id)
if post.is_video:
return None
if post.mediacount > 1:
self.loader.download_post(post, post_id)
zfname = f'{post_id}.zip'
foo = ZipFile(zfname, 'w')
# Adding files from directory 'post_id'
for root, dirs, files in os.walk(f'{post_id}'):
for f in files:
if f.lower().endswith(('.png', '.jpg', '.jpeg', '.tiff', '.bmp', '.gif')):
foo.write(os.path.join(root, f))
os.remove(os.path.join(root, f))
foo.close()
os.removedirs(post_id)
return zfname
self.loader.download_pic(post_id, post.url, post.date_utc)
return f'{post_id}.jpg'
except Exception as e:
print(e)
return None
def download_video(self, video_url) -> str or None:
"""Download post image from public accounts
Keyword arguments:
video_url -- str
Return: video filename or None
"""
self.loader.download_pictures = False
self.loader.download_videos = True
self.loader.post_metadata_txt_pattern = ''
self.loader.save_metadata = False
self.loader.download_comments = False
video_id = video_url.split('/')[4]
try:
post = Post.from_shortcode(self.loader.context, video_id)
if not post.is_video:
return None
self.loader.download_post(post, video_id)
return video_id
except Exception as e:
print(e)
return None
def download_latest_stories(self, username: str):
"""Download post image from public accounts
Keyword arguments:
username -- str
Return: zip filename or None
"""
profile_id = self.loader.check_profile_id(username).userid
try:
self.loader.download_stories(userids=[profile_id], filename_target=username)
zfname = f'{username}.zip'
foo = ZipFile(zfname, 'w')
# Adding files from the directory named after the username
for root, dirs, files in os.walk(username):
for f in files:
if not f.lower().startswith(('id')):
foo.write(os.path.join(root, f))
os.remove(os.path.join(root, f))
foo.close()
os.removedirs(username)
return zfname
except Exception as e:
print(e)
return None
|
# flake8: noqa
# import all models into this package
# if you have many models here with many references from one model to another this may
# raise a RecursionError
# to avoid this, import only the models that you directly need like:
# from fds.sdk.SPAREngine.model.pet import Pet
# or import this package, but before doing it, use:
# import sys
# sys.setrecursionlimit(n)
from fds.sdk.SPAREngine.model.account_directories import AccountDirectories
from fds.sdk.SPAREngine.model.calculation import Calculation
from fds.sdk.SPAREngine.model.calculation_status import CalculationStatus
from fds.sdk.SPAREngine.model.calculation_status_summary import CalculationStatusSummary
from fds.sdk.SPAREngine.model.calculation_unit_status import CalculationUnitStatus
from fds.sdk.SPAREngine.model.component_summary import ComponentSummary
from fds.sdk.SPAREngine.model.document_directories import DocumentDirectories
from fds.sdk.SPAREngine.model.frequency import Frequency
from fds.sdk.SPAREngine.model.pa_calculation_column import PACalculationColumn
from fds.sdk.SPAREngine.model.pa_calculation_group import PACalculationGroup
from fds.sdk.SPAREngine.model.pa_calculation_parameters import PACalculationParameters
from fds.sdk.SPAREngine.model.pa_date_parameters import PADateParameters
from fds.sdk.SPAREngine.model.pa_identifier import PAIdentifier
from fds.sdk.SPAREngine.model.pub_calculation_parameters import PubCalculationParameters
from fds.sdk.SPAREngine.model.pub_date_parameters import PubDateParameters
from fds.sdk.SPAREngine.model.pub_identifier import PubIdentifier
from fds.sdk.SPAREngine.model.spar_benchmark import SPARBenchmark
from fds.sdk.SPAREngine.model.spar_calculation_parameters import SPARCalculationParameters
from fds.sdk.SPAREngine.model.spar_date_parameters import SPARDateParameters
from fds.sdk.SPAREngine.model.spar_identifier import SPARIdentifier
from fds.sdk.SPAREngine.model.vault_calculation_parameters import VaultCalculationParameters
from fds.sdk.SPAREngine.model.vault_date_parameters import VaultDateParameters
from fds.sdk.SPAREngine.model.vault_identifier import VaultIdentifier
|
from kombu import Connection
from .resolver_types import RESOVLER_TYPE_MAP
from .resolver_types import DnsService, DnsServicev6  # DnsServicev6 assumed to live alongside DnsService
import json
import toml
import os
import socketserver
import logging
class ClownFactory(object):
REQUIRED_BLOCKS = ['dnstap', 'dnsproviders']
REQUIRED_DNSTAP = ['dns_host', 'dns_port']
@classmethod
def parse(cls, config_file):
try:
os.stat(config_file)
except:
raise
config_dict = toml.load(config_file)
# TODO parse the dnstap configs
# TODO parse the subscribers and publishers
# TODO parse the DNS providers
# TODO parse the logging setup
# TODO create the clever-clown service
return ClownService()
class ClownService(object):
DEFAULT_NAME = 'clever-clown'
DEFAULT_RESOLVER = 'Google'
DEFAULT_HOST='0.0.0.0'
DEFAULT_PORT=53530
GV6 = '2001:4860:4860:0:0:0:0:8888'
GV4 = '8.8.8.8'
RESOLVERS = {DEFAULT_RESOLVER: DnsService(DEFAULT_RESOLVER, GV4)}
RESOLVERS_64 = {DEFAULT_RESOLVER: DnsServicev6(DEFAULT_RESOLVER, GV6)}
def __init__(self, name=DEFAULT_NAME, default_responder=DEFAULT_RESOLVER,
default_responder_64=DEFAULT_RESOLVER, query_response=False,
enabled_responders=[DEFAULT_RESOLVER, ],
enabled_responders_64=[DEFAULT_RESOLVER, ],
dns_host=DEFAULT_HOST, dns_port=DEFAULT_PORT,
dns_providers={}, publishers={}, subscripers={},
query_limit_per_pub=10):
self.dns_host = dns_host
self.dns_port = dns_port
self.subscripers = subscripers
self.publishers = publishers
self.enabled_responders = enabled_responders
if len(enabled_responders) == 0:
self.enabled_responders.append(DEFAULT_RESOLVER)
self.enabled_responders_64 = enabled_responders_64
if len(enabled_responders_64) == 0:
self.enabled_responders_64.append(DEFAULT_RESOLVER)
self.tcpclown = TCPClown(listen_port=dns_port, listen_host=dns_host)
self.udpclown = UDPClown(listen_port=dns_port, listen_host=dns_host)
self.query_limit_per_pub = query_limit_per_pub
def consumer_callback(self, pub, msg):
# TODO extract the domain name or IP address to look up
# TODO For each DNS service process the domain and save the results
if 'domains' in msg:
for d in msg['domains']:
pass
def consume(self):
# TODO iterate through all of the publishers and pull off N messages
# and process them with the callback
query_results = {}
for name, publisher in list(self.publishers.items()):
queries = publisher.recv_messages(cnt=self.query_limit_per_pub,
callback=self.consumer_callback)
query_results[name] = queries
return query_results
def publish(self, results):
# TODO publish query results to all relevant subscribers
pass
class Clown(socketserver.BaseRequestHandler):
DEFAULT_RESOLVER = 'Google'
RESOLVERS = {'Google': DnsService('google', '8.8.8.8')}
LISTEN_PORT = 5454
STORE_URI = "redis://127.0.0.1:6379"
STORE_QUEUE = "clever-clown-results"
RESPOND_TO_REQUEST = False
@classmethod
def configure(cls, default_resolver=DEFAULT_RESOLVER,
default_resolvers=RESOLVERS,
store_uri=STORE_URI,
store_queue=STORE_QUEUE, **kargs):
cls.LISTEN_PORT = kargs.get('listen_port', 5454)
cls.STORE_URI = store_uri
cls.STORE_QUEUE = store_queue
cls.DEFAULT_RESOLVER = default_resolver
for name, resolver in kargs.get('resolvers', {}).items():
name = resolver.get('name', None)
type_ = resolver.get('type_', None)
if name is None or type_ is None:
raise Exception("Resolver name and type need to be set")
r_cls = RESOVLER_TYPE_MAP.get(type_, None)
if r_cls is None:
raise Exception("Invalid resolver specified")
r = r_cls(**resolver)
cls.RESOLVERS[name] = r
if cls.DEFAULT_RESOLVER not in cls.RESOLVERS:
n = list(cls.RESOLVERS.keys())[0]
cls.DEFAULT_RESOLVER = n
def handle(self):
host = self.client_address[0]
logging.debug("Handling request from %s" % host)
self.data = self.recv_request(self.request)
# send requests out to each resolver
responses = self.resolve_requests(self.data)
# etl response data
etl_data = self.response_etl(responses)
# send the data to logs
self.store_kombu(etl_data)
# TODO forward response to client
# XXX XXX XXX XXX
def resolve_requests(self, data):
# extract the specific DNS request
responses = {}
for r in self.RESOLVERS.values():
# wait for response?
response = r.send_request(data)
responses[r.name] = response
return responses
def response_etl(self, responses):
etl = {}
for name, response in list(responses.items()):
response_json = response.to_json()
for k, v in list(response_json.items()):
etl_key = '%s_%s' % (name, k)
etl[etl_key] = v
return etl
def store_kombu(self, etl_data):
logging.debug("Storing message in logstash queue")
try:
with Connection(self.STORE_URI) as conn:
q = conn.SimpleQueue(self.STORE_QUEUE)
q.put(json.dumps(etl_data))
q.close()
except Exception:
logging.exception("Storing message failed")
def read_data(self):
raise Exception("Not implemented")
class TCPClown(Clown):
@classmethod
def recv_request(cls, sock):
data = sock.recv(8192)
if len(data) < 2:
raise Exception("Packet size too small")
sz = int.from_bytes(data[:2], 'big')
if sz > len(data) - 2:
while True:
if sz > len(data) - 2:
tmp = sock.recv(sz - (len(data) - 2))
if not tmp:
break
data = data + tmp
if sz < len(data) - 2:
logging.debug("TCP packet under the specified size")
raise Exception("TCP packet under the specified size")
elif sz > len(data) - 2:
logging.debug("TCP packet over the specified size")
raise Exception("TCP packet over the specified size")
return data
@classmethod
def send_data(cls, client, sock, data):
return sock.sendall(data)
class UDPClown(Clown):
@classmethod
def recv_request(cls, sock):
return sock[0]
@classmethod
def send_data(cls, client, sock, data):
return sock.sendto(data, client)
|
import string
LOWERCASE_OFFSET = ord("a")
ALPHABET = string.ascii_lowercase[:16]
def b16_encode(plain):
enc = ""
for c in plain:
binary = "{0:08b}".format(ord(c))
enc += ALPHABET[int(binary[:4], 2)]
enc += ALPHABET[int(binary[4:], 2)]
return enc
def shift(c, k):
t1 = ord(c) - LOWERCASE_OFFSET
t2 = ord(k) - LOWERCASE_OFFSET
return ALPHABET[(t1 + t2) % len(ALPHABET)]
flag = "redacted"
key = "redacted"
assert all([k in ALPHABET for k in key])
assert len(key) == 1
b16 = b16_encode(flag)
enc = ""
for i, c in enumerate(b16):
enc += shift(c, key[i % len(key)])
print(enc)
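# Hedged decoding sketch (not part of the original challenge script): once the
# redacted flag/key are filled in, the single-character key can be brute-forced
# by inverting shift() and the base-16 encoding above.
def unshift(c, k):
    t1 = ord(c) - LOWERCASE_OFFSET
    t2 = ord(k) - LOWERCASE_OFFSET
    return ALPHABET[(t1 - t2) % len(ALPHABET)]

def b16_decode(encoded):
    plain = ""
    for i in range(0, len(encoded), 2):
        hi = ALPHABET.index(encoded[i])
        lo = ALPHABET.index(encoded[i + 1])
        plain += chr((hi << 4) | lo)
    return plain

for candidate_key in ALPHABET:
    # the candidate that decodes to readable text is the recovered flag
    print(candidate_key, b16_decode("".join(unshift(c, candidate_key) for c in enc)))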
|
#!/usr/bin/env python
'''
This file makes a docx (Office 2007) file from scratch, showing off most of python-docx's features.
If you need to make documents from scratch, use this file as a basis for your work.
Part of Python's docx module - http://github.com/mikemaccana/python-docx
See LICENSE for licensing information.
'''
import shutil
from copy import deepcopy
from tempfile import TemporaryFile, mkdtemp
import docx as dx
if __name__ == '__main__':
temp_dir = mkdtemp()
# Default set of relationships - these are the minimum components of a document
relationships = dx.relationshiplist()
# Make a new document tree - this is the main part of a Word document
document = dx.newdocument()
# This xpath location is where most interesting content lives
docbody = document.xpath('/w:document/w:body', namespaces=dx.nsprefixes)[0]
# Append two headings and a paragraph
docbody.append(dx.heading('''Welcome to Python's docx module''',1) )
docbody.append(dx.heading('Make and edit docx in 200 lines of pure Python',2))
docbody.append(dx.paragraph('The module was created when I was looking for a Python support for MS Word .doc files on PyPI and Stackoverflow. Unfortunately, the only solutions I could find used:'))
# Add a numbered list
for point in ['''COM automation''','''.net or Java''','''Automating OpenOffice or MS Office''']:
docbody.append(dx.paragraph(point,style='ListNumber'))
docbody.append(dx.paragraph('''For those of us who prefer something simpler, I made docx.'''))
docbody.append(dx.heading('Making documents',2))
docbody.append(dx.paragraph('''The docx module has the following features:'''))
# Add some bullets
for point in ['Paragraphs','Bullets','Numbered lists','Multiple levels of headings','Tables','Document Properties']:
docbody.append(dx.paragraph(point,style='ListBullet'))
docbody.append(dx.paragraph('Tables are just lists of lists, like this:'))
# Append a table
docbody.append(dx.table([['A1','A2','A3'],['B1','B2','B3'],['C1','C2','C3']]))
docbody.append(dx.heading('Editing documents',2))
docbody.append(dx.paragraph('Thanks to the awesomeness of the lxml module, we can:'))
for point in ['Search and replace',
'Extract plain text of document',
'Add and delete items anywhere within the document']:
docbody.append(dx.paragraph(point, style='ListBullet'))
# Add an image
relationships, picpara = dx.picture(
relationships,
'image1.png',
'This is a test description',
temp_dir = temp_dir)
docbody.append(picpara)
docbody.append(dx.paragraph([
('hello', ''),
('2', [dx.makeelement('vertAlign', attributes={'val': 'superscript'})]),
]))
# Append a table with special properties and cells
spec_cell = dx.paragraph([
('2', [dx.makeelement('vertAlign', attributes={'val': 'superscript'})]),
])
t_prop_margin = dx.makeelement('tblCellMar')
for margin_type in ['top', 'left', 'right', 'bottom']:
t_prop_margin.append(dx.makeelement(margin_type, attributes={'w': '0', 'type': 'dxa'}))
CELL_SIZE = 12*30 # twentieths of a point
docbody.append(dx.table([['A1',
{'content': spec_cell, 'style': {'vAlign': {'val': 'top'},
'shd': {'fill': '777777'}}},
('A3', 'ttt')],
['B1','B2','B3'],
['C1','C2','C3']],
heading=False,
colw=[CELL_SIZE]*3,
cwunit='dxa', # twentieths of a point
borders={'all': {'color': 'AAAAAA'}},
celstyle=[{'align': 'center', 'vAlign': {'val': 'center'}}]*3,
rowstyle={'height': CELL_SIZE},
table_props={'jc': {'val': 'center'},
'__margin__': t_prop_margin,
},
))
# Search and replace
print('Searching for something in a paragraph ...', end=' ')
if dx.search(docbody, 'the awesomeness'):
print('found it!')
else:
print('nope.')
print('Searching for something in a heading ...', end=' ')
if dx.search(docbody, '200 lines'):
print('found it!')
else:
print('nope.')
print('Replacing ...', end=' ')
docbody = dx.replace(docbody,'the awesomeness','the goshdarned awesomeness')
print('done.')
# Add a pagebreak
docbody.append(dx.pagebreak(type='page', orient='portrait'))
docbody.append(dx.heading('Ideas? Questions? Want to contribute?',2))
docbody.append(dx.paragraph('''Email <python.docx@librelist.com>'''))
# Create our properties, contenttypes, and other support files
coreprops = dx.coreproperties(
title='Python docx demo',
subject='A practical example of making docx from Python',
creator='Mike MacCana',
keywords=['python','Office Open XML','Word'])
appprops = dx.appproperties()
my_contenttypes = dx.contenttypes()
my_websettings = dx.websettings()
my_wordrelationships = dx.wordrelationships(relationships)
# Save our document
temp_file = TemporaryFile()
dx.savedocx(
document,
coreprops,
appprops,
my_contenttypes,
my_websettings,
my_wordrelationships,
# can be either file-like object, or file name:
output=temp_file,
temp_dir=temp_dir,
)
temp_file.seek(0)
print("temp dir is:", temp_dir)
shutil.rmtree(temp_dir)
print("temp dir deleted")
f = open('example.docx', 'w+b')
f.write(temp_file.read())
f.close()
|
"""
The ordinal directions.
A collection of normalized vectors to be referenced by name.
Best used for the positions or facings of :class:`Sprites <ppb.Sprite>`.
"""
from ppb_vector import Vector
Up = Vector(0, 1).normalize() #: Unit vector to the top of the screen from center.
Down = Vector(0, -1).normalize() #: Unit vector to the bottom of the screen from center.
Left = Vector(-1, 0).normalize() #: Unit vector to the left of the screen from center.
Right = Vector(1, 0).normalize() #: Unit vector to the right of the screen from center.
UpAndLeft = (Up + Left).normalize() #: Unit vector diagonally up and to the left of the screen from center.
UpAndRight = (Up + Right).normalize() #: Unit vector diagonally up and to the right of the screen from center.
DownAndLeft = (Down + Left).normalize() #: Unit vector diagonally down and to the left of the screen from center.
DownAndRight = (Down + Right).normalize() #: Unit vector diagonally down and to the right of the screen from center.
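# Hedged usage sketch (`speed` and `time_delta` are illustrative names): the
# named constants compose like any other Vector, e.g. for per-frame movement.
if __name__ == "__main__":
    speed, time_delta = 2.0, 1 / 60
    velocity = (Up + Right).normalize() * speed
    print(velocity * time_delta)  # displacement for one frame, up and to the right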
|
import json
import random
WEEKDAYS = {
"mon": 'Понедельник',
"tue": 'Вторник',
"wed": 'Среда',
"thu": 'Четверг',
"fri": 'Пятница',
"sat": 'Суббота',
"sun": 'Воскресенье'
}
ICONS = {
"travel": "⛱",
"study": "🏫",
"work": "🏢",
"relocate": "🚜",
"programming": "🎮"
}
def read_json(json_path):
with open(json_path, 'r', encoding='utf-8') as file:
contents = file.read()
if contents:
return json.loads(contents)
return []
def write_json(json_path, data):
with open(json_path, 'w', encoding='utf-8') as file:
json.dump(data, file)
def get_profile_from_json_by_id(id, json_path):
profiles = read_json(json_path)
for profile in profiles:
if profile['id'] == id:
return profile
def get_profile_goals(profile, json_path):
goals = read_json(json_path)
profile_goals = []
for goal in profile['goals']:
profile_goals.append(goals[goal])
return profile_goals
def get_free_profile_hours(profile):
free_hours = {}
for day, hours in profile['free'].items():
day_hour = {}
for hour, possibility in hours.items():
if possibility:
day_hour[hour] = possibility
free_hours[day] = day_hour
return free_hours
def write_lesson_to_json(id, weekday, time, json_path):
profiles = read_json(json_path)
for profile in profiles:
if profile['id'] == id:
profile['free'][weekday][time] = False
break
write_json(json_path, profiles)
def get_goals_for_request_form(json_path):
goals = read_json(json_path)
return [(key, value) for key, value in goals.items()]
def write_request_to_json(goal, time, name, phone, json_path):
requests = read_json(json_path)
requests.append({
'goal': goal,
'time': time,
'name': name,
'phone': phone,
})
write_json(json_path, requests)
def get_profiles_by_goal(goal, json_path):
profiles = read_json(json_path)
return [profile for profile in profiles if goal in profile['goals']]
def get_random_profiles_from_file(number, json_path):
profiles = read_json(json_path)
return random.sample(profiles, number)
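# Hedged usage sketch with an in-memory profile (not read from a JSON file):
# only the hours marked truthy survive get_free_profile_hours().
if __name__ == '__main__':
    demo_profile = {'free': {'mon': {'8:00': True, '10:00': False}}}
    print(get_free_profile_hours(demo_profile))  # {'mon': {'8:00': True}}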
|
# -*- coding: utf-8 -*-
"""
Various utilities for processing physiological data. These should not be called
directly but should support wrapper functions stored in `peakdet.operations`.
"""
from functools import wraps
import inspect
import numpy as np
from peakdet import physio
def make_operation(*, exclude=None):
"""
Wrapper to make functions into Physio operations
Wrapped functions should accept a :class:`peakdet.Physio` instance, `data`,
as their first parameter, and should return a :class:`peakdet.Physio`
instance
Parameters
----------
exclude : list, optional
What function parameters to exclude from being stored in history.
Default: 'data'
"""
def get_call(func):
@wraps(func)
def wrapper(data, *args, **kwargs):
# exclude 'data', by default
ignore = ['data'] if exclude is None else exclude
# grab parameters from `func` by binding signature
name = func.__name__
sig = inspect.signature(func)
params = sig.bind(data, *args, **kwargs).arguments
# actually run function on data
data = func(data, *args, **kwargs)
# data should never be None here, but if it is, skip the history append
if data is None:
return data
# get parameters and sort by key name, dropping ignored items and
# attempting to coerce any numpy arrays or pandas dataframes (?!)
# into serializable objects; this isn't foolproof but gets 80% of
# the way there
provided = {k: params[k] for k in sorted(params.keys())
if k not in ignore}
for k, v in provided.items():
if hasattr(v, 'tolist'):
provided[k] = v.tolist()
# append everything to data instance history
data._history += [(name, provided)]
return data
return wrapper
return get_call
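# Hedged usage sketch (the `scale` operation below is hypothetical, not part of
# peakdet): make_operation() runs the wrapped function and appends the call name
# and its non-excluded arguments to the returned Physio instance's history.
#
#     @make_operation()
#     def scale(data, *, factor=1.0):
#         return new_physio_like(data, data.data * factor)
#
#     phys = physio.Physio(np.random.rand(1000), fs=100.)
#     scaled = scale(phys, factor=2.0)
#     scaled.history[-1]  # -> ('scale', {'factor': 2.0})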
def _get_call(*, exclude=None, serializable=True):
"""
Returns calling function name and dict of provided arguments (name : value)
Parameters
----------
exclude : list, optional
What arguments to exclude from provided argument : value dictionary.
Default: ['data']
serializable : bool, optional
Whether to coerce argument values to JSON serializable form. Default:
True
Returns
-------
function: str
Name of calling function
provided : dict
Dictionary of function arguments and provided values
"""
exclude = ['data'] if exclude is None else exclude
if not isinstance(exclude, list):
exclude = [exclude]
# get one function call up the stack (the bottom is _this_ function)
calling = inspect.stack(0)[1]
frame, function = calling.frame, calling.function
# get all the args / kwargs from the calling function
argspec = inspect.getfullargspec(frame.f_globals[function])
args = sorted(argspec.args + argspec.kwonlyargs)
# save arguments + argument values for everything not in `exclude`
provided = {k: frame.f_locals[k] for k in args if k not in exclude}
# if we want `provided` to be serializable, we can do a little cleaning up
# this is NOT foolproof, but will coerce numpy arrays to lists which tends
# to be the main issue with these sorts of things
if serializable:
for k, v in provided.items():
if hasattr(v, 'tolist'):
provided[k] = v.tolist()
return function, provided
def check_physio(data, ensure_fs=True, copy=False):
"""
Checks that `data` is in correct format (i.e., `peakdet.Physio`)
Parameters
----------
data : Physio_like
ensure_fs : bool, optional
Raise ValueError if `data` does not have a valid sampling rate
attribute.
copy: bool, optional
Whether to return a copy of the provided data. Default: False
Returns
-------
data : peakdet.Physio
Loaded physio object
Raises
------
ValueError
If `ensure_fs` is set and `data` doesn't have valid sampling rate
"""
from peakdet.io import load_physio
if not isinstance(data, physio.Physio):
data = load_physio(data)
if ensure_fs and np.isnan(data.fs):
raise ValueError('Provided data does not have valid sampling rate.')
if copy is True:
return new_physio_like(data, data.data,
copy_history=True,
copy_metadata=True)
return data
def new_physio_like(ref_physio, data, *, fs=None, dtype=None,
copy_history=True, copy_metadata=True):
"""
Makes `data` into physio object like `ref_data`
Parameters
----------
ref_physio : Physio_like
Reference `Physio` object
data : array_like
Input physiological data
fs : float, optional
Sampling rate of `data`. If not supplied, assumed to be the same as
in `ref_physio`
dtype : data_type, optional
Data type to convert `data` to, if conversion needed. Default: None
copy_history : bool, optional
Copy history from `ref_physio` to new physio object. Default: True
copy_metadata : bool, optional
Copy metadata from `ref_physio` to new physio object. Default: True
Returns
-------
data : peakdet.Physio
Loaded physio object with provided `data`
"""
if fs is None:
fs = ref_physio.fs
if dtype is None:
dtype = ref_physio.data.dtype
history = list(ref_physio.history) if copy_history else []
metadata = dict(**ref_physio._metadata) if copy_metadata else None
# make new class
out = ref_physio.__class__(np.array(data, dtype=dtype),
fs=fs, history=history, metadata=metadata)
return out
def check_troughs(data, peaks):
"""
Finds the trough between every pair of consecutive `peaks` in `data`
Parameters
----------
data : array-like
Input data for which `troughs` and `peaks` were detected
peaks : array-like
Indices of suspected peak locations in `data`
Returns
-------
troughs : np.ndarray
Indices of trough locations in `data`, dependent on `peaks`
"""
all_troughs = np.zeros(peaks.size - 1, dtype=int)
for f in range(peaks.size - 1):
dp = data[peaks[f]:peaks[f + 1]]
idx = peaks[f] + np.argwhere(dp == dp.min())[0]
all_troughs[f] = idx
return all_troughs
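# Hedged usage sketch: for a pure sine wave sampled on [0, 4*pi], the peaks sit
# near indices 50 and 250, so the single trough between them lands near 150.
if __name__ == '__main__':
    signal = np.sin(np.linspace(0, 4 * np.pi, 400))
    print(check_troughs(signal, np.array([50, 250])))  # ~[150]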
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from oslo_config import cfg
from magnum.i18n import _
# Default symbols to use for passwords. Avoids visually confusing characters.
# ~6 bits per symbol
DEFAULT_PASSWORD_SYMBOLS = ['23456789', # Removed: 0,1
'ABCDEFGHJKLMNPQRSTUVWXYZ', # Removed: I, O
'abcdefghijkmnopqrstuvwxyz'] # Removed: l
utils_opts = [
cfg.StrOpt('rootwrap_config',
default="/etc/magnum/rootwrap.conf",
help='Path to the rootwrap configuration file to use for '
'running commands as root.'),
cfg.StrOpt('tempdir',
help='Explicitly specify the temporary working directory.'),
cfg.ListOpt('password_symbols',
default=DEFAULT_PASSWORD_SYMBOLS,
help='Symbols to use for passwords')
]
periodic_opts = [
cfg.IntOpt('service_down_time',
default=180,
help='Max interval size between periodic tasks execution in '
'seconds.'),
]
urlfetch_opts = [
cfg.IntOpt('max_manifest_size',
default=524288,
help=_('Maximum raw byte size of any manifest.'))
]
ALL_OPTS = list(itertools.chain(
utils_opts,
periodic_opts,
urlfetch_opts
))
def register_opts(conf):
conf.register_opts(ALL_OPTS)
def list_opts():
return {
"DEFAULT": ALL_OPTS
}
|
from flask import Flask
app = Flask(__name__)
@app.route('/')
def hello():
app.logger.debug('A value for debugging')
app.logger.warning('A warning occurred')
app.logger.error('An error occurred')
return "Hello World"
if __name__ == '__main__':
app.run(debug=True)
|
#!/usr/bin/env python
#
# Map weather service icon codes to my limited set held on the e-ink device
#
def interpret_icons(service,id):
if service == "openweathermap":
icons = {
"200": {
"label": "thunderstorm with light rain",
"icon": "RAIN"
},
"201": {
"label": "thunderstorm with rain",
"icon": "RAIN"
},
"202": {
"label": "thunderstorm with heavy rain",
"icon": "RAIN"
},
"210": {
"label": "light thunderstorm",
"icon": "RAIN"
},
"211": {
"label": "thunderstorm",
"icon": "RAIN"
},
"212": {
"label": "heavy thunderstorm",
"icon": "RAIN"
},
"221": {
"label": "ragged thunderstorm",
"icon": "RAIN"
},
"230": {
"label": "thunderstorm with light drizzle",
"icon": "RAIN"
},
"231": {
"label": "thunderstorm with drizzle",
"icon": "RAIN"
},
"232": {
"label": "thunderstorm with heavy drizzle",
"icon": "RAIN"
},
"300": {
"label": "light intensity drizzle",
"icon": "RAIN"
},
"301": {
"label": "drizzle",
"icon": "RAIN"
},
"302": {
"label": "heavy intensity drizzle",
"icon": "RAIN"
},
"310": {
"label": "light intensity drizzle rain",
"icon": "RAIN"
},
"311": {
"label": "drizzle rain",
"icon": "RAIN"
},
"312": {
"label": "heavy intensity drizzle rain",
"icon": "RAIN"
},
"313": {
"label": "shower rain and drizzle",
"icon": "RAIN"
},
"314": {
"label": "heavy shower rain and drizzle",
"icon": "RAIN"
},
"321": {
"label": "shower drizzle",
"icon": "RAIN"
},
"500": {
"label": "light rain",
"icon": "RAIN"
},
"501": {
"label": "moderate rain",
"icon": "RAIN"
},
"502": {
"label": "heavy intensity rain",
"icon": "RAIN"
},
"503": {
"label": "very heavy rain",
"icon": "RAIN"
},
"504": {
"label": "extreme rain",
"icon": "RAIN"
},
"511": {
"label": "freezing rain",
"icon": "RAIN"
},
"520": {
"label": "light intensity shower rain",
"icon": "RAIN"
},
"521": {
"label": "shower rain",
"icon": "RAIN"
},
"522": {
"label": "heavy intensity shower rain",
"icon": "RAIN"
},
"531": {
"label": "ragged shower rain",
"icon": "RAIN"
},
"600": {
"label": "light snow",
"icon": "RAIN"
},
"601": {
"label": "snow",
"icon": "RAIN"
},
"602": {
"label": "heavy snow",
"icon": "RAIN"
},
"611": {
"label": "sleet",
"icon": "RAIN"
},
"612": {
"label": "shower sleet",
"icon": "RAIN"
},
"615": {
"label": "light rain and snow",
"icon": "RAIN"
},
"616": {
"label": "rain and snow",
"icon": "RAIN"
},
"620": {
"label": "light shower snow",
"icon": "RAIN"
},
"621": {
"label": "shower snow",
"icon": "RAIN"
},
"622": {
"label": "heavy shower snow",
"icon": "RAIN"
},
"701": {
"label": "mist",
"icon": "RAIN"
},
"711": {
"label": "smoke",
"icon": "CLOUD"
},
"721": {
"label": "haze",
"icon": "MOST"
},
"731": {
"label": "sand, dust whirls",
"icon": "MOST"
},
"741": {
"label": "fog",
"icon": "MOST"
},
"751": {
"label": "sand",
"icon": "MOST"
},
"761": {
"label": "dust",
"icon": "MOST"
},
"762": {
"label": "volcanic ash",
"icon": "MOST"
},
"771": {
"label": "squalls",
"icon": "MIXED"
},
"781": {
"label": "tornado",
"icon": "MIXED"
},
"800": {
"label": "clear sky",
"icon": "SUN"
},
"801": {
"label": "few clouds",
"icon": "CLOUD"
},
"802": {
"label": "scattered clouds",
"icon": "MOST"
},
"803": {
"label": "broken clouds",
"icon": "MOST"
},
"804": {
"label": "overcast clouds",
"icon": "CLOUD"
},
"900": {
"label": "tornado",
"icon": "MIXED"
},
"901": {
"label": "tropical storm",
"icon": "MIXED"
},
"902": {
"label": "hurricane",
"icon": "MIXED"
},
"903": {
"label": "cold",
"icon": "MIXED"
},
"904": {
"label": "hot",
"icon": "SUN"
},
"905": {
"label": "windy",
"icon": "MIXED"
},
"906": {
"label": "hail",
"icon": "MIXED"
},
"951": {
"label": "calm",
"icon": "SUN"
},
"952": {
"label": "light breeze",
"icon": "MIXED"
},
"953": {
"label": "gentle breeze",
"icon": "MIXED"
},
"954": {
"label": "moderate breeze",
"icon": "MIXED"
},
"955": {
"label": "fresh breeze",
"icon": "MIXED"
},
"956": {
"label": "strong breeze",
"icon": "MIXED"
},
"957": {
"label": "high wind, near gale",
"icon": "MIXED"
},
"958": {
"label": "gale",
"icon": "MIXED"
},
"959": {
"label": "severe gale",
"icon": "MIXED"
},
"960": {
"label": "storm",
"icon": "MIXED"
},
"961": {
"label": "violent storm",
"icon": "MIXED"
},
"962": {
"label": "hurricane",
"icon": "MIXED"
}
}
elif service == "weatherunderground":
icons = {
"partlycloudy": {
"label": "Partly cloudy",
"icon": "MIXED"
},
"mostlycloudy": {
"label": "Mostly cloudy",
"icon": "MOST"
},
"clear": {
"label": "Clear",
"icon": "SUN"
},
"clear": {
"label": "Clear",
"icon": "SUN"
},
"cloudy": {
"label": "Cloudy",
"icon": "CLOUD"
}
}
else:
raise ValueError("bad service selected: " + service)
return icons[id]
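# Hedged usage sketch: map an OpenWeatherMap condition code onto the device's
# limited icon set.
if __name__ == '__main__':
    print(interpret_icons("openweathermap", "500"))  # {'label': 'light rain', 'icon': 'RAIN'}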
|
from django.contrib import admin
from .models import Acta, Grupo, Lote, Rodado, Subasta, Tipo
class ActaAdmin(admin.ModelAdmin):
filter_horizontal = ('profesionales', )
class GrupoAdmin(admin.ModelAdmin):
list_display = ['subasta',
'numero',
'martillero']
class RodadoAdmin(admin.ModelAdmin):
list_display = [
"lote",
"tipo",
"numero_inventario",
"descripcion",
"modelo",
"chasis",
"motor",
"dominio",
"marca",
"anio",
"precio_base",
"precio_venta",
"subastado",
]
class SubastaAdmin(admin.ModelAdmin):
list_display = [
'numero',
'fecha_hora',
'cerrado_el',
'decreto',
'domicilio',
]
filter_horizontal = ('personas', 'profesionales')
admin.site.register(Acta, ActaAdmin)
admin.site.register(Grupo, GrupoAdmin)
admin.site.register(Lote)
admin.site.register(Rodado, RodadoAdmin)
admin.site.register(Subasta, SubastaAdmin)
admin.site.register(Tipo)
|
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
import torch
from tools.utils import Timer
from torch.utils.data.sampler import Sampler
from torch._six import int_classes as _int_classes
class SceneBatchSampler(Sampler):
def __init__(self, sampler, batch_size, drop_last, \
train=True, strategy='uniform_viewpoints'):
if not isinstance(sampler, Sampler):
raise ValueError("sampler should be an instance of "
"torch.utils.data.Sampler, but got sampler={}"
.format(sampler))
if not isinstance(batch_size, _int_classes) or isinstance(batch_size, bool) or \
batch_size <= 0:
raise ValueError("batch_size should be a positive integeral value, "
"but got batch_size={}".format(batch_size))
if not isinstance(drop_last, bool):
raise ValueError("drop_last should be a boolean value, but got "
"drop_last={}".format(drop_last))
assert strategy == 'uniform_viewpoints'
self.sampler = sampler
self.batch_size = batch_size
self.drop_last = drop_last
self.strategy = strategy
self.train = train
self.restrict_seq = None
def __iter__(self):
batch = []
for idx,_ in enumerate(self.sampler):
ii = idx % self.batch_size
if ii==0:
sample_fun = {
'uniform_viewpoints': self.sample_batch_vp_diff,
}[self.strategy]
with Timer(name='batch_sample', quiet=True):
batch, seq = sample_fun(idx)
if ii==(self.batch_size-1):
yield batch
batch = []
def _get_dataset_yaws(self):
dataset = self.sampler.data_source
rots = dataset.nrsfm_model_outputs['phi']['R']
pr_axes = rots[:, -1, :]
up = torch.svd(pr_axes)[2][:, -1]
x = torch.cross(up, torch.tensor([0., 0., 1.]))
x = x / x.norm()
y = torch.cross(x, up)
y = y / y.norm()
x_c = torch.matmul(pr_axes, x)
y_c = torch.matmul(pr_axes, y)
yaw = torch.atan2(x_c, y_c)
return yaw
def sample_batch_vp_diff(self, idx):
dataset = self.sampler.data_source
# get the cached log rots
assert (
hasattr(dataset, 'nrsfm_model_outputs') and
dataset.nrsfm_model_outputs is not None
), 'make sure to set cfg.annotate_with_c3dpo_outputs=True'
yaws = self._get_dataset_yaws()
hist, edges = np.histogram(yaws, bins=16)
bins = (yaws.cpu().data.numpy().reshape(-1, 1) > edges[1:]).sum(axis=1)
weights = 1. / hist[bins]
weights /= weights.sum()
pivot = np.random.choice(np.arange(len(dataset.db)), p=weights)
seq = dataset.dbT['seq'][pivot]
rots = dataset.nrsfm_model_outputs['phi']['R']
seqs = rots.new_tensor(dataset.dbT['seq'], dtype=torch.int64)
# convert bool array to indices
okdata = (seqs != seqs[pivot]).nonzero().view(-1).tolist()
for o in okdata:
assert o < len(dataset.db), \
'%d out of range (%d)!' % (o, len(dataset.db))
if len(okdata) >= (self.batch_size-1):
replace = False
else:
replace = True
if len(okdata)==0:
print('no samples!!')
okdata = list(range(len(dataset.db)))
if weights is not None: # cross with okdata:
weights = weights[okdata] / weights[okdata].sum()
sample = np.random.choice(okdata, \
self.batch_size-1, replace=replace, p=weights).tolist()
sample.insert(0, pivot)
for si, s in enumerate(sample):
assert s < len(dataset.db), \
'%d out of range (%d)!' % (s, len(dataset.db))
return sample, seq
def __len__(self):
if self.drop_last:
return len(self.sampler) // self.batch_size
else:
return (len(self.sampler) + self.batch_size - 1) // self.batch_size
|
import unittest, time, sys
sys.path.extend(['.','..','py'])
import h2o
import h2o_hosts
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global localhost
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(node_count=1)
else:
h2o_hosts.build_cloud_with_hosts(node_count=1)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test1(self):
for x in xrange (1,2000,1):
if ((x % 100) == 0):
sys.stdout.write('.')
sys.stdout.flush()
trialString = "Trial" + str(x)
trialStringXYZ = "Trial" + str(x) + "XYZ"
put = h2o.nodes[0].put_value(trialString, key=trialStringXYZ, repl=None)
if __name__ == '__main__':
h2o.unit_main()
|
import fnmatch
from functools import partial
import click
import storefact
import yaml
from kartothek.api.discover import discover_cube
__all__ = ("filter_items", "get_cube", "get_store", "to_bold", "to_header")
def get_cube(store, uuid_prefix):
"""
Get cube from store.
Parameters
----------
store: Union[Callable[[], simplekv.KeyValueStore], simplekv.KeyValueStore]
KV store.
uuid_prefix: str
Dataset UUID prefix.
Returns
-------
cube: Cube
Cube specification.
datasets: Dict[str, kartothek.core.dataset.DatasetMetadata]
All discovered datasets.
Raises
------
click.UsageError
In case cube was not found.
"""
try:
return discover_cube(uuid_prefix, store)
except ValueError as e:
raise click.UsageError("Could not load cube: {e}".format(e=e))
def get_store(skv, store):
"""
Get simplekv store from storefact config file.
Parameters
----------
skv: str
Name of the storefact yaml. Normally ``'skv.yml'``.
store: str
ID of the store.
Returns
-------
store_factory: Callable[[], simplekv.KeyValueStore]
Store object.
Raises
------
click.UsageError
In case something went wrong.
"""
try:
with open(skv, "rb") as fp:
store_cfg = yaml.safe_load(fp)
except IOError as e:
raise click.UsageError("Could not open load store YAML: {e}".format(e=e))
except yaml.YAMLError as e:
raise click.UsageError("Could not parse provided YAML file: {e}".format(e=e))
if store not in store_cfg:
raise click.UsageError(
"Could not find store {store} in {skv}".format(store=store, skv=skv)
)
return partial(storefact.get_store, **store_cfg[store])
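# Hedged illustration (not from the original project) of what the storefact
# YAML referenced above might look like; the exact keys depend on the chosen
# storefact backend and are an assumption here:
#
#   # skv.yml
#   dataset:
#     type: hfs
#     path: /data/kartothek
#
# `get_store("skv.yml", "dataset")` would then return
# `partial(storefact.get_store, type="hfs", path="/data/kartothek")`.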
def _match_pattern(what, items, pattern):
"""
Match given pattern against given items.
Parameters
----------
what: str
Describes what is filtered.
items: Iterable[str]
Items to be filtered
pattern: str
Comma separated patterns to match against. Can contain glob patterns.
"""
result = set()
for part in pattern.split(","):
found = set(fnmatch.filter(items, part.strip()))
if not found:
raise click.UsageError(
"Could not find {what} {part}".format(what=what, part=part)
)
result |= found
return result
def filter_items(what, items, include_pattern=None, exclude_pattern=None):
"""
Filter given string items based on include and exclude patterns
Parameters
----------
what: str
Describes what is filtered.
items: Iterable[str]
Items to be filtered
include_pattern: str
Comma separated items which should be included. Can contain glob patterns.
exclude_pattern: str
Comma separated items which should be excluded. Can contain glob patterns.
Returns
-------
filtered_datasets: Set[str]
Filtered set of items after applying include and exclude patterns
"""
items = set(items)
if include_pattern is not None:
include_datasets = _match_pattern(what, items, include_pattern)
else:
include_datasets = items
if exclude_pattern is not None:
exclude_datasets = _match_pattern(what, items, exclude_pattern)
else:
exclude_datasets = set()
return include_datasets - exclude_datasets
def to_header(s):
"""
Create header.
Parameters
----------
s: str
Header content.
Returns
-------
s: str
Header content including terminal escape sequences.
"""
return click.style(s, bold=True, underline=True, fg="yellow")
def to_bold(s):
"""
Create bold text.
Parameters
----------
s: str
Bold text content.
Returns
-------
s: str
Given text including terminal escape sequences.
"""
return click.style(s, bold=True)
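# A small hedged example of how filter_items combines include/exclude glob
# patterns (the dataset names here are made up for illustration):
#
#   >>> items = {"enrich", "geodata", "seed", "seed_v2"}
#   >>> sorted(filter_items("dataset", items,
#   ...                     include_pattern="seed*,geo*",
#   ...                     exclude_pattern="seed_v2"))
#   ['geodata', 'seed']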
|
import os
import csv
import pandas
from sklearn.svm import LinearSVC
from sklearn import linear_model, metrics
from sklearn.model_selection import train_test_split
from scipy.sparse import csr_matrix
from questionparser import QuestionParser
CORPUS_DIR = os.path.join(os.path.dirname(__file__), 'corpus')
def compare_model(train_file, test_file):
train_data = pandas.read_csv(train_file)
labels = train_data.pop('Class')
train_data.pop('Question')
test_data = pandas.read_csv(test_file)
test_labels = test_data.pop('Class')
test_data.pop('Question')
X_train, X_test = transform_data_matrix(train_data, test_data)
model = linear_model.LogisticRegression(multi_class='multinomial', solver='newton-cg').fit(X_train, labels)
print("train accuracy: {}".format(metrics.accuracy_score(labels, model.predict(X_train))))
print("test accuracy: {}".format(metrics.accuracy_score(test_labels, model.predict(X_test))))
lin = LinearSVC()
svm_model = lin.fit(X_train, labels)
print("svm train accuracy: {}".format(metrics.accuracy_score(labels, svm_model.predict(X_train))))
print("svm test accuracy: {}".format(metrics.accuracy_score(test_labels, svm_model.predict(X_test))))
# train_x, test_x, train_y, test_y = train_test_split(wh_data, labels, train_size=0.8)
# model = linear_model.LogisticRegression(multi_class='multinomial', solver='newton-cg').fit(train_x, train_y)
# print("train accuracy: {}".format(metrics.accuracy_score(train_y, model.predict(train_x))))
# print("test accuracy: {}".format(metrics.accuracy_score(test_y, model.predict(test_x))))
# lin = LinearSVC()
# svm_model = lin.fit(train_x, train_y)
# print("svm train accuracy: {}".format(metrics.accuracy_score(train_y, svm_model.predict(train_x))))
# print("svm test accuracy: {}".format(metrics.accuracy_score(test_y, svm_model.predict(test_x))))
def build_data(input_file, output_file):
original = ["Question", "Class", "Head"]
pos_dep = ["Head_POS", "Neck_Label", "Root_POS", "Syntax"]
entity = ["PER", "LOC", "OBJ", "TEM", "NUM"]
original.extend(pos_dep)
original.extend(entity)
header = original
with open(input_file, encoding = "ISO-8859-1") as train, open(output_file, "w") as feature:
writer = csv.writer(feature)
writer.writerow(header)
for line in train:
line = line.split(":")
label = line[0]
question = " ".join(line[1].split()[1:])
print(question)
qpp = QuestionParser(question)
qpp.parse()
result = [question, label, qpp.get_head(),
qpp.get_word('head'), qpp.get_word('neck'), qpp.get_word('root'), " ".join(qpp.get_syntax()),
qpp.has_entity('per'), qpp.has_entity('loc'), qpp.has_entity('obj'), qpp.has_entity('tem'), qpp.has_entity('num')]
print(result)
writer.writerow(result)
def add_rating_data(rating_file, training_file):
with open(rating_file, "r") as train, open(os.path.join(CORPUS_DIR, 'all_corpus_2_copy.csv'), "a") as feature:
reader = csv.reader(train)
next(reader, None)
writer = csv.writer(feature)
for line in reader:
rating = line[2]
if rating == "T":
question = line[0]
label = line[1]
qpp = QuestionParser(question)
qpp.parse()
result = [question, label, qpp.get_head(),
qpp.get_word('head'), qpp.get_word('neck'), qpp.get_word('root'), " ".join(qpp.get_syntax()),
qpp.has_entity('per'), qpp.has_entity('loc'), qpp.has_entity('obj'), qpp.has_entity('tem'), qpp.has_entity('num')]
writer.writerow(result)
def train_data_matrix(X_train):
X_train = pandas.get_dummies(X_train)
X_train_columns = list(X_train.columns)
trans_data_train = {}
for col in X_train_columns:
if col not in X_train:
trans_data_train[col] = [0 for i in range(len(X_train.index))]
else:
trans_data_train[col] = list(X_train[col])
XT_train = pandas.DataFrame(trans_data_train)
XT_train = csr_matrix(XT_train)
return XT_train
def transform_data_matrix(X_train, X_predict):
X_train = pandas.get_dummies(X_train)
X_predict = pandas.get_dummies(X_predict)
X_train_columns = list(X_train.columns)
X_predict_columns = list(X_predict.columns)
X_trans_columns = list(set(X_train_columns + X_predict_columns))
trans_data_train = {}
for col in X_trans_columns:
if col not in X_train:
trans_data_train[col] = [0 for i in range(len(X_train.index))]
else:
trans_data_train[col] = list(X_train[col])
XT_train = pandas.DataFrame(trans_data_train)
XT_train = csr_matrix(XT_train)
trans_data_predict = {}
for col in X_trans_columns:
if col not in X_predict:
trans_data_predict[col] = [0 for i in range(len(X_predict.index))]
else:
trans_data_predict[col] = list(X_predict[col]) # KeyError
XT_predict = pandas.DataFrame(trans_data_predict)
XT_predict = csr_matrix(XT_predict)
return XT_train, XT_predict
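# Hedged illustration of what transform_data_matrix does: after one-hot
# encoding, the train and predict frames can end up with different dummy
# columns, so both are re-expanded over the union of columns (missing columns
# filled with zeros) before conversion to sparse CSR matrices. Toy data, not
# from the corpus:
#
#   >>> train = pandas.DataFrame({"Head_POS": ["WP", "WRB"]})
#   >>> predict = pandas.DataFrame({"Head_POS": ["WRB", "NN"]})
#   >>> Xt, Xp = transform_data_matrix(train, predict)
#   >>> Xt.shape[1] == Xp.shape[1]
#   True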
# if __name__ == "__main__":
# # input_file = os.path.join(CORPUS_DIR, 'train_5000.label.txt')
# # output_file = os.path.join(CORPUS_DIR, 'temp_features.csv')
# # build_data(input_file, output_file)
# train = os.path.join(CORPUS_DIR, 'train_5500_features.csv')
# test = os.path.join(CORPUS_DIR, 'TREC_10_features.csv')
# compare_model(train, test)
# rating_file = os.path.join(CORPUS_DIR, 'rating.csv')
# # training_file =
# # add_rating_data(rating_file, training_file)
|
import requests,subprocess,copy,time
import boto3, json
import sys,os
import netaddr
from netaddr import *
from collections import defaultdict  # used for ipmap/ip6map in the main loop below
from requests.packages.urllib3 import Retry
from kubernetes.client.rest import ApiException
from kubernetes import client, config
from pprint import pprint
from ipassignhelper import *
### Class, representing a worker node for EKS
class WorkerNodeManager(object):
def __init__(self):
### Fetch instanceId, hostname, AZ and region details
data = getInstanceMetadata()
tprint(data)
self.instance_id = data[0]
self.region = data[1]
self.hostname=data[2]
self.az=data[3]
self.NetworkingData={}
self.ec2ClientArr={}
self.tags={}
self.clusterName=None
if not self.instance_id or not self.region or not self.hostname:
Exception("WorkerNodeManager: __init__" + " failed to get instanceid,hostname region data from worker metadata")
ec2_client = boto3.client('ec2', region_name=self.region)
### Fetch the ENIs on the instance and their corresponding IPv4 CIDRs
get_instanceDetails(ec2_client,self.instance_id,self.NetworkingData)
for cidr in self.NetworkingData:
k = boto3.client('ec2', region_name=self.region)
self.ec2ClientArr[cidr] = k
### Fetch the instance tags
get_instanceTags(ec2_client,self.instance_id,self.tags)
self.clusterName=getEKSClusterNameFromTag(self.tags)
def getInstaceId(self):
return self.instance_id
def getRegion(self):
return self.region
def getHostname(self):
return self.hostname
def getEKSClusterName(self):
return self.clusterName
def getNetworkingData(self):
return self.NetworkingData
def getEc2ClientArr(self):
return self.ec2ClientArr
### Class representing the Kubernetes/EKS manager; fetches Kubernetes/EKS related details
# This class is used to run kubernetes commands on the EKS cluster
class Kubernetesmanager(object):
def __init__(self,region,cluster,roleARN=None):
self.region=region
self.cluster= cluster
### update the kubeconfig with EKS cluster details
if roleARN:
kubeconfigcmd="aws eks --region " + self.region+ " update-kubeconfig --name "+ self.cluster + " --role-arn " + roleARN
else:
kubeconfigcmd="aws eks --region " + self.region+ " update-kubeconfig --name "+ self.cluster
tprint("Kubernetesmanager:__init__","EKS command:" + kubeconfigcmd)
output = shell_run_cmd_old(kubeconfigcmd)
if output:
contexts, active_context = config.list_kube_config_contexts()
if not active_context:
raise Exception("Kubernetesmanager:__init__","Fatal!!! couldnt set active kubernetes context, kubeconfig is not set")
else:
raise Exception("Kubernetesmanager:__init__","Fatal!!! Got Error for " + kubeconfigcmd)
self.active_context = active_context
config.load_kube_config()
self.api_instance = client.CoreV1Api()
### function to refresh the kubeconfig token
def refresh(self):
config.load_kube_config()
self.api_instance = client.CoreV1Api()
### function to fetch the multus IP addresses & the mac-addresses from a pod (with a namespace) which has the multus annotations
# k8s.v1.cni.cncf.io/networks-status
def getMultusIps(self,name,namespace,ipAddress):
pretty = 'true' # str | If 'true', then the output is pretty printed. (optional)
try:
api_response = self.api_instance.read_namespaced_pod(name, namespace, pretty=pretty)
js = json.loads(api_response.metadata.annotations['k8s.v1.cni.cncf.io/networks-status'])
for i in js:
if 'interface' in i:
for j in i['ips'] :
ipAddress[j] = i['mac']
except ApiException as e:
tprint("Kubernetesmanager:getMultusIps","Exception when calling CoreV1Api->read_namespaced_pod: %s\n" % e)
config.load_kube_config()
self.api_instance = client.CoreV1Api()
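### Hedged example of the annotation payload parsed in getMultusIps above. The
### exact fields depend on the CNI plugins in use; this sketch only shows the
### keys the code relies on ('interface', 'ips', 'mac'):
#
#   k8s.v1.cni.cncf.io/networks-status: |
#     [{
#         "name": "default/net1",
#         "interface": "net1",
#         "ips": ["10.0.1.15"],
#         "mac": "0a:58:0a:00:01:0f"
#     }]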
### class to handle Multus IP addresses and related handlings
class MultusHandler(object):
def __init__(self,workerName,region,cluster,roleARN=None):
self.workerName=workerName
self.myKbernetesmgr=Kubernetesmanager(region=region,cluster=cluster,roleARN=roleARN)
self.cmd="kubectl get pods -o=jsonpath='{range .items[?(@.metadata.annotations.k8s\.v1\.cni\.cncf\.io/networks)]}{.metadata.name}{\" \"}{@.metadata.namespace}{\" \"}{@.spec.nodeName}{\"\\n\"}' "
self.multusNadcmd="kubectl get net-attach-def -o=jsonpath='{range .items[*]}{.metadata.name}{\" \"}{.metadata.namespace}{\"\\n\"}' "
self.multusPods={}
self.multsNads={}
self.multusNs=set()
def refresh(self):
self.myKbernetesmgr.refresh()
### Function to get multus NetworkAttachmentDefinitions across all the namespaces/given namespace
def getMultusNads(self,ns="--all-namespaces"):
try:
if ns=="--all-namespaces":
output = shell_run_cmd_old(self.multusNadcmd + "--all-namespaces") #shell_run_cmd_old(cmd,retCode)
else:
output = shell_run_cmd_old(self.multusNadcmd + " -n "+ ns) #shell_run_cmd_old(cmd,retCode)
if output:
output = output.rstrip()
allNadList=output.split("\n")
self.multsNads.clear()
for line in allNadList:
data=line.split(" ")
if len(data) > 1:
self.multsNads[data[0]] = { "namespace" : data[1] }
self.multusNs.add(data[1])
else:
raise Exception(line + " doesnt contain all 2 fileds, name namespace")
else:
tprint("MultusHandler:getMultusNads", "Empty NAD output" + output)
except Exception as e:
tprint ("MultusHandler:getMultusNads", "Exception:" + str(e) )
return self.multsNads
### function to get all the namespaces which are hosting NetworkAttachmentDefinitions
def getmultusNS(self):
self.getMultusNads("--all-namespaces")
return self.multusNs
def getMultusPodNamesOnWorker(self,nsSet=None):
self.multusPods.clear()
for ns in nsSet:
try:
if ns=="--all-namespaces":
output = shell_run_cmd_old(self.cmd + "--all-namespaces") #shell_run_cmd_old(cmd,retCode)
else:
output = shell_run_cmd_old(self.cmd + " -n "+ ns) #shell_run_cmd_old(cmd,retCode)
if output:
output = output.rstrip()
allPodList=output.split("\n")
#self.multusPods.clear()
for line in allPodList:
data=line.split(" ")
if len(data) > 2:
if self.workerName == data[2]:
ipAddress={}
self.myKbernetesmgr.getMultusIps(name=data[0],namespace=data[1],ipAddress=ipAddress)
self.multusPods[data[0]] = { "namespace" : data[1] , "ipAddress": ipAddress }
else:
raise Exception(line + " doesnt contain all 3 fileds, podname namespace workername")
else:
tprint("MultusHandler:getMultusPodNamesOnWorker", "Empty Multus Pod list " + output)
except Exception as e:
tprint ("MultusHandler:getMultusPodNamesOnWorker","Exception" + str(e))
return self.multusPods
class MultusPod(object):
def __init__(self,name,namespace,ipDict):
self.ipDict=ipDict
self.currIPList=list(ipDict.keys())
self.prevIPList=[]
self.name=name
self.namespace=namespace
def getName(self):
return self.name
def getNamespace(self):
return self.namespace
def getcurrIPList(self):
return self.currIPList
def setcurrIPList(self,currIPList):
self.currIPList = currIPList
def getprevIPList(self):
return self.prevIPList
def setprevIPList(self,prevIPList):
self.prevIPList = prevIPList
def __str__(self) :
return self.name+ " " +self.namespace + str(self.ipDict)
def getEKSClusterNameFromTag(tags):
clusterName=None
any(key.startswith("kubernetes.io/cluster/") for key in tags)
for key in tags.keys():
if key.startswith("kubernetes.io/cluster/"):
clusterName=key.split("kubernetes.io/cluster/",1)[1]
return clusterName
##MAIN
usePodMACtoIdentifyENIC = 0
#podIpAddress = {}
currIPList = []
currPods={}
multusPods={}
clsuterIAmRole=None
podSearchQuery="ALL"
multusNs=None
try:
if 'EKS_CLUSTER_ROLEARN' in os.environ:
clsuterIAmRole=os.environ['EKS_CLUSTER_ROLEARN']
if 'PODSEARCHQUERY'in os.environ:
podSearchQuery=os.environ['PODSEARCHQUERY']
myWorkerNode=WorkerNodeManager()
tprint("func:Main",myWorkerNode.getHostname() + " " + myWorkerNode.getEKSClusterName() + " " + "IAM Role:"+str(clsuterIAmRole))
myMultusMgr= MultusHandler(workerName=myWorkerNode.getHostname(),region=myWorkerNode.getRegion(),cluster=myWorkerNode.getEKSClusterName(),roleARN=clsuterIAmRole)
workerENIData=myWorkerNode.getNetworkingData()
ec2ClientArr=myWorkerNode.getEc2ClientArr()
except Exception as e:
tprint("func:Main",str(e)+ " Exiting!!")
exit(1)
ctrRefreshTime=300
tokenRefreshTime=30
ctr=0
while(1):
if (ctr % tokenRefreshTime) == 0:
myMultusMgr.refresh()
if ctr == 0 :
tprint("func:Main","Preiodic check .. Log Entry before the multus query, to check the query time taken")
if podSearchQuery != "ALL":
multusNs=myMultusMgr.getmultusNS()
if multusNs:
multusPods=myMultusMgr.getMultusPodNamesOnWorker(nsSet=multusNs)
else:
allns={"--all-namespaces"}
multusPods=myMultusMgr.getMultusPodNamesOnWorker(allns)
if ctr == 0 :
tprint("func:Main","Preiodic check .. Log Entry after the multus query, to check the query time taken")
for pod in multusPods.keys():
newIPList=list(multusPods[pod]["ipAddress"].keys())
obj= MultusPod(pod,multusPods[pod]["namespace"],multusPods[pod]["ipAddress"])
ipmap = defaultdict(list)
ip6map = defaultdict(list)
noChange=True
if pod in currPods.keys():
if set(currPods[pod].getcurrIPList()) == set(newIPList):
pass #tprint("func:Main", "No IP change for pod " + pod)
else:
noChange=False
tprint("func:Main", "IP change for pod " + str(newIPList))
else:
noChange=False
if noChange==False:
tprint("func:Main","working on pod :" + str(obj))
for ipaddress in newIPList:
for cidr in workerENIData.keys():
if IPAddress(ipaddress) in list(IPNetwork(cidr)):
if netaddr.valid_ipv4(str(ipaddress)):
ipmap[cidr].append(str(ipaddress))
else :
ip6map[cidr].append(str(ipaddress))
manageParallelIPv4(ipmap,workerENIData,ec2ClientArr)
currPods[pod]=obj
ctr=ctr+1
if ctr == ctrRefreshTime:
ctr=0
time.sleep(1) |
# -*- coding:utf-8 -*-
"""
A tensorflow implementation for text matching
model in paper MV-LSTM.
author: Bin Zhong
data: 2018-11-12
"""
import tensorflow as tf
class MVLSTM(object):
def __init__(
self, max_len_left, max_len_right, vocab_size,
embedding_size, num_hidden, num_k, l2_reg_lambda=0.0):
# Placeholders for input, output and dropout
self.input_left = tf.placeholder(tf.int32, [None, max_len_left], name="input_left")
self.input_right = tf.placeholder(tf.int32, [None, max_len_right], name="input_right")
self.input_y = tf.placeholder(tf.float32, [None, 2], name="input_y")
self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
# Embedding layer for both sentences
with tf.name_scope("embedding"):
W = tf.Variable(
tf.random_uniform([vocab_size, embedding_size], -0.1, 0.1),
name="W")
self.embedded_chars_left = tf.nn.embedding_lookup(W, self.input_left)
self.embedded_chars_right = tf.nn.embedding_lookup(W, self.input_right)
# Create a bi-directional lstm
with tf.name_scope('bidirectional_lstm'):
fw_cell = tf.nn.rnn_cell.LSTMCell(num_hidden, state_is_tuple=True)
fw_cell = tf.nn.rnn_cell.DropoutWrapper(fw_cell, output_keep_prob=self.dropout_keep_prob)
bw_cell = tf.nn.rnn_cell.LSTMCell(num_hidden, state_is_tuple=True)
bw_cell = tf.nn.rnn_cell.DropoutWrapper(bw_cell, output_keep_prob=self.dropout_keep_prob)
# Run both sentences through the bidirectional dynamic RNN
outputs_left, states_left = tf.nn.bidirectional_dynamic_rnn(fw_cell, bw_cell, self.embedded_chars_left, dtype=tf.float32, time_major=False)
outputs_right, states_right = tf.nn.bidirectional_dynamic_rnn(fw_cell, bw_cell, self.embedded_chars_right, dtype=tf.float32, time_major=False)
with tf.name_scope('integration'):
left_seq_encoder = tf.concat(outputs_left, -1, name='left_concat')
right_seq_encoder = tf.concat(outputs_right, -1, name='right_concat')
cross = tf.matmul(left_seq_encoder, tf.transpose(right_seq_encoder, [0, 2, 1])) # (N, len, len)
with tf.name_scope('k-max-pooling'):
cross_resh = tf.reshape(cross, [-1, max_len_left*max_len_right], name='reshape')
self.k_max_pool = tf.nn.top_k(cross_resh, k=num_k)[0]
with tf.name_scope("output"):
W = tf.get_variable(
"W_output",
shape=[num_k, 2],
initializer=tf.contrib.layers.xavier_initializer())
b = tf.Variable(tf.constant(0.1, shape=[2]), name="b")
self.scores = tf.nn.xw_plus_b(self.k_max_pool, W, b, name="scores")
self.predictions = tf.argmax(self.scores, 1, name="predictions")
# CalculateMean cross-entropy loss
with tf.name_scope("loss"):
self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y))
# Accuracy
with tf.name_scope("accuracy"):
correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32), name="accuracy")
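# --- Hedged usage sketch (TF 1.x graph mode, not part of the original file) ---
# The input arrays below (left_ids, right_ids, labels_onehot) are assumptions
# used only to show the expected shapes of the placeholders defined above.
#
#   model = MVLSTM(max_len_left=20, max_len_right=20, vocab_size=10000,
#                  embedding_size=128, num_hidden=64, num_k=10)
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       feed = {model.input_left: left_ids,       # (batch, 20) int word ids
#               model.input_right: right_ids,     # (batch, 20) int word ids
#               model.input_y: labels_onehot,     # (batch, 2) one-hot labels
#               model.dropout_keep_prob: 1.0}
#       loss, acc = sess.run([model.loss, model.accuracy], feed_dict=feed)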
|
"""Revelation root module with package info"""
from .app import Revelation
__author__ = "Humberto Rocha"
__email__ = "humrochagf@gmail.com"
__version__ = "2.0.0"
__all__ = ["Revelation"]
|
#!/usr/bin/env python3
rules = {}
tickets = []
with open('input.txt', 'r') as input:
l = input.readline().strip()
while l != "":
idx = l.index(':')
name = l[:idx]
rules_str = l[idx + 1:].split(' or ')
ranges = []
for r in rules_str:
idx = r.index('-')
ranges.append((int(r[:idx]), int(r[idx + 1:])))
rules[name] = ranges
l = input.readline().strip()
# my ticket
assert input.readline().startswith('your')
my_ticket = [int(a) for a in input.readline().split(',')]
input.readline() # burn empty line
# Other tickets
assert (input.readline().startswith('nearby'))
for l in input:
ticket = [int(a) for a in l.split(',')]
tickets.append(ticket)
ranges = [] # A sorted list of valid ranges
for range_ls in rules.values():
ranges.extend(range_ls)
valid_ranges = []
for rng in sorted(ranges, key=lambda x: x[0]):
if len(valid_ranges) == 0:
valid_ranges.append(rng)
continue
last = valid_ranges[-1]
if rng[0] <= last[1]: # Merge
valid_ranges[-1] = (min(rng[0], last[0]), max(rng[1], last[1]))
else: # Add it
valid_ranges.append(rng)
def is_valid(n):
for rng in valid_ranges:
if n < rng[0]:
return False
if rng[0] <= n <= rng[1]:
return True
return False
total = 0
for ticket in tickets:
for n in ticket:
if not is_valid(n):
total += n
print(total)
|
#!/usr/bin/env python
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Retrieve, cache, manipulate list of known Object Format IDs."""
import datetime
import d1_cli.impl.client
import d1_common.date_time
CACHE_MINUTES = 60
class FormatIDs(object):
def __init__(self):
self._format_ids = None
self._last_update = None
self._last_cn = None
def get(self, cn_base_url):
if not self._cache_is_stale(cn_base_url):
return self._format_ids
self._update_format_id_cache(cn_base_url)
self._update_cache_time()
self._update_last_cn(cn_base_url)
return self._format_ids
def format(self, cn_base_url):
format_ids = self.get(cn_base_url)
return ["{}".format(format_id) for format_id in format_ids]
# Private.
def _update_format_id_cache(self, cn_base_url):
client = d1_cli.impl.client.CLICNClient(base_url=cn_base_url)
formats = client.listFormats()
format_ids = sorted([f.formatId for f in formats.objectFormat])
self._format_ids = format_ids
def _cache_is_stale(self, cn_base_url):
if self._last_update is None or cn_base_url != self._last_cn:
return True
return d1_common.date_time.utc_now() - self._last_update > datetime.timedelta(
minutes=CACHE_MINUTES
)
def _update_cache_time(self):
self._last_update = d1_common.date_time.utc_now()
def _update_last_cn(self, cn_base_url):
self._last_cn = cn_base_url
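# Hedged usage sketch; the CN base URL below is only an example value:
#
#   format_ids = FormatIDs()
#   ids = format_ids.get("https://cn.dataone.org/cn")
#   # A second call within CACHE_MINUTES against the same CN reuses the cache.
#   ids_again = format_ids.get("https://cn.dataone.org/cn")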
|
from tkinter import *
from .internals import *
import os
import importlib
class MainOS(Tk):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.app_path = os.path.join('os_main', 'apps')
self.wm_attributes('-fullscreen', True)
self.config(bg="orange")
self.taskbar = Taskbar(self)
self.taskbar.pack(side='bottom')
self.run_app("TestAtTheZoo")
def run_app(self, name):
if name in self.get_app_names():
app_module = importlib.import_module(f'os_main.apps.{name}')
app = app_module.App()
app.wm_attributes('-topmost', 1)
app.mainloop()
def get_app_names(self):
return os.listdir(self.app_path)
|
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import numpy as np
import time
# Initial shapes.
class Gaussian:
def __init__(self, a, b, c, k):
self.a = a
self.b = b
self.c = c
self.k = k
def r_at(self, x):
return self.a * np.exp(-(0.5 * (x - self.b) / self.c) ** 2) \
* np.cos(self.k * x)
def i_at(self, x):
return self.a * np.exp(-(0.5 * (x - self.b) / self.c) ** 2) \
* np.sin(self.k * x)
def at(self, x):
return np.transpose(np.array([self.r_at(x), self.i_at(x)]))
# Potential functions.
class ZeroPotential:
def __init__(self):
pass
def at(self, x):
return 0 * x
class SHOPotential:
def __init__(self, x, k):
self.x = x
self.k = k
def at(self, x):
return self.k * (x - self.x) ** 2
class WallPotential:
def __init__(self, x, a):
self.x = x
self.a = a
def at(self, x):
return np.where(x < self.x, 0, self.a)
# Staggered leapfrog solver.
class LeapfrogSolver:
def __init__(self, x_i, x_f, M, t_f, N, F, V, e = 0.1):
# The number of grid points between x_i and x_f.
self.M = M
# The spacing between grid points.
self.dx = (x_f - x_i) / M
# The number of time steps between 0 and t_f.
self.N = N
# The length of a time step.
self.dt = t_f / N
# This is a term used frequently during integration.
self.dtdx2 = self.dt / (2 * (self.dx ** 2))
# X.
self.X = np.array([x_i + m * self.dx for m in range(self.M + 1)])
# Psi.
self.Y = np.zeros((N + 1, M + 1, 2))
self.Y[0] = F(self.X)
# Probability.
self.P = np.zeros((N + 1, M + 1))
# Expectation of X.
self.U = np.zeros(N + 1)
# Potential.
self.V = V(self.X)
# Back-integrate the imaginary component of Psi half a time step.
d2R = self.Y[0, 2:, 0] - 2 * self.Y[0, 1:-1, 0] \
+ self.Y[0, 0:-2, 0]
d2R = np.concatenate(([0], d2R, [0]))
I = self.Y[0, :, 1] - self.dtdx2 * d2R + self.dt * \
np.multiply(self.V, self.Y[0, :, 0])
self.Y[0, :, 1] = I
# Normalize the initial state.
self.P[0] = self.norm_squared(0)
total_p = np.sum(self.P[0])
self.Y[0] = self.Y[0] / np.sqrt(total_p)
self.P[0] = self.P[0] / total_p
# Calculate the initial expected value of X.
self.U[0] = self.expected_x(0)
# Set the acceptable range for the total probability.
self.min_p = 1 - e
self.max_p = 1 + e
# Stores the last calculated time step, in case of failure.
self.MAX_N = N
# Calculate I(x, t + dt/2).
def next_i(self, n):
d2R = self.Y[n, 2:, 0] - 2 * self.Y[n, 1:-1, 0] \
+ self.Y[n, 0:-2, 0]
d2R = np.concatenate(([0], d2R, [0]))
I = self.Y[n, :, 1] + self.dtdx2 * d2R - self.dt * \
np.multiply(self.V, self.Y[n, :, 0])
return I
# Calculate R(x, t + dt).
def next_r(self, n, I):
d2I = I[2:] - 2 * I[1:-1] + I[0:-2]
d2I = np.concatenate(([0], d2I, [0]))
R = self.Y[n, :, 0] - self.dtdx2 * d2I + self.dt * \
np.multiply(self.V, I)
return R
# Calculate |Psi|^2.
def norm_squared(self, n):
P = np.sum(self.Y[n] ** 2, axis=1)
return P
# Calculate <X>.
def expected_x(self, n):
U = np.sum(np.multiply(self.X, self.P[n]))
return U
# Move forward one step in time.
def step(self, n):
I = self.next_i(n)
R = self.next_r(n, I)
self.Y[n + 1] = np.stack((R, I), axis = -1)
self.P[n + 1] = self.norm_squared(n + 1)
self.U[n + 1] = self.expected_x(n + 1)
# Propagate Psi from t=0 to t=t_f.
def integrate(self):
prev_time = time.time()
print("Propagating over " + str(self.N) + " time steps...")
for n in range(self.N):
self.step(n)
# Ensure the solution is still valid.
total_p = np.sum(self.P[n + 1])
if total_p < self.min_p:
print("Total probability below acceptable range at n=" \
+ str(n))
print("Total probability: " + str(total_p))
print("Minimum acceptable: " + str(self.min_p))
self.MAX_N = n
if total_p > self.max_p:
print("Total probability above acceptable range at n=" \
+ str(n))
print("Total probability: " + str(total_p))
print("Maximum acceptable: " + str(self.max_p))
self.MAX_N = n
# Give a progress report.
if (n % 1000 == 0) and (n > 0):
curr_time = time.time()
elap_time = curr_time - prev_time
prev_time = curr_time
print("Completed " + str(n) + " steps. Elapsed time: " \
+ str(elap_time))
return self.N
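# A note on the update scheme used above, restating next_i / next_r in formula
# form (hbar and the mass are absorbed into the units, as in the code):
#
#   I_m^{n+1/2} = I_m^{n-1/2} + (dt / (2 dx^2)) * (R_{m+1}^n - 2 R_m^n + R_{m-1}^n) - dt * V_m * R_m^n
#   R_m^{n+1}   = R_m^n       - (dt / (2 dx^2)) * (I_{m+1}^{n+1/2} - 2 I_m^{n+1/2} + I_{m-1}^{n+1/2}) + dt * V_m * I_m^{n+1/2}
#
# This is the staggered-leapfrog discretisation of i dPsi/dt = -1/2 d2Psi/dx2 + V Psi
# with Psi = R + iI, which is why the imaginary part is back-integrated half a
# time step in __init__ before the main loop starts.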
# Wavefunction animation.
class Animator:
def __init__(self, solver):
self.X = solver.X
self.R = solver.Y[:, :, 0]
self.I = solver.Y[:, :, 1]
self.P = solver.P
self.V = solver.V
self.T = np.array([n * solver.dt for n in range(solver.N + 1)])
self.U = solver.U
self.fig, ((self.r_ax, self.p_ax), (self.i_ax, self.u_ax)) \
= plt.subplots(2, 2)
self.fig.tight_layout(pad = 1.0)
self.r_ax.set_title("real")
self.i_ax.set_title("imaginary")
self.p_ax.set_title("probability")
self.u_ax.set_title("expected x")
max_p = np.amax(self.P)
self.r_ax.set_ylim(np.amin(self.R), np.amax(self.R))
self.i_ax.set_ylim(np.amin(self.I), np.amax(self.I))
self.p_ax.set_ylim(0, max_p)
self.u_ax.set_xlim(0, self.T[-1])
self.u_ax.set_ylim(np.amin(self.U), np.amax(self.U))
min_v = np.amin(self.V)
max_v = np.amax(self.V)
vp_scale = max_p / (max_v - min_v)
self.r_graph, = self.r_ax.plot(self.X, self.R[0])
self.i_graph, = self.i_ax.plot(self.X, self.I[0])
self.p_graph, = self.p_ax.plot(self.X, self.P[0])
self.vp_graph, = self.p_ax.plot(self.X, vp_scale \
* (self.V - min_v))
self.u_graph, = self.u_ax.plot(self.T[:1], self.U[:1])
def step(self, n):
self.r_graph.set_ydata(self.R[n])
self.i_graph.set_ydata(self.I[n])
self.p_graph.set_ydata(self.P[n])
self.u_graph.set_data(self.T[:n], self.U[:n])
def animate(self):
return FuncAnimation(self.fig, self.step, frames = len(self.T),\
interval = 1)
if __name__ == "__main__":
# The initial shape of the wave function.
F = Gaussian(1, 0, 0.3, 100)
#V = ZeroPotential() # A free particle.
#V = SHOPotential(0.5, 4000) # An SHO-like potential.
V = WallPotential(1, 4000) # A potential barrier.
solver = LeapfrogSolver(-2, 3, 1000, 0.1, 10000, F.at, V.at)
solver.integrate()
animator = Animator(solver)
animation = animator.animate()
plt.show()
|
import sys
import re
from .anjuke import community as anjuke_community
from .juejin import posts as juejin_posts
from .juejin import books as juejin_books
from . import __version__
def _print_welcom():
print('=========================================\nHi! Welcome to kcrawler v{}!\nhttps://github.com/kenblikylee/kcrawler\n-----------------------------------------'.format(__version__))
def _print_run(app, ver):
_print_welcom()
print('running kcrawler {} v{} ...'.format(app, ver))
_supported_apps = ['juejin', 'anjuke']
_arg_pattern = re.compile(r'--\w+')
def _parse_args(args_list):
argk = None
args = dict()
targets = list()
for arg in args_list:
if _arg_pattern.match(arg):
argk = arg.strip('-')
args[argk] = ''
elif argk:
args[argk] = arg
argk = None
else:
targets.append(arg)
return args, targets
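# Hedged example of how _parse_args splits a raw argument list into the options
# dict and positional targets (the flag names here are made up):
#
#   >>> _parse_args(['post', '--city', 'shanghai', '--limit', '10'])
#   ({'city': 'shanghai', 'limit': '10'}, ['post'])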
def init_app(app, ver, args):
if args is None:
args = sys.argv[1:]
_print_run(app, ver)
return _parse_args(args)
def juejin(args=None):
args, targets = init_app('juejin', '0.1.0', args)
tar = targets[0]
if tar == 'post':
juejin_posts.main(args)
elif tar == 'book':
juejin_books.main(args)
def anjuke(args=None):
args, targets = init_app('anjuke', '0.1.1', args)
anjuke_community.main(args)
def args(args=None):
args, targets = init_app('args', '0.1.0', args)
print(args, targets)
def main():
cmds = sys.argv[1:]
if len(cmds) == 0:
_print_welcom()
exit(0)
app = cmds[0]
if not app in _supported_apps:
print('{} not supported!'.format(app))
exit(0)
args = cmds[1:]
eval('{}(args)'.format(app))
|
import pytest
pytestmark = [
pytest.mark.django_db,
pytest.mark.usefixtures('ten_percent_promocode'),
]
@pytest.fixture(autouse=True)
def _freeze_stripe_course(mocker):
mocker.patch('stripebank.bank.StripeBank.ue', 70) # let it be forever :'(
@pytest.mark.parametrize('code', [
'TESTCODE',
'testcode',
'testcode ',
' testcode',
])
def test(api, course, code):
got = api.get(f'/api/v2/courses/{course.slug}/promocode/?promocode={code}')
assert got['price'] == 90450
assert got['formatted_price'] == '90 450'
assert got['currency'] == 'RUB'
assert got['currency_symbol'] == '₽'
@pytest.mark.parametrize(('bank', 'expected_price', 'expected_formatted_price', 'expected_currency', 'expected_currency_symbol'), [
('tinkoff_bank', 90450, '90 450', 'RUB', '₽'),
('tinkoff_credit', 90450, '90 450', 'RUB', '₽'),
('stripe', 1292, '1 292', 'USD', '$'),
])
def test_promocode_with_bank(api, course, bank, expected_price, expected_formatted_price, expected_currency, expected_currency_symbol):
got = api.get(f'/api/v2/courses/{course.slug}/promocode/?promocode=TESTCODE&desired_bank={bank}')
assert got['price'] == expected_price
assert got['formatted_price'] == expected_formatted_price
assert got['currency'] == expected_currency
assert got['currency_symbol'] == expected_currency_symbol
@pytest.mark.parametrize('code', [
'EV1L',
'',
])
def test_bad_promocode(api, course, code):
got = api.get(f'/api/v2/courses/{course.slug}/promocode/?promocode={code}')
assert got['price'] == 100500
def test_incompatible_promocode(api, course, another_course, ten_percent_promocode):
ten_percent_promocode.courses.add(course)
got = api.get(f'/api/v2/courses/{another_course.slug}/promocode/?promocode=TESTCODE')
assert got['price'] == 100500
def test_compatible_promocode(api, course, ten_percent_promocode):
ten_percent_promocode.courses.add(course)
got = api.get(f'/api/v2/courses/{course.slug}/promocode/?promocode=TESTCODE')
assert got['price'] == 90450
def test_without_promocode(api, course):
got = api.get(
f'/api/v2/courses/{course.slug}/promocode/',
)
assert got['price'] == 100500
assert got['formatted_price'] == '100 500'
assert got['currency'] == 'RUB'
assert got['currency_symbol'] == '₽'
|
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
# Check whether the MySQL database connection works, polling once every 3 seconds
import time
import MySQLdb
def sleeptime(hour,min,sec):
return hour*3600 + min*60 + sec;
second = sleeptime(0,0,3)
# Open the database connection
conn = MySQLdb.connect("localhost","root","123456","crm" )
while 1==1:
print "do action"
# Use the cursor() method to obtain a cursor
cursor = conn.cursor()
# Use the execute() method to run the SQL statement
string1 = "show GLOBAL STATUS LIKE 'Questions';"
cursor.execute(string1)
# Use fetchall() to retrieve the result rows
data = cursor.fetchall()
print data
# print "Database version : %s " % data
time.sleep(second)
# Close the database connection
conn.close()
|
import numpy as np
import csv
import datetime as dt
def split_csv(in_file_path, out_dir_path):
"""
Splits csv data file into smaller pieces using ticker names. It saves
split data as numpy arrays.
:param in_file_path: path to csv file
:param out_dir_path: path to output directory
:return: -
"""
# Indices of extracted columns.
ticker_i = 0
date_i = 1
open_i = 2
close_i = 5
volume_i = 6
# Format of the dates contained in the csv file.
date_format = "%Y-%m-%d"
in_file = open(in_file_path)
csv_reader = csv.reader(in_file)
# Skip the header row.
next(csv_reader)
# Int id of the currently processed ticker. His name will be switched to
# this id in the saved numpy array.
current_ticker_id = 0
previous_ticker = ""
ticker_data = []
for row in csv_reader:
# Convert numeric values in the row. We have to take care of possible
# empty strings.
open_price = float(row[open_i] or 0.0)
close_price = float(row[close_i] or 0.0)
volume = float(row[volume_i] or 0.0)
# Convert date to a tuple of (year, month, day)
date = dt.datetime.strptime(row[date_i], date_format).date()
date = date.year, date.month, date.day
# Get the ticker name.
ticker = row[ticker_i]
if ticker != previous_ticker and previous_ticker != "":
# Save accumulated data to numpy matrix file.
ticker_mat = np.array(ticker_data)
np.save(out_dir_path + "/" + previous_ticker, ticker_mat)
ticker_data = []
current_ticker_id += 1
# Accumulate current ticker data.
ticker_data.append(
[current_ticker_id, *date, open_price, close_price, volume]
)
previous_ticker = ticker
# Save the last ticker data.
ticker_mat = np.array(ticker_data)
np.save(out_dir_path + "/" + previous_ticker, ticker_mat)
in_file.close()
def main():
"""
Script main function.
:return: -
"""
# Parameters for split_csv call.
# Paths.
in_file_path = "../../data/raw/wiki_prices_data.csv"
out_dir_path = "../../data/split"
split_csv(in_file_path, out_dir_path)
return
if __name__ == "__main__":
main() |
import sys
from lib2to3 import refactor
# The original set of these fixes comes from lib3to2 (https://bitbucket.org/amentajo/lib3to2):
fix_names = set([
'libpasteurize.fixes.fix_add_all__future__imports', # from __future__ import absolute_import etc. on separate lines
'libpasteurize.fixes.fix_add_future_standard_library_import', # we force adding this import for now, even if it doesn't seem necessary to the fix_future_standard_library fixer, for ease of testing
# 'libfuturize.fixes.fix_order___future__imports', # consolidates to a single line to simplify testing -- UNFINISHED
'libpasteurize.fixes.fix_future_builtins', # adds "from future.builtins import *"
'libfuturize.fixes.fix_future_standard_library', # adds "from future import standard_library"
'libpasteurize.fixes.fix_annotations',
# 'libpasteurize.fixes.fix_bitlength', # ints have this in Py2.7
# 'libpasteurize.fixes.fix_bool', # need a decorator or Mixin
# 'libpasteurize.fixes.fix_bytes', # leave bytes as bytes
# 'libpasteurize.fixes.fix_classdecorator', # available in
# Py2.6+
# 'libpasteurize.fixes.fix_collections', hmmm ...
# 'libpasteurize.fixes.fix_dctsetcomp', # avail in Py27
'libpasteurize.fixes.fix_division', # yes
# 'libpasteurize.fixes.fix_except', # avail in Py2.6+
# 'libpasteurize.fixes.fix_features', # ?
'libpasteurize.fixes.fix_fullargspec',
# 'libpasteurize.fixes.fix_funcattrs',
'libpasteurize.fixes.fix_getcwd',
'libpasteurize.fixes.fix_imports', # adds "from future import standard_library"
'libpasteurize.fixes.fix_imports2',
# 'libpasteurize.fixes.fix_input',
# 'libpasteurize.fixes.fix_int',
# 'libpasteurize.fixes.fix_intern',
# 'libpasteurize.fixes.fix_itertools',
'libpasteurize.fixes.fix_kwargs', # yes, we want this
# 'libpasteurize.fixes.fix_memoryview',
# 'libpasteurize.fixes.fix_metaclass', # write a custom handler for
# this
# 'libpasteurize.fixes.fix_methodattrs', # __func__ and __self__ seem to be defined on Py2.7 already
'libpasteurize.fixes.fix_newstyle', # yes, we want this: explicit inheritance from object. Without new-style classes in Py2, super() will break etc.
# 'libpasteurize.fixes.fix_next', # use a decorator for this
# 'libpasteurize.fixes.fix_numliterals', # prob not
# 'libpasteurize.fixes.fix_open', # huh?
# 'libpasteurize.fixes.fix_print', # no way
'libpasteurize.fixes.fix_printfunction', # adds __future__ import print_function
# 'libpasteurize.fixes.fix_raise_', # TODO: get this working!
# 'libpasteurize.fixes.fix_range', # nope
# 'libpasteurize.fixes.fix_reduce',
# 'libpasteurize.fixes.fix_setliteral',
# 'libpasteurize.fixes.fix_str',
# 'libpasteurize.fixes.fix_super', # maybe, if our magic super() isn't robust enough
'libpasteurize.fixes.fix_throw', # yes, if Py3 supports it
# 'libpasteurize.fixes.fix_unittest',
'libpasteurize.fixes.fix_unpacking', # yes, this is useful
# 'libpasteurize.fixes.fix_with' # way out of date
])
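# Hedged sketch of how a fixer set like this is typically driven through
# lib2to3's RefactoringTool (the source string is only an illustration):
#
#   tool = refactor.RefactoringTool(sorted(fix_names))
#   tree = tool.refactor_string("print('hello')\n", "<example>")
#   print(str(tree))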
|