from utils.print import print_block
import pandas as pd
from utils.prediction import PredictionType
def generate_cf_for_all(packs, cf_func, feature_names):
    output_column_names = [f'origin_{f}' for f in feature_names] + [
        f'cf_{f}' for f in feature_names] + ['time(sec)'] + ["prediction_type"]
# Create an empty dataframe for appending data.
result_df = pd.DataFrame({}, columns=output_column_names)
    # Loop over each prediction type.
for p_t in [PredictionType.TruePositive, PredictionType.TrueNegative, PredictionType.FalsePositive, PredictionType.FalseNegative]:
print_block("", "Doing %s" % p_t.value)
        # Get the number of instances so we can iterate over all of them for this prediction type.
total_length = packs.get_len(p_t)
        # Loop over every instance of this prediction type.
for i in range(total_length):
print_block("Instance %d" % i, "Running...")
            # Get the result (including the counterfactual and running time) from cf_func.
returned_case = cf_func(packs.get_instance(p_t, i))
            # Use the information from returned_case to build a one-row dataframe for appending to result_df.
df_i = pd.DataFrame([
returned_case["original_vector"] + returned_case['cf'] + [returned_case['time'], returned_case['prediction_type']]], columns=output_column_names)
            # Append the current result to the overall result dataframe.
            result_df = pd.concat([result_df, df_i], ignore_index=True)
return result_df
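
# A minimal sketch (illustrative only) of the contract cf_func is assumed to satisfy above:
# given one instance, it returns the original feature vector, the counterfactual vector,
# the elapsed time in seconds, and the label used for the "prediction_type" column.
# The names dummy_cf_func, packs and the feature names below are hypothetical.
#
# import time
#
# def dummy_cf_func(instance):
#     start = time.time()
#     cf_vector = list(instance)  # a real method would perturb the instance here
#     return {
#         "original_vector": list(instance),
#         "cf": cf_vector,
#         "time": time.time() - start,
#         "prediction_type": "TruePositive",
#     }
#
# result_df = generate_cf_for_all(packs, dummy_cf_func, feature_names=["f1", "f2"])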
|
"""Test aspects of lldb commands on universal binaries."""
import os, time
import unittest2
import lldb
from lldbtest import *
import lldbutil
class UniversalTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number to break inside main().
self.line = line_number('main.c', '// Set break point at this line.')
@python_api_test
@skipUnlessDarwin
@unittest2.skipUnless(hasattr(os, "uname") and os.uname()[4] in ['i386', 'x86_64'],
"requires i386 or x86_64")
def test_sbdebugger_create_target_with_file_and_target_triple(self):
"""Test the SBDebugger.CreateTargetWithFileAndTargetTriple() API."""
# Invoke the default build rule.
self.buildDefault()
# Note that "testit" is a universal binary.
exe = os.path.join(os.getcwd(), "testit")
# Create a target by the debugger.
target = self.dbg.CreateTargetWithFileAndTargetTriple(exe, "i386-apple-macosx")
self.assertTrue(target, VALID_TARGET)
# Now launch the process, and do not stop at entry point.
process = target.LaunchSimple (None, None, self.get_process_working_directory())
self.assertTrue(process, PROCESS_IS_VALID)
@skipUnlessDarwin
@unittest2.skipUnless(hasattr(os, "uname") and os.uname()[4] in ['i386', 'x86_64'],
"requires i386 or x86_64")
def test_process_launch_for_universal(self):
"""Test process launch of a universal binary."""
from lldbutil import print_registers
# Invoke the default build rule.
self.buildDefault()
# Note that "testit" is a universal binary.
exe = os.path.join(os.getcwd(), "testit")
# By default, x86_64 is assumed if no architecture is specified.
self.expect("file " + exe, CURRENT_EXECUTABLE_SET,
startstr = "Current executable set to ",
substrs = ["testit' (x86_64)."])
# Break inside the main.
lldbutil.run_break_set_by_file_and_line (self, "main.c", self.line, num_expected_locations=1, loc_exact=True)
# We should be able to launch the x86_64 executable.
self.runCmd("run", RUN_SUCCEEDED)
# Check whether we have a 64-bit process launched.
target = self.dbg.GetSelectedTarget()
process = target.GetProcess()
self.assertTrue(target and process and
self.invoke(process, 'GetAddressByteSize') == 8,
"64-bit process launched")
frame = process.GetThreadAtIndex(0).GetFrameAtIndex(0)
registers = print_registers(frame, string_buffer=True)
self.expect(registers, exe=False,
substrs = ['Name: rax'])
self.runCmd("continue")
# Now specify i386 as the architecture for "testit".
self.expect("file -a i386 " + exe, CURRENT_EXECUTABLE_SET,
startstr = "Current executable set to ",
substrs = ["testit' (i386)."])
# Break inside the main.
lldbutil.run_break_set_by_file_and_line (self, "main.c", self.line, num_expected_locations=1, loc_exact=True)
# We should be able to launch the i386 executable as well.
self.runCmd("run", RUN_SUCCEEDED)
# Check whether we have a 32-bit process launched.
target = self.dbg.GetSelectedTarget()
process = target.GetProcess()
self.assertTrue(target and process,
"32-bit process launched")
pointerSize = self.invoke(process, 'GetAddressByteSize')
self.assertTrue(pointerSize == 4,
"AddressByteSize of 32-bit process should be 4, got %d instead." % pointerSize)
frame = process.GetThreadAtIndex(0).GetFrameAtIndex(0)
registers = print_registers(frame, string_buffer=True)
self.expect(registers, exe=False,
substrs = ['Name: eax'])
self.runCmd("continue")
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
|
from .baseinternal import BaseInternal
from math import sin, cos, radians, sqrt
import warnings
class LiquidRing(BaseInternal):
def __init__(self):
return
def draw(self, ctx):
if self.parent is None:
            warnings.warn("Internal has no parent set!")
return
unit = self.parent
lines = 4
bladeLength = unit.size[0] / 2 * 0.5
for i in range(lines):
angle = 180 / lines * i
angleInRadians = radians(angle)
dxs = bladeLength * cos(angleInRadians)
dys = bladeLength * sin(angleInRadians)
ctx.line(
(
unit.position[0] + unit.size[0] / 2 + dxs,
unit.position[1] + unit.size[1] / 2 + dys,
),
(
unit.position[0] + unit.size[0] / 2 - dxs,
unit.position[1] + unit.size[1] / 2 - dys,
),
unit.lineColor,
unit.lineSize / 2,
)
return
|
# Copyright 2016 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from touchdown.tests.aws import StubberTestCase
from touchdown.tests.stubs.aws import CacheClusterStubber, LaunchConfigurationStubber
class TestCacheClusterCreation(StubberTestCase):
def test_create_cache_cluster(self):
goal = self.create_goal("apply")
cache_cluster = self.fixtures.enter_context(
CacheClusterStubber(
goal.get_service(
self.aws.add_cache_cluster(
name="my-cache-cluster", instance_class="cache.m3.medium"
),
"apply",
)
)
)
cache_cluster.add_describe_cache_clusters_empty_response()
cache_cluster.add_create_cache_cluster()
cache_cluster.add_describe_cache_clusters_one_response(status="creating")
cache_cluster.add_describe_cache_clusters_one_response()
cache_cluster.add_describe_cache_clusters_one_response()
goal.execute()
def test_create_cache_cluster_idempotent(self):
goal = self.create_goal("apply")
cache_cluster = self.fixtures.enter_context(
CacheClusterStubber(
goal.get_service(
self.aws.add_cache_cluster(
name="my-cache-cluster", instance_class="cache.m3.medium"
),
"apply",
)
)
)
cache_cluster.add_describe_cache_clusters_one_response()
self.assertEqual(len(list(goal.plan())), 0)
self.assertEqual(len(goal.get_changes(cache_cluster.resource)), 0)
class TestCacheClusterDeletion(StubberTestCase):
def test_delete_cache_cluster(self):
goal = self.create_goal("destroy")
cache_cluster = self.fixtures.enter_context(
CacheClusterStubber(
goal.get_service(
self.aws.add_cache_cluster(
name="my-cache-cluster", instance_class="cache.m3.medium"
),
"destroy",
)
)
)
cache_cluster.add_describe_cache_clusters_one_response()
cache_cluster.add_delete_cache_cluster()
# Wait for it to go away
cache_cluster.add_describe_cache_clusters_one_response(status="deleting")
cache_cluster.add_describe_cache_clusters_empty_response()
goal.execute()
def test_delete_cache_cluster_idempotent(self):
goal = self.create_goal("destroy")
cache_cluster = self.fixtures.enter_context(
CacheClusterStubber(
goal.get_service(
self.aws.add_cache_cluster(
name="my-cache-cluster", instance_class="cache.m3.medium"
),
"destroy",
)
)
)
cache_cluster.add_describe_cache_clusters_empty_response()
self.assertEqual(len(list(goal.plan())), 0)
self.assertEqual(len(goal.get_changes(cache_cluster.resource)), 0)
class TestCacheClusterComplications(StubberTestCase):
def test_with_launch_configuration(self):
goal = self.create_goal("apply")
cache_cluster = self.fixtures.enter_context(
CacheClusterStubber(
goal.get_service(
self.aws.add_cache_cluster(
name="my-cache-cluster", instance_class="cache.m3.medium"
),
"apply",
)
)
)
cache_cluster.add_describe_cache_clusters_empty_response()
cache_cluster.add_create_cache_cluster()
cache_cluster.add_describe_cache_clusters_one_response(status="creating")
cache_cluster.add_describe_cache_clusters_one_response()
cache_cluster.add_describe_cache_clusters_one_response()
launch_config = self.fixtures.enter_context(
LaunchConfigurationStubber(
goal.get_service(
self.aws.add_launch_configuration(
name="my-test-lc",
image="ami-cba130bc",
instance_type="t2.micro",
json_user_data={
"REDIS_ADDRESS": cache_cluster.resource.endpoint_address,
"REDIS_PORT": cache_cluster.resource.endpoint_port,
},
),
"apply",
)
)
)
user_data = (
'{"REDIS_ADDRESS": "mycacheclu.q68zge.ng.0001.use1devo.elmo-dev.amazonaws.com", '
'"REDIS_PORT": 6379}'
)
launch_config.add_describe_launch_configurations_empty_response()
launch_config.add_describe_launch_configurations_empty_response()
launch_config.add_create_launch_configuration(user_data=user_data)
launch_config.add_describe_launch_configurations_one_response(
user_data=user_data
)
launch_config.add_describe_launch_configurations_one_response(
user_data=user_data
)
launch_config.add_describe_launch_configurations_one_response(
user_data=user_data
)
goal.execute()
|
def main():
n = int(input())
x,y = map(int,input().split())
k = int(input())
a = list(map(int,input().split()))
a.append(x)
a.append(y)
if len(a) == len(list(set(a))):
print("YES")
else:
print("NO")
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
"""
Reads output of Transcar sim, yielding Incoherent Scatter Radar plasma parameters.
python transcar2isr.py tests/data/beam52
"""
from pathlib import Path
from matplotlib.pyplot import show
from argparse import ArgumentParser
from datetime import datetime
#
import transcarread.plots as plots
import transcarread as tr
def compute(path: Path, tReq: datetime, plot_params: list, verbose: bool):
path = Path(path).expanduser().resolve()
# %% get sim parameters
datfn = path / "dir.input/DATCAR"
tctime = tr.readTranscarInput(datfn)
# %% load transcar output
iono = tr.read_tra(path, tReq)
# %% do plot
plots.plot_isr(iono, path, tctime, plot_params, verbose)
return iono, tctime
def main():
p = ArgumentParser(description="reads dir.output/transcar_output")
p.add_argument("path", help="path containing dir.output/transcar_output file")
p.add_argument("--tReq", help="time to extract data at")
p.add_argument("-v", "--verbose", help="more plots", action="store_true")
p.add_argument("-p", "--params", help="only plot these params", choices=["ne", "vi", "Ti", "Te"], nargs="+")
p = p.parse_args()
compute(p.path, p.tReq, p.params, p.verbose)
show()
if __name__ == "__main__":
main()
|
import numpy as np
from common.constant import *
from common.clusteringAlgorithms import ClusteringAlgorithm
from utils.distance import get_EuclideanDistance_matrix
class FCMA(ClusteringAlgorithm):
def __init__(self, X : np.ndarray, cluster_num : int, m = 2):
super(FCMA, self).__init__(X, cluster_num)
# hyper parameter
self.m = m
def __Update_U__(self, V : np.ndarray, U : np.ndarray) -> np.ndarray:
distance_matrix = get_EuclideanDistance_matrix(self.X, V)
times = 2 / (self.m - 1)
for i in range(self.c):
for j in range(self.n):
                if distance_matrix[j][i] > epsilon:  # x_j is not too close to v_i, so compute its membership normally; otherwise (else branch) x_j is treated as belonging to cluster i outright
_sum = 0
for h in range(self.c):
                        if distance_matrix[j][h] > epsilon:  # skip any v_h that x_j is too close to; such an x_j effectively sits at that cluster's center
_sum += (distance_matrix[j][i] / distance_matrix[j][h]) ** times
U[i][j] = 1 / _sum
else:
U[i][j] = 1
return U
def __Update_V__(self, U : np.ndarray) -> np.ndarray:
U_power_m = U ** self.m
        U_row_sum = np.sum(U_power_m, axis = 1)  # \sum_{j = 1}^{n} \mu_{ij}^m
# U_row_sum.shape = (c,)
donation = np.zeros((self.n, self.c))
for j in range(self.c):
if U_row_sum[j] > epsilon:
donation[:, j] = U_power_m.T[:, j] / U_row_sum[j]
V = np.matmul(self.X, donation)
return V
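
# A minimal usage sketch (an illustration, not part of the original module): the standard
# fuzzy c-means loop alternates __Update_V__ and __Update_U__ until the membership matrix
# stops changing. It assumes X is laid out as (n_features, n_samples), consistent with
# V = np.matmul(self.X, donation) above, and that the base ClusteringAlgorithm sets
# self.c (cluster count) and self.n (sample count) as the update methods expect.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    X_demo = rng.normal(size=(2, 60))              # 2 features, 60 samples
    fcm = FCMA(X_demo, cluster_num=3, m=2)
    U = rng.dirichlet(np.ones(3), size=60).T       # fuzzy memberships, shape (c, n)
    for _ in range(100):
        V = fcm.__Update_V__(U)                    # update cluster centers from memberships
        U_new = fcm.__Update_U__(V, U.copy())      # update memberships from the new centers
        if np.max(np.abs(U_new - U)) < 1e-6:       # stop once the memberships converge
            U = U_new
            break
        U = U_new
    print("estimated cluster centers:\n", V)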
|
def foo(y1):
y1 + 1
print y1
|
#!/usr/bin/python
from expanduino.subdevice import Subdevice
from expanduino.codec import *
from enum import IntEnum
from time import time
from cached_property import cached_property
import asyncio
import os
import termios
import re
from fcntl import ioctl
from ..utils import run_coroutines, create_link, forever, fd_reader
EXTPROC = 0o200000
TIOCPKT_IOCTL = 64
BAUD_CONSTANTS = {
getattr(termios, x): int(x[1:])
for x in filter(lambda x: re.match("B\\d+", x), dir(termios))
}
CHARACTER_SIZE_CONSTANTS = {
getattr(termios, x): int(x[2:])
for x in filter(lambda x: re.match("CS\\d+", x), dir(termios))
}
class SerialSubdevice(Subdevice):
class Command(IntEnum):
NUM_SERIALS = 0,
NAME = 1,
WRITE = 2,
READ = 3,
AVAILABLE = 4
class Serial:
def __init__(self, subdevice, serialNum):
self.subdevice = subdevice
self.serialNum = serialNum
self.ptyMaster = None
self.ptySlave = None
self.ptyLink = None
@cached_property
def name(self):
return self.subdevice.call(SerialSubdevice.Command.NAME, args=[self.serialNum], parser=parseString)
@property
def available(self):
return self.subdevice.call(SerialSubdevice.Command.AVAILABLE, args=[self.serialNum], parser=parseByte)
def read(self, n):
return self.subdevice.call(SerialSubdevice.Command.READ, args=[self.serialNum, n], parser=parseBytes)
def write(self, data):
self.subdevice.call(SerialSubdevice.Command.WRITE, args=bytes([self.serialNum]) + bytes(data))
def __str__(self):
return "#%-3d %s" % (self.serialNum, self.name)
async def attach(self):
if self.ptyMaster:
return
self.ptyMaster, self.ptySlave = os.openpty()
try:
attr = termios.tcgetattr(self.ptyMaster)
attr[3] |= EXTPROC
termios.tcsetattr(self.ptyMaster, termios.TCSANOW, attr)
ioctl(self.ptyMaster, termios.TIOCPKT, b'\1')
def got_packet(packet):
if packet[0] == termios.TIOCPKT_DATA:
self.write(packet[1:])
if packet[0] & TIOCPKT_IOCTL:
attr = termios.tcgetattr(self.ptyMaster)
                        # Don't let the slave clear the EXTPROC flag (e.g., screen does so)
# IMO, allowing the slave fd to do this sounds pretty dumb
if not attr[3] & EXTPROC:
attr[3] |= EXTPROC
termios.tcsetattr(self.ptyMaster, termios.TCSANOW, attr)
ibaud = BAUD_CONSTANTS[attr[4]]
obaud = BAUD_CONSTANTS[attr[5]]
#FIXME: Pty driver assumes 8 bits, no parity, ALWAYS
#https://github.com/torvalds/linux/blob/master/drivers/tty/pty.c#L290-L291
bits = CHARACTER_SIZE_CONSTANTS[attr[2] & termios.CSIZE]
if attr[2] & termios.PARENB:
if attr[2] & termios.PARODD:
parity = 'O'
else:
parity = 'E'
else:
parity = 'N'
if attr[2] & termios.CSTOPB:
stop_bits = 2
else:
stop_bits = 1
print("Changed %s config: %d:%d %d%s%d" % (self, ibaud, obaud, bits, parity, stop_bits))
#TODO: Reconfigure the port
async with create_link(os.ttyname(self.ptySlave), "/dev/ttyExpanduino%d") as link:
async with fd_reader(self.ptyMaster, n=20, callback=got_packet):
await forever()
finally:
os.close(self.ptySlave)
os.close(self.ptyMaster)
self.ptyMaster = None
def handleInterruption(self, data):
if self.ptyMaster:
os.write(self.ptyMaster, data)
def __init__(self, container, devNum):
Subdevice.__init__(self, container, devNum)
def handleInterruption(self, data):
if data:
serialNum = data[0]
payload = data[1:]
if serialNum < len(self.serials):
self.serials[serialNum].handleInterruption(payload)
@cached_property
def serials(self):
num_serials = self.call(SerialSubdevice.Command.NUM_SERIALS, parser=parseByte)
return [
SerialSubdevice.Serial(self, i)
for i in range(num_serials)
]
async def attach(self):
serials = self.serials
with self.with_interruptions():
coroutines = []
for serial in serials:
print(" ", serial)
coroutines.append(serial.attach())
await run_coroutines(*coroutines)
|
from django.urls import path
from . import views
urlpatterns = [
path("", views.index),
path("<int:month>", views.monthly_challege_by_number),
path("<str:month>", views.monthly_challenge, name="month-challenge")
]
|
from flask import jsonify, request, abort
from flask_login import current_user, login_user, logout_user
from datetime import datetime
import uuid
import json
from random import random
from searchTwd import searchTwd
from searchTwdComponents import sanitizeTwd
from searchTwdComponents import matchInventory
from searchCrypt import searchCrypt
from searchLibrary import searchLibrary
from searchCryptComponents import get_crypt_by_id
from searchLibraryComponents import get_library_by_id
from deckExport import deckExport
from deckExportAll import deckExportAll
from deckImport import deckImport
from deckProxy import deckProxy
from inventoryExport import inventoryExport
from inventoryImportParse import inventoryImportParse
from api import app
from api import db
from models import User
from models import Deck
@app.route('/api/inventory', methods=['GET'])
def listInventory():
try:
if current_user.is_authenticated:
# Fix users without inventory
if not current_user.inventory:
current_user.inventory = {}
# Fix bad imports
if 'undefined' in current_user.inventory:
new_cards = current_user.inventory.copy()
del new_cards['undefined']
current_user.inventory = new_cards
db.session.commit()
crypt = {}
library = {}
for k, v in current_user.inventory.items():
k = int(k)
if k > 200000:
crypt[k] = {'q': v}
elif k < 200000:
library[k] = {'q': v}
return jsonify({
"crypt": crypt,
"library": library,
})
except AttributeError:
return jsonify({'error': 'not logged'})
@app.route('/api/inventory/export', methods=['POST'])
def inventoryExportRoute():
try:
deck = {
'cards': current_user.inventory,
'author': current_user.public_name,
}
result = inventoryExport(deck, request.json['format'])
return jsonify(result)
except Exception:
pass
@app.route('/api/inventory/import', methods=['POST'])
def inventoryImportRoute():
if current_user.is_authenticated:
i = current_user.inventory
try:
new_cards = inventoryImportParse(request.json)
merged_cards = i.copy() if i else {}
for k, v in new_cards.items():
if k not in merged_cards:
merged_cards[k] = v
else:
merged_cards[k] = merged_cards[k] + v
current_user.inventory = merged_cards.copy()
db.session.commit()
return jsonify(new_cards)
except Exception:
return jsonify("error")
else:
        return jsonify({'error': 'Not logged in.'})
@app.route('/api/inventory/delete', methods=['GET'])
def deleteInventory():
try:
if current_user.is_authenticated:
current_user.inventory = {}
db.session.commit()
return jsonify({'delete inventory': 'success'})
except AttributeError:
return jsonify({'error': 'not logged'})
@app.route('/api/inventory/add', methods=['POST'])
def inventoryAddCard():
if current_user.is_authenticated:
i = current_user.inventory
try:
new_cards = request.json
merged_cards = i.copy() if i else {}
for k, v in new_cards.items():
if k not in merged_cards:
merged_cards[k] = v
else:
merged_cards[k] = merged_cards[k] + v
current_user.inventory = merged_cards.copy()
db.session.commit()
return jsonify({'inventory card added': 'success'})
except Exception:
pass
else:
        return jsonify({'error': 'Not logged in.'})
@app.route('/api/inventory/del', methods=['POST'])
def inventoryDelCard():
if current_user.is_authenticated:
i = current_user.inventory
try:
new_cards = request.json
merged_cards = i.copy() if i else {}
for k, v in new_cards.items():
if k in merged_cards:
if merged_cards[k] > v:
merged_cards[k] = merged_cards[k] - v
else:
del merged_cards[k]
current_user.inventory = merged_cards.copy()
db.session.commit()
return jsonify({'inventory card deleted': 'success'})
except Exception:
pass
else:
        return jsonify({'error': 'Not logged in.'})
@app.route('/api/inventory/change', methods=['POST'])
def inventoryChangeCard():
if current_user.is_authenticated:
i = current_user.inventory
try:
new_cards = request.json
merged_cards = i.copy() if i else {}
for k, v in new_cards.items():
if v < 0:
del merged_cards[k]
else:
merged_cards[k] = v
current_user.inventory = merged_cards.copy()
db.session.commit()
return jsonify({'inventory card change': 'success'})
except Exception:
pass
else:
        return jsonify({'error': 'Not logged in.'})
@app.route('/api/deck/<string:deckid>', methods=['GET'])
def showDeck(deckid):
if len(deckid) == 32:
decks = {}
deck = Deck.query.filter_by(deckid=deckid).first()
if not deck:
abort(400)
crypt = {}
library = {}
for k, v in deck.cards.items():
k = int(k)
if k > 200000:
crypt[k] = {'q': v}
elif k < 200000:
library[k] = {'q': v}
decks[deckid] = {
'name': deck.name,
'owner': deck.author.username,
'author': deck.author_public_name,
'description': deck.description,
'crypt': crypt,
'library': library,
'deckid': deck.deckid,
'timestamp': deck.timestamp,
}
return jsonify(decks)
else:
with open("twdDecksById.json", "r") as twdDecks_file:
twdDecks = json.load(twdDecks_file)
try:
deck = twdDecks[deckid]
comments = deck['description']
deck['description'] = 'Date: ' + deck['date'] + '\n'
deck['description'] += 'Players: ' + str(
deck['players']) + '\n'
deck['description'] += 'Event: ' + deck['event'] + '\n'
deck['description'] += 'Location: ' + deck['location'] + '\n'
if comments:
deck['description'] += '\n' + comments
deck['author'] = deck['player']
del (deck['player'])
del (deck['disciplines'])
del (deck['format'])
del (deck['event'])
del (deck['link'])
del (deck['location'])
del (deck['players'])
del (deck['timestamp'])
del (deck['score'])
del (deck['cardtypes_ratio'])
del (deck['libraryTotal'])
decks = {deckid: deck}
return jsonify(decks)
except KeyError:
abort(400)
@app.route('/api/deck/<string:deckid>', methods=['PUT'])
def updateDeck(deckid):
if current_user.is_authenticated:
d = Deck.query.filter_by(author=current_user, deckid=deckid).first()
d.timestamp = datetime.utcnow()
try:
if 'cardChange' in request.json:
new_cards = request.json['cardChange']
merged_cards = d.cards.copy()
for k, v in new_cards.items():
if v < 0:
del merged_cards[k]
else:
merged_cards[k] = v
d.cards = merged_cards.copy()
except Exception:
pass
try:
if 'cardAdd' in request.json:
new_cards = request.json['cardAdd']
merged_cards = d.cards.copy()
for k, v in new_cards.items():
if k not in merged_cards:
merged_cards[k] = v
d.cards = merged_cards.copy()
except Exception:
pass
try:
if 'name' in request.json:
d.name = request.json['name']
if d.master:
master = Deck.query.filter_by(author=current_user,
deckid=d.master).first()
master.name = request.json['name']
for i in master.branches:
j = Deck.query.filter_by(author=current_user,
deckid=i).first()
j.name = request.json['name']
elif d.branches:
for i in d.branches:
j = Deck.query.filter_by(author=current_user,
deckid=i).first()
j.name = request.json['name']
except Exception:
pass
try:
if 'description' in request.json:
d.description = request.json['description']
except Exception:
pass
try:
if 'author' in request.json:
d.author_public_name = request.json['author'] or ''
if d.master:
master = Deck.query.filter_by(author=current_user,
deckid=d.master).first()
master.author_public_name = request.json['author']
for i in master.branches:
j = Deck.query.filter_by(author=current_user,
deckid=i).first()
j.author_public_name = request.json['author']
elif d.branches:
for i in d.branches:
j = Deck.query.filter_by(author=current_user,
deckid=i).first()
j.author_public_name = request.json['author']
except Exception:
pass
try:
if 'branchName' in request.json:
d.branch_name = request.json['branchName'] or ''
except Exception:
pass
try:
if 'makeFlexible' in request.json:
if request.json['makeFlexible'] == 'all':
d.used_in_inventory = {}
d.inventory_type = 's'
else:
r = str(request.json['makeFlexible'])
used = d.used_in_inventory.copy()
used[r] = 's'
d.used_in_inventory = used
except Exception:
pass
try:
if 'makeFixed' in request.json:
if request.json['makeFixed'] == 'all':
d.used_in_inventory = {}
d.inventory_type = 'h'
else:
r = str(request.json['makeFixed'])
used = d.used_in_inventory.copy()
used[r] = 'h'
d.used_in_inventory = used
except Exception:
pass
try:
if 'makeClear' in request.json:
if request.json['makeClear'] == 'all':
d.used_in_inventory = {}
d.inventory_type = ''
else:
r = str(request.json['makeClear'])
used = d.used_in_inventory.copy()
del (used[r])
d.used_in_inventory = used
except Exception:
pass
try:
if 'setTags' in request.json:
new_tags = request.json['setTags']
d.tags = new_tags
except Exception:
pass
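        # If the edited deck is currently a branch, promote it to master below: remove it from
        # the old master's branch list, add the old master as one of its branches, and re-point
        # every branch at the newly promoted deck.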
if d.master:
old_master = Deck.query.filter_by(author=current_user,
deckid=d.master).first()
branches = old_master.branches.copy()
branches.remove(d.deckid)
branches.append(old_master.deckid)
d.branches = branches
d.master = None
old_master.branches = None
for b in branches:
branch_deck = Deck.query.filter_by(author=current_user,
deckid=b).first()
branch_deck.master = d.deckid
db.session.commit()
return jsonify({'updated deck': d.deckid})
else:
        return jsonify({'error': 'Not logged in.'})
@app.route('/api/deck/parse', methods=['POST'])
def parseDeck():
try:
crypt = {}
library = {}
cards = request.json['cards']
for k, v in cards.items():
k = int(k)
if k > 200000:
crypt[k] = {'c': get_crypt_by_id(k), 'q': v}
elif k < 200000:
library[k] = {'c': get_library_by_id(k), 'q': v}
decks = {}
decks['deckInUrl'] = {
'name': '',
'owner': '',
'author': '',
'description': '',
'deckid': '',
'crypt': crypt,
'library': library,
'timestamp': datetime.utcnow()
}
if 'name' in request.json:
decks['deckInUrl']['name'] = request.json['name']
if 'author' in request.json:
decks['deckInUrl']['author'] = request.json['author']
if 'description' in request.json:
decks['deckInUrl']['description'] = request.json['description']
return jsonify(decks)
except AttributeError:
return jsonify({'error': 'not logged'})
@app.route('/api/decks', methods=['GET'])
def listDecks():
try:
decks = {}
for deck in current_user.decks.all():
# Fix pre-inventory period decks
if not deck.used_in_inventory:
deck.used_in_inventory = {}
db.session.commit()
if not deck.inventory_type:
deck.inventory_type = ''
db.session.commit()
# Fix pre-tags decks
if not deck.tags:
deck.tags = []
db.session.commit()
# Fix bad imports
if 'undefined' in deck.cards:
new_cards = deck.cards.copy()
del new_cards['undefined']
deck.cards = new_cards
db.session.commit()
# Fix branches
if deck.master:
d = Deck.query.filter_by(author=current_user,
deckid=deck.master).first()
if not d:
print(deck.deckid, 'del branch without master')
db.session.delete(deck)
db.session.commit()
if deck.branches:
for b in deck.branches:
d = Deck.query.filter_by(author=current_user,
deckid=b).first()
if not d:
print(b, 'fix not-existing branch')
old_branches = deck.branches.copy()
old_branches.remove(b)
deck.branches = old_branches
db.session.commit()
if deck.branches:
for b in deck.branches:
d = Deck.query.filter_by(author=current_user,
deckid=b).first()
if d.master != deck.deckid:
print(b, 'fix bad master')
d.master = deck.deckid
db.session.commit()
# Fix cards
cards = {}
for k, v in deck.cards.items():
cards[str(k)] = v
deck.cards = cards
crypt = {}
library = {}
for k, v in deck.cards.items():
int_k = int(k)
if int_k > 200000:
crypt[int_k] = {'q': v}
if k in deck.used_in_inventory:
crypt[int_k]['i'] = deck.used_in_inventory[k]
elif int_k < 200000:
library[int_k] = {'q': v}
if k in deck.used_in_inventory:
library[int_k]['i'] = deck.used_in_inventory[k]
decks[deck.deckid] = {
'name': deck.name,
'branchName': deck.branch_name,
'owner': deck.author.username,
'author': deck.author_public_name,
'description': deck.description,
'crypt': crypt,
'library': library,
'deckid': deck.deckid,
'inventory_type': deck.inventory_type,
'timestamp': deck.timestamp,
'master': deck.master,
'branches': deck.branches,
'tags': deck.tags,
}
return jsonify(decks)
except AttributeError:
return jsonify({'error': 'not logged'})
@app.route('/api/decks/create', methods=['POST'])
def newDeck():
if current_user.is_authenticated:
try:
deckid = uuid.uuid4().hex
d = Deck(
deckid=deckid,
name=request.json['deckname'],
author_public_name=request.json['author']
if 'author' in request.json else current_user.public_name,
description=request.json['description']
if 'description' in request.json else '',
author=current_user,
inventory_type='',
tags=[],
used_in_inventory={},
cards=request.json['cards'] if 'cards' in request.json else {})
db.session.add(d)
db.session.commit()
return jsonify({
'new deck created': request.json['deckname'],
'deckid': deckid,
})
except Exception:
pass
else:
        return jsonify({'error': 'Not logged in.'})
@app.route('/api/branch/create', methods=['POST'])
def createBranch():
if current_user.is_authenticated:
master = Deck.query.filter_by(author=current_user,
deckid=request.json['master']).first()
source = Deck.query.filter_by(author=current_user,
deckid=request.json['source']).first()
deckid = uuid.uuid4().hex
branch = Deck(deckid=deckid,
name=master.name,
branch_name=f"#{len(master.branches) + 1}"
if master.branches else "#1",
author_public_name=source.author_public_name,
description=source.description,
author=current_user,
inventory_type='',
tags=[],
master=master.deckid,
used_in_inventory={},
cards=source.cards)
branches = master.branches.copy() if master.branches else []
branches.append(deckid)
master.branches = branches
if not master.branch_name:
master.branch_name = 'Original'
db.session.add(branch)
db.session.commit()
return jsonify({
'master': master.deckid,
'source': source.deckid,
'deckid': deckid,
})
else:
        return jsonify({'error': 'Not logged in.'})
@app.route('/api/branch/remove', methods=['POST'])
def removeBranch():
if current_user.is_authenticated:
try:
d = Deck.query.filter_by(author=current_user,
deckid=request.json['deckid']).first()
if d.master:
master = Deck.query.filter_by(author=current_user,
deckid=d.master).first()
branches = master.branches.copy()
branches.remove(d.deckid)
master.branches = branches
db.session.delete(d)
db.session.commit()
return jsonify({'branch removed': request.json['deckid']})
else:
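                # The deck being removed is itself a master with branches: promote the most
                # recent branch (branches[-1]) to be the new master, re-point the remaining
                # branches at it, and then delete the requested deck.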
j = Deck.query.filter_by(author=current_user,
deckid=d.branches[-1]).first()
branches = d.branches.copy()
branches.remove(j.deckid)
j.branches = branches
for i in branches:
k = Deck.query.filter_by(author=current_user,
deckid=i).first()
k.master = j.deckid
j.master = None
db.session.delete(d)
db.session.commit()
return jsonify({'branch removed': request.json['deckid']})
except Exception:
return jsonify({'error': 'idk'})
else:
        return jsonify({'error': 'Not logged in.'})
@app.route('/api/decks/clone', methods=['POST'])
def cloneDeck():
if 'deck' in request.json:
deck = request.json['deck']
cards = {}
for i in deck['crypt']:
cards[i] = deck['crypt'][i]['q']
for i in deck['library']:
cards[i] = deck['library'][i]['q']
deckid = uuid.uuid4().hex
d = Deck(deckid=deckid,
name=f"{deck['name']} [by {deck['author']}]",
author_public_name=deck['author'],
description=deck['description'],
author=current_user,
inventory_type='',
tags=[],
used_in_inventory={},
cards=cards)
db.session.add(d)
db.session.commit()
return jsonify({
'deck cloned': request.json['deckname'],
'deckid': deckid
})
elif request.json['src'] == 'twd':
with open("twdDecksById.json", "r") as twdDecks_file:
twdDecks = json.load(twdDecks_file)
deck = twdDecks[request.json['target']]
cards = {}
for i in deck['crypt']:
cards[i] = deck['crypt'][i]['q']
for i in deck['library']:
cards[i] = deck['library'][i]['q']
description = 'Date: ' + deck['date'] + '\n'
description += 'Players: ' + str(deck['players']) + '\n'
description += 'Event: ' + deck['event'] + '\n'
description += 'Location: ' + deck['location'] + '\n'
if deck['description']:
description += '\n' + deck['description']
deckid = uuid.uuid4().hex
d = Deck(deckid=deckid,
name=f"{deck['name']} [by {deck['player']}]",
author_public_name=deck['player'],
description=description,
author=current_user,
inventory_type='',
tags=['twd'],
used_in_inventory={},
cards=cards)
db.session.add(d)
db.session.commit()
return jsonify({
'deck cloned': request.json['deckname'],
'deckid': deckid
})
elif request.json['src'] == 'precons':
set, precon = request.json['target'].split(':')
with open("preconDecks.json", "r") as precons_file:
precon_decks = json.load(precons_file)
deck = precon_decks[set][precon]
cards = {}
for i in deck:
cards[i] = deck[i]
deckid = uuid.uuid4().hex
d = Deck(deckid=deckid,
name=f"Preconstructed {set}:{precon}",
author_public_name='VTES Team',
description='',
author=current_user,
inventory_type='',
tags=['precon'],
used_in_inventory={},
cards=cards)
db.session.add(d)
db.session.commit()
return jsonify({
'deck cloned': request.json['deckname'],
'deckid': deckid
})
else:
targetDeck = Deck.query.filter_by(
deckid=request.json['target']).first()
deckid = uuid.uuid4().hex
d = Deck(deckid=deckid,
name=request.json['deckname'],
author_public_name=request.json['author'],
description='',
author=current_user,
inventory_type='',
tags=[],
used_in_inventory={},
cards=targetDeck.cards)
db.session.add(d)
db.session.commit()
return jsonify({
'deck cloned': request.json['deckname'],
'deckid': deckid,
})
@app.route('/api/decks/import', methods=['POST'])
def importDeck():
if current_user.is_authenticated:
try:
[name, author, description,
cards] = deckImport(request.json['deckText'])
if len(cards) > 0:
deckid = uuid.uuid4().hex
d = Deck(deckid=deckid,
name=name,
author_public_name=author,
description=description,
author=current_user,
inventory_type='',
tags=[],
used_in_inventory={},
cards=cards)
db.session.add(d)
db.session.commit()
return jsonify({'deckid': deckid})
            return jsonify({'error': 'Cannot import this deck.'})
except Exception:
pass
else:
        return jsonify({'error': 'Not logged in.'})
@app.route('/api/decks/export', methods=['POST'])
def deckExportRoute():
try:
if request.json['deckid'] == 'all' and current_user.is_authenticated:
decks = Deck.query.filter_by(author=current_user).all()
result = deckExportAll(decks, request.json['format'])
return jsonify(result)
elif request.json['src'] == 'twd':
deckid = request.json['deckid']
with open("twdDecksById.json", "r") as twdDecks_file:
twdDecks = json.load(twdDecks_file)
deck = twdDecks[deckid]
comments = deck['description']
deck['description'] = 'Date: ' + deck['date'] + '\n'
deck['description'] += 'Players: ' + str(
deck['players']) + '\n'
deck['description'] += 'Event: ' + deck['event'] + '\n'
deck['description'] += 'Location: ' + deck['location'] + '\n'
deck['cards'] = {}
for i in deck['crypt']:
deck['cards'][i] = deck['crypt'][i]['q']
for i in deck['library']:
deck['cards'][i] = deck['library'][i]['q']
if comments:
deck['description'] += '\n' + comments
deck['author'] = deck['player']
result = deckExport(deck, request.json['format'])
return jsonify(result)
elif request.json['src'] == 'precons':
set, precon = request.json['deckid'].split(':')
with open("preconDecks.json", "r") as precons_file:
precon_decks = json.load(precons_file)
d = precon_decks[set][precon]
deck = {
'cards': d,
'name': f"Preconstructed {set}:{precon}",
'author': 'VTES Publisher',
'description': 'Preconstructed deck',
}
result = deckExport(deck, request.json['format'])
return jsonify(result)
elif request.json['src'] == 'shared':
deck = request.json['deck']
result = deckExport(deck, request.json['format'])
return jsonify(result)
elif request.json['src'] == 'my':
d = Deck.query.filter_by(deckid=request.json['deckid']).first()
deck = {
'cards': d.cards,
'name': d.name,
'author': d.author.public_name,
'branch_name': d.branch_name,
'description': d.description,
}
result = deckExport(deck, request.json['format'])
return jsonify(result)
except Exception:
pass
@app.route('/api/decks/proxy', methods=['POST'])
def deckProxyRoute():
try:
return deckProxy(request.json['cards'])
except Exception:
pass
@app.route('/api/decks/remove', methods=['POST'])
def removeDeck():
if current_user.is_authenticated:
try:
d = Deck.query.filter_by(author=current_user,
deckid=request.json['deckid']).first()
if d.branches:
for i in d.branches:
j = Deck.query.filter_by(author=current_user,
deckid=i).first()
db.session.delete(j)
if d.master:
j = Deck.query.filter_by(author=current_user,
deckid=d.master).first()
db.session.delete(j)
db.session.delete(d)
db.session.commit()
return jsonify({'deck removed': request.json['deckid']})
except Exception:
return jsonify({'error': 'idk'})
else:
        return jsonify({'error': 'Not logged in.'})
@app.route('/api/register', methods=['POST'])
def register():
if current_user.is_authenticated:
return jsonify({'already logged as:': current_user.username})
try:
user = User(
username=request.json['username'],
public_name=request.json['username'],
inventory={},
)
user.set_password(request.json['password'])
db.session.add(user)
db.session.commit()
login_user(user)
return jsonify({'registered as': user.username})
except Exception:
abort(400)
@app.route('/api/login', methods=['GET', 'POST'])
def login():
if request.method == 'GET':
if current_user.is_authenticated:
return jsonify({
'username': current_user.username,
'email': current_user.email,
'public_name': current_user.public_name,
})
else:
return jsonify({'username': ''})
elif request.method == 'POST':
try:
user = User.query.filter_by(
username=request.json['username']).first()
if user is None or not user.check_password(
request.json['password']):
return jsonify({'error': 'invalid username or password'}), 401
login_user(user, remember=request.json['remember'])
return jsonify({'logged in as': current_user.username})
except KeyError:
pass
@app.route('/api/account', methods=['POST'])
def account():
if current_user.is_authenticated:
try:
if (request.json['publicName']):
current_user.public_name = request.json['publicName']
db.session.commit()
return jsonify('public name changed')
except Exception:
pass
try:
if (request.json['email']) and current_user.check_password(
request.json['password']):
current_user.email = request.json['email']
db.session.commit()
return jsonify('email changed')
except Exception:
pass
try:
if (request.json['newPassword']) and current_user.check_password(
request.json['password']):
current_user.set_password(request.json['newPassword'])
db.session.commit()
return jsonify('password changed')
except Exception:
pass
else:
return jsonify({'error': 'Not logged in'})
@app.route('/api/account/remove', methods=['POST'])
def removeAccount():
if current_user.is_authenticated and current_user.check_password(
request.json['password']):
try:
db.session.delete(current_user)
db.session.commit()
return jsonify({'account removed': current_user.username})
except Exception:
pass
else:
        return jsonify({'error': 'Not logged in or wrong password.'})
@app.route('/api/logout')
def logout():
try:
user = current_user.username
logout_user()
return jsonify({'logged out from': user})
except AttributeError:
return jsonify({'error': 'not logged'})
@app.route('/api/search/twd', methods=['POST'])
def searchTwdRoute():
result = searchTwd(request)
if 'matchInventory' in request.json:
if result != 400:
result = matchInventory(request.json['matchInventory'],
current_user.inventory, result)
else:
result = matchInventory(request.json['matchInventory'],
current_user.inventory)
if result != 400:
return jsonify(result)
else:
abort(400)
@app.route('/api/twd/locations', methods=['GET'])
def getLocations():
with open("twdLocations.json", "r") as twdLocations_file:
return jsonify(json.load(twdLocations_file))
@app.route('/api/twd/players', methods=['GET'])
def getPlayers():
with open("twdPlayers.json", "r") as twdPlayers_file:
return jsonify(json.load(twdPlayers_file))
@app.route('/api/twd/new/<int:quantity>', methods=['GET'])
def getNewTwd(quantity):
with open("twdDecks.json", "r") as twd_file:
twda = json.load(twd_file)
decks = []
for i in range(quantity):
deck = sanitizeTwd(twda[i])
decks.append(deck)
return jsonify(decks)
@app.route('/api/twd/random/<int:quantity>', methods=['GET'])
def getRandomTwd(quantity):
with open("twdDecks.json", "r") as twd_file:
twda = json.load(twd_file)
decks = []
max_id = len(twda) - 1
counter = 0
while counter < quantity:
counter += 1
deck = twda[round(random() * max_id)]
decks.append(sanitizeTwd(deck))
return jsonify(decks)
@app.route('/api/search/crypt', methods=['POST'])
def searchCryptRoute():
result = searchCrypt(request)
if result != 400:
return jsonify(result)
else:
abort(400)
@app.route('/api/search/library', methods=['POST'])
def searchLibraryRoute():
result = searchLibrary(request)
if result != 400:
return jsonify(result)
else:
abort(400)
@app.route('/api/search/quick', methods=['POST'])
def searchQuickRoute():
result = []
crypt = searchCrypt(request)
if crypt != 400:
result += crypt
library = searchLibrary(request)
if library != 400:
result += library
if result:
return jsonify(result)
else:
abort(400)
|
import math
import numpy as np
from helpers.agent8 import forward_execution, parent_to_child_dict
from src.Agent import Agent
class Agent8(Agent):
def __init__(self):
super().__init__()
# Override execution method of Agent class
def execution(self, full_maze: np.array, target_position=None):
percent_of_cells_to_examine = 15 # what % of cells in path from A* to examine with highest probabilities
# get path from start(current_position) to goal(current_estimated_goal)
# print("Parents:", self.parents)
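        # NOTE (inferred from the loop below, not stated in the original): parent_to_child_dict
        # is assumed to invert the A* parent pointers into a next-step map along the path, with
        # the goal mapping to itself; the while loop relies on that fixed point to terminate.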
temp_child = parent_to_child_dict(self.parents, self.current_estimated_goal)
# print("Child:", temp_child)
# number of cells to examine from the whole path by A*
num_cells_to_examine = math.ceil((percent_of_cells_to_examine * len(temp_child)) / 100)
temp_current_pos = self.current_position
        # list of (probability, cell) pairs along the path, to be sorted in descending order of the probability of containing the target
path_list = list()
# if len(temp_child) == 1 and temp_child == {(0, 0): (0, 0)}: # Condition for start element only
# # print("here")
# path_list.append([self.maze[temp_current_pos[0]][temp_current_pos[1]].probability_of_containing_target *
# (1 - self.maze[temp_current_pos[0]][temp_current_pos[1]].false_negative_rate),
# temp_current_pos])
"""loop to store the prob of containing target with the indices in list of list from start
to end of path returned by A"""
while temp_current_pos != temp_child[temp_current_pos]:
# print("here") (1 - self.maze[temp_current_pos[0]][temp_current_pos[1]].false_negative_rate)
path_list.append([self.maze[temp_current_pos[0]][temp_current_pos[1]].probability_of_containing_target,
temp_current_pos])
temp_current_pos = temp_child[temp_current_pos]
path_list.append([self.maze[temp_current_pos[0]][temp_current_pos[1]].probability_of_containing_target *
(1 - self.maze[temp_current_pos[0]][temp_current_pos[1]].false_negative_rate),
temp_current_pos])
# Sort the cells w.r.t to their prob of containing target in descending order
path_list.sort(reverse=True)
# set containing cells to examine in the forward execution
cells_to_examine = set()
count = 0 # keeps track of the index in the path_list
# print("Dict len:", len(temp_child))
print('Target position', target_position)
print('Current path', path_list)
for element in path_list:
print(self.maze[element[1][0]][element[1][1]].is_blocked, full_maze[element[1][0]][element[1][1]], end=" ")
print()
# print("Cells to examine:", num_cells_to_examine)
print('Children', temp_child)
# loop to store num_cells_to_examine in set which will be used in forward execution
while num_cells_to_examine > 0:
cells_to_examine.add(path_list[count][1])
count += 1
num_cells_to_examine -= 1
self.num_astar_calls += 1
self.num_bumps += 1
# make changes in forward_execution()
current_path, num_backtracks, truth_value, examinations = forward_execution(self.maze, full_maze,
self.current_position,
self.current_estimated_goal,
self.parents, cells_to_examine,
target_position,
self.global_threshold)[:4]
self.current_position = current_path[-1]
self.final_paths.append(current_path)
self.num_examinations += examinations
self.num_backtracks += num_backtracks
return truth_value
|
# Implement a function that takes as input three variables, and returns the largest of the three. Do this without
# using the Python max() function!
#
# The goal of this exercise is to think about some internals that Python normally takes care of for us. All you need
# is some variables and if statements!
def largest(a, b, c):
    if a >= b and a >= c:
        return a
    elif b >= a and b >= c:
        return b
    else:
        return c
print(f"Maximum Value is {largest(31, 9, 17)}")
|
from threading import Thread
import time
from typing import Tuple
from room import Room
from structsock import StructuredSocket, PeerDisconnect
from service import format_config, log, Signal, hexdump
from result import Status, StatusCode
import elite, msgpack, queue
# global configuration
config = {
"MinBetween": 1,
"MaxMessageLength": 0xFF,
"BacklogSize": 5
}
class Session:
'Represents a session to communicate with the client.'
def __init__(self, client: StructuredSocket, server: 'Server'):
'Initializes with a client and a server.'
self._user = None
self._scheme = elite.getscheme()
self._client = client
self._server = server
self._room = None
self._lastTimestamp = 0
def lifecycle(self, multithread: bool=False) -> None:
'Runs the lifecycle, either in this thread or in another.'
if multithread:
t = Thread(name='Session-{}:{}'.format(*self._client.getpeername()), target=self.lifecycle)
t.start()
return
try:
self._handshake()
self._main()
except PeerDisconnect: pass
        log("Session ended.")
if self._room:
self._room.userLeft(self._user)
            self._room.metaMessage('{} left the room.'.format(self._user))
self._room = None
self._server.sessionExit(self)
def _send(self, status: Status, encrypt: bool=True):
'Sends the specific status.'
data = status.pack()
if encrypt:
data = self._scheme.encrypt(data)
self._client.send(data)
def _handshake(self) -> None:
'Handshakes with the client.'
while True:
key = self._client.recv()
try:
self._scheme.importBinaryKey(key)
self._send(Status.ok(self._scheme.exportBinaryKey()), False)
break
except elite.secret.CurveKindMismatch:
self._send(
Status(StatusCode.SC_KEY_INVALID), False
)
        log("Key negotiation complete. Shared secret:\n{}".format(hexdump(self._scheme.secret().hex())))
def _main(self) -> None:
'The main loop of the session.'
while True:
data = self._client.recv()
try:
data = self._scheme.decrypt(data)
except elite.cipher.AuthorizationCodeInvalid:
self._send(
Status(StatusCode.SC_UNAUTHORIZED)
)
continue
data = msgpack.unpackb(data)
op = data['type']
if op == 'login':
self._user = data['user']
self._send(Status.ok())
elif op == 'join':
if self._user is None:
self._send(Status(StatusCode.SC_NEEDS_LOGIN))
else:
self._room = self._server.getRoom(data['room'])
if self._room.userJoined(self._user):
self._room.onMessage.register(self._message)
self._send(Status.ok())
                        self._room.metaMessage('{} joined the room.'.format(self._user))
else:
self._send(Status(StatusCode.SC_DUPLICATE_USER))
elif op == 'send':
if self._room is None:
self._send(Status(StatusCode.SC_WANDER_ROOM))
elif time.time() - self._lastTimestamp <= config.get("MinBetween", 1):
self._send(Status(StatusCode.SC_TOO_FREQUENT))
else:
message = data['message']
signature = data['signature']
if not self._scheme.verify(message.encode(), signature):
self._send(Status(StatusCode.SC_UNAUTHORIZED))
elif len(message) >= config.get("MaxMessageLength", 0xFF):
self._send(Status(StatusCode.SC_MSG_TOO_LONG))
else:
self._room.messageReceived(self._user, message)
self._lastTimestamp = time.time()
self._send(Status.ok())
elif op == 'leave':
if self._room is None:
self._send(Status(StatusCode.SC_WANDER_ROOM))
else:
self._room.userLeft(self._user)
self._room.onMessage.deregister(self._message)
                    self._room.metaMessage('{} left the room.'.format(self._user))
self._room = None
self._send(Status.ok())
elif op == 'quit':
if self._room is not None:
self._room.userLeft(self._user)
self._room.onMessage.deregister(self._message)
                    self._room.metaMessage('{} left the room.'.format(self._user))
self._room = None
self._client.close()
break
else:
self._send(Status(StatusCode.SC_INVALID_CMD))
def _message(self, user: str, message: str) -> None:
'Called when a new message was sent.'
self._send(Status.message((user, message)))
class Server:
'Represents the server to manage the connections.'
def __init__(self, port: int):
'Initializes the server with specific port.'
self._sock = StructuredSocket()
self._sock.bind(('0.0.0.0', port))
self._port = port
self._sessions = set()
self._rooms = {}
def sessionExit(self, session: Session):
'Called when a session exits.'
self._sessions.remove(session)
def serve(self, multithread: bool=False) -> None:
'Launches the server.'
if multithread:
t = Thread(name='ServerDaemon', target=self.serve, daemon=True)
t.start()
return
        log('Using configuration: {}'.format(format_config(config)))
        log('Listening on 0.0.0.0 port {}'.format(self._port))
self._sock.listen(config.get('BacklogSize', 5))
while True:
client, addr = self._sock.accept()
            log('Client {}:{} connected'.format(*addr))
session = Session(client, self)
self._sessions.add(session)
session.lifecycle(True)
def getRoom(self, name: str) -> Room:
'Gets a room with specific name.'
try:
return self._rooms[name]
except KeyError:
room = Room(name)
self._rooms[name] = room
return room
class Client:
'Represents a client.'
def __init__(self, host: str, port: int):
'Initializes with specific host and port.'
self._address = (host, port)
self._scheme = elite.getscheme()
self._sock = StructuredSocket()
self.onMessage = Signal()
self.onFailure = Signal()
self._queue = queue.Queue()
@property
def address(self) -> Tuple[str, int]:
'Gets the address of the peer.'
return self._address
@property
def scheme(self) -> elite.scheme.ECCScheme:
'Gets the security scheme.'
return self._scheme
def connect(self) -> None:
self._sock.connect(self._address)
while True:
self._sock.send(self._scheme.exportBinaryKey())
stat = Status.unpack(self._sock.recv())
if stat.success:
self._scheme.importBinaryKey(stat.data)
break
self._sock.setblocking(False)
def enqueue(self, operation: dict) -> None:
'Pushes an operation to the message queue.'
self._queue.put(operation)
def enterRoom(self, room: str) -> None:
'Enters a room with specific name.'
self.enqueue({
'type': 'join',
'room': room
})
def leaveRoom(self) -> None:
'Leaves the current room.'
self.enqueue({
'type': 'leave'
})
def login(self, user: str) -> None:
'Logs in as specific user.'
self.enqueue({
'type': 'login',
'user': user
})
def compose(self, message: str) -> None:
'Composes a message and send it.'
signature = self._scheme.sign(message.encode())
self.enqueue({
'type': 'send',
'message': message,
'signature': signature
})
def close(self) -> None:
'Closes this client.'
self.enqueue({
'type': 'quit'
})
def mainLoop(self, multithread: bool=False) -> None:
'Runs the main loop.'
if multithread:
t = Thread(name='ClientMainLoop', target=self.mainLoop, daemon=True)
t.start()
return
try:
while True:
try:
stat = Status.unpack(
self._scheme.decrypt(self._sock.recv())
)
if stat.code == StatusCode.SC_NEW_MESSAGE:
self.onMessage(*stat.data)
elif not stat.success:
self.onFailure(stat.code)
except BlockingIOError:
pass
try:
operation = self._queue.get_nowait()
data = msgpack.packb(operation)
data = self._scheme.encrypt(data)
self._sock.send(data)
except queue.Empty:
pass
except PeerDisconnect:
pass
finally:
self._sock.close()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from collections import defaultdict, deque
def is_value(x):
return x.isdigit() or (x[0] == '-' and x[1:].isdigit())
class Program(object):
def __init__(self, instructions, pid):
self.registers = defaultdict(lambda: 0)
self.registers['p'] = pid
self.last_played_freq = None
self.n_sent = 0
self.received = deque()
self.instructions = instructions
self.duet_bind = None
self.is_waiting = False
def bind(self, other):
self.duet_bind = other
self.duet_bind.duet_bind = self
def is_deadlock(self):
if self.is_waiting and not self.received and \
self.duet_bind.is_waiting and not self.duet_bind.received:
return True
return False
def __getitem__(self, item):
return int(item) if is_value(item) else self.registers[item]
def run(self):
n = 0
while 0 <= n < len(self.instructions):
parts = self.instructions[n].split(' ')
if parts[0] == 'snd':
self.n_sent += 1
value = self[parts[1]]
self.last_played_freq = value
if self.duet_bind:
self.duet_bind.received.appendleft(value)
elif parts[0] == 'set':
self.registers[parts[1]] = self[parts[2]]
elif parts[0] == 'add':
self.registers[parts[1]] += self[parts[2]]
elif parts[0] == 'mul':
self.registers[parts[1]] *= self[parts[2]]
elif parts[0] == 'mod':
self.registers[parts[1]] %= self[parts[2]]
elif parts[0] == 'rcv':
if self.duet_bind:
if self.received:
self.registers[parts[1]] = self.received.pop()
else:
self.is_waiting = True
yield
if self.received:
self.is_waiting = False
self.registers[parts[1]] = self.received.pop()
else:
# Sleep one last time. Deadlock has occurred.
yield
else:
if self.registers[parts[1]] != 0:
yield self.last_played_freq
elif parts[0] == 'jgz':
condition = self[parts[1]]
if condition > 0:
n += self[parts[2]]
continue
else:
raise ValueError(str(parts))
n += 1
def solve_1(instructions):
p = Program(instructions, 0)
out = next(p.run())
return out
def solve_2(instructions):
p0 = Program(instructions, 0)
p1 = Program(instructions, 1)
p0.bind(p1)
p0_run = p0.run()
p1_run = p1.run()
while not p0.is_deadlock():
next(p0_run)
next(p1_run)
return p1.n_sent
def main():
from _aocutils import ensure_data
ensure_data(18)
with open('input_18.txt', 'r') as f:
data = f.read().strip()
print("Part 1: {0}".format(solve_1(data.splitlines())))
print("Part 2: {0}".format(solve_2(data.splitlines())))
if __name__ == '__main__':
main()
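
# A small hand-checked example (not the puzzle input from input_18.txt): tracing these
# instructions through Program.run(), the first recovered frequency, i.e. the value
# returned by solve_1, is 4.
#
# example = [
#     "set a 1", "add a 2", "mul a a", "mod a 5",
#     "snd a", "set a 0", "rcv a", "jgz a -1",
#     "set a 1", "jgz a -2",
# ]
# assert solve_1(example) == 4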
|
# linear regression of divorce rate on standardized marriage rate (WaffleDivorce data)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import norm, uniform, multivariate_normal
from scipy.interpolate import griddata
import pymc3 as pm
d = pd.read_csv('../../rethinking/data/WaffleDivorce.csv', sep=';', header=0)
plt.plot(d.Marriage, d.Divorce, 'C0o')
plt.xlabel('marriage')
plt.ylabel('divorce')
plt.show()
d['Marriage_s'] = (d.Marriage - np.mean(d.Marriage)) / np.std(d.Marriage)
with pm.Model() as model:
sigma = pm.Uniform(name='sigma', lower=0, upper=10)
bA = pm.Normal(name='bA', mu=0, sd=1)
a = pm.Normal(name='a', mu=10, sd=10)
mu = pm.Deterministic('mu', a + bA * d.Marriage_s)
divorce = pm.Normal(name='divorce', mu=mu, sd=sigma, observed=d.Divorce)
trace_model = pm.sample(1000, tune=1000)
pm.traceplot(trace_model)
plt.show()
mu_mean = trace_model['mu']
mu_hpd = pm.hpd(mu_mean, alpha=0.11)
plt.plot(d.Marriage_s, d.Divorce, 'C0o')
plt.plot(d.Marriage_s, mu_mean.mean(0), 'C2')
idx = np.argsort(d.Marriage_s)
plt.fill_between(d.Marriage_s[idx], mu_hpd[:, 0][idx], mu_hpd[:, 1][idx], color='C2', alpha=0.25)
# plt.fill_between(d.Marriage_s, mu_hpd[:, 0], mu_hpd[:, 1], color='C2', alpha=0.25)
plt.xlabel('standardized marriage rate')
plt.ylabel('divorce')
plt.show()
samples = pm.sample_ppc(trace_model, 1000, model)
mu_mean = samples['divorce']
mu_hpd = pm.hpd(mu_mean, alpha=0.11)
plt.plot(d.Marriage_s, d.Divorce, 'C0o')
idx = np.argsort(d.Marriage_s)
plt.plot(d.Marriage_s[idx], mu_mean.mean(0)[idx], 'C2')
# plt.plot(d.Marriage_s, mu_mean.mean(0), 'C2')
plt.fill_between(d.Marriage_s[idx], mu_hpd[:, 0][idx], mu_hpd[:, 1][idx], color='C2', alpha=0.25)
# plt.fill_between(d.Marriage_s, mu_hpd[:, 0], mu_hpd[:, 1], color='C2', alpha=0.25)
plt.xlabel('standardized marriage rate')
plt.ylabel('divorce')
plt.show()
|
# -*- coding: utf-8 -*-
from django.conf import settings
from django.forms.forms import BoundField
from django.forms.models import formset_factory
from django.template import Context
try:
from django.template.loader import get_template_from_string
except ImportError:
from django.template import Engine
def get_template_from_string(s):
return Engine().from_string(s)
from .base import CrispyTestCase
from .forms import TestForm
from crispy_forms.templatetags.crispy_forms_field import crispy_addon
class TestBasicFunctionalityTags(CrispyTestCase):
def test_as_crispy_errors_form_without_non_field_errors(self):
template = get_template_from_string(u"""
{% load crispy_forms_tags %}
{{ form|as_crispy_errors }}
""")
form = TestForm({'password1': "god", 'password2': "god"})
form.is_valid()
c = Context({'form': form})
html = template.render(c)
self.assertFalse("errorMsg" in html or "alert" in html)
def test_as_crispy_errors_form_with_non_field_errors(self):
template = get_template_from_string(u"""
{% load crispy_forms_tags %}
{{ form|as_crispy_errors }}
""")
form = TestForm({'password1': "god", 'password2': "wargame"})
form.is_valid()
c = Context({'form': form})
html = template.render(c)
self.assertTrue("errorMsg" in html or "alert" in html)
self.assertTrue("<li>Passwords dont match</li>" in html)
self.assertFalse("<h3>" in html)
def test_crispy_filter_with_form(self):
template = get_template_from_string(u"""
{% load crispy_forms_tags %}
{{ form|crispy }}
""")
c = Context({'form': TestForm()})
html = template.render(c)
self.assertTrue("<td>" not in html)
self.assertTrue("id_is_company" in html)
self.assertEqual(html.count('<label'), 7)
def test_crispy_filter_with_formset(self):
template = get_template_from_string(u"""
{% load crispy_forms_tags %}
{{ testFormset|crispy }}
""")
TestFormset = formset_factory(TestForm, extra=4)
testFormset = TestFormset()
c = Context({'testFormset': testFormset})
html = template.render(c)
self.assertEqual(html.count('<form'), 0)
# Check formset management form
self.assertTrue('form-TOTAL_FORMS' in html)
self.assertTrue('form-INITIAL_FORMS' in html)
self.assertTrue('form-MAX_NUM_FORMS' in html)
def test_classes_filter(self):
template = get_template_from_string(u"""
{% load crispy_forms_field %}
{{ testField|classes }}
""")
test_form = TestForm()
test_form.fields['email'].widget.attrs.update({'class': 'email-fields'})
c = Context({'testField': test_form.fields['email']})
html = template.render(c)
self.assertTrue('email-fields' in html)
def test_crispy_field_and_class_converters(self):
if hasattr(settings, 'CRISPY_CLASS_CONVERTERS'):
template = get_template_from_string(u"""
{% load crispy_forms_field %}
{% crispy_field testField 'class' 'error' %}
""")
test_form = TestForm()
field_instance = test_form.fields['email']
bound_field = BoundField(test_form, field_instance, 'email')
c = Context({'testField': bound_field})
html = template.render(c)
self.assertTrue('error' in html)
self.assertTrue('inputtext' in html)
def test_crispy_addon(self):
test_form = TestForm()
field_instance = test_form.fields['email']
bound_field = BoundField(test_form, field_instance, 'email')
if self.current_template_pack == 'bootstrap':
# prepend tests
self.assertIn("input-prepend", crispy_addon(bound_field, prepend="Work"))
self.assertNotIn("input-append", crispy_addon(bound_field, prepend="Work"))
# append tests
self.assertNotIn("input-prepend", crispy_addon(bound_field, append="Primary"))
self.assertIn("input-append", crispy_addon(bound_field, append="Secondary"))
# prepend and append tests
self.assertIn("input-append", crispy_addon(bound_field, prepend="Work", append="Primary"))
self.assertIn("input-prepend", crispy_addon(bound_field, prepend="Work", append="Secondary"))
        elif self.current_template_pack == 'bootstrap3':
self.assertIn("input-group-addon", crispy_addon(bound_field, prepend="Work", append="Primary"))
self.assertIn("input-group-addon", crispy_addon(bound_field, prepend="Work", append="Secondary"))
# errors
with self.assertRaises(TypeError):
crispy_addon()
crispy_addon(bound_field)
|
from builtins import range
import pandas as pd # type: ignore
from langdetect import detect
from datamart_isi.profilers.helpers.feature_compute_hih import is_Decimal_Number
import string
import numpy as np # type: ignore
import re
# In total, this file currently computes 16 types of features.
def compute_missing_space(column, feature, feature_list):
"""
    NOTE: this function may modify the input column in place: it trims all leading and trailing whitespace.
    If a cell is empty after trimming (i.e. it contained only whitespace), it is set to NaN (missing value),
    and both leading_space and trailing_space are incremented.
    (1). trim and count the leading and trailing spaces where applicable;
         note that more than one leading (trailing) space in a cell still counts as 1.
    (2). compute the number of missing values for the given series (column); store the result into (feature).
"""
    # if any of these features is requested, compute all of them; the extra cost is negligible
if (("number_of_values_with_leading_spaces" in feature_list) or
("ratio_of_values_with_leading_spaces" in feature_list) or
("number_of_values_with_trailing_spaces" in feature_list) or
("ratio_of_values_with_trailing_spaces" in feature_list)):
leading_space = 0
trailing_space = 0
for id, cell in column.iteritems():
if (pd.isnull(cell)):
continue
change = False
trim_leading_cell = re.sub(r"^\s+", "", cell)
if (trim_leading_cell != cell):
leading_space += 1
change = True
trim_trailing_cell = re.sub(r"\s+$", "", trim_leading_cell)
if ((trim_trailing_cell != trim_leading_cell) or len(trim_trailing_cell) == 0):
trailing_space += 1
change = True
# change the origin value in data
if change:
if (len(trim_trailing_cell) == 0):
column[id] = np.nan
else:
column[id] = trim_trailing_cell
feature["number_of_values_with_leading_spaces"] = leading_space
feature["ratio_of_values_with_leading_spaces"] = leading_space / column.size
feature["number_of_values_with_trailing_spaces"] = trailing_space
feature["ratio_of_values_with_trailing_spaces"] = trailing_space / column.size
def compute_length_distinct(column, feature, delimiter, feature_list):
"""
    two tasks, combined because they share some overlapping computation:
(1). compute the mean and std of length for each cell, in a given series (column);
mean and std precision: 5 after point
missing value (NaN): treated as does not exist
(2). also compute the distinct value and token:
number: number of distinct value or tokens, ignore the NaN
ratio: number/num_total, ignore all NaN
"""
# (1)
column = column.dropna() # get rid of all missing value
if (column.size == 0): # if the column is empty, do nothing
return
# 1. for character
# lenth_for_all = column.apply(len)
# feature["string_length_mean"] = lenth_for_all.mean()
# feature["string_length_std"] = lenth_for_all.std()
# (2)
if (("number_of_distinct_values" in feature_list) or
("ratio_of_distinct_values" in feature_list)):
feature["number_of_distinct_values"] = column.nunique()
feature["ratio_of_distinct_values"] = feature["number_of_distinct_values"] / float(column.size)
if (("number_of_distinct_tokens" in feature_list) or
("ratio_of_distinct_tokens" in feature_list)):
tokenlized = pd.Series([token for lst in column.str.split().dropna() for token in lst]) # tokenlized Series
lenth_for_token = tokenlized.apply(len)
# feature["token_count_mean"] = lenth_for_token.mean()
# feature["token_count_std"] = lenth_for_token.std()
feature["number_of_distinct_tokens"] = tokenlized.nunique()
feature["ratio_of_distinct_tokens"] = feature["number_of_distinct_tokens"] / float(tokenlized.size)
def compute_lang(column, feature):
"""
    compute which language(s) a given series (column) uses; store the result into (feature).
    does not apply to numbers
    PROBLEMS:
    1. not accurate when the string contains digits
    2. not accurate when the string is too short
    these special cases may need to be handled separately
"""
column = column.dropna() # ignore all missing value
if (column.size == 0): # if the column is empty, do nothing
return
feature["natural_language_of_feature"] = list()
language_count = {}
for cell in column:
if cell.isdigit() or is_Decimal_Number(cell):
continue
else:
# detecting language
try:
language = detect(cell)
if language in language_count:
language_count[language] += 1
else:
language_count[language] = 1
except Exception as e:
print("there is something may not be any language nor number: {}".format(cell))
pass
languages_ordered = sorted(language_count, key=language_count.get, reverse=True)
for lang in languages_ordered:
lang_obj = {}
lang_obj['name'] = lang
lang_obj['count'] = language_count[lang]
feature["natural_language_of_feature"].append(lang_obj)
def compute_filename(column, feature):
"""
    compute the number of cells whose content might be a filename
"""
column = column.dropna() # ignore all missing value
filename_pattern = r"^\w+\.[a-z]{1,5}"
    num_filename = column.str.match(filename_pattern).sum()
feature["num_filename"] = num_filename
def compute_punctuation(column, feature, weight_outlier):
"""
    compute the statistical values related to punctuation; for details, see the format section of the README.
    does not apply to numbers (e.g. for the number 1.23, "." does not count as punctuation)
    weight_outlier: = number_of_sigma in function "helper_outlier_calcu"
"""
column = column.dropna() # get rid of all missing value
if (column.size == 0): # if the column is empty, do nothing
return
number_of_chars = sum(column.apply(len)) # number of all chars in column
num_chars_cell = np.zeros(column.size) # number of chars for each cell
puncs_cell = np.zeros([column.size, len(string.punctuation)],
dtype=int) # (number_of_cell * number_of_puncs) sized array
# step 1: pre-calculations
cell_id = -1
for cell in column:
cell_id += 1
num_chars_cell[cell_id] = len(cell)
# only counts puncs for non-number cell
if cell.isdigit() or is_Decimal_Number(cell):
continue
else:
counts_cell_punc = np.asarray(list(cell.count(c) for c in string.punctuation))
puncs_cell[cell_id] = counts_cell_punc
counts_column_punc = puncs_cell.sum(axis=0) # number of possible puncs in this column
cell_density_array = puncs_cell / num_chars_cell.reshape([column.size, 1])
puncs_density_average = cell_density_array.sum(axis=0) / column.size
# step 2: extract from pre-calculated data
# only create this feature when punctuations exist
if (sum(counts_column_punc) > 0):
feature["most_common_punctuations"] = list() # list of dict
# extract the counts to feature, for each punctuation
for i in range(len(string.punctuation)):
if (counts_column_punc[i] == 0): # if no this punctuation occur in the whole column, ignore
continue
else:
punc_obj = {}
punc_obj["punctuation"] = string.punctuation[i]
punc_obj["count"] = counts_column_punc[i]
punc_obj["ratio"] = counts_column_punc[i] / float(number_of_chars)
punc_obj["punctuation_density_aggregate"] = {"mean": puncs_density_average[i]}
# calculate outlier
outlier_array = helper_outlier_calcu(cell_density_array[:, i], weight_outlier)
# only one element in outlier
punc_obj["punctuation_density_outliers"] = [{"n": weight_outlier,
"count": sum(outlier_array)}]
feature["most_common_punctuations"].append(punc_obj)
# step 3: sort
feature["most_common_punctuations"] = sorted(feature["most_common_punctuations"], key=lambda k: k['count'],
reverse=True)
def helper_outlier_calcu(array, number_of_sigma):
"""
    input: array is a 1D numpy array, number_of_sigma is an integer.
    output: boolean array of the same size as the input; True -> outlier, False -> not an outlier
    outlier definition:
        a value that is not within mean +- (number_of_sigma * std) of the whole array
"""
mean = np.mean(array)
std = np.std(array)
upper_bound = mean + number_of_sigma * std
lower_bound = mean - number_of_sigma * std
outlier = (array > upper_bound) + (array < lower_bound)
return outlier
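# Minimal usage sketch (illustrative only, not part of the profiling pipeline):
if __name__ == "__main__":
    _demo = np.array([1.0, 1.1, 0.9, 1.0, 10.0])
    # with number_of_sigma=1, only the value 10.0 falls outside mean +- 1*std
    print(helper_outlier_calcu(_demo, 1))  # expected: only the last element is flagged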
|
from nose.tools import *
import pygraphviz as pgv
|
import textwrap
from typing import ForwardRef, List, Optional
import pytest
import databases
import ormar
import sqlalchemy
import strawberry
database = databases.Database("sqlite:///db.sqlite")
metadata = sqlalchemy.MetaData()
MasterRef = ForwardRef("Hero")
class City(ormar.Model):
class Meta:
database = database
metadata = metadata
id: Optional[int] = ormar.Integer(primary_key=True, default=None)
name: str = ormar.String(max_length=255)
population: int = ormar.Integer()
class Manager(ormar.Model):
class Meta:
database = database
metadata = metadata
id: Optional[int] = ormar.Integer(primary_key=True, default=None)
name: str = ormar.String(max_length=255)
class Team(ormar.Model):
class Meta:
database = database
metadata = metadata
id: int = ormar.Integer(primary_key=True)
name: str = ormar.String(index=True, max_length=255)
headquarters: Optional[str] = ormar.String(nullable=True, max_length=255)
manager: Manager = ormar.ForeignKey(
Manager, nullable=False, related_name="managed_teams"
)
referrers: List[Manager] = ormar.ManyToMany(Manager, related_name="referring_teams")
class Hero(ormar.Model):
class Meta:
database = database
metadata = metadata
id: int = ormar.Integer(primary_key=True)
name: str = ormar.String(index=True, max_length=255)
secret_name: str
age: Optional[int] = ormar.Integer(default=None, index=True, nullable=True)
master: Optional[MasterRef] = ormar.ForeignKey(
MasterRef, nullable=True, default=None
)
team: Optional[int] = ormar.ForeignKey(Team, nullable=True, related_name="heroes")
@pytest.fixture
def clear_types():
for model in (Team, Hero, Manager, City):
if hasattr(model, "_strawberry_type"):
delattr(model, "_strawberry_type")
def test_all_fields(clear_types):
@strawberry.experimental.pydantic.type(City, all_fields=True)
class CityType:
pass
@strawberry.type
class Query:
@strawberry.field
def city(self) -> CityType:
return CityType(id=1, name="Gotham", population=100000)
schema = strawberry.Schema(query=Query)
expected_schema = """
type CityType {
name: String!
population: Int!
id: Int
}
type Query {
city: CityType!
}
"""
assert str(schema) == textwrap.dedent(expected_schema).strip()
query = "{ city { name } }"
result = schema.execute_sync(query)
assert not result.errors
assert result.data["city"]["name"] == "Gotham"
def test_basic_type_field_list(clear_types):
@strawberry.experimental.pydantic.type(Team, fields=["name", "headquarters"])
class TeamType:
pass
@strawberry.type
class Query:
@strawberry.field
def team(self) -> TeamType:
return TeamType(name="hobbits", headquarters="The Shire")
schema = strawberry.Schema(query=Query)
expected_schema = """
type Query {
team: TeamType!
}
type TeamType {
name: String!
headquarters: String
}
"""
assert str(schema) == textwrap.dedent(expected_schema).strip()
query = "{ team { name } }"
result = schema.execute_sync(query)
assert not result.errors
assert result.data["team"]["name"] == "hobbits"
def test_one_to_one_optional(clear_types):
@strawberry.experimental.pydantic.type(Team, fields=["name"])
class TeamType:
pass
@strawberry.experimental.pydantic.type(Hero, fields=["team"])
class HeroType:
pass
@strawberry.type
class Query:
@strawberry.field
def hero(self) -> HeroType:
return HeroType(team=TeamType(name="Skii"))
schema = strawberry.Schema(query=Query)
expected_schema = """
type HeroType {
team: TeamType
}
type Query {
hero: HeroType!
}
type TeamType {
name: String!
}
"""
assert str(schema) == textwrap.dedent(expected_schema).strip()
query = "{ hero { team { name } } }"
result = schema.execute_sync(query)
assert not result.errors
assert result.data["hero"]["team"]["name"] == "Skii"
def test_one_to_one_required(clear_types):
@strawberry.experimental.pydantic.type(Manager, fields=["name"])
class ManagerType:
pass
@strawberry.experimental.pydantic.type(Team, fields=["manager"])
class TeamType:
pass
@strawberry.type
class Query:
@strawberry.field
def team(self) -> TeamType:
return TeamType(manager=ManagerType(name="Skii"))
schema = strawberry.Schema(query=Query)
expected_schema = """
type ManagerType {
name: String!
}
type Query {
team: TeamType!
}
type TeamType {
manager: ManagerType!
}
"""
assert str(schema) == textwrap.dedent(expected_schema).strip()
query = "{ team { manager { name } } }"
result = schema.execute_sync(query)
assert not result.errors
assert result.data["team"]["manager"]["name"] == "Skii"
def test_nested_type_unordered(clear_types):
@strawberry.experimental.pydantic.type(Hero, fields=["team"])
class HeroType:
pass
@strawberry.experimental.pydantic.type(Team, fields=["name"])
class TeamType:
pass
@strawberry.type
class Query:
@strawberry.field
def hero(self) -> HeroType:
return HeroType(team=TeamType(name="Skii"))
schema = strawberry.Schema(query=Query)
expected_schema = """
type HeroType {
team: TeamType
}
type Query {
hero: HeroType!
}
type TeamType {
name: String!
}
"""
assert str(schema) == textwrap.dedent(expected_schema).strip()
query = "{ hero { team { name } } }"
result = schema.execute_sync(query)
assert not result.errors
assert result.data["hero"]["team"]["name"] == "Skii"
def test_reverse_relation(clear_types):
@strawberry.experimental.pydantic.type(Team, fields=["heroes"])
class TeamType:
pass
@strawberry.experimental.pydantic.type(Hero, fields=["name"])
class HeroType:
pass
@strawberry.type
class Query:
@strawberry.field
def team(self) -> TeamType:
return TeamType(heroes=[HeroType(name="Skii"), HeroType(name="Chris")])
schema = strawberry.Schema(query=Query)
expected_schema = """
type HeroType {
name: String!
}
type Query {
team: TeamType!
}
type TeamType {
heroes: [HeroType]
}
"""
assert str(schema) == textwrap.dedent(expected_schema).strip()
query = "{ team { heroes { name } } }"
result = schema.execute_sync(query)
assert not result.errors
assert result.data["team"]["heroes"][0]["name"] == "Skii"
assert result.data["team"]["heroes"][1]["name"] == "Chris"
def test_all_fields_and_reverse_relation(clear_types):
@strawberry.experimental.pydantic.type(Manager, fields=["name"])
class ManagerType:
pass
@strawberry.experimental.pydantic.type(Team, all_fields=True)
class TeamType:
pass
@strawberry.experimental.pydantic.type(Hero, fields=["name"])
class HeroType:
pass
@strawberry.type
class Query:
@strawberry.field
def team(self) -> TeamType:
return TeamType(
name="Avengers",
manager="Tony",
referrers=[ManagerType(name="Hulk")],
heroes=[HeroType(name="Skii"), HeroType(name="Chris")],
)
schema = strawberry.Schema(query=Query)
query = "{ team { heroes { name } } }"
result = schema.execute_sync(query)
assert not result.errors
assert result.data["team"]["heroes"][0]["name"] == "Skii"
assert result.data["team"]["heroes"][1]["name"] == "Chris"
def test_one_to_many(clear_types):
@strawberry.experimental.pydantic.type(Team, fields=["referrers"])
class TeamType:
pass
@strawberry.experimental.pydantic.type(Manager, fields=["name"])
class ManagerType:
pass
@strawberry.type
class Query:
@strawberry.field
def team(self) -> TeamType:
return TeamType(
referrers=[ManagerType(name="Skii"), ManagerType(name="Chris")]
)
schema = strawberry.Schema(query=Query)
expected_schema = """
type ManagerType {
name: String!
}
type Query {
team: TeamType!
}
type TeamType {
referrers: [ManagerType]
}
"""
assert str(schema) == textwrap.dedent(expected_schema).strip()
query = "{ team { referrers { name } } }"
result = schema.execute_sync(query)
assert not result.errors
assert result.data["team"]["referrers"][0]["name"] == "Skii"
assert result.data["team"]["referrers"][1]["name"] == "Chris"
def test_forwardref(clear_types):
@strawberry.experimental.pydantic.type(Hero, fields=["master", "name"])
class HeroType:
pass
@strawberry.type
class Query:
@strawberry.field
def hero(self) -> HeroType:
return HeroType(name="Chris", master=HeroType(name="Skii", master=None))
schema = strawberry.Schema(query=Query)
expected_schema = """
type HeroType {
name: String!
master: HeroType
}
type Query {
hero: HeroType!
}
"""
assert str(schema) == textwrap.dedent(expected_schema).strip()
query = "{ hero { master { name } } }"
result = schema.execute_sync(query)
assert not result.errors
assert result.data["hero"]["master"]["name"] == "Skii"
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict
from pandas import DataFrame
from lib.cast import numeric_code_as_string
from lib.data_source import DataSource
from lib.time import datetime_isoformat
from lib.utils import aggregate_admin_level, table_rename
_column_adapter = {
"reg": "subregion1_code",
"dep": "subregion2_code",
"jour": "date",
"n_dose1": "new_persons_vaccinated",
"n_cum_dose1": "total_persons_vaccinated",
"n_tot_dose1": "total_persons_vaccinated",
"n_complet": "new_persons_fully_vaccinated",
"n_cum_complet": "total_persons_fully_vaccinated",
"n_tot_complet": "total_persons_fully_vaccinated",
}
_region_code_map = {
84: "ARA",
27: "BFC",
53: "BRE",
94: "COR",
24: "CVL",
44: "GES",
3: "GF",
1: "GUA",
32: "HDF",
11: "IDF",
4: "LRE",
6: "MAY",
2: "MQ",
75: "NAQ",
28: "NOR",
76: "OCC",
93: "PAC",
52: "PDL",
}
def _preprocess_dataframe(data: DataFrame) -> DataFrame:
data = data.copy()
# Convert date to ISO format
data["date"] = data["date"].str.slice(0, 10)
# In some datasets the second dose column can be missing
if "new_persons_fully_vaccinated" not in data.columns:
data["new_persons_fully_vaccinated"] = None
if "total_persons_fully_vaccinated" not in data.columns:
data["total_persons_fully_vaccinated"] = None
# Estimate the doses from person counts
data["new_vaccine_doses_administered"] = (
data["new_persons_vaccinated"] + data["new_persons_fully_vaccinated"]
)
data["total_vaccine_doses_administered"] = (
data["total_persons_vaccinated"] + data["total_persons_fully_vaccinated"]
)
return data
class FranceDepartmentDataSource(DataSource):
def parse_dataframes(
self, dataframes: Dict[Any, DataFrame], aux: Dict[str, DataFrame], **parse_opts
) -> DataFrame:
data = table_rename(dataframes[0], _column_adapter, remove_regex=r"[^\w]", drop=True)
data = _preprocess_dataframe(data)
# Make sure all records have the country code and match subregion2 only
data["key"] = None
data["country_code"] = "FR"
data["locality_code"] = None
# We consider some departments as regions
data.loc[data["subregion2_code"] == "971", "key"] = "FR_GUA"
data.loc[data["subregion2_code"] == "972", "key"] = "FR_MQ"
data.loc[data["subregion2_code"] == "973", "key"] = "FR_GF"
data.loc[data["subregion2_code"] == "974", "key"] = "FR_LRE"
# Drop bogus data
data = data.dropna(subset=["subregion2_code"])
data = data[data["subregion2_code"] != "00"]
return data
class FranceRegionDataSource(DataSource):
def parse_dataframes(
self, dataframes: Dict[Any, DataFrame], aux: Dict[str, DataFrame], **parse_opts
) -> DataFrame:
data = table_rename(dataframes[0], _column_adapter, remove_regex=r"[^\w]", drop=True)
data = _preprocess_dataframe(data)
# Convert the region codes to ISO format
data["subregion1_code"] = data["subregion1_code"].apply(_region_code_map.get)
data = data.dropna(subset=["subregion1_code"])
# Make sure all records have the country code and match subregion1 only
data["country_code"] = "FR"
data["subregion2_code"] = None
data["locality_code"] = None
return data
class FranceCountryDataSource(DataSource):
def parse_dataframes(
self, dataframes: Dict[Any, DataFrame], aux: Dict[str, DataFrame], **parse_opts
) -> DataFrame:
data = table_rename(dataframes[0], _column_adapter, remove_regex=r"[^\w]", drop=True)
data = _preprocess_dataframe(data)
data["key"] = "FR"
return data
|
# Write a program that reads three numbers and shows which one is the largest and which is the smallest.
a = int(input('Enter the first number: '))
b = int(input('Enter the second number: '))
c = int(input('Enter the third number: '))
# Finding the smallest
menor = a
if b <= a and b <= c:
    menor = b
if c <= a and c <= b:
    menor = c
# Finding the largest
maior = a
if b >= a and b >= c:
    maior = b
if c >= a and c >= b:
    maior = c
print(f'The smallest value entered was {menor}.')
print(f'The largest value entered was {maior}.')
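# Note: the explicit comparisons above are equivalent to the built-ins min() and max():
# menor = min(a, b, c)
# maior = max(a, b, c)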
|
import os, sys
import unittest
sys.path.append(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'tools'))
from hcx_report import HouseCallXReport
class HouseCallXReportTestCase(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_get_scores(self):
report_path = os.path.join('UT','staff','60_Report.log')
scores = HouseCallXReport.get_scores(report_path)
malicious_count = 0
suspicious_count = 0
normal_count = 0
for key, value in scores.items():
if value[0] == 2:
malicious_count += 1
elif value[0] == 1:
suspicious_count += 1
else:
normal_count += 1
# print('{},{},{}'.format(normal_count, suspicious_count, malicious_count))
assert(normal_count == 0)
assert(suspicious_count == 11)
assert(malicious_count == 89)
|
f=open("testfile.txt", "w") # it removes initial written things and write new things and also create a new file if not created at first
f.write("I'm loving Solus.")
f.close() #must close the file after writing
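# Equivalent, with the file closed automatically by a context manager:
# with open("testfile.txt", "w") as f:
#     f.write("I'm loving Solus.")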
|
# Generated by Django 2.1.3 on 2020-08-22 15:04
import commons.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_extensions.db.fields
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('eurowiki', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='QueryRun',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('result', models.TextField(blank=True, null=True, verbose_name='query result')),
('executed', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='execution time')),
('executor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='executor', to=settings.AUTH_USER_MODEL, verbose_name='executor')),
],
options={
'verbose_name': 'SPARQL query execution',
'verbose_name_plural': 'SPARQL query executions',
},
),
migrations.CreateModel(
name='SparqlQuery',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(db_index=True, max_length=200, verbose_name='title')),
('description', models.TextField(blank=True, null=True, verbose_name='description')),
('text', models.TextField(blank=True, null=True, verbose_name='query text')),
('state', models.IntegerField(choices=[(-3, 'Portlet'), (-1, 'Grey'), (1, 'Draft'), (2, 'Submitted'), (3, 'Published'), (4, 'Un-published')], default=1, null=True, verbose_name='publication state')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='last modified')),
('editor', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='query_editor', to=settings.AUTH_USER_MODEL, verbose_name='last editor')),
],
options={
'verbose_name': 'SPARQL query definition',
'verbose_name_plural': 'SPARQL query definitions',
},
bases=(models.Model, commons.models.Publishable),
),
migrations.AddField(
model_name='queryrun',
name='query',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='query', to='eurowiki.SparqlQuery', verbose_name='query'),
),
]
|
from random import randrange
bolso = 100
resultado = 0
resposta = "s"
while(resposta=="s"):
numero_apostado = int(input("Escolha um número entre 1 e 6 para você apostar: "))
valor_aposta = float(input("Qual o valor da aposta? "))
bolso -= valor_aposta
dado1 = randrange(1,6)
dado2 = randrange(1,6)
print("Sorteados os dados {} e {}.".format(dado1, dado2))
if(dado1==numero_apostado)and(dado2==numero_apostado):
resultado = valor_aposta * 10
bolso += resultado
print("Você ganhou {} e agora está com {}.".format(resultado,bolso))
elif (dado1==numero_apostado)or(dado2==numero_apostado):
resultado = valor_aposta * 2
bolso += resultado
print("Você ganhou {} e agora está com {}.".format(resultado,bolso))
else:
print("Você errou. Agora tem {} no bolso.".format(bolso))
resposta = input("Deseja jogar outra vez? ".lower())
print("Fim de jogo.")
|
import pandas as pd
import numpy as np
def voting(time_matrix, values_array):
time_matrix = np.array(time_matrix)
col_sum = time_matrix.sum(axis=0) # Sum of votes for every option
idx = col_sum.argmax()
matrix_size = time_matrix.shape
if col_sum[idx] > matrix_size[0]/2: # If at least half of the voters chose a certain time
return idx
mult = np.multiply(col_sum, np.asarray(values_array)) # Weighing votes
sum = col_sum.sum()
if sum == 0:
return 0
Avg = mult.sum()/sum # Average = weighted votes / sum of all the votes
# Returning the index of the closest value from values_array
    idx = (np.abs(np.asarray(values_array) - Avg)).argmin()  # cast so this also works when values_array is a plain list
return idx
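# Example (illustrative): with values_array = [10, 11, 12, 13] and
# time_matrix = [[1, 0, 0, 0], [1, 0, 0, 0], [0, 0, 1, 0]], two of the three voters
# picked option 0, which is a strict majority (2 > 3/2), so voting(...) returns 0.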
def ranking_algorithm(price_matrix,food_matrix,filename="restaurant_database.csv"):
# Reading requisite files
database = pd.read_csv(filename, index_col = 0, sep=';') # Restaurant data
to_matrix=database.drop(["Name"],1)
restaurant_matrix=to_matrix.to_numpy()
price_matrix=np.array(price_matrix)
food_matrix=np.array(food_matrix)
#manipulating food_matrix
fm_sum=food_matrix.sum(axis=1)
food_matrix_norm=[]
for i,row in enumerate(food_matrix):
if fm_sum[i]!=0:
food_matrix_norm.append(row/fm_sum[i])
else:
sh = row.shape
food_matrix_norm.append(np.zeros(sh[0]))
food_matrix_norm=np.array(food_matrix_norm)
#manipulating price_matrix:
pm_sum=price_matrix.sum(axis=1)*2+2
price_matrix_norm=[]
coefficients=[]
for row in price_matrix:
tmp=0
tmp_row=[]
for i,elem in enumerate(row):
if row[i]==1:
tmp+=2
else:
if i==0:
if row[i+1]==1:
tmp+=1
elif i==len(row)-1:
if row[i-1]==1:
tmp+=1
else:
if (row[i-1]==1 or row[i+1]==1):
tmp+=1
for i,elem in enumerate(row):
if tmp!=0:
if row[i]==1:
tmp_row.append(2*1/tmp)
else:
if i==0:
if row[i+1]==1:
tmp_row.append(1/tmp)
else:
tmp_row.append(0)
elif i==len(row)-1:
if row[i-1]==1:
tmp_row.append(1/tmp)
else:
tmp_row.append(0)
else:
if (row[i-1]==1 or row[i+1]==1):
tmp_row.append(1/tmp)
else:
tmp_row.append(0)
else:
tmp_row=[0,0,0,0]
price_matrix_norm.append(tmp_row)
price_matrix_norm=np.array(price_matrix_norm)
#matrix multiplication (algorithm core)
participant_matrix_norm=np.concatenate((price_matrix_norm,food_matrix_norm),axis=1)
result=np.matmul(participant_matrix_norm,np.transpose(restaurant_matrix))
#sort, list ranked restaurants:
ranking=result.sum(axis=0)
database.insert(0,"Ranking",ranking)
database=database.sort_values(by=["Ranking"], ascending=False)
#print(database)
ranked_restaurant=database["Name"].to_list()
    return ranked_restaurant  # It's a list of restaurant names, ordered from best to worst
|
from __future__ import unicode_literals
import unittest
import mock
import vcr
from mopidy_youtube import Extension
from mopidy_youtube.backend import resolve_playlist
from mopidy_youtube.backend import search_youtube
from mopidy_youtube.backend import resolve_track
class ExtensionTest(unittest.TestCase):
def test_get_default_config(self):
ext = Extension()
config = ext.get_default_config()
self.assertIn('[youtube]', config)
self.assertIn('enabled = true', config)
@vcr.use_cassette('tests/fixtures/youtube_playlist_resolve.yaml')
def test_playlist_resolver(self):
with mock.patch('mopidy_youtube.backend.pafy'):
videos = resolve_playlist('PLOxORm4jpOQfMU7bpfGCzDyLropIYEHuR')
self.assertEquals(len(videos), 104)
@vcr.use_cassette('tests/fixtures/youtube_search.yaml')
def test_search_yt(self):
with mock.patch('mopidy_youtube.backend.pafy'):
videos = search_youtube('chvrches')
self.assertEquals(len(videos), 15)
@vcr.use_cassette('tests/fixtures/resolve_track.yaml')
def test_resolve_track(self):
with mock.patch('mopidy_youtube.backend.pafy'):
video = resolve_track('TU3b1qyEGsE')
self.assertTrue(video)
@vcr.use_cassette('tests/fixtures/resolve_track_failed.yaml')
def test_resolve_track_failed(self):
with mock.patch('mopidy_youtube.backend.pafy') as pafy:
pafy.new.side_effect = Exception('Removed')
video = resolve_track('unknown')
self.assertFalse(video)
@vcr.use_cassette('tests/fixtures/resolve_track_stream.yaml')
def test_resolve_track_stream(self):
with mock.patch('mopidy_youtube.backend.pafy'):
video = resolve_track('TU3b1qyEGsE', True)
self.assertTrue(video)
|
class bindings:
def __init__(self):
pass
def getName(self):
return "bindings"
def getDescription(self):
return "Flow snowboard bindings"
|
from time import perf_counter
import warnings
import torch
import torch.nn.functional as F
import numpy as np
from mmdet.datasets.builder import PIPELINES
from mmdet.datasets.pipelines.compose import Compose
@PIPELINES.register_module()
class Timer(Compose):
"""Times a list of transforms and stores result in img_meta."""
def __init__(self, name, transforms):
super(Timer, self).__init__(transforms)
self.name = f"{name}_time"
def __call__(self, data):
t1 = perf_counter()
data = super(Timer, self).__call__(data)
data[self.name] = perf_counter() - t1
return data
@PIPELINES.register_module()
class DummyResize(object):
"""Replacement for resize in case the scale is 1.
Adds img_shape, pad_shape, scale_factor to results."""
def __call__(self, results):
"""Resize images with ``results['scale']``."""
for key in results.get('img_fields', ['img']):
img_shape = results[key].shape
results['img_shape'] = img_shape
# in case that there is no padding
results['pad_shape'] = img_shape
results['scale_factor'] = np.array([1, 1, 1, 1], dtype=np.float32)
return results
@PIPELINES.register_module()
class ImageTestTransformGPU(object):
"""Preprocess an image using GPU."""
def __init__(self, img_norm_cfg, size_divisor, scale_factor):
self.img_norm_cfg = img_norm_cfg
self.mean = torch.tensor(img_norm_cfg['mean'], dtype=torch.float32)
self.std = torch.tensor(img_norm_cfg['std'], dtype=torch.float32)
self.to_rgb = img_norm_cfg['to_rgb']
self.std_inv = 1/self.std
self.size_divisor = size_divisor
self.scale_factor = float(scale_factor)
def __call__(self, results, device='cuda'):
start = perf_counter()
img = results['img']
ori_shape = img.shape
h, w = img.shape[:2]
new_size = (round(h*self.scale_factor), round(w*self.scale_factor))
img_shape = (*new_size, 3)
img = torch.from_numpy(img).to(device).float()
if self.to_rgb:
img = img[:, :, (2, 1, 0)]
# to BxCxHxW
img = img.permute(2, 0, 1).unsqueeze(0)
if new_size[0] != img.shape[2] or new_size[1] != img.shape[3]:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# ignore the align_corner warnings
img = F.interpolate(img, new_size, mode='bilinear')
for c in range(3):
img[:, c, :, :] = img[:, c, :, :].sub(self.mean[c]) \
.mul(self.std_inv[c])
if self.size_divisor is not None:
pad_h = int(np.ceil(new_size[0] / self.size_divisor)) \
* self.size_divisor - new_size[0]
pad_w = int(np.ceil(new_size[1] / self.size_divisor)) \
* self.size_divisor - new_size[1]
img = F.pad(img, (0, pad_w, 0, pad_h), mode='constant', value=0)
pad_shape = (img.shape[2], img.shape[3], 3)
else:
pad_shape = img_shape
img_meta = dict(
filename=results['filename'],
ori_filename=results['ori_filename'],
ori_shape=ori_shape,
img_shape=img_shape,
pad_shape=pad_shape,
scale_factor=self.scale_factor,
flip=False,
img_norm_cfg=self.img_norm_cfg,
start_time=start,
)
if 'gt_bboxes' in results:
gt_bboxes = torch.from_numpy(results['gt_bboxes']) \
.to(device).float()
gt_labels = torch.from_numpy(results['gt_labels']) \
.to(device).float()
return dict(img=img, img_metas=[img_meta],
gt_bboxes=gt_bboxes, gt_labels=gt_labels)
else:
return dict(img=img, img_metas=[img_meta])
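# Hypothetical usage sketch (config keys assumed for illustration, not taken from a specific repo):
# img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# test_pipeline = [
#     dict(type='LoadImageFromFile'),
#     dict(type='Timer', name='preprocess', transforms=[
#         dict(type='ImageTestTransformGPU', img_norm_cfg=img_norm_cfg,
#              size_divisor=32, scale_factor=1.0),
#     ]),
# ]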
|
#!/usr/bin/python
#-*- coding:utf8 -*-
"""
本程序用来获取音乐台网站的NV排行榜的数据
音悦台:http://www.yinyuetai.com
参考书籍
《用Python写网络爬虫》
《Python网络爬虫实战》
"""
import requests
import sys
import os
import re
import random
import time
from bs4 import BeautifulSoup
reload(sys)
sys.setdefaultencoding('utf8')
"""
Item 代表一个MV榜单
"""
class Item(object):
    topNum = None  # rank
    score = None  # score; desc_score: trending down, asc_score: trending up
    mvName = None  # MV title
    singer = None  # artist
    releaseTime = None  # release date
'''Build the chart URLs:
initialize the data used by the whole program, including the URLs and the dicts/lists needed later'''
def getURL():
    urlBase = 'http://vchart.yinyuetai.com/vchart/trends?'
    areas = ['ML', 'HT', 'US', 'KR', 'JP']  # keys of areasDic in handleMvsData
    pages = [str(i) for i in range(1, 4)]
    for area in areas:
        urls = []
        for page in pages:
            urlEnd = 'area=' + area + '&page=' + page
            url = urlBase + urlEnd
            urls.append(url)
        getMvsList(area, urls)
    # return urls
# Fetch the text of a page
def getHTMLText(url):
try:
# r = requests.get(url, headers=header, timeout=30, proxies=proxy)
r = requests.get(url,timeout=30)
r.raise_for_status()
r.encoding = r.apparent_encoding
return r.text
except:
return ""
# Parse the MV entries from the chart pages
def getMvsList(area, urls):
    items = []
    for url in urls:
        html = getHTMLText(url)
        try:
            soup = BeautifulSoup(html, 'html.parser')
            tags = soup.find_all('li', attrs={'name': 'dmvLi'})
            for tag in tags:
                item = Item()
                item.topNum = tag.find('div', attrs={'class': 'top_num'}).get_text()
                if tag.find('h3', attrs={'class': 'desc_score'}):
                    item.score = tag.find('h3', attrs={'class': 'desc_score'}).get_text()
                else:
                    item.score = tag.find('h3', attrs={'class': 'asc_score'}).get_text()
                item.mvName = tag.find('img').get('alt')
                item.singer = tag.find('a', attrs={'class': 'special'}).get_text()
                pst = re.compile(r'\d{4}-\d{2}-\d{2}')
                item.releaseTime = pst.findall(tag.find('p', attrs={'class': 'c9'}).get_text())[0]
                items.append(item)
        except Exception as e:
            raise e
    handleMvsData(area, items)
def handleMvsData(area, items):
    areasDic = {'ML': 'Mainland', 'HT': 'Hongkong&Taiwan', 'US': 'America', 'KR': 'Korea', 'JP': 'Japan'}
fileName = "/Users/miraclewong/github/MoocStudy/python_web_scrap/YinYueTaiMvsList.txt"
nowTime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
tplt = "{:8}\t{:10}\t{:20}\t{:20}\t{:16}\r\n"
with open(fileName, 'a') as f:
f.write('%s -------%s\r\n' %(areasDic.get(area), nowTime))
f.write(tplt.format("排名","得分","发布时间","歌手","专辑名称"))
for item in items:
f.write('%s \t %s \t %s \t %s \t\t %s \r\n' %(item.topNum, item.score, item.releaseTime, item.singer, item.mvName))
f.write('\r\n'*4)
def main():
    getURL()  # getURL drives the whole scrape and does not return anything
# items = getMvsList(urls)
if __name__ == '__main__':
main()
|
"""
This is essentially an interface for other converter classes,
these methods MUST be present!
"""
class BaseConverter:
def get_file(self):
raise NotImplementedError()
def get_filename(self):
raise NotImplementedError()
def is_inline(self):
raise NotImplementedError()
def get_content_id(self):
raise NotImplementedError()
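# Minimal sketch of a concrete converter (hypothetical class, shown only to illustrate
# the interface contract; the name and behaviour are assumptions, not project code):
class PlainTextConverter(BaseConverter):
    def __init__(self, text, filename="attachment.txt"):
        self._text = text
        self._filename = filename
    def get_file(self):
        return self._text.encode("utf-8")
    def get_filename(self):
        return self._filename
    def is_inline(self):
        return False
    def get_content_id(self):
        return None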
|
#! /usr/bin/env python
import sourmash
import pickle
import argparse
from sourmash.search import gather_databases
from sourmash.lca import lca_utils
from sourmash.lca.lca_utils import LCA_Database
def main():
p = argparse.ArgumentParser()
p.add_argument("node_mh_pickle")
p.add_argument("lca_db")
args = p.parse_args()
node_mhs = pickle.load(open(args.node_mh_pickle, "rb"))
lca_obj = LCA_Database()
lca_obj.load(args.lca_db)
databases = ((lca_obj, args.lca_db, "LCA"),)
d = {}
n_pure95 = 0
total = 0
for k, v in node_mhs.items():
ss = sourmash.SourmashSignature(v)
results = [x[0] for x in gather_databases(ss, databases, 0, True)]
sum_f_uniq = sum([result.f_unique_to_query for result in results])
keep_results = []
for result in results:
if result.f_unique_to_query < 0.10:
break
keep_results.append(result)
if not keep_results:
print("** no match for {}".format(k))
continue
idents = [result.name.split()[0].split(".")[0] for result in keep_results]
idxlist = [lca_obj.ident_to_idx[ident] for ident in idents]
lidlist = [lca_obj.idx_to_lid[idx] for idx in idxlist]
lineages = [lca_obj.lid_to_lineage[lid] for lid in lidlist]
tree = lca_utils.build_tree(lineages)
lca, reason = lca_utils.find_lca(tree)
level = "*none*"
if lca:
level = lca[-1].rank
lineage = ";".join(lca_utils.zip_lineage(lca, truncate_empty=True))
this_f_uniq = sum([result.f_unique_to_query for result in keep_results])
print(
"node {} matches {} @ {:.1f}".format(
k, level, this_f_uniq / sum_f_uniq * 100
)
)
if level in ("strain", "genus", "species") and this_f_uniq / sum_f_uniq >= 0.95:
n_pure95 += 1
total += 1
print("XXX", n_pure95, total)
if __name__ == "__main__":
main()
|
###############################################################################
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). #
# You may not use this file except in compliance with the License.           #
# A copy of the License is located at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# or in the "license" file accompanying this file. This file is distributed #
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express #
# or implied. See the License for the specific language governing permissions#
# and limitations under the License. #
###############################################################################
import gzip, re
import pandas as pd
import csv
import sys
from awsglue.utils import getResolvedOptions
import boto3
s3 = boto3.client('s3')
s3_resource = boto3.resource('s3')
args = getResolvedOptions(sys.argv,
['input_bucket', 'clinvar_input_key', 'clinvar_annotated_input_key', 'output_bucket',
'output_key'])
def download_to_local(filename):
new_filename = filename.split('/')[-1]
s3_resource.meta.client.download_file(args['input_bucket'], filename, '/tmp/' + new_filename)
return new_filename
def list_to_dict(l):
"""Convert list to dict."""
return {k: v for k, v in (x.split("=") for x in l)}
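# Example: list_to_dict(["AF_ESP=0.001", "CLNVC=single_nucleotide_variant"])
#   -> {"AF_ESP": "0.001", "CLNVC": "single_nucleotide_variant"}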
fieldnames = [
"CHROM",
"POS",
"REF",
"ALT",
"AF_ESP",
"AF_EXAC",
"AF_TGP",
"CLNDISDB",
"CLNDISDBINCL",
"CLNDN",
"CLNDNINCL",
"CLNHGVS",
"CLNSIGINCL",
"CLNVC",
"CLNVI",
"MC",
"ORIGIN",
"SSR",
"CLASS",
"Allele",
"Consequence",
"IMPACT",
"SYMBOL",
"Feature_type",
"Feature",
"BIOTYPE",
"EXON",
"INTRON",
"cDNA_position",
"CDS_position",
"Protein_position",
"Amino_acids",
"Codons",
"DISTANCE",
"STRAND",
"BAM_EDIT",
"SIFT",
"PolyPhen",
"MOTIF_NAME",
"MOTIF_POS",
"HIGH_INF_POS",
"MOTIF_SCORE_CHANGE",
"LoFtool",
"CADD_PHRED",
"CADD_RAW",
"BLOSUM62",
]
obj = s3.get_object(Bucket=args['input_bucket'], Key=args['clinvar_input_key'])
cv_columns = {}
with gzip.GzipFile(fileobj=obj['Body'], mode='rb') as f:
for metaline in f:
if metaline.startswith(b'##INFO'):
colname = re.search(b'ID=(\w+),', metaline.strip(b'#\n'))
coldesc = re.search(b'.*Description=(.*)>', metaline.strip(b'#\n'))
cv_columns[colname.group(1)] = coldesc.group(1).strip(b'"')
# read clinvar vcf
obj = s3.get_object(Bucket=args['input_bucket'], Key=args['clinvar_input_key'])
with gzip.GzipFile(fileobj=obj['Body'], mode='rb') as f:
cv_df = pd.read_csv(f, sep='\t', comment='#', header=None, usecols=[0, 1, 2, 3, 4, 7], dtype={0: object})
# convert dictionaries to columns
cv_df = pd.concat(
[
cv_df.drop([7], axis=1),
cv_df[7].str.split(";").apply(list_to_dict).apply(pd.Series),
],
axis=1,
)
# rename columns
cv_df.rename(columns={0: "CHROM", 1: "POS", 2: "ID", 3: "REF", 4: "ALT"}, inplace=True)
# drop columns we know we won't need
cv_df = cv_df.drop(columns=["CHROM", "POS", "REF", "ALT"])
# assign classes
cv_df["CLASS"] = 0
cv_df.loc[cv_df["CLNSIGCONF"].notnull(), "CLASS"] = 1
# convert NaN to 0 where allele frequencies are null
cv_df[["AF_ESP", "AF_EXAC", "AF_TGP"]] = cv_df[["AF_ESP", "AF_EXAC", "AF_TGP"]].fillna(
0
)
# select variants that have been submitted by multiple organizations.
cv_df = cv_df.loc[
cv_df["CLNREVSTAT"].isin(
[
"criteria_provided,_multiple_submitters,_no_conflicts",
"criteria_provided,_conflicting_interpretations",
]
)
]
# Reduce the size of the dataset below
cv_df.drop(columns=["ALLELEID", "RS", "DBVARID"], inplace=True)
# drop columns that would reveal class
cv_df.drop(columns=["CLNSIG", "CLNSIGCONF", "CLNREVSTAT"], inplace=True)
# drop these redundant columns
cv_df.drop(columns=["CLNVCSO", "GENEINFO"], inplace=True)
# dictionary to map ID to clinvar annotations
clinvar_annotations = cv_df.set_index("ID")[
[col for col in cv_df.columns if col in fieldnames]
].to_dict(orient="index")
# open the output file
outfile = "/tmp/clinvar_conflicting.csv"
with open(outfile, "w") as fout:
dw = csv.DictWriter(
fout, delimiter=",", fieldnames=fieldnames, extrasaction="ignore"
)
dw.writeheader()
# read the VEP-annotated vcf file line-by-line
filename = download_to_local(args['clinvar_annotated_input_key'])
filename = "/tmp/" + filename
with gzip.GzipFile(filename, mode='rb') as f:
for line in f:
line = line.decode("utf-8")
if line.startswith("##INFO=<ID=CSQ"):
m = re.search(r'.*Format: (.*)">', line)
cols = m.group(1).split("|")
continue
if line.startswith("#"):
continue
record = line.split("\t")
(
chromosome,
position,
clinvar_id,
reference_base,
alternate_base,
qual,
filter_,
info,
) = record
info_field = info.strip("\n").split(";")
            # to look up in clinvar_annotations
clinvar_id = int(clinvar_id)
# only keep the variants that have been evaluated by multiple submitters
if clinvar_id in clinvar_annotations:
# initialize a dictionary to hold all the VEP annotation data
annotation_data = {column: None for column in cols}
annotation_data.update(clinvar_annotations[clinvar_id])
# fields directly from the vcf
annotation_data["CHROM"] = str(chromosome)
annotation_data["POS"] = position
annotation_data["REF"] = reference_base
annotation_data["ALT"] = alternate_base
for annotations in info_field:
column, value = annotations.split("=")
if column == "CSQ":
for csq_column, csq_value in zip(cols, value.split("|")):
annotation_data[csq_column] = csq_value
continue
annotation_data[column] = value
dw.writerow(annotation_data)
s3_resource.meta.client.upload_file(outfile, args['output_bucket'], args['output_key'])
|
# -*- coding: utf-8 -*-
from permission import Permission
from app import db
class Role(db.Model):
__tablename__ = 'roles'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String, nullable=True, unique=True)
default = db.Column(db.Boolean, default=False)
permissions = db.Column(db.Integer)
user = db.relationship('User', backref='itsrole')
@staticmethod
def insert_roles():
roles = {
'User': (Permission.FOLLOW |
Permission.COMMENT |
Permission.WRITE_ARTICLES, True),
            'Moderator': (Permission.FOLLOW |
Permission.COMMENT |
Permission.WRITE_ARTICLES |
Permission.MODERATE_COMMENTS, False),
'Administrator': (Permission.ADMINISTRATOR, False)
}
for r in roles:
role = Role.query.filter_by(name=r).first()
if role is None:
role = Role(name=r)
role.permissions = roles[r][0]
role.default = roles[r][1]
db.session.add(role)
db.session.commit()
|
"""
:attr:`~django.db.models.Field.help_text` string constants for the various
fields.
"""
SERIES_UID = "Unique identifier of the series"
SERIES_DATE = "Date the series started"
SERIES_TIME = "Time the series started"
SERIES_DESCRIPTION = "Description of the series"
PIXEL_SPACING = "Physical distance in the patient between the center of each pixel, specified by a numeric pair: adjacent row spacing (delimiter) adjacent column spacing in mm"
SERIES_NUMBER = "A number that identifies this series within a given session"
INVERSION_TIME = "Time in milliseconds after the middle of inverting RF pulse to middle of excitation pulse to detect the amount of longitudinal magnetization"
ECHO_TIME = "Time in ms between the middle of the excitation pulse and the peak of the echo produced"
REPETITION_TIME = "The period of time in milliseconds between the beginning of a pulse sequence and the beginning of the succeeding pulse sequence."
SCANNING_SEQUENCE = "Description of the type of data taken"
SEQUENCE_VARIANT = "Variant of the scanning sequence"
FLIP_ANGLE = "Steady state angle in degrees to which the magnetic vector is flipped from the magnetic vector of the primary field."
MODALITY = "Type of equipment that originally acquired the data used to create the images in this series"
MANUFACTURER = (
"Manufacturer of the equipment that produced the composite instances"
)
MANUFACTURER_MODEL_NAME = "Manufacturer's model name of the equipment that produced the composite instances"
MAGNETIC_FIELD_STRENGTH = "Nominal field strength of MR magnet, in Tesla"
DEVICE_SERIAL_NUMBER = "Manufacturer's serial number of the equipment that produced the Composite Instances"
BODY_PART_EXAMINED = "Text description of the part of the body examined"
PATIENT_POSITION = "Patient position descriptor relative to the equipment"
INSTITUTE_NAME = "Institution where the equipment that produced the Composite Instances is located"
PROTOCOL_NAME = "User-defined description of the conditions under which the series was performed"
MR_ACQUISITION_TYPE = "Identification of data encoding scheme"
SLICE_THICKNESS = "Nominal slice thickness, in millimeters."
OPERATORS_NAME = "Name(s) of the operator(s) supporting the Series."
ECHO_TRAIN_LENGTH = (
"Number of lines in k-space acquired per excitation per image."
)
PULSE_SEQUENCE_NAME = "Name of the pulse sequence for annotation purposes. Potentially vendor-specific name."
SEQUENCE_NAME = "User defined name for the combination of Scanning Sequence and Sequence Variant."
# flake8: noqa: E501
|
from knx_stack.decode.usb_hid.report_body import usb_protocol_header
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated Thu Dec 13 17:56:14 2018 by generateDS.py version 2.29.5.
# Python 3.6.5 (default, May 19 2018, 11:27:13) [GCC 4.2.1 Compatible Apple LLVM 9.0.0 (clang-900.0.39.2)]
#
# Command line options:
# ('-o', '../python/eVSCertifyRequest.xsd.py')
#
# Command line arguments:
# eVSCertifyRequest.xsd
#
# Command line:
# /Users/danielkobina/Documents/Open/bin/generateDS -o "../python/eVSCertifyRequest.xsd.py" eVSCertifyRequest.xsd
#
# Current working directory (os.getcwd()):
# schemas
#
import sys
import re as re_
import base64
import datetime as datetime_
import warnings as warnings_
try:
from lxml import etree as etree_
except ImportError:
from xml.etree import ElementTree as etree_
Validate_simpletypes_ = True
if sys.version_info.major == 2:
BaseStrType_ = basestring
else:
BaseStrType_ = str
def parsexml_(infile, parser=None, **kwargs):
if parser is None:
# Use the lxml ElementTree compatible parser so that, e.g.,
# we ignore comments.
try:
parser = etree_.ETCompatXMLParser()
except AttributeError:
# fallback to xml.etree
parser = etree_.XMLParser()
doc = etree_.parse(infile, parser=parser, **kwargs)
return doc
def parsexmlstring_(instring, parser=None, **kwargs):
if parser is None:
# Use the lxml ElementTree compatible parser so that, e.g.,
# we ignore comments.
try:
parser = etree_.ETCompatXMLParser()
except AttributeError:
# fallback to xml.etree
parser = etree_.XMLParser()
element = etree_.fromstring(instring, parser=parser, **kwargs)
return element
#
# Namespace prefix definition table (and other attributes, too)
#
# The module generatedsnamespaces, if it is importable, must contain
# a dictionary named GeneratedsNamespaceDefs. This Python dictionary
# should map element type names (strings) to XML schema namespace prefix
# definitions. The export method for any class for which there is
# a namespace prefix definition, will export that definition in the
# XML representation of that element. See the export method of
# any generated element type class for an example of the use of this
# table.
# A sample table is:
#
# # File: generatedsnamespaces.py
#
# GenerateDSNamespaceDefs = {
# "ElementtypeA": "http://www.xxx.com/namespaceA",
# "ElementtypeB": "http://www.xxx.com/namespaceB",
# }
#
try:
from generatedsnamespaces import GenerateDSNamespaceDefs as GenerateDSNamespaceDefs_
except ImportError:
GenerateDSNamespaceDefs_ = {}
#
# The root super-class for element type classes
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
try:
from generatedssuper import GeneratedsSuper
except ImportError as exp:
class GeneratedsSuper(object):
tzoff_pattern = re_.compile(r'(\+|-)((0\d|1[0-3]):[0-5]\d|14:00)$')
class _FixedOffsetTZ(datetime_.tzinfo):
def __init__(self, offset, name):
self.__offset = datetime_.timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return None
def gds_format_string(self, input_data, input_name=''):
return input_data
def gds_validate_string(self, input_data, node=None, input_name=''):
if not input_data:
return ''
else:
return input_data
def gds_format_base64(self, input_data, input_name=''):
return base64.b64encode(input_data)
def gds_validate_base64(self, input_data, node=None, input_name=''):
return input_data
def gds_format_integer(self, input_data, input_name=''):
return '%d' % input_data
def gds_validate_integer(self, input_data, node=None, input_name=''):
return input_data
def gds_format_integer_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_integer_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
try:
int(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of integers')
return values
def gds_format_float(self, input_data, input_name=''):
return ('%.15f' % input_data).rstrip('0')
def gds_validate_float(self, input_data, node=None, input_name=''):
return input_data
def gds_format_float_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_float_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
try:
float(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of floats')
return values
def gds_format_double(self, input_data, input_name=''):
return '%e' % input_data
def gds_validate_double(self, input_data, node=None, input_name=''):
return input_data
def gds_format_double_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_double_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
try:
float(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of doubles')
return values
def gds_format_boolean(self, input_data, input_name=''):
return ('%s' % input_data).lower()
def gds_validate_boolean(self, input_data, node=None, input_name=''):
return input_data
def gds_format_boolean_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_boolean_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
if value not in ('true', '1', 'false', '0', ):
raise_parse_error(
node,
'Requires sequence of booleans '
'("true", "1", "false", "0")')
return values
def gds_validate_datetime(self, input_data, node=None, input_name=''):
return input_data
def gds_format_datetime(self, input_data, input_name=''):
if input_data.microsecond == 0:
_svalue = '%04d-%02d-%02dT%02d:%02d:%02d' % (
input_data.year,
input_data.month,
input_data.day,
input_data.hour,
input_data.minute,
input_data.second,
)
else:
_svalue = '%04d-%02d-%02dT%02d:%02d:%02d.%s' % (
input_data.year,
input_data.month,
input_data.day,
input_data.hour,
input_data.minute,
input_data.second,
('%f' % (float(input_data.microsecond) / 1000000))[2:],
)
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
@classmethod
def gds_parse_datetime(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
time_parts = input_data.split('.')
if len(time_parts) > 1:
micro_seconds = int(float('0.' + time_parts[1]) * 1000000)
input_data = '%s.%s' % (time_parts[0], micro_seconds, )
dt = datetime_.datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S.%f')
else:
dt = datetime_.datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S')
dt = dt.replace(tzinfo=tz)
return dt
def gds_validate_date(self, input_data, node=None, input_name=''):
return input_data
def gds_format_date(self, input_data, input_name=''):
_svalue = '%04d-%02d-%02d' % (
input_data.year,
input_data.month,
input_data.day,
)
try:
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(
hours, minutes)
except AttributeError:
pass
return _svalue
@classmethod
def gds_parse_date(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
dt = datetime_.datetime.strptime(input_data, '%Y-%m-%d')
dt = dt.replace(tzinfo=tz)
return dt.date()
def gds_validate_time(self, input_data, node=None, input_name=''):
return input_data
def gds_format_time(self, input_data, input_name=''):
if input_data.microsecond == 0:
_svalue = '%02d:%02d:%02d' % (
input_data.hour,
input_data.minute,
input_data.second,
)
else:
_svalue = '%02d:%02d:%02d.%s' % (
input_data.hour,
input_data.minute,
input_data.second,
('%f' % (float(input_data.microsecond) / 1000000))[2:],
)
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
def gds_validate_simple_patterns(self, patterns, target):
        # patterns is a list of lists of strings/patterns. We should:
# - AND the outer elements
# - OR the inner elements
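        # Hypothetical example:
        #     patterns = [[r'^\d{5}$', r'^\d{9}$'], [r'^[0-9]+$']]
        # accepts a target that matches at least one pattern from each
        # inner list.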
found1 = True
for patterns1 in patterns:
found2 = False
for patterns2 in patterns1:
if re_.search(patterns2, target) is not None:
found2 = True
break
if not found2:
found1 = False
break
return found1
@classmethod
def gds_parse_time(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
if len(input_data.split('.')) > 1:
dt = datetime_.datetime.strptime(input_data, '%H:%M:%S.%f')
else:
dt = datetime_.datetime.strptime(input_data, '%H:%M:%S')
dt = dt.replace(tzinfo=tz)
return dt.time()
def gds_str_lower(self, instring):
return instring.lower()
def get_path_(self, node):
path_list = []
self.get_path_list_(node, path_list)
path_list.reverse()
path = '/'.join(path_list)
return path
Tag_strip_pattern_ = re_.compile(r'\{.*\}')
def get_path_list_(self, node, path_list):
if node is None:
return
tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
if tag:
path_list.append(tag)
self.get_path_list_(node.getparent(), path_list)
def get_class_obj_(self, node, default_class=None):
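        # Resolve an xsi:type attribute on the node to a class defined in
        # this module; fall back to default_class when there is no
        # override.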
class_obj1 = default_class
if 'xsi' in node.nsmap:
classname = node.get('{%s}type' % node.nsmap['xsi'])
if classname is not None:
names = classname.split(':')
if len(names) == 2:
classname = names[1]
class_obj2 = globals().get(classname)
if class_obj2 is not None:
class_obj1 = class_obj2
return class_obj1
def gds_build_any(self, node, type_name=None):
return None
@classmethod
def gds_reverse_node_mapping(cls, mapping):
        # Use items() so this works on both Python 2 and Python 3.
        return dict(((v, k) for k, v in mapping.items()))
@staticmethod
def gds_encode(instring):
if sys.version_info.major == 2:
return instring.encode(ExternalEncoding)
else:
return instring
@staticmethod
def convert_unicode(instring):
if isinstance(instring, str):
result = quote_xml(instring)
elif sys.version_info.major == 2 and isinstance(instring, unicode):
result = quote_xml(instring).encode('utf8')
else:
result = GeneratedsSuper.gds_encode(str(instring))
return result
def __eq__(self, other):
if type(self) != type(other):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
def getSubclassFromModule_(module, class_):
'''Get the subclass of a class from a specific module.'''
name = class_.__name__ + 'Sub'
if hasattr(module, name):
return getattr(module, name)
else:
return None
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
ExternalEncoding = 'utf-8'
Tag_pattern_ = re_.compile(r'({.*})?(.*)')
String_cleanup_pat_ = re_.compile(r"[\n\r\s]+")
Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)')
CDATA_pattern_ = re_.compile(r"<!\[CDATA\[.*?\]\]>", re_.DOTALL)
# Change this to redirect the generated superclass module to use a
# specific subclass module.
CurrentSubclassModule_ = None
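# Hypothetical example: point this at an imported module object whose
# classes are named like eVSCertifyRequestSub and the factory() methods
# below will instantiate those subclasses instead, e.g.:
#     import my_subclasses_module
#     CurrentSubclassModule_ = my_subclasses_module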
#
# Support/utility functions.
#
def showIndent(outfile, level, pretty_print=True):
if pretty_print:
for idx in range(level):
outfile.write(' ')
def quote_xml(inStr):
"Escape markup chars, but do not modify CDATA sections."
if not inStr:
return ''
s1 = (isinstance(inStr, BaseStrType_) and inStr or '%s' % inStr)
s2 = ''
pos = 0
matchobjects = CDATA_pattern_.finditer(s1)
for mo in matchobjects:
s3 = s1[pos:mo.start()]
s2 += quote_xml_aux(s3)
s2 += s1[mo.start():mo.end()]
pos = mo.end()
s3 = s1[pos:]
s2 += quote_xml_aux(s3)
return s2
def quote_xml_aux(inStr):
    s1 = inStr.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
return s1
def quote_attrib(inStr):
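    # Escape markup characters and wrap the value in quotes: double quotes
    # by default, single quotes if the value already contains a double
    # quote, and &quot;-escaped double quotes if it contains both kinds.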
s1 = (isinstance(inStr, BaseStrType_) and inStr or '%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
if '"' in s1:
if "'" in s1:
s1 = '"%s"' % s1.replace('"', """)
else:
s1 = "'%s'" % s1
else:
s1 = '"%s"' % s1
return s1
def quote_python(inStr):
s1 = inStr
if s1.find("'") == -1:
if s1.find('\n') == -1:
return "'%s'" % s1
else:
return "'''%s'''" % s1
else:
if s1.find('"') != -1:
s1 = s1.replace('"', '\\"')
if s1.find('\n') == -1:
return '"%s"' % s1
else:
return '"""%s"""' % s1
def get_all_text_(node):
if node.text is not None:
text = node.text
else:
text = ''
for child in node:
if child.tail is not None:
text += child.tail
return text
def find_attr_value_(attr_name, node):
attrs = node.attrib
attr_parts = attr_name.split(':')
value = None
if len(attr_parts) == 1:
value = attrs.get(attr_name)
elif len(attr_parts) == 2:
prefix, name = attr_parts
namespace = node.nsmap.get(prefix)
if namespace is not None:
value = attrs.get('{%s}%s' % (namespace, name, ))
return value
class GDSParseError(Exception):
pass
def raise_parse_error(node, msg):
msg = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, )
raise GDSParseError(msg)
class MixedContainer:
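    # Holds one piece of content from a mixed-content element (plain text,
    # a simple typed value, or a complex child object) along with its
    # category and content type so it can be re-exported in document order.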
# Constants for category:
CategoryNone = 0
CategoryText = 1
CategorySimple = 2
CategoryComplex = 3
# Constants for content_type:
TypeNone = 0
TypeText = 1
TypeString = 2
TypeInteger = 3
TypeFloat = 4
TypeDecimal = 5
TypeDouble = 6
TypeBoolean = 7
TypeBase64 = 8
def __init__(self, category, content_type, name, value):
self.category = category
self.content_type = content_type
self.name = name
self.value = value
def getCategory(self):
return self.category
def getContenttype(self, content_type):
return self.content_type
def getValue(self):
return self.value
def getName(self):
return self.name
def export(self, outfile, level, name, namespace,
pretty_print=True):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
outfile.write(self.value)
elif self.category == MixedContainer.CategorySimple:
self.exportSimple(outfile, level, name)
else: # category == MixedContainer.CategoryComplex
self.value.export(
outfile, level, namespace, name,
pretty_print=pretty_print)
def exportSimple(self, outfile, level, name):
if self.content_type == MixedContainer.TypeString:
outfile.write('<%s>%s</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeInteger or \
self.content_type == MixedContainer.TypeBoolean:
outfile.write('<%s>%d</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeFloat or \
self.content_type == MixedContainer.TypeDecimal:
outfile.write('<%s>%f</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeDouble:
outfile.write('<%s>%g</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeBase64:
outfile.write('<%s>%s</%s>' % (
self.name,
base64.b64encode(self.value),
self.name))
def to_etree(self, element):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
if len(element) > 0:
if element[-1].tail is None:
element[-1].tail = self.value
else:
element[-1].tail += self.value
else:
if element.text is None:
element.text = self.value
else:
element.text += self.value
elif self.category == MixedContainer.CategorySimple:
subelement = etree_.SubElement(
element, '%s' % self.name)
subelement.text = self.to_etree_simple()
else: # category == MixedContainer.CategoryComplex
self.value.to_etree(element)
def to_etree_simple(self):
if self.content_type == MixedContainer.TypeString:
text = self.value
elif (self.content_type == MixedContainer.TypeInteger or
self.content_type == MixedContainer.TypeBoolean):
text = '%d' % self.value
elif (self.content_type == MixedContainer.TypeFloat or
self.content_type == MixedContainer.TypeDecimal):
text = '%f' % self.value
elif self.content_type == MixedContainer.TypeDouble:
text = '%g' % self.value
elif self.content_type == MixedContainer.TypeBase64:
text = '%s' % base64.b64encode(self.value)
return text
def exportLiteral(self, outfile, level, name):
if self.category == MixedContainer.CategoryText:
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
self.category, self.content_type,
self.name, self.value))
elif self.category == MixedContainer.CategorySimple:
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
self.category, self.content_type,
self.name, self.value))
else: # category == MixedContainer.CategoryComplex
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s",\n' % (
self.category, self.content_type, self.name,))
self.value.exportLiteral(outfile, level + 1)
showIndent(outfile, level)
outfile.write(')\n')
class MemberSpec_(object):
def __init__(self, name='', data_type='', container=0,
optional=0, child_attrs=None, choice=None):
self.name = name
self.data_type = data_type
self.container = container
self.child_attrs = child_attrs
self.choice = choice
self.optional = optional
def set_name(self, name): self.name = name
def get_name(self): return self.name
def set_data_type(self, data_type): self.data_type = data_type
def get_data_type_chain(self): return self.data_type
def get_data_type(self):
if isinstance(self.data_type, list):
if len(self.data_type) > 0:
return self.data_type[-1]
else:
return 'xs:string'
else:
return self.data_type
def set_container(self, container): self.container = container
def get_container(self): return self.container
def set_child_attrs(self, child_attrs): self.child_attrs = child_attrs
def get_child_attrs(self): return self.child_attrs
def set_choice(self, choice): self.choice = choice
def get_choice(self): return self.choice
def set_optional(self, optional): self.optional = optional
def get_optional(self): return self.optional
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
#
# Data representation classes.
#
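# A minimal usage sketch (hypothetical field values; USERID is a
# placeholder, not a real credential):
#
#     import sys
#     request = eVSCertifyRequest(
#         USERID='XXXXXXXX',
#         Option=1,
#         FromName='John Doe',
#         ToName='Jane Doe',
#         WeightInOunces=10,
#     )
#     request.export(sys.stdout, 0, name_='eVSCertifyRequest')
#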
class eVSCertifyRequest(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, USERID=None, Option=None, Revision=None, ImageParameters=None, FromName=None, FromFirm=None, FromAddress1=None, FromAddress2=None, FromCity=None, FromState=None, FromZip5=None, FromZip4=None, FromPhone=None, POZipCode=None, AllowNonCleansedOriginAddr=None, ToName=None, ToFirm=None, ToAddress1=None, ToAddress2=None, ToCity=None, ToState=None, ToZip5=None, ToZip4=None, ToPhone=None, POBox=None, AllowNonCleansedDestAddr=None, WeightInOunces=None, ServiceType=None, Container=None, Width=None, Length=None, Height=None, Machinable=None, ProcessingCategory=None, PriceOptions=None, InsuredAmount=None, AddressServiceRequested=None, ExpressMailOptions=None, ShipDate=None, CustomerRefNo=None, ExtraServices=None, HoldForPickup=None, OpenDistribute=None, PermitNumber=None, PermitZIPCode=None, PermitHolderName=None, CRID=None, SenderName=None, SenderEMail=None, RecipientName=None, RecipientEMail=None, ReceiptOption=None, ImageType=None, HoldForManifest=None, NineDigitRoutingZip=None, ShipInfo=None, CarrierRelease=None, ReturnCommitments=None, PrintCustomerRefNo=None, Content=None, ShippingContents=None, CustomsContentType=None, ContentComments=None, RestrictionType=None, RestrictionComments=None, AESITN=None, ImportersReference=None, ImportersContact=None, ExportersReference=None, ExportersContact=None, InvoiceNumber=None, LicenseNumber=None, CertificateNumber=None):
self.original_tagname_ = None
self.USERID = _cast(None, USERID)
self.Option = Option
self.Revision = Revision
self.ImageParameters = ImageParameters
self.FromName = FromName
self.FromFirm = FromFirm
self.FromAddress1 = FromAddress1
self.FromAddress2 = FromAddress2
self.FromCity = FromCity
self.FromState = FromState
self.FromZip5 = FromZip5
self.FromZip4 = FromZip4
self.FromPhone = FromPhone
self.POZipCode = POZipCode
self.AllowNonCleansedOriginAddr = AllowNonCleansedOriginAddr
self.ToName = ToName
self.ToFirm = ToFirm
self.ToAddress1 = ToAddress1
self.ToAddress2 = ToAddress2
self.ToCity = ToCity
self.ToState = ToState
self.ToZip5 = ToZip5
self.ToZip4 = ToZip4
self.ToPhone = ToPhone
self.POBox = POBox
self.AllowNonCleansedDestAddr = AllowNonCleansedDestAddr
self.WeightInOunces = WeightInOunces
self.ServiceType = ServiceType
self.Container = Container
self.Width = Width
self.Length = Length
self.Height = Height
self.Machinable = Machinable
self.ProcessingCategory = ProcessingCategory
self.PriceOptions = PriceOptions
self.InsuredAmount = InsuredAmount
self.AddressServiceRequested = AddressServiceRequested
self.ExpressMailOptions = ExpressMailOptions
self.ShipDate = ShipDate
self.CustomerRefNo = CustomerRefNo
self.ExtraServices = ExtraServices
self.HoldForPickup = HoldForPickup
self.OpenDistribute = OpenDistribute
self.PermitNumber = PermitNumber
self.PermitZIPCode = PermitZIPCode
self.PermitHolderName = PermitHolderName
self.CRID = CRID
self.SenderName = SenderName
self.SenderEMail = SenderEMail
self.RecipientName = RecipientName
self.RecipientEMail = RecipientEMail
self.ReceiptOption = ReceiptOption
self.ImageType = ImageType
self.HoldForManifest = HoldForManifest
self.NineDigitRoutingZip = NineDigitRoutingZip
self.ShipInfo = ShipInfo
self.CarrierRelease = CarrierRelease
self.ReturnCommitments = ReturnCommitments
self.PrintCustomerRefNo = PrintCustomerRefNo
self.Content = Content
self.ShippingContents = ShippingContents
self.CustomsContentType = CustomsContentType
self.ContentComments = ContentComments
self.RestrictionType = RestrictionType
self.RestrictionComments = RestrictionComments
self.AESITN = AESITN
self.ImportersReference = ImportersReference
self.ImportersContact = ImportersContact
self.ExportersReference = ExportersReference
self.ExportersContact = ExportersContact
self.InvoiceNumber = InvoiceNumber
self.LicenseNumber = LicenseNumber
self.CertificateNumber = CertificateNumber
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, eVSCertifyRequest)
if subclass is not None:
return subclass(*args_, **kwargs_)
if eVSCertifyRequest.subclass:
return eVSCertifyRequest.subclass(*args_, **kwargs_)
else:
return eVSCertifyRequest(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Option(self): return self.Option
def set_Option(self, Option): self.Option = Option
def get_Revision(self): return self.Revision
def set_Revision(self, Revision): self.Revision = Revision
def get_ImageParameters(self): return self.ImageParameters
def set_ImageParameters(self, ImageParameters): self.ImageParameters = ImageParameters
def get_FromName(self): return self.FromName
def set_FromName(self, FromName): self.FromName = FromName
def get_FromFirm(self): return self.FromFirm
def set_FromFirm(self, FromFirm): self.FromFirm = FromFirm
def get_FromAddress1(self): return self.FromAddress1
def set_FromAddress1(self, FromAddress1): self.FromAddress1 = FromAddress1
def get_FromAddress2(self): return self.FromAddress2
def set_FromAddress2(self, FromAddress2): self.FromAddress2 = FromAddress2
def get_FromCity(self): return self.FromCity
def set_FromCity(self, FromCity): self.FromCity = FromCity
def get_FromState(self): return self.FromState
def set_FromState(self, FromState): self.FromState = FromState
def get_FromZip5(self): return self.FromZip5
def set_FromZip5(self, FromZip5): self.FromZip5 = FromZip5
def get_FromZip4(self): return self.FromZip4
def set_FromZip4(self, FromZip4): self.FromZip4 = FromZip4
def get_FromPhone(self): return self.FromPhone
def set_FromPhone(self, FromPhone): self.FromPhone = FromPhone
def get_POZipCode(self): return self.POZipCode
def set_POZipCode(self, POZipCode): self.POZipCode = POZipCode
def get_AllowNonCleansedOriginAddr(self): return self.AllowNonCleansedOriginAddr
def set_AllowNonCleansedOriginAddr(self, AllowNonCleansedOriginAddr): self.AllowNonCleansedOriginAddr = AllowNonCleansedOriginAddr
def get_ToName(self): return self.ToName
def set_ToName(self, ToName): self.ToName = ToName
def get_ToFirm(self): return self.ToFirm
def set_ToFirm(self, ToFirm): self.ToFirm = ToFirm
def get_ToAddress1(self): return self.ToAddress1
def set_ToAddress1(self, ToAddress1): self.ToAddress1 = ToAddress1
def get_ToAddress2(self): return self.ToAddress2
def set_ToAddress2(self, ToAddress2): self.ToAddress2 = ToAddress2
def get_ToCity(self): return self.ToCity
def set_ToCity(self, ToCity): self.ToCity = ToCity
def get_ToState(self): return self.ToState
def set_ToState(self, ToState): self.ToState = ToState
def get_ToZip5(self): return self.ToZip5
def set_ToZip5(self, ToZip5): self.ToZip5 = ToZip5
def get_ToZip4(self): return self.ToZip4
def set_ToZip4(self, ToZip4): self.ToZip4 = ToZip4
def get_ToPhone(self): return self.ToPhone
def set_ToPhone(self, ToPhone): self.ToPhone = ToPhone
def get_POBox(self): return self.POBox
def set_POBox(self, POBox): self.POBox = POBox
def get_AllowNonCleansedDestAddr(self): return self.AllowNonCleansedDestAddr
def set_AllowNonCleansedDestAddr(self, AllowNonCleansedDestAddr): self.AllowNonCleansedDestAddr = AllowNonCleansedDestAddr
def get_WeightInOunces(self): return self.WeightInOunces
def set_WeightInOunces(self, WeightInOunces): self.WeightInOunces = WeightInOunces
def get_ServiceType(self): return self.ServiceType
def set_ServiceType(self, ServiceType): self.ServiceType = ServiceType
def get_Container(self): return self.Container
def set_Container(self, Container): self.Container = Container
def get_Width(self): return self.Width
def set_Width(self, Width): self.Width = Width
def get_Length(self): return self.Length
def set_Length(self, Length): self.Length = Length
def get_Height(self): return self.Height
def set_Height(self, Height): self.Height = Height
def get_Machinable(self): return self.Machinable
def set_Machinable(self, Machinable): self.Machinable = Machinable
def get_ProcessingCategory(self): return self.ProcessingCategory
def set_ProcessingCategory(self, ProcessingCategory): self.ProcessingCategory = ProcessingCategory
def get_PriceOptions(self): return self.PriceOptions
def set_PriceOptions(self, PriceOptions): self.PriceOptions = PriceOptions
def get_InsuredAmount(self): return self.InsuredAmount
def set_InsuredAmount(self, InsuredAmount): self.InsuredAmount = InsuredAmount
def get_AddressServiceRequested(self): return self.AddressServiceRequested
def set_AddressServiceRequested(self, AddressServiceRequested): self.AddressServiceRequested = AddressServiceRequested
def get_ExpressMailOptions(self): return self.ExpressMailOptions
def set_ExpressMailOptions(self, ExpressMailOptions): self.ExpressMailOptions = ExpressMailOptions
def get_ShipDate(self): return self.ShipDate
def set_ShipDate(self, ShipDate): self.ShipDate = ShipDate
def get_CustomerRefNo(self): return self.CustomerRefNo
def set_CustomerRefNo(self, CustomerRefNo): self.CustomerRefNo = CustomerRefNo
def get_ExtraServices(self): return self.ExtraServices
def set_ExtraServices(self, ExtraServices): self.ExtraServices = ExtraServices
def get_HoldForPickup(self): return self.HoldForPickup
def set_HoldForPickup(self, HoldForPickup): self.HoldForPickup = HoldForPickup
def get_OpenDistribute(self): return self.OpenDistribute
def set_OpenDistribute(self, OpenDistribute): self.OpenDistribute = OpenDistribute
def get_PermitNumber(self): return self.PermitNumber
def set_PermitNumber(self, PermitNumber): self.PermitNumber = PermitNumber
def get_PermitZIPCode(self): return self.PermitZIPCode
def set_PermitZIPCode(self, PermitZIPCode): self.PermitZIPCode = PermitZIPCode
def get_PermitHolderName(self): return self.PermitHolderName
def set_PermitHolderName(self, PermitHolderName): self.PermitHolderName = PermitHolderName
def get_CRID(self): return self.CRID
def set_CRID(self, CRID): self.CRID = CRID
def get_SenderName(self): return self.SenderName
def set_SenderName(self, SenderName): self.SenderName = SenderName
def get_SenderEMail(self): return self.SenderEMail
def set_SenderEMail(self, SenderEMail): self.SenderEMail = SenderEMail
def get_RecipientName(self): return self.RecipientName
def set_RecipientName(self, RecipientName): self.RecipientName = RecipientName
def get_RecipientEMail(self): return self.RecipientEMail
def set_RecipientEMail(self, RecipientEMail): self.RecipientEMail = RecipientEMail
def get_ReceiptOption(self): return self.ReceiptOption
def set_ReceiptOption(self, ReceiptOption): self.ReceiptOption = ReceiptOption
def get_ImageType(self): return self.ImageType
def set_ImageType(self, ImageType): self.ImageType = ImageType
def get_HoldForManifest(self): return self.HoldForManifest
def set_HoldForManifest(self, HoldForManifest): self.HoldForManifest = HoldForManifest
def get_NineDigitRoutingZip(self): return self.NineDigitRoutingZip
def set_NineDigitRoutingZip(self, NineDigitRoutingZip): self.NineDigitRoutingZip = NineDigitRoutingZip
def get_ShipInfo(self): return self.ShipInfo
def set_ShipInfo(self, ShipInfo): self.ShipInfo = ShipInfo
def get_CarrierRelease(self): return self.CarrierRelease
def set_CarrierRelease(self, CarrierRelease): self.CarrierRelease = CarrierRelease
def get_ReturnCommitments(self): return self.ReturnCommitments
def set_ReturnCommitments(self, ReturnCommitments): self.ReturnCommitments = ReturnCommitments
def get_PrintCustomerRefNo(self): return self.PrintCustomerRefNo
def set_PrintCustomerRefNo(self, PrintCustomerRefNo): self.PrintCustomerRefNo = PrintCustomerRefNo
def get_Content(self): return self.Content
def set_Content(self, Content): self.Content = Content
def get_ShippingContents(self): return self.ShippingContents
def set_ShippingContents(self, ShippingContents): self.ShippingContents = ShippingContents
def get_CustomsContentType(self): return self.CustomsContentType
def set_CustomsContentType(self, CustomsContentType): self.CustomsContentType = CustomsContentType
def get_ContentComments(self): return self.ContentComments
def set_ContentComments(self, ContentComments): self.ContentComments = ContentComments
def get_RestrictionType(self): return self.RestrictionType
def set_RestrictionType(self, RestrictionType): self.RestrictionType = RestrictionType
def get_RestrictionComments(self): return self.RestrictionComments
def set_RestrictionComments(self, RestrictionComments): self.RestrictionComments = RestrictionComments
def get_AESITN(self): return self.AESITN
def set_AESITN(self, AESITN): self.AESITN = AESITN
def get_ImportersReference(self): return self.ImportersReference
def set_ImportersReference(self, ImportersReference): self.ImportersReference = ImportersReference
def get_ImportersContact(self): return self.ImportersContact
def set_ImportersContact(self, ImportersContact): self.ImportersContact = ImportersContact
def get_ExportersReference(self): return self.ExportersReference
def set_ExportersReference(self, ExportersReference): self.ExportersReference = ExportersReference
def get_ExportersContact(self): return self.ExportersContact
def set_ExportersContact(self, ExportersContact): self.ExportersContact = ExportersContact
def get_InvoiceNumber(self): return self.InvoiceNumber
def set_InvoiceNumber(self, InvoiceNumber): self.InvoiceNumber = InvoiceNumber
def get_LicenseNumber(self): return self.LicenseNumber
def set_LicenseNumber(self, LicenseNumber): self.LicenseNumber = LicenseNumber
def get_CertificateNumber(self): return self.CertificateNumber
def set_CertificateNumber(self, CertificateNumber): self.CertificateNumber = CertificateNumber
def get_USERID(self): return self.USERID
def set_USERID(self, USERID): self.USERID = USERID
def hasContent_(self):
if (
self.Option is not None or
self.Revision is not None or
self.ImageParameters is not None or
self.FromName is not None or
self.FromFirm is not None or
self.FromAddress1 is not None or
self.FromAddress2 is not None or
self.FromCity is not None or
self.FromState is not None or
self.FromZip5 is not None or
self.FromZip4 is not None or
self.FromPhone is not None or
self.POZipCode is not None or
self.AllowNonCleansedOriginAddr is not None or
self.ToName is not None or
self.ToFirm is not None or
self.ToAddress1 is not None or
self.ToAddress2 is not None or
self.ToCity is not None or
self.ToState is not None or
self.ToZip5 is not None or
self.ToZip4 is not None or
self.ToPhone is not None or
self.POBox is not None or
self.AllowNonCleansedDestAddr is not None or
self.WeightInOunces is not None or
self.ServiceType is not None or
self.Container is not None or
self.Width is not None or
self.Length is not None or
self.Height is not None or
self.Machinable is not None or
self.ProcessingCategory is not None or
self.PriceOptions is not None or
self.InsuredAmount is not None or
self.AddressServiceRequested is not None or
self.ExpressMailOptions is not None or
self.ShipDate is not None or
self.CustomerRefNo is not None or
self.ExtraServices is not None or
self.HoldForPickup is not None or
self.OpenDistribute is not None or
self.PermitNumber is not None or
self.PermitZIPCode is not None or
self.PermitHolderName is not None or
self.CRID is not None or
self.SenderName is not None or
self.SenderEMail is not None or
self.RecipientName is not None or
self.RecipientEMail is not None or
self.ReceiptOption is not None or
self.ImageType is not None or
self.HoldForManifest is not None or
self.NineDigitRoutingZip is not None or
self.ShipInfo is not None or
self.CarrierRelease is not None or
self.ReturnCommitments is not None or
self.PrintCustomerRefNo is not None or
self.Content is not None or
self.ShippingContents is not None or
self.CustomsContentType is not None or
self.ContentComments is not None or
self.RestrictionType is not None or
self.RestrictionComments is not None or
self.AESITN is not None or
self.ImportersReference is not None or
self.ImportersContact is not None or
self.ExportersReference is not None or
self.ExportersContact is not None or
self.InvoiceNumber is not None or
self.LicenseNumber is not None or
self.CertificateNumber is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='eVSCertifyRequest', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('eVSCertifyRequest')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='eVSCertifyRequest')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='eVSCertifyRequest', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='eVSCertifyRequest'):
if self.USERID is not None and 'USERID' not in already_processed:
already_processed.add('USERID')
outfile.write(' USERID=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.USERID), input_name='USERID')), ))
def exportChildren(self, outfile, level, namespace_='', name_='eVSCertifyRequest', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.Option is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<Option>%s</Option>%s' % (self.gds_format_integer(self.Option, input_name='Option'), eol_))
if self.Revision is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<Revision>%s</Revision>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.Revision), input_name='Revision')), eol_))
if self.ImageParameters is not None:
self.ImageParameters.export(outfile, level, namespace_, name_='ImageParameters', pretty_print=pretty_print)
if self.FromName is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<FromName>%s</FromName>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.FromName), input_name='FromName')), eol_))
if self.FromFirm is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<FromFirm>%s</FromFirm>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.FromFirm), input_name='FromFirm')), eol_))
if self.FromAddress1 is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<FromAddress1>%s</FromAddress1>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.FromAddress1), input_name='FromAddress1')), eol_))
if self.FromAddress2 is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<FromAddress2>%s</FromAddress2>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.FromAddress2), input_name='FromAddress2')), eol_))
if self.FromCity is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<FromCity>%s</FromCity>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.FromCity), input_name='FromCity')), eol_))
if self.FromState is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<FromState>%s</FromState>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.FromState), input_name='FromState')), eol_))
if self.FromZip5 is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<FromZip5>%s</FromZip5>%s' % (self.gds_format_integer(self.FromZip5, input_name='FromZip5'), eol_))
if self.FromZip4 is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<FromZip4>%s</FromZip4>%s' % (self.gds_format_integer(self.FromZip4, input_name='FromZip4'), eol_))
if self.FromPhone is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<FromPhone>%s</FromPhone>%s' % (self.gds_format_integer(self.FromPhone, input_name='FromPhone'), eol_))
if self.POZipCode is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<POZipCode>%s</POZipCode>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.POZipCode), input_name='POZipCode')), eol_))
if self.AllowNonCleansedOriginAddr is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<AllowNonCleansedOriginAddr>%s</AllowNonCleansedOriginAddr>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.AllowNonCleansedOriginAddr), input_name='AllowNonCleansedOriginAddr')), eol_))
if self.ToName is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<ToName>%s</ToName>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.ToName), input_name='ToName')), eol_))
if self.ToFirm is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<ToFirm>%s</ToFirm>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.ToFirm), input_name='ToFirm')), eol_))
if self.ToAddress1 is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<ToAddress1>%s</ToAddress1>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.ToAddress1), input_name='ToAddress1')), eol_))
if self.ToAddress2 is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<ToAddress2>%s</ToAddress2>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.ToAddress2), input_name='ToAddress2')), eol_))
if self.ToCity is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<ToCity>%s</ToCity>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.ToCity), input_name='ToCity')), eol_))
if self.ToState is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<ToState>%s</ToState>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.ToState), input_name='ToState')), eol_))
if self.ToZip5 is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<ToZip5>%s</ToZip5>%s' % (self.gds_format_integer(self.ToZip5, input_name='ToZip5'), eol_))
if self.ToZip4 is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<ToZip4>%s</ToZip4>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.ToZip4), input_name='ToZip4')), eol_))
if self.ToPhone is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<ToPhone>%s</ToPhone>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.ToPhone), input_name='ToPhone')), eol_))
if self.POBox is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<POBox>%s</POBox>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.POBox), input_name='POBox')), eol_))
if self.AllowNonCleansedDestAddr is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<AllowNonCleansedDestAddr>%s</AllowNonCleansedDestAddr>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.AllowNonCleansedDestAddr), input_name='AllowNonCleansedDestAddr')), eol_))
if self.WeightInOunces is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<WeightInOunces>%s</WeightInOunces>%s' % (self.gds_format_integer(self.WeightInOunces, input_name='WeightInOunces'), eol_))
if self.ServiceType is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<ServiceType>%s</ServiceType>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.ServiceType), input_name='ServiceType')), eol_))
if self.Container is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<Container>%s</Container>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.Container), input_name='Container')), eol_))
if self.Width is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<Width>%s</Width>%s' % (self.gds_format_integer(self.Width, input_name='Width'), eol_))
if self.Length is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<Length>%s</Length>%s' % (self.gds_format_integer(self.Length, input_name='Length'), eol_))
if self.Height is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<Height>%s</Height>%s' % (self.gds_format_integer(self.Height, input_name='Height'), eol_))
if self.Machinable is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<Machinable>%s</Machinable>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.Machinable), input_name='Machinable')), eol_))
if self.ProcessingCategory is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<ProcessingCategory>%s</ProcessingCategory>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.ProcessingCategory), input_name='ProcessingCategory')), eol_))
if self.PriceOptions is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<PriceOptions>%s</PriceOptions>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.PriceOptions), input_name='PriceOptions')), eol_))
if self.InsuredAmount is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<InsuredAmount>%s</InsuredAmount>%s' % (self.gds_format_integer(self.InsuredAmount, input_name='InsuredAmount'), eol_))
if self.AddressServiceRequested is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<AddressServiceRequested>%s</AddressServiceRequested>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.AddressServiceRequested), input_name='AddressServiceRequested')), eol_))
if self.ExpressMailOptions is not None:
self.ExpressMailOptions.export(outfile, level, namespace_, name_='ExpressMailOptions', pretty_print=pretty_print)
if self.ShipDate is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<ShipDate>%s</ShipDate>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.ShipDate), input_name='ShipDate')), eol_))
if self.CustomerRefNo is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<CustomerRefNo>%s</CustomerRefNo>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.CustomerRefNo), input_name='CustomerRefNo')), eol_))
if self.ExtraServices is not None:
self.ExtraServices.export(outfile, level, namespace_, name_='ExtraServices', pretty_print=pretty_print)
if self.HoldForPickup is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<HoldForPickup>%s</HoldForPickup>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.HoldForPickup), input_name='HoldForPickup')), eol_))
if self.OpenDistribute is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<OpenDistribute>%s</OpenDistribute>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.OpenDistribute), input_name='OpenDistribute')), eol_))
if self.PermitNumber is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<PermitNumber>%s</PermitNumber>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.PermitNumber), input_name='PermitNumber')), eol_))
if self.PermitZIPCode is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<PermitZIPCode>%s</PermitZIPCode>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.PermitZIPCode), input_name='PermitZIPCode')), eol_))
if self.PermitHolderName is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<PermitHolderName>%s</PermitHolderName>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.PermitHolderName), input_name='PermitHolderName')), eol_))
if self.CRID is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<CRID>%s</CRID>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.CRID), input_name='CRID')), eol_))
if self.SenderName is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<SenderName>%s</SenderName>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.SenderName), input_name='SenderName')), eol_))
if self.SenderEMail is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<SenderEMail>%s</SenderEMail>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.SenderEMail), input_name='SenderEMail')), eol_))
if self.RecipientName is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<RecipientName>%s</RecipientName>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.RecipientName), input_name='RecipientName')), eol_))
if self.RecipientEMail is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<RecipientEMail>%s</RecipientEMail>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.RecipientEMail), input_name='RecipientEMail')), eol_))
if self.ReceiptOption is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<ReceiptOption>%s</ReceiptOption>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.ReceiptOption), input_name='ReceiptOption')), eol_))
if self.ImageType is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<ImageType>%s</ImageType>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.ImageType), input_name='ImageType')), eol_))
if self.HoldForManifest is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<HoldForManifest>%s</HoldForManifest>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.HoldForManifest), input_name='HoldForManifest')), eol_))
if self.NineDigitRoutingZip is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<NineDigitRoutingZip>%s</NineDigitRoutingZip>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.NineDigitRoutingZip), input_name='NineDigitRoutingZip')), eol_))
if self.ShipInfo is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<ShipInfo>%s</ShipInfo>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.ShipInfo), input_name='ShipInfo')), eol_))
if self.CarrierRelease is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<CarrierRelease>%s</CarrierRelease>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.CarrierRelease), input_name='CarrierRelease')), eol_))
if self.ReturnCommitments is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<ReturnCommitments>%s</ReturnCommitments>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.ReturnCommitments), input_name='ReturnCommitments')), eol_))
if self.PrintCustomerRefNo is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<PrintCustomerRefNo>%s</PrintCustomerRefNo>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.PrintCustomerRefNo), input_name='PrintCustomerRefNo')), eol_))
if self.Content is not None:
self.Content.export(outfile, level, namespace_, name_='Content', pretty_print=pretty_print)
if self.ShippingContents is not None:
self.ShippingContents.export(outfile, level, namespace_, name_='ShippingContents', pretty_print=pretty_print)
if self.CustomsContentType is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<CustomsContentType>%s</CustomsContentType>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.CustomsContentType), input_name='CustomsContentType')), eol_))
if self.ContentComments is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<ContentComments>%s</ContentComments>%s' % (self.gds_format_integer(self.ContentComments, input_name='ContentComments'), eol_))
if self.RestrictionType is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<RestrictionType>%s</RestrictionType>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.RestrictionType), input_name='RestrictionType')), eol_))
if self.RestrictionComments is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<RestrictionComments>%s</RestrictionComments>%s' % (self.gds_format_integer(self.RestrictionComments, input_name='RestrictionComments'), eol_))
if self.AESITN is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<AESITN>%s</AESITN>%s' % (self.gds_format_integer(self.AESITN, input_name='AESITN'), eol_))
if self.ImportersReference is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<ImportersReference>%s</ImportersReference>%s' % (self.gds_format_integer(self.ImportersReference, input_name='ImportersReference'), eol_))
if self.ImportersContact is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<ImportersContact>%s</ImportersContact>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.ImportersContact), input_name='ImportersContact')), eol_))
if self.ExportersReference is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<ExportersReference>%s</ExportersReference>%s' % (self.gds_format_integer(self.ExportersReference, input_name='ExportersReference'), eol_))
if self.ExportersContact is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<ExportersContact>%s</ExportersContact>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.ExportersContact), input_name='ExportersContact')), eol_))
if self.InvoiceNumber is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<InvoiceNumber>%s</InvoiceNumber>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.InvoiceNumber), input_name='InvoiceNumber')), eol_))
if self.LicenseNumber is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<LicenseNumber>%s</LicenseNumber>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.LicenseNumber), input_name='LicenseNumber')), eol_))
if self.CertificateNumber is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<CertificateNumber>%s</CertificateNumber>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.CertificateNumber), input_name='CertificateNumber')), eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('USERID', node)
if value is not None and 'USERID' not in already_processed:
already_processed.add('USERID')
self.USERID = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Option':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'Option')
self.Option = ival_
elif nodeName_ == 'Revision':
Revision_ = child_.text
Revision_ = self.gds_validate_string(Revision_, node, 'Revision')
self.Revision = Revision_
elif nodeName_ == 'ImageParameters':
obj_ = ImageParametersType.factory()
obj_.build(child_)
self.ImageParameters = obj_
obj_.original_tagname_ = 'ImageParameters'
elif nodeName_ == 'FromName':
FromName_ = child_.text
FromName_ = self.gds_validate_string(FromName_, node, 'FromName')
self.FromName = FromName_
elif nodeName_ == 'FromFirm':
FromFirm_ = child_.text
FromFirm_ = self.gds_validate_string(FromFirm_, node, 'FromFirm')
self.FromFirm = FromFirm_
elif nodeName_ == 'FromAddress1':
FromAddress1_ = child_.text
FromAddress1_ = self.gds_validate_string(FromAddress1_, node, 'FromAddress1')
self.FromAddress1 = FromAddress1_
elif nodeName_ == 'FromAddress2':
FromAddress2_ = child_.text
FromAddress2_ = self.gds_validate_string(FromAddress2_, node, 'FromAddress2')
self.FromAddress2 = FromAddress2_
elif nodeName_ == 'FromCity':
FromCity_ = child_.text
FromCity_ = self.gds_validate_string(FromCity_, node, 'FromCity')
self.FromCity = FromCity_
elif nodeName_ == 'FromState':
FromState_ = child_.text
FromState_ = self.gds_validate_string(FromState_, node, 'FromState')
self.FromState = FromState_
elif nodeName_ == 'FromZip5':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'FromZip5')
self.FromZip5 = ival_
elif nodeName_ == 'FromZip4':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'FromZip4')
self.FromZip4 = ival_
elif nodeName_ == 'FromPhone':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'FromPhone')
self.FromPhone = ival_
elif nodeName_ == 'POZipCode':
POZipCode_ = child_.text
POZipCode_ = self.gds_validate_string(POZipCode_, node, 'POZipCode')
self.POZipCode = POZipCode_
elif nodeName_ == 'AllowNonCleansedOriginAddr':
AllowNonCleansedOriginAddr_ = child_.text
AllowNonCleansedOriginAddr_ = self.gds_validate_string(AllowNonCleansedOriginAddr_, node, 'AllowNonCleansedOriginAddr')
self.AllowNonCleansedOriginAddr = AllowNonCleansedOriginAddr_
elif nodeName_ == 'ToName':
ToName_ = child_.text
ToName_ = self.gds_validate_string(ToName_, node, 'ToName')
self.ToName = ToName_
elif nodeName_ == 'ToFirm':
ToFirm_ = child_.text
ToFirm_ = self.gds_validate_string(ToFirm_, node, 'ToFirm')
self.ToFirm = ToFirm_
elif nodeName_ == 'ToAddress1':
ToAddress1_ = child_.text
ToAddress1_ = self.gds_validate_string(ToAddress1_, node, 'ToAddress1')
self.ToAddress1 = ToAddress1_
elif nodeName_ == 'ToAddress2':
ToAddress2_ = child_.text
ToAddress2_ = self.gds_validate_string(ToAddress2_, node, 'ToAddress2')
self.ToAddress2 = ToAddress2_
elif nodeName_ == 'ToCity':
ToCity_ = child_.text
ToCity_ = self.gds_validate_string(ToCity_, node, 'ToCity')
self.ToCity = ToCity_
elif nodeName_ == 'ToState':
ToState_ = child_.text
ToState_ = self.gds_validate_string(ToState_, node, 'ToState')
self.ToState = ToState_
elif nodeName_ == 'ToZip5':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'ToZip5')
self.ToZip5 = ival_
elif nodeName_ == 'ToZip4':
ToZip4_ = child_.text
ToZip4_ = self.gds_validate_string(ToZip4_, node, 'ToZip4')
self.ToZip4 = ToZip4_
elif nodeName_ == 'ToPhone':
ToPhone_ = child_.text
ToPhone_ = self.gds_validate_string(ToPhone_, node, 'ToPhone')
self.ToPhone = ToPhone_
elif nodeName_ == 'POBox':
POBox_ = child_.text
POBox_ = self.gds_validate_string(POBox_, node, 'POBox')
self.POBox = POBox_
elif nodeName_ == 'AllowNonCleansedDestAddr':
AllowNonCleansedDestAddr_ = child_.text
AllowNonCleansedDestAddr_ = self.gds_validate_string(AllowNonCleansedDestAddr_, node, 'AllowNonCleansedDestAddr')
self.AllowNonCleansedDestAddr = AllowNonCleansedDestAddr_
elif nodeName_ == 'WeightInOunces':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'WeightInOunces')
self.WeightInOunces = ival_
elif nodeName_ == 'ServiceType':
ServiceType_ = child_.text
ServiceType_ = self.gds_validate_string(ServiceType_, node, 'ServiceType')
self.ServiceType = ServiceType_
elif nodeName_ == 'Container':
Container_ = child_.text
Container_ = self.gds_validate_string(Container_, node, 'Container')
self.Container = Container_
elif nodeName_ == 'Width':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'Width')
self.Width = ival_
elif nodeName_ == 'Length':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'Length')
self.Length = ival_
elif nodeName_ == 'Height':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'Height')
self.Height = ival_
elif nodeName_ == 'Machinable':
Machinable_ = child_.text
Machinable_ = self.gds_validate_string(Machinable_, node, 'Machinable')
self.Machinable = Machinable_
elif nodeName_ == 'ProcessingCategory':
ProcessingCategory_ = child_.text
ProcessingCategory_ = self.gds_validate_string(ProcessingCategory_, node, 'ProcessingCategory')
self.ProcessingCategory = ProcessingCategory_
elif nodeName_ == 'PriceOptions':
PriceOptions_ = child_.text
PriceOptions_ = self.gds_validate_string(PriceOptions_, node, 'PriceOptions')
self.PriceOptions = PriceOptions_
elif nodeName_ == 'InsuredAmount':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'InsuredAmount')
self.InsuredAmount = ival_
elif nodeName_ == 'AddressServiceRequested':
AddressServiceRequested_ = child_.text
AddressServiceRequested_ = self.gds_validate_string(AddressServiceRequested_, node, 'AddressServiceRequested')
self.AddressServiceRequested = AddressServiceRequested_
elif nodeName_ == 'ExpressMailOptions':
obj_ = ExpressMailOptionsType.factory()
obj_.build(child_)
self.ExpressMailOptions = obj_
obj_.original_tagname_ = 'ExpressMailOptions'
elif nodeName_ == 'ShipDate':
ShipDate_ = child_.text
ShipDate_ = self.gds_validate_string(ShipDate_, node, 'ShipDate')
self.ShipDate = ShipDate_
elif nodeName_ == 'CustomerRefNo':
CustomerRefNo_ = child_.text
CustomerRefNo_ = self.gds_validate_string(CustomerRefNo_, node, 'CustomerRefNo')
self.CustomerRefNo = CustomerRefNo_
elif nodeName_ == 'ExtraServices':
obj_ = ExtraServicesType.factory()
obj_.build(child_)
self.ExtraServices = obj_
obj_.original_tagname_ = 'ExtraServices'
elif nodeName_ == 'HoldForPickup':
HoldForPickup_ = child_.text
HoldForPickup_ = self.gds_validate_string(HoldForPickup_, node, 'HoldForPickup')
self.HoldForPickup = HoldForPickup_
elif nodeName_ == 'OpenDistribute':
OpenDistribute_ = child_.text
OpenDistribute_ = self.gds_validate_string(OpenDistribute_, node, 'OpenDistribute')
self.OpenDistribute = OpenDistribute_
elif nodeName_ == 'PermitNumber':
PermitNumber_ = child_.text
PermitNumber_ = self.gds_validate_string(PermitNumber_, node, 'PermitNumber')
self.PermitNumber = PermitNumber_
elif nodeName_ == 'PermitZIPCode':
PermitZIPCode_ = child_.text
PermitZIPCode_ = self.gds_validate_string(PermitZIPCode_, node, 'PermitZIPCode')
self.PermitZIPCode = PermitZIPCode_
elif nodeName_ == 'PermitHolderName':
PermitHolderName_ = child_.text
PermitHolderName_ = self.gds_validate_string(PermitHolderName_, node, 'PermitHolderName')
self.PermitHolderName = PermitHolderName_
elif nodeName_ == 'CRID':
CRID_ = child_.text
CRID_ = self.gds_validate_string(CRID_, node, 'CRID')
self.CRID = CRID_
elif nodeName_ == 'SenderName':
SenderName_ = child_.text
SenderName_ = self.gds_validate_string(SenderName_, node, 'SenderName')
self.SenderName = SenderName_
elif nodeName_ == 'SenderEMail':
SenderEMail_ = child_.text
SenderEMail_ = self.gds_validate_string(SenderEMail_, node, 'SenderEMail')
self.SenderEMail = SenderEMail_
elif nodeName_ == 'RecipientName':
RecipientName_ = child_.text
RecipientName_ = self.gds_validate_string(RecipientName_, node, 'RecipientName')
self.RecipientName = RecipientName_
elif nodeName_ == 'RecipientEMail':
RecipientEMail_ = child_.text
RecipientEMail_ = self.gds_validate_string(RecipientEMail_, node, 'RecipientEMail')
self.RecipientEMail = RecipientEMail_
elif nodeName_ == 'ReceiptOption':
ReceiptOption_ = child_.text
ReceiptOption_ = self.gds_validate_string(ReceiptOption_, node, 'ReceiptOption')
self.ReceiptOption = ReceiptOption_
elif nodeName_ == 'ImageType':
ImageType_ = child_.text
ImageType_ = self.gds_validate_string(ImageType_, node, 'ImageType')
self.ImageType = ImageType_
elif nodeName_ == 'HoldForManifest':
HoldForManifest_ = child_.text
HoldForManifest_ = self.gds_validate_string(HoldForManifest_, node, 'HoldForManifest')
self.HoldForManifest = HoldForManifest_
elif nodeName_ == 'NineDigitRoutingZip':
NineDigitRoutingZip_ = child_.text
NineDigitRoutingZip_ = self.gds_validate_string(NineDigitRoutingZip_, node, 'NineDigitRoutingZip')
self.NineDigitRoutingZip = NineDigitRoutingZip_
elif nodeName_ == 'ShipInfo':
ShipInfo_ = child_.text
ShipInfo_ = self.gds_validate_string(ShipInfo_, node, 'ShipInfo')
self.ShipInfo = ShipInfo_
elif nodeName_ == 'CarrierRelease':
CarrierRelease_ = child_.text
CarrierRelease_ = self.gds_validate_string(CarrierRelease_, node, 'CarrierRelease')
self.CarrierRelease = CarrierRelease_
elif nodeName_ == 'ReturnCommitments':
ReturnCommitments_ = child_.text
ReturnCommitments_ = self.gds_validate_string(ReturnCommitments_, node, 'ReturnCommitments')
self.ReturnCommitments = ReturnCommitments_
elif nodeName_ == 'PrintCustomerRefNo':
PrintCustomerRefNo_ = child_.text
PrintCustomerRefNo_ = self.gds_validate_string(PrintCustomerRefNo_, node, 'PrintCustomerRefNo')
self.PrintCustomerRefNo = PrintCustomerRefNo_
elif nodeName_ == 'Content':
obj_ = ContentType.factory()
obj_.build(child_)
self.Content = obj_
obj_.original_tagname_ = 'Content'
elif nodeName_ == 'ShippingContents':
obj_ = ShippingContentsType.factory()
obj_.build(child_)
self.ShippingContents = obj_
obj_.original_tagname_ = 'ShippingContents'
elif nodeName_ == 'CustomsContentType':
CustomsContentType_ = child_.text
CustomsContentType_ = self.gds_validate_string(CustomsContentType_, node, 'CustomsContentType')
self.CustomsContentType = CustomsContentType_
elif nodeName_ == 'ContentComments':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'ContentComments')
self.ContentComments = ival_
elif nodeName_ == 'RestrictionType':
RestrictionType_ = child_.text
RestrictionType_ = self.gds_validate_string(RestrictionType_, node, 'RestrictionType')
self.RestrictionType = RestrictionType_
elif nodeName_ == 'RestrictionComments':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'RestrictionComments')
self.RestrictionComments = ival_
elif nodeName_ == 'AESITN':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'AESITN')
self.AESITN = ival_
elif nodeName_ == 'ImportersReference':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'ImportersReference')
self.ImportersReference = ival_
elif nodeName_ == 'ImportersContact':
ImportersContact_ = child_.text
ImportersContact_ = self.gds_validate_string(ImportersContact_, node, 'ImportersContact')
self.ImportersContact = ImportersContact_
elif nodeName_ == 'ExportersReference':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'ExportersReference')
self.ExportersReference = ival_
elif nodeName_ == 'ExportersContact':
ExportersContact_ = child_.text
ExportersContact_ = self.gds_validate_string(ExportersContact_, node, 'ExportersContact')
self.ExportersContact = ExportersContact_
elif nodeName_ == 'InvoiceNumber':
InvoiceNumber_ = child_.text
InvoiceNumber_ = self.gds_validate_string(InvoiceNumber_, node, 'InvoiceNumber')
self.InvoiceNumber = InvoiceNumber_
elif nodeName_ == 'LicenseNumber':
LicenseNumber_ = child_.text
LicenseNumber_ = self.gds_validate_string(LicenseNumber_, node, 'LicenseNumber')
self.LicenseNumber = LicenseNumber_
elif nodeName_ == 'CertificateNumber':
CertificateNumber_ = child_.text
CertificateNumber_ = self.gds_validate_string(CertificateNumber_, node, 'CertificateNumber')
self.CertificateNumber = CertificateNumber_
# end class eVSCertifyRequest
class ImageParametersType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, LabelSequence=None):
self.original_tagname_ = None
self.LabelSequence = LabelSequence
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, ImageParametersType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if ImageParametersType.subclass:
return ImageParametersType.subclass(*args_, **kwargs_)
else:
return ImageParametersType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_LabelSequence(self): return self.LabelSequence
def set_LabelSequence(self, LabelSequence): self.LabelSequence = LabelSequence
def hasContent_(self):
if (
self.LabelSequence is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='ImageParametersType', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('ImageParametersType')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='ImageParametersType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='ImageParametersType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ImageParametersType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='ImageParametersType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.LabelSequence is not None:
self.LabelSequence.export(outfile, level, namespace_, name_='LabelSequence', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'LabelSequence':
obj_ = LabelSequenceType.factory()
obj_.build(child_)
self.LabelSequence = obj_
obj_.original_tagname_ = 'LabelSequence'
# end class ImageParametersType
class LabelSequenceType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, PackageNumber=None, TotalPackages=None):
self.original_tagname_ = None
self.PackageNumber = PackageNumber
self.TotalPackages = TotalPackages
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, LabelSequenceType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if LabelSequenceType.subclass:
return LabelSequenceType.subclass(*args_, **kwargs_)
else:
return LabelSequenceType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_PackageNumber(self): return self.PackageNumber
def set_PackageNumber(self, PackageNumber): self.PackageNumber = PackageNumber
def get_TotalPackages(self): return self.TotalPackages
def set_TotalPackages(self, TotalPackages): self.TotalPackages = TotalPackages
def hasContent_(self):
if (
self.PackageNumber is not None or
self.TotalPackages is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='LabelSequenceType', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('LabelSequenceType')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='LabelSequenceType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='LabelSequenceType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='LabelSequenceType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='LabelSequenceType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.PackageNumber is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<PackageNumber>%s</PackageNumber>%s' % (self.gds_format_integer(self.PackageNumber, input_name='PackageNumber'), eol_))
if self.TotalPackages is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<TotalPackages>%s</TotalPackages>%s' % (self.gds_format_integer(self.TotalPackages, input_name='TotalPackages'), eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'PackageNumber':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'PackageNumber')
self.PackageNumber = ival_
elif nodeName_ == 'TotalPackages':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'TotalPackages')
self.TotalPackages = ival_
# end class LabelSequenceType
class ExpressMailOptionsType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, DeliveryOption=None, WaiverOfSignature=None):
self.original_tagname_ = None
self.DeliveryOption = DeliveryOption
self.WaiverOfSignature = WaiverOfSignature
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, ExpressMailOptionsType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if ExpressMailOptionsType.subclass:
return ExpressMailOptionsType.subclass(*args_, **kwargs_)
else:
return ExpressMailOptionsType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_DeliveryOption(self): return self.DeliveryOption
def set_DeliveryOption(self, DeliveryOption): self.DeliveryOption = DeliveryOption
def get_WaiverOfSignature(self): return self.WaiverOfSignature
def set_WaiverOfSignature(self, WaiverOfSignature): self.WaiverOfSignature = WaiverOfSignature
def hasContent_(self):
if (
self.DeliveryOption is not None or
self.WaiverOfSignature is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='ExpressMailOptionsType', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('ExpressMailOptionsType')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='ExpressMailOptionsType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='ExpressMailOptionsType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ExpressMailOptionsType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='ExpressMailOptionsType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.DeliveryOption is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<DeliveryOption>%s</DeliveryOption>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.DeliveryOption), input_name='DeliveryOption')), eol_))
if self.WaiverOfSignature is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<WaiverOfSignature>%s</WaiverOfSignature>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.WaiverOfSignature), input_name='WaiverOfSignature')), eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'DeliveryOption':
DeliveryOption_ = child_.text
DeliveryOption_ = self.gds_validate_string(DeliveryOption_, node, 'DeliveryOption')
self.DeliveryOption = DeliveryOption_
elif nodeName_ == 'WaiverOfSignature':
WaiverOfSignature_ = child_.text
WaiverOfSignature_ = self.gds_validate_string(WaiverOfSignature_, node, 'WaiverOfSignature')
self.WaiverOfSignature = WaiverOfSignature_
# end class ExpressMailOptionsType
class ExtraServicesType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, ExtraService=None):
self.original_tagname_ = None
self.ExtraService = ExtraService
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, ExtraServicesType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if ExtraServicesType.subclass:
return ExtraServicesType.subclass(*args_, **kwargs_)
else:
return ExtraServicesType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ExtraService(self): return self.ExtraService
def set_ExtraService(self, ExtraService): self.ExtraService = ExtraService
def hasContent_(self):
if (
self.ExtraService is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='ExtraServicesType', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('ExtraServicesType')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='ExtraServicesType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='ExtraServicesType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ExtraServicesType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='ExtraServicesType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.ExtraService is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<ExtraService>%s</ExtraService>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.ExtraService), input_name='ExtraService')), eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'ExtraService':
ExtraService_ = child_.text
ExtraService_ = self.gds_validate_string(ExtraService_, node, 'ExtraService')
self.ExtraService = ExtraService_
# end class ExtraServicesType
class ContentType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, ContentType_member=None, ContentDescription=None):
self.original_tagname_ = None
self.ContentType = ContentType_member
self.ContentDescription = ContentDescription
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, ContentType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if ContentType.subclass:
return ContentType.subclass(*args_, **kwargs_)
else:
return ContentType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ContentType(self): return self.ContentType
def set_ContentType(self, ContentType): self.ContentType = ContentType
def get_ContentDescription(self): return self.ContentDescription
def set_ContentDescription(self, ContentDescription): self.ContentDescription = ContentDescription
def hasContent_(self):
if (
self.ContentType is not None or
self.ContentDescription is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='ContentType', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('ContentType')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='ContentType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='ContentType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ContentType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='ContentType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.ContentType is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<ContentType>%s</ContentType>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.ContentType), input_name='ContentType')), eol_))
if self.ContentDescription is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<ContentDescription>%s</ContentDescription>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.ContentDescription), input_name='ContentDescription')), eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'ContentType':
ContentType_ = child_.text
ContentType_ = self.gds_validate_string(ContentType_, node, 'ContentType')
self.ContentType = ContentType_
elif nodeName_ == 'ContentDescription':
ContentDescription_ = child_.text
ContentDescription_ = self.gds_validate_string(ContentDescription_, node, 'ContentDescription')
self.ContentDescription = ContentDescription_
# end class ContentType
class ShippingContentsType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, ItemDetail=None):
self.original_tagname_ = None
if ItemDetail is None:
self.ItemDetail = []
else:
self.ItemDetail = ItemDetail
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, ShippingContentsType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if ShippingContentsType.subclass:
return ShippingContentsType.subclass(*args_, **kwargs_)
else:
return ShippingContentsType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ItemDetail(self): return self.ItemDetail
def set_ItemDetail(self, ItemDetail): self.ItemDetail = ItemDetail
def add_ItemDetail(self, value): self.ItemDetail.append(value)
def insert_ItemDetail_at(self, index, value): self.ItemDetail.insert(index, value)
def replace_ItemDetail_at(self, index, value): self.ItemDetail[index] = value
def hasContent_(self):
if (
self.ItemDetail
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='ShippingContentsType', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('ShippingContentsType')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='ShippingContentsType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='ShippingContentsType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ShippingContentsType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='ShippingContentsType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for ItemDetail_ in self.ItemDetail:
ItemDetail_.export(outfile, level, namespace_, name_='ItemDetail', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'ItemDetail':
obj_ = ItemDetailType.factory()
obj_.build(child_)
self.ItemDetail.append(obj_)
obj_.original_tagname_ = 'ItemDetail'
# end class ShippingContentsType
class ItemDetailType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, Description=None, Quantity=None, Value=None, NetPounds=None, NetOunces=None, HSTariffNumber=None, CountryOfOrigin=None):
self.original_tagname_ = None
self.Description = Description
self.Quantity = Quantity
self.Value = Value
self.NetPounds = NetPounds
self.NetOunces = NetOunces
self.HSTariffNumber = HSTariffNumber
self.CountryOfOrigin = CountryOfOrigin
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, ItemDetailType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if ItemDetailType.subclass:
return ItemDetailType.subclass(*args_, **kwargs_)
else:
return ItemDetailType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Description(self): return self.Description
def set_Description(self, Description): self.Description = Description
def get_Quantity(self): return self.Quantity
def set_Quantity(self, Quantity): self.Quantity = Quantity
def get_Value(self): return self.Value
def set_Value(self, Value): self.Value = Value
def get_NetPounds(self): return self.NetPounds
def set_NetPounds(self, NetPounds): self.NetPounds = NetPounds
def get_NetOunces(self): return self.NetOunces
def set_NetOunces(self, NetOunces): self.NetOunces = NetOunces
def get_HSTariffNumber(self): return self.HSTariffNumber
def set_HSTariffNumber(self, HSTariffNumber): self.HSTariffNumber = HSTariffNumber
def get_CountryOfOrigin(self): return self.CountryOfOrigin
def set_CountryOfOrigin(self, CountryOfOrigin): self.CountryOfOrigin = CountryOfOrigin
def hasContent_(self):
if (
self.Description is not None or
self.Quantity is not None or
self.Value is not None or
self.NetPounds is not None or
self.NetOunces is not None or
self.HSTariffNumber is not None or
self.CountryOfOrigin is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='ItemDetailType', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('ItemDetailType')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='ItemDetailType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='ItemDetailType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ItemDetailType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='ItemDetailType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.Description is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<Description>%s</Description>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.Description), input_name='Description')), eol_))
if self.Quantity is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<Quantity>%s</Quantity>%s' % (self.gds_format_integer(self.Quantity, input_name='Quantity'), eol_))
if self.Value is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<Value>%s</Value>%s' % (self.gds_format_float(self.Value, input_name='Value'), eol_))
if self.NetPounds is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<NetPounds>%s</NetPounds>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.NetPounds), input_name='NetPounds')), eol_))
if self.NetOunces is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<NetOunces>%s</NetOunces>%s' % (self.gds_format_integer(self.NetOunces, input_name='NetOunces'), eol_))
if self.HSTariffNumber is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<HSTariffNumber>%s</HSTariffNumber>%s' % (self.gds_format_integer(self.HSTariffNumber, input_name='HSTariffNumber'), eol_))
if self.CountryOfOrigin is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<CountryOfOrigin>%s</CountryOfOrigin>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.CountryOfOrigin), input_name='CountryOfOrigin')), eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Description':
Description_ = child_.text
Description_ = self.gds_validate_string(Description_, node, 'Description')
self.Description = Description_
elif nodeName_ == 'Quantity':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'Quantity')
self.Quantity = ival_
elif nodeName_ == 'Value':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'Value')
self.Value = fval_
elif nodeName_ == 'NetPounds':
NetPounds_ = child_.text
NetPounds_ = self.gds_validate_string(NetPounds_, node, 'NetPounds')
self.NetPounds = NetPounds_
elif nodeName_ == 'NetOunces':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'NetOunces')
self.NetOunces = ival_
elif nodeName_ == 'HSTariffNumber':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'HSTariffNumber')
self.HSTariffNumber = ival_
elif nodeName_ == 'CountryOfOrigin':
CountryOfOrigin_ = child_.text
CountryOfOrigin_ = self.gds_validate_string(CountryOfOrigin_, node, 'CountryOfOrigin')
self.CountryOfOrigin = CountryOfOrigin_
# end class ItemDetailType
GDSClassesMapping = {
}
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""
def usage():
print(USAGE_TEXT)
sys.exit(1)
def get_root_tag(node):
tag = Tag_pattern_.match(node.tag).groups()[-1]
rootClass = GDSClassesMapping.get(tag)
if rootClass is None:
rootClass = globals().get(tag)
return tag, rootClass
def parse(inFileName, silence=False):
parser = None
doc = parsexml_(inFileName, parser)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'eVSCertifyRequest'
rootClass = eVSCertifyRequest
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
if not silence:
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(
sys.stdout, 0, name_=rootTag,
namespacedef_='',
pretty_print=True)
return rootObj
def parseEtree(inFileName, silence=False):
parser = None
doc = parsexml_(inFileName, parser)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'eVSCertifyRequest'
rootClass = eVSCertifyRequest
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
mapping = {}
rootElement = rootObj.to_etree(None, name_=rootTag, mapping_=mapping)
reverse_mapping = rootObj.gds_reverse_node_mapping(mapping)
if not silence:
content = etree_.tostring(
rootElement, pretty_print=True,
xml_declaration=True, encoding="utf-8")
sys.stdout.write(content)
sys.stdout.write('\n')
return rootObj, rootElement, mapping, reverse_mapping
def parseString(inString, silence=False):
'''Parse a string, create the object tree, and export it.
Arguments:
- inString -- A string. This XML fragment should not start
with an XML declaration containing an encoding.
- silence -- A boolean. If False, export the object.
Returns -- The root object in the tree.
'''
parser = None
    rootNode = parsexmlstring_(inString, parser)
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'eVSCertifyRequest'
rootClass = eVSCertifyRequest
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
if not silence:
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(
sys.stdout, 0, name_=rootTag,
namespacedef_='')
return rootObj
def parseLiteral(inFileName, silence=False):
parser = None
doc = parsexml_(inFileName, parser)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'eVSCertifyRequest'
rootClass = eVSCertifyRequest
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
if not silence:
sys.stdout.write('#from eVSCertifyRequest.xsd import *\n\n')
sys.stdout.write('import eVSCertifyRequest.xsd as model_\n\n')
sys.stdout.write('rootObj = model_.rootClass(\n')
rootObj.exportLiteral(sys.stdout, 0, name_=rootTag)
sys.stdout.write(')\n')
return rootObj
def main():
args = sys.argv[1:]
if len(args) == 1:
parse(args[0])
else:
usage()
if __name__ == '__main__':
#import pdb; pdb.set_trace()
main()
__all__ = [
"ContentType",
"ExpressMailOptionsType",
"ExtraServicesType",
"ImageParametersType",
"ItemDetailType",
"LabelSequenceType",
"ShippingContentsType",
"eVSCertifyRequest"
]
|
import zipfile
import re
from django.utils.html import strip_tags
from util.textcleanup import split_into_chunks, calculate_unique_score_for_chunk, remove_special_characters
from util.handlequeries import build_query_result
def get_queries(source, num_queries=3):
    zip_data = zipfile.ZipFile(source).read('word/document.xml')
try:
        # Word documents are normally encoded as UTF-8.
        # TODO: read the encoding from the XML declaration instead of assuming it.
xml = zip_data.decode('UTF-8')
except UnicodeDecodeError:
xml = zip_data.decode('ISO-8859-1')
    # Clean up the markup, e.g. by converting structural XML such as line breaks
    # and table-cell boundaries into plain-text line breaks and spaces.
    text = xml.replace('<w:br/>', " \r\n")
    text = text.replace('</w:r></w:p></w:tc><w:tc>', " ")
    text = text.replace('</w:r><w:proofErr w:type="gramEnd"/></w:p>', " \r\n")
    text = text.replace('</w:r></w:p>', " \r\n")
text = re.sub(r'<w:hyperlink.*?<w:t>(.*?)</w:t>.*?</w:hyperlink>', r' \1 ', text) # extract hyperlink text
text = re.sub(r'<w:instrText.*?</w:instrText>', '', text) # remove 'instruction text' fields
text = re.sub(r'HYPERLINK ".*?"', '', text)
text = strip_tags(text)
scored_chunks = []
for chunk in split_into_chunks(text, filter_poor_quality=True):
score = calculate_unique_score_for_chunk(chunk)
scored_chunks.append((remove_special_characters(chunk), score))
return build_query_result(scored_chunks, num_queries, source=text)
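# Illustrative usage (a sketch; 'report.docx' is a hypothetical input path,
# anything accepted by zipfile.ZipFile works):
#
#     queries = get_queries('report.docx', num_queries=5)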
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Gobuster Module
'''
import os
import configparser
import sys
from core.config import cfg
CFG = configparser.ConfigParser()
CFG.read('config.ini')
class GoBusterModule:
'''Class GoBusterModule'''
@staticmethod
def display_wordlists_filenames(wordlist_dir):
'''Get all wordlists filenames function'''
i = 0
for root, dirs, files in os.walk(wordlist_dir):
for filename in files:
print(str(i) + ' - ' + filename)
i = i+1
@staticmethod
def get_selected_wordlist(wordlist_dir, number):
'''Get the selected wordlist function'''
i = 0
for root, dirs, files in os.walk(wordlist_dir):
for filename in files:
if int(number) == i:
return filename
i = i+1
return None
@staticmethod
def start_webfuzz():
'''Start webfuzz function'''
output_dir = os.path.abspath('{}/{}'.format(cfg.get().output_dir, 'WEBFUZZ'))
scan_all = CFG['GOBUSTER']['SCAN_ALL_WORDLISTS']
gobuster_path = CFG['GOBUSTER']['GOBUSTER_PATH']
gobuster_url_path = CFG['GOBUSTER']['GOBUSTER_URL_PATH']
gobuster_args = CFG['GOBUSTER']['ARGS']
wordlist_dir = os.path.abspath('core/modules/gobuster/wordlists/')
cmd = ''
if not os.path.exists(output_dir + gobuster_path):
os.makedirs(output_dir + gobuster_path)
if scan_all == 'Y':
for root, dirs, files in os.walk(wordlist_dir):
for filename in files:
fullpath_file = '{}/{}'.format(wordlist_dir, filename)
cmd = "cd {} && ./gobuster dir -u {}{} {} -w {} -k -o {}" \
.format(gobuster_path, cfg.get().url, gobuster_url_path,
gobuster_args, fullpath_file, '{}/{}'
.format(output_dir, filename))
os.system(cmd)
elif scan_all == 'N':
GoBusterModule.display_wordlists_filenames(wordlist_dir)
print('Select your wordlist number:')
number = input()
if not number.isdigit():
print('error: you need to select a number')
sys.exit(0)
wordlist = GoBusterModule.get_selected_wordlist(wordlist_dir, number)
print('Selected wordlist: {}'.format(wordlist))
fullpath_file = '{}/{}'.format(wordlist_dir, wordlist)
cmd = "cd {} && ./gobuster dir -u {}{} {} -w {} -k -o {}" \
.format(gobuster_path, cfg.get().url, gobuster_url_path,
gobuster_args, fullpath_file, '{}/{}'
.format(output_dir, wordlist))
            print('CMD: {}'.format(cmd))
            print('Do you want to start it? (Y/n)')
            answer = input()
            if answer.strip() in ('', 'Y', 'y'):
os.system(cmd)
else:
sys.exit(0)
else:
print('You need to configure your gobuster options correctly ' \
'(Y or N for the SCAN_ALL_WORDLISTS option)')
sys.exit(0)
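# A minimal config.ini sketch covering the [GOBUSTER] keys read by
# start_webfuzz() above (all values are illustrative placeholders, not
# recommendations):
#
#     [GOBUSTER]
#     SCAN_ALL_WORDLISTS = N
#     GOBUSTER_PATH = /opt/gobuster
#     GOBUSTER_URL_PATH = /
#     ARGS = -t 50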
|
#
# run all the regression tests
#
# [or, you can request specific ones by giving their names on the command line]
#
import sys
import os
import subprocess
def system (cmd):
fo = open ('/dev/null', 'wb')
p = subprocess.Popen (cmd, shell=True, stdout=fo, stderr=subprocess.STDOUT)
return os.waitpid (p.pid, 0)[1]
def run_test (cmd, *args):
cmd = PJ ('tests', cmd)
p = subprocess.Popen ([cmd] + list(args), stdout=subprocess.PIPE)
out = p.stdout.read()
return out
def test_t17():
lines = run_test ('t17').split ('\n')
# we can't say anything about the address returned by malloc
# but we should expect to read this number
assert (lines[1] == '3141')
# and the sizeof (pxll_int) is random too, since even on the same
# platform we might compile 32 or 64 bit, so just ignore it.
def test_t_dump_image():
# generate the output
run_test ('t_dump_image')
# load the image and run it
exp0 = run_test ('t_dump_image','-l')
exp1 = open ('tests/t_dump_image.exp').read()
assert (exp0 == exp1)
def test_t21():
out = run_test ('t21')
exp = open ('gc.c').read()
# make sure the first part matches the contents of gc.c
assert (out[:len(exp)] == exp)
    # the chars are too hard to test for, and unlikely to be wrong.
# should really make a separate char test.
def test_t22():
out = run_test ('t22')
lines = out.split ('\n')
assert (lines[0].count ('<closure pc=') == 5)
r6 = [ str(x) for x in range (6) ]
assert (lines[1:] == (r6 + r6 + ['#u', '']))
if not os.path.isfile ('parse/lexstep.scm'):
print 'generating parse/lexstep.scm...'
os.system ('(cd parse; python lexer.py)')
def test_t_lex():
# needs to generate parse/lexstep.scm for this test to run.
out = run_test ('t_lex')
assert (out.split('\n')[-4:] == ['{u0 whitespace " "}', '{u0 string1 "\\"\\""}', '"done"', ''])
def test_t_vm():
out = run_test ('t_vm', 'vm/tests/t11.byc')
assert (out.split('\n')[-3:] == ['{u0 7}', '#u', ''])
PJ = os.path.join
if len(sys.argv) > 1:
# run only these specific tests
files = [x + '.scm' for x in sys.argv[1:]]
else:
files = os.listdir ('tests')
# When looking for things that are broken, I prefer to work with the smallest
# test that reproduces a problem. Thus, run the tests in source-size order...
files = [ (os.stat(PJ ('tests', x)).st_size, x) for x in files ]
files.sort()
# tests that need special handling
special = [x[5:] for x in dir() if x.startswith ('test_')]
failed = []
succeeded = 0
for size, file in files:
if file.endswith ('.scm'):
base, ext = os.path.splitext (file)
path = os.path.join ('tests', file)
print 'compiling', path
fail = file.startswith ('f')
# XXX need to make 'special' tests such that they can compile with
# custom flags (e.g. t_stl).
code = system ('self/compile %s' % (path,))
print 'code=', code
if code == 0:
if fail:
failed.append ((base, 'compile did not fail like expected'))
else:
if base not in special:
out = run_test (base)
exp_path = PJ ('tests', base + '.exp')
if os.path.isfile (exp_path):
exp = open (exp_path).read()
if out != exp:
failed.append ((base, 'did not match expected output'))
#raise ValueError ("oops - output didn't match on test '%s'" % (base,))
else:
succeeded += 1
else:
succeeded += 1
else:
# tests that require special handling for whatever reason.
try:
eval ('test_%s()' % (base,))
except:
failed.append ((base, 'assertion failed'))
else:
succeeded += 1
else:
if not fail:
failed.append ((base, 'did not compile'))
print '%d tests passed' % succeeded
if len(failed):
print '%d tests failed!!' % (len(failed))
for base, reason in failed:
print base, reason
|
'''
Created on Aug 25, 2016
@author: David Zwicker <dzwicker@seas.harvard.edu>
'''
from __future__ import division
import copy
import unittest
import tempfile
import numpy as np
from .. import cache
from ...testing import deep_getsizeof
class TestCache(unittest.TestCase):
""" test collection for caching methods """
    _multiprocess_can_split_ = True  # let nose know that tests can run in parallel
def get_serialization_methods(self, with_none=True):
""" returns possible methods for serialization that are supported """
methods = ['json', 'pickle']
if with_none:
methods.append(None)
# check whether yaml is actually available
try:
import yaml # @UnusedImport
except ImportError:
pass
else:
methods.append('yaml')
return methods
def test_hashes(self):
""" test whether the hash key makes sense """
class Dummy(object):
def __init__(self, value):
self.value = value
def __hash__(self):
return self.value
for f in (cache.hash_mutable, cache.hash_readable):
# test simple objects
for obj in (1, 1.2, 'a', (1, 2), [1, 2], {1, 2}, {1: 2},
{(1, 2): [2, 3], (1, 3): [1, 2]},
Dummy(1), np.arange(5)):
o2 = copy.deepcopy(obj)
self.assertEqual(f(obj), f(o2),
msg='Hash different for `%s`' % str(obj))
        # make sure different objects get different hashes
        self.assertNotEqual(f(1), f('1'))
        self.assertNotEqual(f('a'), f('b'))
        self.assertNotEqual(f({1, 2}), f((1, 2)))
def test_serializer_nonsense(self):
""" test whether errors are thrown for wrong input """
with self.assertRaises(ValueError):
cache.make_serializer('non-sense')
with self.assertRaises(ValueError):
cache.make_unserializer('non-sense')
def test_serializer(self):
""" tests whether the make_serializer returns a canonical hash """
methods = self.get_serialization_methods()
for method in methods:
encode = cache.make_serializer(method)
self.assertEqual(encode(1), encode(1))
self.assertNotEqual(encode([1, 2, 3]), encode([2, 3, 1]))
if method != 'json':
# json cannot encode sets
self.assertEqual(encode({1, 2, 3}), encode({2, 3, 1}))
# test special serializer
encode = cache.make_serializer('hash_mutable')
self.assertEqual(encode({'a': 1, 'b': 2}), encode({'b': 2, 'a': 1}))
def test_unserializer(self):
""" tests whether the make_serializer and make_unserializer return the
original objects """
methods = self.get_serialization_methods()
data_list = [None, 1, [1, 2], {'b': 1, 'a': 2}]
for method in methods:
encode = cache.make_serializer(method)
decode = cache.make_unserializer(method)
for data in data_list:
self.assertEqual(data, decode(encode(data)))
def test_DictFiniteCapacity(self):
""" tests the DictFiniteCapacity class """
data = cache.DictFiniteCapacity(capacity=2)
data['a'] = 1
        self.assertEqual(len(data), 1)
data['b'] = 2
        self.assertEqual(len(data), 2)
data['c'] = 3
        self.assertEqual(len(data), 2)
with self.assertRaises(KeyError):
data['a']
data.update({'d': 4})
        self.assertEqual(len(data), 2)
with self.assertRaises(KeyError):
data['b']
def test_PersistentDict(self):
""" tests the PersistentDict class """
db = tempfile.NamedTemporaryFile()
data = cache.PersistentDict(db.name)
with self.assertRaises(TypeError):
data[1]
with self.assertRaises(TypeError):
_ = 1 in data
with self.assertRaises(TypeError):
data['a'] = 1
with self.assertRaises(TypeError):
del data[1]
data[b'a'] = b'1'
self.assertEqual(len(data), 1)
data[b'b'] = b'2'
self.assertEqual(len(data), 2)
del data[b'a']
self.assertEqual(len(data), 1)
with self.assertRaises(KeyError):
data[b'a']
data.update({b'd': b'4'})
        self.assertEqual(len(data), 2)
# reinitialize the dictionary
data = cache.PersistentDict(db.name)
self.assertEqual(len(data), 2)
self.assertEqual(data[b'b'], b'2')
self.assertTrue(b'd' in data)
self.assertEqual({b'b', b'd'}, set(data.keys()))
self.assertEqual({b'2', b'4'}, set(data.values()))
data.clear()
self.assertEqual(len(data), 0)
# reinitialize the dictionary
data = cache.PersistentDict(db.name)
self.assertEqual(len(data), 0)
def _test_SerializedDict(self, storage, reinitialize=None,
key_serialization='pickle',
value_serialization='pickle'):
""" tests the SerializedDict class with a particular parameter set """
msg = 'Serializers: key: %s, value: %s' % (key_serialization,
value_serialization)
data = cache.SerializedDict(key_serialization, value_serialization,
storage_dict=storage)
if value_serialization == 'none':
with self.assertRaises(TypeError):
data['a'] = 1
v1, v2, v3 = '1', '2', '3'
else:
v1, v2, v3 = 1, 2, '3'
data['a'] = v1
self.assertEqual(len(data), v1, msg=msg)
data['b'] = v2
self.assertEqual(data['b'], v2, msg=msg)
self.assertEqual(len(data), v2, msg=msg)
del data['a']
self.assertEqual(len(data), v1, msg=msg)
with self.assertRaises(KeyError):
data['a']
data.update({'d': v3})
self.assertEqual(len(data), v2, msg=msg)
# reinitialize the storage dictionary
if reinitialize is not None:
data._data = reinitialize()
self.assertEqual(len(data), v2, msg=msg)
self.assertEqual(data['b'], v2, msg=msg)
self.assertTrue('d' in data, msg=msg)
self.assertEqual({'b', 'd'}, set(data.keys()), msg=msg)
self.assertEqual({v2, v3}, set(data.values()), msg=msg)
data.clear()
self.assertEqual(len(data), 0, msg=msg)
# reinitialize the dictionary
if reinitialize is not None:
data._data = reinitialize()
self.assertEqual(len(data), 0, msg=msg)
def test_SerializedDict(self):
""" tests the SerializedDict class """
serializers = self.get_serialization_methods(with_none=False)
# test different storage types
for storage_type in ('none', 'dict', 'persistent_dict'):
if storage_type == 'none':
storage = None
reinitialize = None
elif storage_type == 'dict':
storage = {}
def reinitialize():
return storage
elif storage_type == 'persistent_dict':
db = tempfile.NamedTemporaryFile()
storage = cache.PersistentDict(db.name)
def reinitialize():
return cache.PersistentDict(db.name)
else:
raise ValueError('Unknown storage type `%s`' % storage_type)
# test different serialization methods
for key_serializer in serializers:
for value_serializer in serializers:
self._test_SerializedDict(
storage=storage, reinitialize=reinitialize,
key_serialization=key_serializer,
value_serialization=value_serializer
)
if storage is not None:
storage.clear()
def _test_property_cache(self, cache_storage):
""" test cached_property decorator """
# create test class
class CacheTest(object):
""" class for testing caching """
def __init__(self):
self.counter = 0
def get_finite_dict(self, n):
return cache.DictFiniteCapacity(capacity=1)
@property
def uncached(self):
self.counter += 1
return 1
def cached(self):
self.counter += 1
return 2
# apply the cache with the given storage
if cache_storage is None:
decorator = cache.cached_property()
else:
decorator = cache.cached_property(cache_storage)
CacheTest.cached = decorator(CacheTest.cached)
        # try two objects to make sure caching is done on the instance level
for obj in [CacheTest(), CacheTest()]:
# test uncached method
self.assertEqual(obj.uncached, 1)
self.assertEqual(obj.counter, 1)
self.assertEqual(obj.uncached, 1)
self.assertEqual(obj.counter, 2)
obj.counter = 0
# test cached methods
self.assertEqual(obj.cached, 2)
self.assertEqual(obj.counter, 1)
self.assertEqual(obj.cached, 2)
self.assertEqual(obj.counter, 1)
def test_property_cache(self):
""" test cached_property decorator """
for cache_storage in [None, "get_finite_dict"]:
self._test_property_cache(cache_storage)
def _test_method_cache(self, serializer, cache_factory=None):
""" test one particular parameter set of the cached_method decorator """
# create test class
class CacheTest(object):
""" class for testing caching """
def __init__(self):
self.counter = 0
def get_finite_dict(self, name):
return cache.DictFiniteCapacity(capacity=1)
def uncached(self, arg):
self.counter += 1
return arg
@cache.cached_method(hash_function=serializer,
factory=cache_factory)
def cached(self, arg):
self.counter += 1
return arg
@cache.cached_method(hash_function=serializer,
factory=cache_factory)
def cached_kwarg(self, a=0, b=0):
self.counter += 1
return a + b
# test what happens when the decorator is applied wrongly
with self.assertRaises(ValueError):
cache.cached_method(CacheTest.cached)
        # try two objects to make sure caching is done on the instance level and
# that clearing the cache works
obj1, obj2 = CacheTest(), CacheTest()
for k, obj in enumerate([obj1, obj2, obj1]):
# clear the cache before the first and the last pass
if k == 0 or k == 2:
CacheTest.cached.clear_cache_of_obj(obj)
CacheTest.cached_kwarg.clear_cache_of_obj(obj)
obj.counter = 0
# test uncached method
self.assertEqual(obj.uncached(1), 1)
self.assertEqual(obj.counter, 1)
self.assertEqual(obj.uncached(1), 1)
self.assertEqual(obj.counter, 2)
obj.counter = 0
# test cached methods
for method in (obj.cached, obj.cached_kwarg):
# run twice to test clearing the cache
for _ in (None, None):
# test simple caching behavior
self.assertEqual(method(1), 1)
self.assertEqual(obj.counter, 1)
self.assertEqual(method(1), 1)
self.assertEqual(obj.counter, 1)
self.assertEqual(method(2), 2)
self.assertEqual(obj.counter, 2)
self.assertEqual(method(2), 2)
self.assertEqual(obj.counter, 2)
# test special properties of cache_factories
if cache_factory is None:
self.assertEqual(method(1), 1)
self.assertEqual(obj.counter, 2)
elif cache_factory == 'get_finite_dict':
self.assertEqual(method(1), 1)
self.assertEqual(obj.counter, 3)
else:
raise ValueError('Unknown cache_factory `%s`'
% cache_factory)
obj.counter = 0
# clear cache to test the second run
method.clear_cache_of_obj(obj)
# test complex cached method
self.assertEqual(obj.cached_kwarg(1, b=2), 3)
self.assertEqual(obj.counter, 1)
self.assertEqual(obj.cached_kwarg(1, b=2), 3)
self.assertEqual(obj.counter, 1)
self.assertEqual(obj.cached_kwarg(2, b=2), 4)
self.assertEqual(obj.counter, 2)
self.assertEqual(obj.cached_kwarg(2, b=2), 4)
self.assertEqual(obj.counter, 2)
self.assertEqual(obj.cached_kwarg(1, b=3), 4)
self.assertEqual(obj.counter, 3)
self.assertEqual(obj.cached_kwarg(1, b=3), 4)
self.assertEqual(obj.counter, 3)
def _test_method_cache_extra_args(self, serializer, cache_factory=None):
""" test extra arguments in the cached_method decorator """
# create test class
class CacheTest(object):
""" class for testing caching """
def __init__(self, value=0):
self.counter = 0
                self.value = value
def get_finite_dict(self, name):
return cache.DictFiniteCapacity(capacity=1)
@cache.cached_method(hash_function=serializer, extra_args=['value'],
factory=cache_factory)
def cached(self, arg):
self.counter += 1
return self.value + arg
obj = CacheTest(0)
# test simple caching behavior
self.assertEqual(obj.cached(1), 1)
self.assertEqual(obj.counter, 1)
self.assertEqual(obj.cached(1), 1)
self.assertEqual(obj.counter, 1)
self.assertEqual(obj.cached(2), 2)
self.assertEqual(obj.counter, 2)
self.assertEqual(obj.cached(2), 2)
self.assertEqual(obj.counter, 2)
obj.value = 10
# test simple caching behavior
self.assertEqual(obj.cached(1), 11)
self.assertEqual(obj.counter, 3)
self.assertEqual(obj.cached(1), 11)
self.assertEqual(obj.counter, 3)
self.assertEqual(obj.cached(2), 12)
self.assertEqual(obj.counter, 4)
self.assertEqual(obj.cached(2), 12)
self.assertEqual(obj.counter, 4)
def _test_method_cache_ignore(self, serializer, cache_factory=None):
""" test ignored parameters of the cached_method decorator """
# test two different ways of ignoring arguments
for ignore_args in ['display', ['display']]:
# create test class
class CacheTest(object):
""" class for testing caching """
def __init__(self):
self.counter = 0
def get_finite_dict(self, name):
return cache.DictFiniteCapacity(capacity=1)
                @cache.cached_method(hash_function=serializer,
                                     ignore_args=ignore_args,
                                     factory=cache_factory)
                def cached(self, arg, display=True):
                    self.counter += 1
                    return arg
obj = CacheTest()
# test simple caching behavior
self.assertEqual(obj.cached(1, True), 1)
self.assertEqual(obj.counter, 1)
self.assertEqual(obj.cached(1, True), 1)
self.assertEqual(obj.counter, 1)
self.assertEqual(obj.cached(1, False), 1)
self.assertEqual(obj.counter, 1)
self.assertEqual(obj.cached(2, True), 2)
self.assertEqual(obj.counter, 2)
self.assertEqual(obj.cached(2, False), 2)
self.assertEqual(obj.counter, 2)
self.assertEqual(obj.cached(2, False), 2)
self.assertEqual(obj.counter, 2)
def test_method_cache(self):
""" test the cached_method decorator with several parameters """
for serializer in self.get_serialization_methods(with_none=False):
for cache_factory in [None, 'get_finite_dict']:
self._test_method_cache(serializer, cache_factory)
self._test_method_cache_extra_args(serializer, cache_factory)
                self._test_method_cache_ignore(serializer, cache_factory)
def test_cache_clearing(self):
""" make sure that memory is freed when cache is cleared """
class Test(object):
""" simple test object with a cache """
@cache.cached_method()
def calc(self, n):
return np.empty(n)
def clear_cache(self):
self._cache_methods = {}
def clear_specific(self):
self.calc.clear_cache_of_obj(self)
t = Test()
mem0 = deep_getsizeof(t)
for clear_cache in (t.clear_cache, t.clear_specific):
t.calc(100)
mem1 = deep_getsizeof(t)
self.assertGreater(mem1, mem0)
t.calc(200)
mem2 = deep_getsizeof(t)
self.assertGreater(mem2, mem1)
t.calc(100)
mem3 = deep_getsizeof(t)
self.assertEqual(mem3, mem2)
clear_cache()
mem4 = deep_getsizeof(t)
self.assertGreaterEqual(mem4, mem0)
self.assertGreaterEqual(mem1, mem4)
def test_clear_cache_decorator(self):
""" make sure that memory is freed when cache is cleared """
@cache.add_clear_cache_method
class Test(object):
""" simple test object with a cache """
@cache.cached_method()
def calc(self, n):
return np.empty(n)
t = Test()
mem0 = deep_getsizeof(t)
t.calc(100)
mem1 = deep_getsizeof(t)
self.assertGreater(mem1, mem0)
t.calc(200)
mem2 = deep_getsizeof(t)
self.assertGreater(mem2, mem1)
t.calc(100)
mem3 = deep_getsizeof(t)
self.assertEqual(mem3, mem2)
t.clear_cache()
mem4 = deep_getsizeof(t)
self.assertGreaterEqual(mem4, mem0)
self.assertGreaterEqual(mem1, mem4)
def test_CachedArray(self):
""" test the CachedArray class """
for value in (None, 0, 1):
array_cache = cache.CachedArray(value=value)
a = array_cache((2, 2))
b = array_cache((2, 2))
self.assertIs(a, b)
b = array_cache((2, 3))
b = array_cache((2, 2))
self.assertIsNot(a, b)
if value is not None:
np.testing.assert_equal(a, value)
np.testing.assert_equal(b, value)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__autor__ = u'Juan Beresiarte'
from math import sqrt
try:
input = raw_input
except NameError:
pass
def Tpitagoras(Hipotenusa=None, Cateto1=None, Cateto2=None):
    if Hipotenusa is None:
        return sqrt(Cateto1**2 + Cateto2**2)
    elif Cateto1 is None:
        return sqrt(Hipotenusa**2 - Cateto2**2)
    else:
        return sqrt(Hipotenusa**2 - Cateto1**2)
# Note: this module only depends on `math`, which is part of the Python
# standard library, so no extra installation (e.g. via pip) is required.
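# Illustrative usage (a sketch based on a 3-4-5 right triangle):
#
#     Tpitagoras(Cateto1=3, Cateto2=4)       # -> 5.0 (hypotenuse)
#     Tpitagoras(Hipotenusa=5, Cateto2=4)    # -> 3.0 (missing leg)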
|
import exercises as e
from itertools import islice
import unittest
class GeneratorTests(unittest.TestCase):
    # Exercise 1:
def test_square_generator(self):
self.assertNotIsInstance(e.square_generator(range(7)), list)
self.assertEqual(list(e.square_generator(range(7))), [0,1,4,9,16,25,36])
    # Exercise 2:
def test_square_map(self):
self.assertNotIsInstance(e.square_map(range(7)), list)
self.assertEqual(list(e.square_map(range(7))), [0,1,4,9,16,25,36])
    # Exercise 3:
def test_square_comprehension(self):
self.assertNotIsInstance(e.square_comprehension(range(7)), list)
self.assertEqual(list(e.square_comprehension(range(7))), [0,1,4,9,16,25,36])
    # Exercise 4:
def test_cycle(self):
self.assertEqual(list(islice(e.cycle([1,2,3]), 7)), [1,2,3,1,2,3,1])
    # Exercise 5:
def test_factorial(self):
self.assertEqual(list(islice(e.factorial(),6)), [1, 2, 6, 24, 120, 720])
    # Exercise 6:
def test_digits(self):
self.assertEqual(list(e.digits(123450, 10)), [0, 5, 4, 3, 2, 1])
self.assertEqual(list(e.digits(0, 10)), [0])
self.assertEqual(list(e.digits(8, 2)), [0, 0, 0, 1])
    # Exercise 7:
def test_factorial_digit_sum(self):
self.assertEqual(e.factorial_digit_sum(10), 27)
self.assertEqual(e.factorial_digit_sum(101), 639)
    # Exercise 8:
def test_my_range(self):
self.assertEqual(list(e.my_range(0,10)), list(range(0, 10)))
self.assertEqual(list(e.my_range(0, 10, 2)), list(range(0, 10, 2)))
self.assertEqual(list(e.my_range(0, 10, 3)), list(range(0, 10, 3)))
    # Exercise 9:
def test_my_range_negative(self):
self.assertEqual(list(e.my_range_negative(0,10)), list(range(0, 10)))
self.assertEqual(list(e.my_range_negative(0, 10, 2)), list(range(0, 10, 2)))
self.assertEqual(list(e.my_range_negative(0, 10, 3)), list(range(0, 10, 3)))
self.assertEqual(list(e.my_range_negative(10, 0, -1)), list(range(10, 0, -1)))
self.assertEqual(list(e.my_range_negative(10, 0, -3)), list(range(10, 0, -3)))
    # Exercise 10:
def test_items(self):
dictionary = {'a':1, 'b':2, 'c':3, 'd':4}
self.assertEqual(list(e.items(dictionary)), list(dictionary.items()))
    # Exercise 11:
def test_pseudorandom(self):
self.assertEqual(list(islice(e.pseudorandom(9, 2, 0, 1), 6)), [2, 4, 8, 7, 5, 1])
    # Exercise 12:
def test_sample(self):
self.assertEqual(list(islice(e.sample(['a',1,2,3,4,5,6,7,8,9]), 20)), ['a', 5, 4, 1, 4, 9, 2, 5, 6, 7, 8, 1, 6, 1, 8, 1, 6, 9, 2, 9])
    # Exercise 13:
def test_sample_no_rep(self):
items = ['a',1,2,3,4,5,6,7,8,9]
self.assertEqual(set(islice(e.sample_no_rep(items), 9)), set(['a', 4, 6, 7, 8, 9, 1, 2, 3]))
self.assertEqual(set(items), set(['a',1,2,3,4,5,6,7,8,9]))
self.assertEqual(set(islice(e.sample_no_rep(items), 20)), set(['a', 4, 6, 7, 8, 9, 1, 2, 3, 5]))
    # Exercise 14:
def test_primes(self):
self.assertEqual(list(islice(e.primes(), 5)), [2,3,5,7,11])
    # Exercise 15:
def test_primes_memory(self):
self.assertEqual(list(islice(e.primes_memory(), 5)), [2,3,5,7,11])
    # Exercise 16:
def test_nth_prime(self):
self.assertEqual(e.nth_prime(1), 2)
self.assertEqual(e.nth_prime(100), 541)
    # Exercise 17:
def test_pairs(self):
self.assertEqual(list(e.pairs([1, 2, 3, 4], ['a', 'b', 'c', 'd'])), [(1, 'a'), (2, 'b'), (3, 'c'), (4, 'd')])
    # Exercise 18:
def test_groups(self):
self.assertEqual(list(e.groups([1, 2, 3, 4], ['a', 'b', 'c', 'd'], 'xyz')), [(1, 'a', 'x'), (2, 'b', 'y'), (3, 'c', 'z')])
    # Exercise 19:
def test_trange(self):
self.assertEqual(list(e.trange((10, 10, 10), (13, 50, 15), (0, 15, 12))),
[(10, 10, 10),
(10, 25, 22),
(10, 40, 34),
(10, 55, 46),
(11, 10, 58),
(11, 26, 10),
(11, 41, 22),
(11, 56, 34),
(12, 11, 46),
(12, 26, 58),
(12, 42, 10),
(12, 57, 22),
(13, 12, 34),
(13, 27, 46),
(13, 42, 58)])
    # Exercise 20:
def test_k_permutations(self):
self.assertEqual(list(e.k_permutations(['a', 'b', 'c', 'd'], 3)),
[['a', 'b', 'c'],
['a', 'b', 'd'],
['a', 'c', 'b'],
['a', 'c', 'd'],
['a', 'd', 'b'],
['a', 'd', 'c'],
['b', 'a', 'c'],
['b', 'a', 'd'],
['b', 'c', 'a'],
['b', 'c', 'd'],
['b', 'd', 'a'],
['b', 'd', 'c'],
['c', 'a', 'b'],
['c', 'a', 'd'],
['c', 'b', 'a'],
['c', 'b', 'd'],
['c', 'd', 'a'],
['c', 'd', 'b'],
['d', 'a', 'b'],
['d', 'a', 'c'],
['d', 'b', 'a'],
['d', 'b', 'c'],
['d', 'c', 'a'],
['d', 'c', 'b']])
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -*-
import h5py
import collections
def runSubsetInputs(self):
'''
    This step reads the subset information - frequency, polarization and spatial subsets.
'''
state = self.state
subset_dict = self.get_value(['runconfig', 'groups', 'processing',
'input_subset', 'list_of_frequencies'])
if isinstance(subset_dict, str):
state.subset_dict = collections.OrderedDict()
state.subset_dict[subset_dict] = None
elif not subset_dict:
state.subset_dict = collections.OrderedDict()
state.subset_dict['A'] = None
state.subset_dict['B'] = None
else:
state.subset_dict = collections.OrderedDict(subset_dict)
    if not hasattr(state, 'outputList'):
state.outputList = collections.OrderedDict()
self._radar_grid_list = collections.OrderedDict()
for frequency in state.subset_dict.keys():
if not state.subset_dict[frequency]:
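            # No polarization list was given for this frequency, so read the
            # available polarizations from the input HDF5 product.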
current_key = (f'//science/LSAR/SLC/swaths/'
f'frequency{frequency}/listOfPolarizations')
hdf5_obj = h5py.File(state.input_hdf5, 'r')
state.subset_dict[frequency] = [s.decode()
for s in hdf5_obj[current_key]]
hdf5_obj.close()
# Get radar grid
radar_grid = self.slc_obj.getRadarGrid(frequency)
self._radar_grid_list[frequency] = radar_grid
# Prepare outputList dict
if frequency not in state.outputList.keys():
state.outputList[frequency] = []
# end of file
|
from typing import Optional
from posthog.constants import (
BIN_COUNT,
DISPLAY,
FUNNEL_FROM_STEP,
FUNNEL_ORDER_TYPE,
FUNNEL_STEP,
FUNNEL_TO_STEP,
FUNNEL_VIZ_TYPE,
FUNNEL_WINDOW_DAYS,
INSIGHT,
INSIGHT_FUNNELS,
TRENDS_LINEAR,
FunnelOrderType,
FunnelVizType,
)
from posthog.models.filters.mixins.base import BaseParamMixin
from posthog.models.filters.mixins.utils import cached_property, include_dict
class FunnelFromToStepsMixin(BaseParamMixin):
@cached_property
def funnel_from_step(self) -> Optional[int]:
if self._data.get(FUNNEL_FROM_STEP):
return int(self._data[FUNNEL_FROM_STEP])
return None
@cached_property
def funnel_to_step(self) -> Optional[int]:
if self._data.get(FUNNEL_TO_STEP):
return int(self._data[FUNNEL_TO_STEP])
return None
@include_dict
def funnel_from_to_steps_to_dict(self):
dict_part = {}
if self.funnel_from_step:
dict_part[FUNNEL_FROM_STEP] = self.funnel_from_step
if self.funnel_to_step:
dict_part[FUNNEL_TO_STEP] = self.funnel_to_step
return dict_part
class FunnelWindowDaysMixin(BaseParamMixin):
@cached_property
def funnel_window_days(self) -> Optional[int]:
_days = int(self._data.get(FUNNEL_WINDOW_DAYS, "0"))
if _days == 0:
return None
return _days
@include_dict
def funnel_window_days_to_dict(self):
return {FUNNEL_WINDOW_DAYS: self.funnel_window_days} if self.funnel_window_days else {}
@staticmethod
def milliseconds_from_days(days):
milliseconds, seconds, minutes, hours = [1000, 60, 60, 24]
return milliseconds * seconds * minutes * hours * days
@staticmethod
def microseconds_from_days(days):
microseconds = 1000
return microseconds * FunnelWindowDaysMixin.milliseconds_from_days(days)
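    # For reference: milliseconds_from_days(1) == 86_400_000 and
    # microseconds_from_days(1) == 86_400_000_000.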
class FunnelPersonsStepMixin(BaseParamMixin):
# first step is 0
# -1 means dropoff into step 1
@cached_property
def funnel_step(self) -> Optional[int]:
_step = int(self._data.get(FUNNEL_STEP, "0"))
if _step == 0:
return None
return _step
@include_dict
def funnel_step_to_dict(self):
return {FUNNEL_STEP: self.funnel_step} if self.funnel_step else {}
class FunnelTypeMixin(BaseParamMixin):
@cached_property
def funnel_order_type(self) -> Optional[FunnelOrderType]:
return self._data.get(FUNNEL_ORDER_TYPE)
@cached_property
def funnel_viz_type(self) -> Optional[FunnelVizType]:
funnel_viz_type = self._data.get(FUNNEL_VIZ_TYPE)
if (
funnel_viz_type is None
and self._data.get(INSIGHT) == INSIGHT_FUNNELS
and self._data.get(DISPLAY) == TRENDS_LINEAR
):
# Backwards compatibility
# Before Filter.funnel_viz_type funnel trends were indicated by Filter.display being TRENDS_LINEAR
return FunnelVizType.TRENDS
return funnel_viz_type
@include_dict
def funnel_type_to_dict(self):
result = {}
if self.funnel_order_type:
result[FUNNEL_ORDER_TYPE] = self.funnel_order_type
if self.funnel_viz_type:
result[FUNNEL_VIZ_TYPE] = self.funnel_viz_type
return result
class HistogramMixin(BaseParamMixin):
@cached_property
def bin_count(self) -> Optional[int]:
bin_count = self._data.get(BIN_COUNT)
return int(bin_count) if bin_count else None
@include_dict
def histogram_to_dict(self):
return {"bin_count": self.bin_count} if self.bin_count else {}
|
#!/usr/bin/env python3
import argparse
import logging
import os
import sys
from wwpdb.utils.dp.electron_density.common_functions import convert_mdb_to_binary_cif, run_command_and_check_output_file
logger = logging.getLogger()
class EmVolumes:
def __init__(
self,
em_map,
node_path,
volume_server_pack_path,
volume_server_query_path,
binary_map_out,
working_dir,
):
self.em_map = em_map
self.em_map_name = os.path.basename(em_map)
self.mdb_map = "em_map.mdb"
self.node_path = node_path
self.volume_server_pack_path = volume_server_pack_path
self.volume_server_query_path = volume_server_query_path
self.mdb_map_path = None
self.bcif_map_path = binary_map_out
self.workdir = working_dir if working_dir else os.getcwd()
def run_conversion(self):
bcif_dir_out = os.path.dirname(self.bcif_map_path)
if bcif_dir_out:
if not os.path.exists(bcif_dir_out):
os.makedirs(bcif_dir_out)
logging.debug("temp working folder: %s", self.workdir)
self.mdb_map_path = os.path.join(self.workdir, self.mdb_map)
worked = self.make_volume_server_map()
if worked:
worked = self.convert_map_to_binary_cif()
return worked
def make_volume_server_map(self):
if os.path.exists(self.em_map):
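            # Command layout: <node> <volume-server-pack script> em <input map> <output .mdb file>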
command = "%s %s em %s %s" % (self.node_path, self.volume_server_pack_path, self.em_map, self.mdb_map_path)
logging.debug(command)
return run_command_and_check_output_file(command=command, process_name="make Volume server map", workdir=self.workdir, output_file=self.mdb_map_path)
else:
logging.error("input map file missing: %s", self.em_map)
return False
def convert_map_to_binary_cif(self):
return convert_mdb_to_binary_cif(
node_path=self.node_path,
volume_server_query_path=self.volume_server_query_path,
map_id="em_volume",
source_id="em",
output_file=self.bcif_map_path,
working_dir=self.workdir,
mdb_map_path=self.mdb_map_path,
detail=1,
)
def main(): # pragma: no cover
parser = argparse.ArgumentParser()
parser.add_argument("--em_map", help="EM map", type=str, required=True)
parser.add_argument("--working_dir", help="working dir", type=str, required=True)
parser.add_argument("--binary_map_out", help="Output filename of binary map", type=str, required=True)
parser.add_argument("--node_path", help="path to node", type=str, required=True)
parser.add_argument("--volume_server_pack_path", help="path to volume-server-pack", type=str, required=True)
parser.add_argument("--volume_server_query_path", help="path to volume-server-query", type=str, required=True)
parser.add_argument("--keep_working_directory", help="keep working directory", action="store_true")
parser.add_argument("--debug", help="debugging", action="store_const", dest="loglevel", const=logging.DEBUG, default=logging.INFO)
args = parser.parse_args()
logger.setLevel(args.loglevel)
if not sys.argv[1:]:
parser.print_help()
exit()
em = EmVolumes(
em_map=args.em_map,
node_path=args.node_path,
volume_server_pack_path=args.volume_server_pack_path,
volume_server_query_path=args.volume_server_query_path,
binary_map_out=args.binary_map_out,
working_dir=args.working_dir,
)
worked = em.run_conversion()
logging.info("EM map conversion worked: {}".format(worked)) # pylint: disable=logging-format-interpolation
if not worked:
sys.exit(1)
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
import asyncio
from datetime import datetime, timedelta
import discord
import toml
from discord.ext import commands
class Rollback(Exception):
pass
class DBUtils(commands.Cog):
def __init__(self, bot):
super().__init__()
self.bot = bot
# ===================== #
# ======= CACHE ======= #
# ===================== #
async def load_scan_channels(self):
async with self.bot.db.acquire() as conn:
record = await conn.fetch("SELECT channel_id FROM scan_channels WHERE active")
return [x['channel_id'] for x in record]
async def load_reviewer_channels(self):
async with self.bot.db.acquire() as conn:
record = await conn.fetch("SELECT user_id,channel_id FROM reviewers WHERE active")
return [dict(x) for x in record]
# ====================== #
# ======= CHECKS ======= #
# ====================== #
async def has_empty_queue(self, user_id: int):
async with self.bot.db.acquire() as conn:
record = await conn.fetchrow(
"""
SELECT COUNT(*) FROM review_log WHERE user_id = $1
""",
user_id
)
return record['count'] != 0
# ======================== #
# ======= REVIEWER ======= #
# ======================== #
async def add_reviewer(self, user_id: int, channel_id: int):
async with self.bot.db.acquire() as conn:
async with conn.transaction():
await conn.fetch(
"""
INSERT INTO reviewers (user_id, channel_id)
VALUES ($1, $2)
""",
user_id,
channel_id
)
async def remove_reviewer(self, user_id: int):
async with self.bot.db.acquire() as conn:
async with conn.transaction():
await conn.fetch(
"""
UPDATE reviewers
SET active = FALSE
WHERE user_id = $1
""",
user_id
)
# ===================== #
# ======= SCORE ======= #
# ===================== #
async def get_score(self, score_id: int):
async with self.bot.db.acquire() as conn:
record = await conn.fetchrow(
"""
SELECT insult, severe_toxic, identity_hate, threat, nsfw
FROM scores
WHERE id = $1
""",
score_id
)
return record
async def add_score(self, scanned_content: str, scores: dict):
async with self.bot.db.acquire() as conn:
async with conn.transaction():
record = await conn.fetchrow(
"""
INSERT INTO scores (scanned_content, insult, severe_toxic, identity_hate, threat, nsfw)
VALUES ($1, $2::REAL, $3::REAL, $4::REAL, $5::REAL, $6::REAL)
RETURNING id
""",
scanned_content,
scores['insult'],
scores['severe_toxic'],
scores['identity_hate'],
scores['threat'],
scores['nsfw']
)
return record['id']
# ====================== #
# === REVIEW MESSAGE === #
# ====================== #
async def add_review_message(self, scanned_content: str, scores: dict):
score_id = await self.add_score(scanned_content, scores)
async with self.bot.db.acquire() as conn:
async with conn.transaction():
record = await conn.fetchrow(
"""
INSERT INTO review_messages (score_id, clean_content)
VALUES ($1, $2)
RETURNING *
""",
score_id,
scanned_content
)
return record['id']
async def get_review_message(self, message_id, user_id):
async with self.bot.db.acquire() as conn:
record = await conn.fetchrow(
"""
SELECT review_id, clean_content
FROM review_log INNER JOIN review_messages ON id = review_id
WHERE message_id = $1 AND user_id = $2 AND review_log.active
""",
message_id, user_id
)
return record
async def edit_review_message(self, review_id, clean_content: str):
async with self.bot.db.acquire() as conn:
async with conn.transaction():
await conn.fetch(
"""
UPDATE review_messages
SET clean_content = $2, in_sanitize = FALSE
WHERE id = $1
""",
review_id,
clean_content
)
await conn.fetch(
"""
DELETE FROM review_log
WHERE review_id = $1
""",
review_id
)
async def delete_active_review_message(self, user_id):
async with self.bot.db.acquire() as conn:
await conn.fetch(
"""
DELETE FROM review_log
WHERE user_id = $1 AND active
""",
user_id
)
# ====================== #
# ==== REVIEW QUEUE ==== #
# ====================== #
async def find_empty_queues(self):
async with self.bot.db.acquire() as conn:
record = await conn.fetch(
"""
SELECT user_id, channel_id
FROM reviewers r
WHERE NOT EXISTS(
SELECT *
FROM review_log
WHERE user_id = r.user_id AND active
);
"""
)
return record
async def pop_review_queue(self, user_id: int):
async with self.bot.db.acquire() as conn:
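            # Fetch the oldest review message that is still active, is not being
            # sanitized, has not already been reviewed by this user (active = FALSE
            # rows in review_log are submitted reviews) and has not yet been handed
            # out to `min_votes` reviewers.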
record = await conn.fetchrow(
"""
SELECT *
FROM review_messages r
WHERE in_sanitize = FALSE
AND active
AND NOT EXISTS(
SELECT *
FROM review_log
WHERE user_id = $1 AND review_id = r.id AND active = FALSE
)
AND NOT r.id IN (
SELECT review_id
FROM review_log
GROUP BY review_id HAVING COUNT(*) >= $2
)
ORDER BY r.id ASC
""",
user_id,
self.bot.config.get('min_votes')
)
return record
async def get_active_queue_messages(self, review_id: int):
async with self.bot.db.acquire() as conn:
record = await conn.fetch(
"""
SELECT message_id,review_log.user_id,channel_id
FROM review_log INNER JOIN reviewers ON review_log.user_id = reviewers.user_id
WHERE review_id = $1 AND review_log.active = TRUE
""",
review_id
)
return record
# ====================== #
# ===== REVIEW LOG ===== #
# ====================== #
async def add_review_log(self, review_id: int, user_id: int, message_id: int):
async with self.bot.db.acquire() as conn:
async with conn.transaction():
await conn.fetch(
"""
INSERT INTO review_log (review_id, user_id, message_id, trusted_review)
SELECT $1, $2, $3, trusted
FROM reviewers
WHERE user_id = $2
""",
review_id,
user_id,
message_id
)
async def remove_review_log(self, review_id: int):
async with self.bot.db.acquire() as conn:
async with conn.transaction():
await conn.fetch(
"""
DELETE FROM review_log
WHERE review_id = $1
""",
review_id
)
# ============================= #
# ===== REVIEW SUBMISSION ===== #
# ============================= #
async def submit_review(self, review_id: int, user_id: int, scores: dict):
async with self.bot.db.acquire() as conn:
async with conn.transaction():
record = await conn.fetch(
"""
UPDATE review_log
SET insult = $3::SMALLINT, severe_toxic = $4::SMALLINT, identity_hate = $5::SMALLINT, threat = $6::SMALLINT, nsfw = $7::SMALLINT, active = FALSE
WHERE review_id = $1 and user_id = $2
RETURNING *
""",
review_id,
user_id,
scores['insult'],
scores['severe_toxic'],
scores['identity_hate'],
scores['threat'],
scores['nsfw'],
)
async def check_complete_review(self, review_id: int):
async with self.bot.db.acquire() as conn:
record = await conn.fetchval(
"""
SELECT COUNT(*)
FROM review_log
WHERE review_id = $1 AND active = FALSE
""",
review_id
)
if record >= self.bot.config.get('min_votes'):
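                # Enough votes were cast: derive the final label for each category,
                # setting it to 1 when the average submitted vote exceeds 2/3.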
record = await conn.fetch(
"""
WITH reviews_table AS (
SELECT *
FROM review_log INNER JOIN review_messages ON id = review_id
WHERE review_id = $1
), decision_table AS (
SELECT
review_id,
CASE WHEN AVG(insult) > 2/3::float THEN 1 ELSE 0 END insult,
CASE WHEN AVG(severe_toxic) > 2/3::float THEN 1 ELSE 0 END severe_toxic,
CASE WHEN AVG(identity_hate) > 2/3::float THEN 1 ELSE 0 END identity_hate,
CASE WHEN AVG(threat) > 2/3::float THEN 1 ELSE 0 END threat,
CASE WHEN AVG(nsfw) > 2/3::float THEN 1 ELSE 0 END nsfw
FROM (
SELECT * FROM reviews_table
UNION
SELECT *
FROM reviews_table
WHERE trusted_review
) votes
GROUP BY review_id
)
SELECT clean_content, decision_table.*
FROM decision_table
INNER JOIN review_messages ON review_id = id
WHERE review_id = $1
""",
review_id
)
new_scores = {
'insult': 0,
'severe_toxic': 0,
'identity_hate': 0,
'threat': 0,
'nsfw': 0
}
for r in record:
for k, v in new_scores.items():
new_scores[k] += r[k]
await self.complete_review(review_id)
return {'message': record[0]['clean_content'], 'score': new_scores}
return None
async def complete_review(self, review_id: int):
async with self.bot.db.acquire() as conn:
async with conn.transaction():
await conn.fetchval(
"""
UPDATE review_log
SET active = FALSE
WHERE review_id = $1
""",
review_id
)
await conn.fetchval(
"""
UPDATE review_messages
SET active = FALSE
WHERE id = $1
""",
review_id
)
# ====================== #
# ====== SANITIZE ====== #
# ====================== #
async def set_sanitize(self, review_id: int):
async with self.bot.db.acquire() as conn:
async with conn.transaction():
await conn.fetch(
"""
UPDATE review_messages
SET in_sanitize = TRUE
WHERE id = $1
""",
review_id
)
record = await self.get_active_queue_messages(review_id)
await self.remove_review_log(review_id)
return record
# ===================== #
# ======= STATS ======= #
# ===================== #
async def get_total_reviews(self):
async with self.bot.db.acquire() as conn:
record = await conn.fetchval("SELECT COUNT(*) FROM review_messages WHERE active = FALSE AND in_sanitize = FALSE")
return record
async def get_reviews_count(self, user_id):
async with self.bot.db.acquire() as conn:
record = await conn.fetchval("SELECT COUNT(*) FROM review_log WHERE active = FALSE AND user_id = $1", user_id)
return record
async def get_deviance(self, user_id):
async with self.bot.db.acquire() as conn:
record = await conn.fetchrow(
"""
WITH reviews_table AS (
SELECT *
FROM review_log INNER JOIN review_messages ON id = review_id
WHERE review_log.active = FALSE
AND review_messages.active = FALSE
AND in_sanitize = FALSE
AND EXISTS (
SELECT 1
FROM review_log
WHERE review_messages.id = review_id
AND user_id = $1
AND active = FALSE
)
), result_table AS (
SELECT
review_id,
CASE WHEN AVG(insult) > 2/3::float THEN 1 ELSE 0 END insult,
CASE WHEN AVG(severe_toxic) > 2/3::float THEN 1 ELSE 0 END severe_toxic,
CASE WHEN AVG(identity_hate) > 2/3::float THEN 1 ELSE 0 END identity_hate,
CASE WHEN AVG(threat) > 2/3::float THEN 1 ELSE 0 END threat,
CASE WHEN AVG(nsfw) > 2/3::float THEN 1 ELSE 0 END nsfw
FROM reviews_table
GROUP BY review_id
), user_table AS (
SELECT *
FROM reviews_table
WHERE user_id = $1
)
SELECT
ROUND(AVG(CASE WHEN result_table.insult = user_table.insult THEN 0 ELSE 1 END), 3) AS insult,
ROUND(AVG(CASE WHEN result_table.severe_toxic = user_table.severe_toxic THEN 0 ELSE 1 END), 3) AS severe_toxic,
ROUND(AVG(CASE WHEN result_table.identity_hate = user_table.identity_hate THEN 0 ELSE 1 END), 3) AS identity_hate,
ROUND(AVG(CASE WHEN result_table.threat = user_table.threat THEN 0 ELSE 1 END), 3) AS threat,
ROUND(AVG(CASE WHEN result_table.nsfw = user_table.nsfw THEN 0 ELSE 1 END), 3) AS nsfw
FROM result_table INNER JOIN user_table USING(review_id)
""",
user_id)
return int(sum(record.values()) * 1000), dict(record)
async def get_remaining_reviews(self, user_id: int):
async with self.bot.db.acquire() as conn:
record = await conn.fetchval(
"""
SELECT COUNT(*)
FROM review_messages r
WHERE in_sanitize = FALSE
AND active
AND NOT EXISTS(
SELECT *
FROM review_log
WHERE user_id = $1 AND review_id = r.id AND active = FALSE
)
AND NOT r.id IN (
SELECT review_id
FROM review_log
GROUP BY review_id HAVING COUNT(*) >= $2
)
""",
user_id,
self.bot.config.get('min_votes')
)
return record
async def get_deviance_messages(self, user_id, field):
async with self.bot.db.acquire() as conn:
record = await conn.fetch(
f"""
WITH reviews_table AS (
SELECT *
FROM review_log INNER JOIN review_messages ON id = review_id
WHERE review_log.active = FALSE
AND review_messages.active = FALSE
AND in_sanitize = FALSE
AND EXISTS (
SELECT 1
FROM review_log
WHERE review_messages.id = review_id
AND active = FALSE
)
), result_table AS (
SELECT
review_id,
CASE WHEN AVG({field}) > 2/3::float THEN 1 ELSE 0 END {field}
FROM reviews_table
GROUP BY review_id
)
SELECT clean_content, reviews_table.{field} AS submitted
FROM result_table INNER JOIN reviews_table USING (review_id)
WHERE result_table.{field} != reviews_table.{field}
AND user_id = $1
ORDER BY review_id
""",
user_id
)
return record
# ======================= #
# ===== INFRACTIONS ===== #
# ======================= #
async def add_infractions(self, infractions):
infs = []
for inf in infractions:
infs.append((
None,
inf['message'].author.id,
inf['message'].guild.id,
inf['message'].channel.id,
inf['message'].id,
await self.add_score(inf['message'].content, inf['score']),
None
))
async with self.bot.db.acquire() as conn:
async with conn.transaction():
record = await conn.fetch(
"""
INSERT INTO infractions (user_id, server_id, channel_id, message_id, score_id)
(SELECT
i.user_id, i.server_id, i.channel_id, i.message_id, i.score_id
FROM
unnest($1::infractions[]) as i
)
""",
infs
)
def setup(bot):
bot.add_cog(DBUtils(bot))
|
"""
A script to build the forward simulation structure and submit the job.
"""
from ...slurm.submit_job import submit_job
from ...tasks.xsede.forward import forward_task
# * test is passed for this script on 01/07/2020
class Run_multiple_forward_jobs(object):
def __init__(self, base=None, N_total=None, N_each=None, N_iter=None,
nproc=None, N_node=None, ntasks=None, partition=None, time=None, account=None, run_mesh=True):
super().__init__()
self.base = base
self.N_total = N_total
self.N_each = N_each
self.N_iter = N_iter
self.nproc = nproc
self.N_node = N_node
self.ntasks = ntasks
self.partition = partition
self.time = time
self.account = account
self.run_mesh = run_mesh
def run(self):
thecommand = forward_task(
base=self.base, N_total=self.N_total, N_each=self.N_each, N_iter=self.N_iter, nproc=self.nproc, run_mesh=self.run_mesh)
job_id = submit_job("forward", thecommand, self.N_node, self.ntasks,
self.partition, self.time, self.account, "stampede2")
return job_id
if __name__ == "__main__":
import click
@click.command()
@click.option('--base', required=True, type=str, help="the base dir to be run.")
@click.option('--ntotal', required=True, type=int, help="total number of events.")
    @click.option('--neach', required=True, type=int, help="number of running jobs at each iteration.")
@click.option('--niter', required=True, type=int, help="number of iterations to run.")
@click.option('--nproc', required=True, type=int, help="number of mpi processes for each event.")
@click.option('--nnode', required=True, type=int, help="total number of nodes used.")
@click.option('--ntasks', required=True, type=int, help="total number of mpi processes.")
@click.option('--partition', required=True, type=str, help="partition used, eg: skx-normal.")
@click.option('--time', required=True, type=str, help="used in slurm format.")
@click.option('--account', required=True, type=str, help="account used in the slurm system.")
@click.option('--run_mesh/--no-run_mesh', required=True, default=True)
def main(base, ntotal, neach, niter, nproc, nnode, ntasks, partition, time, account, run_mesh):
run_script = Run_multiple_forward_jobs(base=base, N_total=ntotal,
N_each=neach, N_iter=niter, nproc=nproc, N_node=nnode, ntasks=ntasks, partition=partition, time=time, account=account, run_mesh=run_mesh)
run_script.run()
main() # pylint: disable=no-value-for-parameter
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#---------------------------------------------------
#Check whether a bulletin board ID is available
#copyright 2010-2012 ABARS all rights reserved.
#---------------------------------------------------
from google.appengine.ext import webapp
from google.appengine.ext import db
from google.appengine.api import users
from google.appengine.api import memcache
from myapp.MappingId import MappingId
from myapp.SetUtf8 import SetUtf8
from myapp.Alert import Alert
from myapp.CssDesign import CssDesign
class CheckId(webapp.RequestHandler):
def get(self):
SetUtf8.set()
is_english=CssDesign.is_english(self)
short=self.request.get('id')
if(MappingId.key_format_check(short)):
txt="IDは半角英数である必要があります。"
if(is_english):
txt="ID must be 16 characters or less"
Alert.alert_msg_with_write(self,txt)
return
if(MappingId.check_capability(short,"")==0):
txt="ID:"+short+"は既に登録されていて利用できません。"
if(is_english):
txt="ID:"+short+" is not available"
Alert.alert_msg_with_write(self,txt)
return
txt="ID:"+short+"は利用可能です。"
if(is_english):
txt="ID:"+short+" is available"
Alert.alert_msg_with_write(self,txt)
|
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
from base import BaseModel
import segmentation_models_pytorch as smp
class conv_block(nn.Module):
def __init__(self, channel_in, channel_out):
super(conv_block, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(channel_in, channel_out, kernel_size=3,stride=1,padding=1,bias=True),
nn.BatchNorm2d(channel_out),
nn.ReLU(inplace=True), # inplace=True: modify the input directly, without allocating any additional memory.
nn.Conv2d(channel_out, channel_out, kernel_size=3,stride=1,padding=1,bias=True),
nn.BatchNorm2d(channel_out),
nn.ReLU(inplace=True)
)
def forward(self, x):
x = self.conv(x)
return x
class up_conv_block(nn.Module):
def __init__(self, channel_in, channel_out):
super(up_conv_block, self).__init__()
self.up_conv = nn.Sequential(
nn.Upsample(scale_factor=2),
nn.Conv2d(channel_in,channel_out,kernel_size=3,stride=1,padding=1,bias=True),
nn.BatchNorm2d(channel_out),
nn.ReLU(inplace=True)
)
def forward(self,x):
x = self.up_conv(x)
return x
class Attention_block(nn.Module):
def __init__(self, F_g, F_l, F_int):
super(Attention_block, self).__init__()
        self.W_g = nn.Sequential(
            nn.Conv2d(F_g, F_int, kernel_size=1, stride=1, padding=0, bias=True),
            nn.BatchNorm2d(F_int)
        )
        self.W_x = nn.Sequential(
            nn.Conv2d(F_l, F_int, kernel_size=1, stride=1, padding=0, bias=True),
            nn.BatchNorm2d(F_int)
        )
self.psi = nn.Sequential(
nn.Conv2d(F_int, 1, kernel_size=1, stride=1, padding=0, bias=True),
nn.BatchNorm2d(1),
nn.Sigmoid()
)
self.relu = nn.ReLU(inplace=True)
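    # Attention gate: the gating signal g and the skip-connection features x are
    # projected to a shared F_int-channel space, summed, and squashed to a
    # per-pixel weight psi in (0, 1) that rescales the skip features.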
def forward(self, g, x):
g1 = self.W_g(g)
x1 = self.W_x(x)
psi = self.relu(g1 + x1)
psi = self.psi(psi)
out = x * psi
return out
class ResNet_UNet(BaseModel):
def __init__(self, encoder_name = "resnet18", encoder_weights="imagenet", encoder_depth=5, in_channels=1, classes=1, activation='sigmoid'):
super(ResNet_UNet, self).__init__()
self.model = smp.Unet(encoder_name=encoder_name, encoder_weights=encoder_weights, encoder_depth=encoder_depth, in_channels=in_channels, classes=classes, activation=activation)
def forward(self,x):
return self.model(x)
class MA_Net(BaseModel):
def __init__(self, encoder_name = "resnet18", encoder_weights="imagenet", in_channels=1, activation='sigmoid'):
super(MA_Net, self).__init__()
self.model = smp.MAnet(encoder_name=encoder_name, encoder_weights=encoder_weights, in_channels=in_channels, activation=activation)
def forward(self,x):
return self.model(x)
class Attention_UNet(BaseModel):
def __init__(self, input_channel=3, output_channel=1):
super(Attention_UNet, self).__init__()
filters = [64, 128, 256, 512, 1024]
self.conv1 = conv_block(input_channel, filters[0])
self.conv2 = conv_block(filters[0], filters[1])
self.conv3 = conv_block(filters[1], filters[2])
self.conv4 = conv_block(filters[2], filters[3])
self.conv5 = conv_block(filters[3], filters[4])
self.up_sampling5 = up_conv_block(filters[4], filters[3])
self.up_sampling4 = up_conv_block(filters[3], filters[2])
self.up_sampling3 = up_conv_block(filters[2], filters[1])
self.up_sampling2 = up_conv_block(filters[1], filters[0])
self.up_conv5 = conv_block(filters[4], filters[3])
self.up_conv4 = conv_block(filters[3], filters[2])
self.up_conv3 = conv_block(filters[2], filters[1])
self.up_conv2 = conv_block(filters[1], filters[0])
self.att5 = Attention_block(filters[3], filters[3], filters[2])
self.att4 = Attention_block(filters[2], filters[2], filters[1])
self.att3 = Attention_block(filters[1], filters[1], filters[0])
self.att2 = Attention_block(filters[0], filters[0], int(filters[0]/2))
self.final = nn.Sequential(
nn.Conv2d(filters[0], output_channel, kernel_size=1, stride=1, padding=0),
nn.Sigmoid()
)
self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
def forward(self, x):
# Encoder path
x1 = self.conv1(x)
x2 = self.maxpool(x1)
x2 = self.conv2(x2)
x3 = self.maxpool(x2)
x3 = self.conv3(x3)
x4 = self.maxpool(x3)
x4 = self.conv4(x4)
x5 = self.maxpool(x4)
x5 = self.conv5(x5)
# Decoder path
d5 = self.up_sampling5(x5)
a4 = self.att5(d5, x4)
d5 = torch.cat((a4, d5), dim=1)
d5 = self.up_conv5(d5)
d4 = self.up_sampling4(d5)
a3 = self.att4(d4, x3)
d4 = torch.cat((a3, d4), dim=1)
d4 = self.up_conv4(d4)
d3 = self.up_sampling3(d4)
a2 = self.att3(d3, x2)
d3 = torch.cat((a2, d3), dim=1)
d3 = self.up_conv3(d3)
d2 = self.up_sampling2(d3)
a1 = self.att2(d2, x1)
d2 = torch.cat((a1, d2), dim=1)
d2 = self.up_conv2(d2)
out = self.final(d2)
return out
class Attention_ResUNet(BaseModel):
def __init__(self, encoder_name = "resnet18", encoder_weights="imagenet", encoder_depth=5, in_channels=1, classes=1, activation='sigmoid', aux_params=None):
super(Attention_ResUNet, self).__init__()
self.model = smp.Unet(encoder_name=encoder_name, encoder_weights=encoder_weights, encoder_depth=encoder_depth,
in_channels=in_channels, classes=classes, activation=activation, decoder_attention_type='scse',
aux_params=aux_params)
def forward(self,x):
return self.model(x)
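# Minimal shape sanity check for the pure-PyTorch Attention_UNet (a sketch only;
# the 64x64 input and batch size of 1 are arbitrary choices, divisible by the
# four 2x pooling steps, and not part of the original training pipeline):
if __name__ == "__main__":
    _model = Attention_UNet(input_channel=3, output_channel=1)
    _out = _model(torch.randn(1, 3, 64, 64))
    # Four attention-gated decoder stages restore the input resolution.
    assert _out.shape == (1, 1, 64, 64)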
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import mock
from horizon import exceptions
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
@mock.patch('swiftclient.client.Connection')
class SwiftApiTests(test.APIMockTestCase):
def test_swift_get_containers(self, mock_swiftclient):
containers = self.containers.list()
cont_data = [c._apidict for c in containers]
swift_api = mock_swiftclient.return_value
swift_api.get_account.return_value = [{}, cont_data]
(conts, more) = api.swift.swift_get_containers(self.request)
self.assertEqual(len(containers), len(conts))
self.assertFalse(more)
swift_api.get_account.assert_called_once_with(
limit=1001, marker=None, prefix=None, full_listing=True)
def test_swift_get_container_with_data(self, mock_swiftclient):
container = self.containers.first()
objects = self.objects.list()
swift_api = mock_swiftclient.return_value
swift_api.get_object.return_value = (container, objects)
cont = api.swift.swift_get_container(self.request, container.name)
self.assertEqual(container.name, cont.name)
self.assertEqual(len(objects), len(cont.data))
swift_api.get_object.assert_called_once_with(container.name, "")
def test_swift_get_container_without_data(self, mock_swiftclient):
container = self.containers.first()
swift_api = mock_swiftclient.return_value
swift_api.head_container.return_value = container
cont = api.swift.swift_get_container(self.request,
container.name,
with_data=False)
self.assertEqual(cont.name, container.name)
self.assertIsNone(cont.data)
swift_api.head_container.assert_called_once_with(container.name)
def test_swift_create_duplicate_container(self, mock_swiftclient):
metadata = {'is_public': False}
container = self.containers.first()
headers = api.swift._metadata_to_header(metadata=(metadata))
swift_api = mock_swiftclient.return_value
# Check for existence, then create
swift_api.head_container.side_effect = self.exceptions.swift
swift_api.put_container.return_value = container
api.swift.swift_create_container(self.request,
container.name,
metadata=(metadata))
swift_api.head_container.assert_called_once_with(container.name)
swift_api.put_container.assert_called_once_with(container.name,
headers=headers)
def test_swift_create_container(self, mock_swiftclient):
metadata = {'is_public': True}
container = self.containers.first()
swift_api = mock_swiftclient.return_value
swift_api.head_container.return_value = container
with self.assertRaises(exceptions.AlreadyExists):
api.swift.swift_create_container(self.request,
container.name,
metadata=(metadata))
swift_api.head_container.assert_called_once_with(container.name)
def test_swift_update_container(self, mock_swiftclient):
metadata = {'is_public': True}
container = self.containers.first()
swift_api = mock_swiftclient.return_value
headers = api.swift._metadata_to_header(metadata=(metadata))
swift_api.post_container.return_value = container
api.swift.swift_update_container(self.request,
container.name,
metadata=(metadata))
swift_api.post_container.assert_called_once_with(container.name,
headers=headers)
def test_swift_get_objects(self, mock_swiftclient):
container = self.containers.first()
objects = self.objects.list()
swift_api = mock_swiftclient.return_value
swift_api.get_container.return_value = [{}, objects]
(objs, more) = api.swift.swift_get_objects(self.request,
container.name)
self.assertEqual(len(objects), len(objs))
self.assertFalse(more)
swift_api.get_container.assert_called_once_with(
container.name,
limit=1001,
marker=None,
prefix=None,
delimiter='/',
full_listing=True)
def test_swift_get_object_with_data_non_chunked(self, mock_swiftclient):
container = self.containers.first()
object = self.objects.first()
swift_api = mock_swiftclient.return_value
swift_api.get_object.return_value = [object, object.data]
obj = api.swift.swift_get_object(self.request, container.name,
object.name, resp_chunk_size=None)
self.assertEqual(object.name, obj.name)
swift_api.get_object.assert_called_once_with(
container.name, object.name, resp_chunk_size=None)
def test_swift_get_object_with_data_chunked(self, mock_swiftclient):
container = self.containers.first()
object = self.objects.first()
swift_api = mock_swiftclient.return_value
swift_api.get_object.return_value = [object, object.data]
obj = api.swift.swift_get_object(
self.request, container.name, object.name)
self.assertEqual(object.name, obj.name)
swift_api.get_object.assert_called_once_with(
container.name, object.name, resp_chunk_size=api.swift.CHUNK_SIZE)
def test_swift_get_object_without_data(self, mock_swiftclient):
container = self.containers.first()
object = self.objects.first()
swift_api = mock_swiftclient.return_value
swift_api.head_object.return_value = object
obj = api.swift.swift_get_object(self.request,
container.name,
object.name,
with_data=False)
self.assertEqual(object.name, obj.name)
self.assertIsNone(obj.data)
swift_api.head_object.assert_called_once_with(container.name,
object.name)
def test_swift_create_pseudo_folder(self, mock_swiftclient):
container = self.containers.first()
folder = self.folder.first()
swift_api = mock_swiftclient.return_value
exc = self.exceptions.swift
swift_api.head_object.side_effect = exc
swift_api.put_object.return_value = folder
api.swift.swift_create_pseudo_folder(self.request,
container.name,
folder.name)
swift_api.head_object.assert_called_once_with(container.name,
folder.name)
swift_api.put_object.assert_called_once_with(container.name,
folder.name,
None,
headers={})
def test_swift_create_duplicate_folder(self, mock_swiftclient):
container = self.containers.first()
folder = self.folder.first()
swift_api = mock_swiftclient.return_value
swift_api.head_object.return_value = folder
with self.assertRaises(exceptions.AlreadyExists):
api.swift.swift_create_pseudo_folder(self.request,
container.name,
folder.name)
swift_api.head_object.assert_called_once_with(container.name,
folder.name)
def test_swift_upload_object(self, mock_swiftclient):
container = self.containers.first()
obj = self.objects.first()
fake_name = 'fake_object.jpg'
class FakeFile(object):
def __init__(self):
self.name = fake_name
self.data = obj.data
self.size = len(obj.data)
headers = {'X-Object-Meta-Orig-Filename': fake_name}
swift_api = mock_swiftclient.return_value
test_file = FakeFile()
swift_api.put_object.return_value = None
api.swift.swift_upload_object(self.request,
container.name,
obj.name,
test_file)
swift_api.put_object.assert_called_once_with(
container.name,
obj.name,
test.IsA(FakeFile),
content_length=test_file.size,
headers=headers)
def test_swift_upload_object_without_file(self, mock_swiftclient):
container = self.containers.first()
obj = self.objects.first()
swift_api = mock_swiftclient.return_value
swift_api.put_object.return_value = None
response = api.swift.swift_upload_object(self.request,
container.name,
obj.name,
None)
self.assertEqual(0, response['bytes'])
swift_api.put_object.assert_called_once_with(
container.name,
obj.name,
None,
content_length=0,
headers={})
def test_swift_object_exists(self, mock_swiftclient):
container = self.containers.first()
obj = self.objects.first()
swift_api = mock_swiftclient.return_value
swift_api.head_object.side_effect = [container, self.exceptions.swift]
args = self.request, container.name, obj.name
self.assertTrue(api.swift.swift_object_exists(*args))
# Again, for a "non-existent" object
self.assertFalse(api.swift.swift_object_exists(*args))
self.assertEqual(2, swift_api.head_object.call_count)
swift_api.head_object.assert_has_calls([
mock.call(container.name, obj.name),
mock.call(container.name, obj.name),
])
|
import math
import time
from utils import dataset, randomizer, exporter
from assignment3 import forward, backward, squared_error, optimize_sgd
class Autoencoder:
"""Simple Autoencoder implementation with one hidden layer, that uses the
identity function f(x) = x as its activation function and optimizes with
Stochastic Gradient Descent (SGD).
"""
def __init__(self, n_in, n_units, lr=0.01, mean=0.0, stddev=0.01):
"""Model constructor, initializing the parameters.
Args:
n_in (int): Number of input units, i.e. size of the input vector.
n_units (int): Number of hidden units.
lr (float): Initial learning rate.
mean (float): Mean of the initial parameters.
stddev (float): Standard deviation of the initial random
parameters.
"""
self.W1 = randomizer.rnd((n_units, n_in), mean, stddev)
self.b1 = randomizer.rnd((n_units,), mean, stddev)
self.W2 = randomizer.rnd((n_in, n_units), mean, stddev)
self.b2 = randomizer.rnd((n_in,), mean, stddev)
self.lr = lr
def __call__(self, x, train=True):
"""Perform an iteration with one sample, updating the parameters of
this model if the training flag is set to true, otherwise just
perform a forward propagation and return the loss.
Args:
x (list): Input data. 1 dimensional.
train (bool): True if the parameters are to be updated. False
otherwise.
Returns:
float: Loss.
"""
h, y = forward(x, self.W1, self.b1, self.W2, self.b2)
if train:
gW1, gb1, gW2, gb2 = backward(x, h, y, self.W1, self.b1, self.W2,
self.b2)
self.optimize(h, y, gW1, gb1, gW2, gb2)
loss = squared_error(y, x, scale=0.5)
return loss
def optimize(self, h, y, gW1, gb1, gW2, gb2):
"""Optimizes (modifies) the parameters of this model using SGD.
Args:
h (list): Activations of the hidden layer.
y (list): Activations of the output layer.
gW1 (list): Computed gradients of `W1`.
gb1 (list): Computed gradients of `b1`.
gW2 (list): Computed gradients of `W2`.
            gb2 (list): Computed gradients of `b2`.
"""
self.W2 = optimize_sgd(self.W2, gW2, self.lr)
self.b2 = optimize_sgd(self.b2, gb2, self.lr)
self.W1 = optimize_sgd(self.W1, gW1, self.lr)
self.b1 = optimize_sgd(self.b1, gb1, self.lr)
if __name__ == '__main__':
# Train an Autoencoder model
N, D, xs = dataset.read('data/dataset.dat')
    # Parameters for initializing the random parameters. The standard deviation
# is set with respect to the dimensions of the inputs.
# http://docs.chainer.org/en/stable/reference/links.html#linear
mean = 0
stddev = math.sqrt(1 / D)
n_hidden_units = 5
n_epochs = 20
initial_learning_rate = 0.001
model = Autoencoder(n_in=D, n_units=n_hidden_units,
lr=initial_learning_rate, mean=mean, stddev=stddev)
# Start the training
for epoch in range(n_epochs):
randomizer.shuffle(xs)
# Optimize the model
for x in xs:
model(x, train=True)
# Compute the loss
total_loss = 0
for x in xs:
loss = model(x, train=False)
total_loss += loss
average_loss = total_loss / N
print('Epoch: {} Avg. loss: {}'.format(epoch + 1, average_loss))
# Uncomment the following lines to save the trained parameters to a file
# out_filename = 'output/assignment4_params_' + str(int(time.time()))
# exporter.export_model(out_filename, model)
|
from .datamodules import *
from .datasets import *
|
import torch
import math
import warnings
import pyro
from pyro import poutine
from pyro.infer.autoguide.utils import mean_field_entropy
from pyro.contrib.oed.search import Search
from pyro.infer import EmpiricalMarginal, Importance, SVI
from pyro.util import torch_isnan, torch_isinf
from pyro.contrib.util import lexpand
__all__ = [
"laplace_eig",
"vi_eig",
"nmc_eig",
"donsker_varadhan_eig",
"posterior_eig",
"marginal_eig",
"lfire_eig",
"vnmc_eig"
]
def laplace_eig(model, design, observation_labels, target_labels, guide, loss, optim, num_steps,
final_num_samples, y_dist=None, eig=True, **prior_entropy_kwargs):
"""
Estimates the expected information gain (EIG) by making repeated Laplace approximations to the posterior.
:param function model: Pyro stochastic function taking `design` as only argument.
:param torch.Tensor design: Tensor of possible designs.
:param list observation_labels: labels of sample sites to be regarded as observables.
:param list target_labels: labels of sample sites to be regarded as latent variables of interest, i.e. the sites
that we wish to gain information about.
:param function guide: Pyro stochastic function corresponding to `model`.
:param loss: a Pyro loss such as `pyro.infer.Trace_ELBO().differentiable_loss`.
:param optim: optimizer for the loss
:param int num_steps: Number of gradient steps to take per sampled pseudo-observation.
:param int final_num_samples: Number of `y` samples (pseudo-observations) to take.
    :param y_dist: Distribution to sample `y` from; if `None`, we use the Bayesian marginal distribution.
:param bool eig: Whether to compute the EIG or the average posterior entropy (APE). The EIG is given by
`EIG = prior entropy - APE`. If `True`, the prior entropy will be estimated analytically,
or by Monte Carlo as appropriate for the `model`. If `False` the APE is returned.
:param dict prior_entropy_kwargs: parameters for estimating the prior entropy: `num_prior_samples` indicating the
number of samples for a MC estimate of prior entropy, and `mean_field` indicating if an analytic form for
a mean-field prior should be tried.
:return: EIG estimate
:rtype: torch.Tensor
"""
if isinstance(observation_labels, str):
observation_labels = [observation_labels]
if target_labels is not None and isinstance(target_labels, str):
target_labels = [target_labels]
ape = _laplace_vi_ape(model, design, observation_labels, target_labels, guide, loss, optim, num_steps,
final_num_samples, y_dist=y_dist)
return _eig_from_ape(model, design, target_labels, ape, eig, prior_entropy_kwargs)
def _eig_from_ape(model, design, target_labels, ape, eig, prior_entropy_kwargs):
mean_field = prior_entropy_kwargs.get("mean_field", True)
if eig:
if mean_field:
try:
prior_entropy = mean_field_entropy(model, [design], whitelist=target_labels)
            except NotImplementedError:
prior_entropy = monte_carlo_entropy(model, design, target_labels, **prior_entropy_kwargs)
else:
prior_entropy = monte_carlo_entropy(model, design, target_labels, **prior_entropy_kwargs)
return prior_entropy - ape
else:
return ape
def _laplace_vi_ape(model, design, observation_labels, target_labels, guide, loss, optim, num_steps,
final_num_samples, y_dist=None):
def posterior_entropy(y_dist, design):
# Important that y_dist is sampled *within* the function
y = pyro.sample("conditioning_y", y_dist)
y_dict = {label: y[i, ...] for i, label in enumerate(observation_labels)}
conditioned_model = pyro.condition(model, data=y_dict)
# Here just using SVI to run the MAP optimization
guide.train()
SVI(conditioned_model, guide=guide, loss=loss, optim=optim, num_steps=num_steps, num_samples=1).run(design)
# Recover the entropy
with poutine.block():
final_loss = loss(conditioned_model, guide, design)
guide.finalize(final_loss, target_labels)
entropy = mean_field_entropy(guide, [design], whitelist=target_labels)
return entropy
if y_dist is None:
y_dist = EmpiricalMarginal(Importance(model, num_samples=final_num_samples).run(design),
sites=observation_labels)
# Calculate the expected posterior entropy under this distn of y
loss_dist = EmpiricalMarginal(Search(posterior_entropy).run(y_dist, design))
ape = loss_dist.mean
return ape
# Deprecated
def vi_eig(model, design, observation_labels, target_labels, vi_parameters, is_parameters, y_dist=None,
eig=True, **prior_entropy_kwargs):
"""Estimates the expected information gain (EIG) using variational inference (VI).
The APE is defined as
:math:`APE(d)=E_{Y\\sim p(y|\\theta, d)}[H(p(\\theta|Y, d))]`
where :math:`H[p(x)]` is the `differential entropy
<https://en.wikipedia.org/wiki/Differential_entropy>`_.
The APE is related to expected information gain (EIG) by the equation
:math:`EIG(d)=H[p(\\theta)]-APE(d)`
in particular, minimising the APE is equivalent to maximising EIG.
:param function model: A pyro model accepting `design` as only argument.
:param torch.Tensor design: Tensor representation of design
:param list observation_labels: A subset of the sample sites
present in `model`. These sites are regarded as future observations
and other sites are regarded as latent variables over which a
posterior is to be inferred.
:param list target_labels: A subset of the sample sites over which the posterior
entropy is to be measured.
:param dict vi_parameters: Variational inference parameters which should include:
`optim`: an instance of :class:`pyro.Optim`, `guide`: a guide function
compatible with `model`, `num_steps`: the number of VI steps to make,
and `loss`: the loss function to use for VI
:param dict is_parameters: Importance sampling parameters for the
marginal distribution of :math:`Y`. May include `num_samples`: the number
of samples to draw from the marginal.
:param pyro.distributions.Distribution y_dist: (optional) the distribution
assumed for the response variable :math:`Y`
:param bool eig: Whether to compute the EIG or the average posterior entropy (APE). The EIG is given by
`EIG = prior entropy - APE`. If `True`, the prior entropy will be estimated analytically,
or by Monte Carlo as appropriate for the `model`. If `False` the APE is returned.
:param dict prior_entropy_kwargs: parameters for estimating the prior entropy: `num_prior_samples` indicating the
number of samples for a MC estimate of prior entropy, and `mean_field` indicating if an analytic form for
a mean-field prior should be tried.
:return: EIG estimate
:rtype: `torch.Tensor`
"""
warnings.warn("`vi_eig` is deprecated in favour of the amortized version: `posterior_eig`.", DeprecationWarning)
if isinstance(observation_labels, str):
observation_labels = [observation_labels]
if target_labels is not None and isinstance(target_labels, str):
target_labels = [target_labels]
ape = _vi_ape(model, design, observation_labels, target_labels, vi_parameters, is_parameters, y_dist=y_dist)
return _eig_from_ape(model, design, target_labels, ape, eig, prior_entropy_kwargs)
def _vi_ape(model, design, observation_labels, target_labels, vi_parameters, is_parameters, y_dist=None):
def posterior_entropy(y_dist, design):
# Important that y_dist is sampled *within* the function
y = pyro.sample("conditioning_y", y_dist)
y_dict = {label: y[i, ...] for i, label in enumerate(observation_labels)}
conditioned_model = pyro.condition(model, data=y_dict)
SVI(conditioned_model, **vi_parameters).run(design)
# Recover the entropy
with poutine.block():
guide = vi_parameters["guide"]
entropy = mean_field_entropy(guide, [design], whitelist=target_labels)
return entropy
if y_dist is None:
y_dist = EmpiricalMarginal(Importance(model, **is_parameters).run(design),
sites=observation_labels)
# Calculate the expected posterior entropy under this distn of y
loss_dist = EmpiricalMarginal(Search(posterior_entropy).run(y_dist, design))
loss = loss_dist.mean
return loss
def nmc_eig(model, design, observation_labels, target_labels=None,
N=100, M=10, M_prime=None, independent_priors=False):
"""
Nested Monte Carlo estimate of the expected information
gain (EIG). The estimate is, when there are not any random effects,
.. math::
\\frac{1}{N}\\sum_{n=1}^N \\log p(y_n | \\theta_n, d) -
\\frac{1}{N}\\sum_{n=1}^N \\log \\left(\\frac{1}{M}\\sum_{m=1}^M p(y_n | \\theta_m, d)\\right)
The estimate is, in the presence of random effects,
.. math::
\\frac{1}{N}\\sum_{n=1}^N \\log \\left(\\frac{1}{M'}\\sum_{m=1}^{M'}
p(y_n | \\theta_n, \\widetilde{\\theta}_{nm}, d)\\right)-
\\frac{1}{N}\\sum_{n=1}^N \\log \\left(\\frac{1}{M}\\sum_{m=1}^{M}
p(y_n | \\theta_m, \\widetilde{\\theta}_{m}, d)\\right)
The latter form is used when `M_prime != None`.
:param function model: A pyro model accepting `design` as only argument.
:param torch.Tensor design: Tensor representation of design
:param list observation_labels: A subset of the sample sites
present in `model`. These sites are regarded as future observations
and other sites are regarded as latent variables over which a
posterior is to be inferred.
:param list target_labels: A subset of the sample sites over which the posterior
entropy is to be measured.
:param int N: Number of outer expectation samples.
:param int M: Number of inner expectation samples for `p(y|d)`.
:param int M_prime: Number of samples for `p(y | theta, d)` if required.
:param bool independent_priors: Only used when `M_prime` is not `None`. Indicates whether the prior distributions
for the target variables and the nuisance variables are independent. In this case, it is not necessary to
sample the targets conditional on the nuisance variables.
:return: EIG estimate
:rtype: `torch.Tensor`
"""
if isinstance(observation_labels, str): # list of strings instead of strings
observation_labels = [observation_labels]
if isinstance(target_labels, str):
target_labels = [target_labels]
# Take N samples of the model
expanded_design = lexpand(design, N) # N copies of the model
trace = poutine.trace(model).get_trace(expanded_design)
trace.compute_log_prob()
if M_prime is not None:
y_dict = {l: lexpand(trace.nodes[l]["value"], M_prime) for l in observation_labels}
theta_dict = {l: lexpand(trace.nodes[l]["value"], M_prime) for l in target_labels}
theta_dict.update(y_dict)
# Resample M values of u and compute conditional probabilities
# WARNING: currently the use of condition does not actually sample
# the conditional distribution!
# We need to use some importance weighting
conditional_model = pyro.condition(model, data=theta_dict)
if independent_priors:
reexpanded_design = lexpand(design, M_prime, 1)
else:
# Not acceptable to use (M_prime, 1) here - other variables may occur after
# theta, so need to be sampled conditional upon it
reexpanded_design = lexpand(design, M_prime, N)
retrace = poutine.trace(conditional_model).get_trace(reexpanded_design)
retrace.compute_log_prob()
conditional_lp = sum(retrace.nodes[l]["log_prob"] for l in observation_labels).logsumexp(0) \
- math.log(M_prime)
else:
# This assumes that y are independent conditional on theta
# Furthermore assume that there are no other variables besides theta
conditional_lp = sum(trace.nodes[l]["log_prob"] for l in observation_labels)
y_dict = {l: lexpand(trace.nodes[l]["value"], M) for l in observation_labels}
# Resample M values of theta and compute conditional probabilities
conditional_model = pyro.condition(model, data=y_dict)
# Using (M, 1) instead of (M, N) - acceptable to re-use thetas between ys because
# theta comes before y in graphical model
reexpanded_design = lexpand(design, M, 1) # sample M theta
retrace = poutine.trace(conditional_model).get_trace(reexpanded_design)
retrace.compute_log_prob()
marginal_lp = sum(retrace.nodes[l]["log_prob"] for l in observation_labels).logsumexp(0) \
- math.log(M)
terms = conditional_lp - marginal_lp
nonnan = (~torch.isnan(terms)).sum(0).type_as(terms)
terms[torch.isnan(terms)] = 0.
return terms.sum(0)/nonnan
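# A minimal usage sketch for nmc_eig (illustrative only, not part of this module):
# for a model with a single observation site "y" and a single latent site "theta",
#     eig = nmc_eig(model, design, observation_labels="y", target_labels="theta", N=100, M=10)
# returns a tensor of EIG estimates, one per candidate design in the leading batch
# dimension of `design`.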
def donsker_varadhan_eig(model, design, observation_labels, target_labels,
num_samples, num_steps, T, optim, return_history=False,
final_design=None, final_num_samples=None):
"""
Donsker-Varadhan estimate of the expected information gain (EIG).
The Donsker-Varadhan representation of EIG is
.. math::
\\sup_T E_{p(y, \\theta | d)}[T(y, \\theta)] - \\log E_{p(y|d)p(\\theta)}[\\exp(T(\\bar{y}, \\bar{\\theta}))]
where :math:`T` is any (measurable) function.
This methods optimises the loss function over a pre-specified class of
functions `T`.
:param function model: A pyro model accepting `design` as only argument.
:param torch.Tensor design: Tensor representation of design
:param list observation_labels: A subset of the sample sites
present in `model`. These sites are regarded as future observations
and other sites are regarded as latent variables over which a
posterior is to be inferred.
:param list target_labels: A subset of the sample sites over which the posterior
entropy is to be measured.
:param int num_samples: Number of samples per iteration.
:param int num_steps: Number of optimisation steps.
:param function or torch.nn.Module T: optimisable function `T` for use in the
Donsker-Varadhan loss function.
:param pyro.optim.Optim optim: Optimiser to use.
:param bool return_history: If `True`, also returns a tensor giving the loss function
at each step of the optimisation.
:param torch.Tensor final_design: The final design tensor to evaluate at. If `None`, uses
`design`.
    :param int final_num_samples: The number of samples to use at the final evaluation. If `None`,
        uses `num_samples`.
    :return: EIG estimate, optionally includes full optimisation history
:rtype: `torch.Tensor` or `tuple`
"""
if isinstance(observation_labels, str):
observation_labels = [observation_labels]
if isinstance(target_labels, str):
target_labels = [target_labels]
loss = _donsker_varadhan_loss(model, T, observation_labels, target_labels)
return opt_eig_ape_loss(design, loss, num_samples, num_steps, optim, return_history,
final_design, final_num_samples)
def posterior_eig(model, design, observation_labels, target_labels, num_samples, num_steps, guide, optim,
return_history=False, final_design=None, final_num_samples=None, eig=True, prior_entropy_kwargs={},
*args, **kwargs):
"""
Posterior estimate of expected information gain (EIG) computed from the average posterior entropy (APE)
using `EIG = prior entropy - APE`. See [1] for full details.
The posterior representation of APE is
:math:`sup_{q}E_{p(y, \\theta | d)}[\\log q(\\theta | y, d)]`
where :math:`q` is any distribution on :math:`\\theta`.
This method optimises the loss over a given guide family `guide`
representing :math:`q`.
[1] Foster, Adam, et al. "Variational Bayesian Optimal Experimental Design." arXiv preprint arXiv:1903.05480 (2019).
:param function model: A pyro model accepting `design` as only argument.
:param torch.Tensor design: Tensor representation of design
:param list observation_labels: A subset of the sample sites
present in `model`. These sites are regarded as future observations
and other sites are regarded as latent variables over which a
posterior is to be inferred.
:param list target_labels: A subset of the sample sites over which the posterior
entropy is to be measured.
:param int num_samples: Number of samples per iteration.
:param int num_steps: Number of optimisation steps.
:param function guide: guide family for use in the (implicit) posterior estimation.
The parameters of `guide` are optimised to maximise the posterior
objective.
:param pyro.optim.Optim optim: Optimiser to use.
:param bool return_history: If `True`, also returns a tensor giving the loss function
at each step of the optimisation.
:param torch.Tensor final_design: The final design tensor to evaluate at. If `None`, uses
`design`.
    :param int final_num_samples: The number of samples to use at the final evaluation. If `None`,
        uses `num_samples`.
:param bool eig: Whether to compute the EIG or the average posterior entropy (APE). The EIG is given by
`EIG = prior entropy - APE`. If `True`, the prior entropy will be estimated analytically,
or by Monte Carlo as appropriate for the `model`. If `False` the APE is returned.
:param dict prior_entropy_kwargs: parameters for estimating the prior entropy: `num_prior_samples` indicating the
number of samples for a MC estimate of prior entropy, and `mean_field` indicating if an analytic form for
a mean-field prior should be tried.
:return: EIG estimate, optionally includes full optimisation history
:rtype: `torch.Tensor` or `tuple`
"""
if isinstance(observation_labels, str):
observation_labels = [observation_labels]
if isinstance(target_labels, str):
target_labels = [target_labels]
ape = _posterior_ape(model, design, observation_labels, target_labels, num_samples, num_steps, guide, optim,
return_history=return_history, final_design=final_design, final_num_samples=final_num_samples,
*args, **kwargs)
return _eig_from_ape(model, design, target_labels, ape, eig, prior_entropy_kwargs)
def _posterior_ape(model, design, observation_labels, target_labels,
num_samples, num_steps, guide, optim, return_history=False,
final_design=None, final_num_samples=None, *args, **kwargs):
loss = _posterior_loss(model, guide, observation_labels, target_labels, *args, **kwargs)
return opt_eig_ape_loss(design, loss, num_samples, num_steps, optim, return_history,
final_design, final_num_samples)
def marginal_eig(model, design, observation_labels, target_labels,
num_samples, num_steps, guide, optim, return_history=False,
final_design=None, final_num_samples=None):
"""Estimate EIG by estimating the marginal entropy :math:`p(y|d)`. See [1] for full details.
The marginal representation of EIG is
:math:`inf_{q}E_{p(y, \\theta | d)}\\left[\\log \\frac{p(y | \\theta, d)}{q(y | d)} \\right]`
where :math:`q` is any distribution on :math:`y`.
.. warning :: this method does **not** estimate the correct quantity in the presence of random effects.
[1] Foster, Adam, et al. "Variational Bayesian Optimal Experimental Design." arXiv preprint arXiv:1903.05480 (2019).
:param function model: A pyro model accepting `design` as only argument.
:param torch.Tensor design: Tensor representation of design
:param list observation_labels: A subset of the sample sites
present in `model`. These sites are regarded as future observations
and other sites are regarded as latent variables over which a
posterior is to be inferred.
:param list target_labels: A subset of the sample sites over which the posterior
entropy is to be measured.
:param int num_samples: Number of samples per iteration.
:param int num_steps: Number of optimisation steps.
:param function guide: guide family for use in the marginal estimation.
The parameters of `guide` are optimised to maximise the log-likelihood objective.
:param pyro.optim.Optim optim: Optimiser to use.
:param bool return_history: If `True`, also returns a tensor giving the loss function
at each step of the optimisation.
:param torch.Tensor final_design: The final design tensor to evaluate at. If `None`, uses
`design`.
:param int final_num_samples: The number of samples to use at the final evaluation. If `None`,
uses `num_samples`.
:return: EIG estimate, optionally includes full optimisation history
:rtype: `torch.Tensor` or `tuple`
"""
if isinstance(observation_labels, str):
observation_labels = [observation_labels]
if isinstance(target_labels, str):
target_labels = [target_labels]
loss = _marginal_loss(model, guide, observation_labels, target_labels)
return opt_eig_ape_loss(design, loss, num_samples, num_steps, optim, return_history,
final_design, final_num_samples)
def marginal_likelihood_eig(model, design, observation_labels, target_labels,
num_samples, num_steps, marginal_guide, cond_guide, optim,
return_history=False, final_design=None, final_num_samples=None):
"""Estimates EIG by estimating the marginal entropy, that of :math:`p(y|d)`,
*and* the conditional entropy, of :math:`p(y|\\theta, d)`, both via Gibbs' Inequality. See [1] for full details.
[1] Foster, Adam, et al. "Variational Bayesian Optimal Experimental Design." arXiv preprint arXiv:1903.05480 (2019).
:param function model: A pyro model accepting `design` as only argument.
:param torch.Tensor design: Tensor representation of design
:param list observation_labels: A subset of the sample sites
present in `model`. These sites are regarded as future observations
and other sites are regarded as latent variables over which a
posterior is to be inferred.
:param list target_labels: A subset of the sample sites over which the posterior
entropy is to be measured.
:param int num_samples: Number of samples per iteration.
:param int num_steps: Number of optimisation steps.
:param function marginal_guide: guide family for use in the marginal estimation.
The parameters of `guide` are optimised to maximise the log-likelihood objective.
:param function cond_guide: guide family for use in the likelihood (conditional) estimation.
The parameters of `guide` are optimised to maximise the log-likelihood objective.
:param pyro.optim.Optim optim: Optimiser to use.
:param bool return_history: If `True`, also returns a tensor giving the loss function
at each step of the optimisation.
:param torch.Tensor final_design: The final design tensor to evaluate at. If `None`, uses
`design`.
:param int final_num_samples: The number of samples to use at the final evaluation. If `None`,
uses `num_samples`.
:return: EIG estimate, optionally includes full optimisation history
:rtype: `torch.Tensor` or `tuple`
"""
if isinstance(observation_labels, str):
observation_labels = [observation_labels]
if isinstance(target_labels, str):
target_labels = [target_labels]
loss = _marginal_likelihood_loss(model, marginal_guide, cond_guide, observation_labels, target_labels)
return opt_eig_ape_loss(design, loss, num_samples, num_steps, optim, return_history,
final_design, final_num_samples)
def lfire_eig(model, design, observation_labels, target_labels,
num_y_samples, num_theta_samples, num_steps, classifier, optim, return_history=False,
final_design=None, final_num_samples=None):
"""Estimates the EIG using the method of Likelihood-Free Inference by Ratio Estimation (LFIRE) as in [1].
LFIRE is run separately for several samples of :math:`\\theta`.
[1] Kleinegesse, Steven, and Michael Gutmann. "Efficient Bayesian Experimental Design for Implicit Models."
arXiv preprint arXiv:1810.09912 (2018).
:param function model: A pyro model accepting `design` as only argument.
:param torch.Tensor design: Tensor representation of design
:param list observation_labels: A subset of the sample sites
present in `model`. These sites are regarded as future observations
and other sites are regarded as latent variables over which a
posterior is to be inferred.
:param list target_labels: A subset of the sample sites over which the posterior
entropy is to be measured.
:param int num_y_samples: Number of samples to take in :math:`y` for each :math:`\\theta`.
:param int num_theta_samples: Number of initial samples in :math:`\\theta` to take. The likelihood ratio
is estimated by LFIRE for each sample.
:param int num_steps: Number of optimisation steps.
:param function classifier: a Pytorch or Pyro classifier used to distinguish between samples of :math:`y` under
:math:`p(y|d)` and samples under :math:`p(y|\\theta,d)` for some :math:`\\theta`.
:param pyro.optim.Optim optim: Optimiser to use.
:param bool return_history: If `True`, also returns a tensor giving the loss function
at each step of the optimisation.
:param torch.Tensor final_design: The final design tensor to evaluate at. If `None`, uses
`design`.
:param int final_num_samples: The number of samples to use at the final evaluation. If `None`,
uses `num_y_samples`.
:return: EIG estimate, optionally includes full optimisation history
:rtype: `torch.Tensor` or `tuple`
"""
if isinstance(observation_labels, str):
observation_labels = [observation_labels]
if isinstance(target_labels, str):
target_labels = [target_labels]
# Take N samples of the model
expanded_design = lexpand(design, num_theta_samples)
trace = poutine.trace(model).get_trace(expanded_design)
theta_dict = {l: trace.nodes[l]["value"] for l in target_labels}
cond_model = pyro.condition(model, data=theta_dict)
loss = _lfire_loss(model, cond_model, classifier, observation_labels, target_labels)
out = opt_eig_ape_loss(expanded_design, loss, num_y_samples, num_steps, optim, return_history,
final_design, final_num_samples)
if return_history:
return out[0], out[1].sum(0) / num_theta_samples
else:
return out.sum(0) / num_theta_samples
def vnmc_eig(model, design, observation_labels, target_labels,
num_samples, num_steps, guide, optim, return_history=False,
final_design=None, final_num_samples=None):
"""Estimates the EIG using Variational Nested Monte Carlo (VNMC). The VNMC estimate [1] is
.. math::
\\frac{1}{N}\\sum_{n=1}^N \\left[ \\log p(y_n | \\theta_n, d) -
\\log \\left(\\frac{1}{M}\\sum_{m=1}^M \\frac{p(\\theta_{mn})p(y_n | \\theta_{mn}, d)}
{q(\\theta_{mn} | y_n)} \\right) \\right]
where :math:`q(\\theta | y)` is the learned variational posterior approximation and
:math:`\\theta_n, y_n \\sim p(\\theta, y | d)`, :math:`\\theta_{mn} \\sim q(\\theta|y=y_n)`.
As :math:`N \\to \\infty` this is an upper bound on EIG. We minimise this upper bound by stochastic gradient
descent.
[1] Foster, Adam, et al. "Variational Bayesian Optimal Experimental Design." arXiv preprint arXiv:1903.05480 (2019).
:param function model: A pyro model accepting `design` as only argument.
:param torch.Tensor design: Tensor representation of design
:param list observation_labels: A subset of the sample sites
present in `model`. These sites are regarded as future observations
and other sites are regarded as latent variables over which a
posterior is to be inferred.
:param list target_labels: A subset of the sample sites over which the posterior
entropy is to be measured.
:param tuple num_samples: Number of (:math:`N, M`) samples per iteration.
:param int num_steps: Number of optimisation steps.
:param function guide: guide family for use in the posterior estimation.
The parameters of `guide` are optimised to minimise the VNMC upper bound.
:param pyro.optim.Optim optim: Optimiser to use.
:param bool return_history: If `True`, also returns a tensor giving the loss function
at each step of the optimisation.
:param torch.Tensor final_design: The final design tensor to evaluate at. If `None`, uses
`design`.
:param tuple final_num_samples: The number of (:math:`N, M`) samples to use at the final evaluation. If `None`,
uses `num_samples`.
:return: EIG estimate, optionally includes full optimisation history
:rtype: `torch.Tensor` or `tuple`
"""
if isinstance(observation_labels, str):
observation_labels = [observation_labels]
if isinstance(target_labels, str):
target_labels = [target_labels]
loss = _vnmc_eig_loss(model, guide, observation_labels, target_labels)
return opt_eig_ape_loss(design, loss, num_samples, num_steps, optim, return_history,
final_design, final_num_samples)
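# Shared stochastic-gradient loop used by all the estimators above: each step calls a
# `loss_fn` closure (built by one of the `_*_loss` factories below) on `num_samples`
# particles, backpropagates the scalar `agg_loss` into whatever guide/critic parameters were
# touched while computing it, and applies `optim`. A final pass with `evaluation=True` on
# `final_design`/`final_num_samples` produces the returned estimate.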
def opt_eig_ape_loss(design, loss_fn, num_samples, num_steps, optim, return_history=False,
final_design=None, final_num_samples=None):
if final_design is None:
final_design = design
if final_num_samples is None:
final_num_samples = num_samples
params = None
history = []
for step in range(num_steps):
if params is not None:
pyro.infer.util.zero_grads(params)
with poutine.trace(param_only=True) as param_capture:
agg_loss, loss = loss_fn(design, num_samples, evaluation=return_history)
params = set(site["value"].unconstrained()
for site in param_capture.trace.nodes.values())
if torch.isnan(agg_loss):
raise ArithmeticError("Encountered NaN loss in opt_eig_ape_loss")
agg_loss.backward(retain_graph=True)
if return_history:
history.append(loss)
optim(params)
_, loss = loss_fn(final_design, final_num_samples, evaluation=True)
if return_history:
return torch.stack(history), loss
else:
return loss
def monte_carlo_entropy(model, design, target_labels, num_prior_samples=1000):
"""Computes a Monte Carlo estimate of the entropy of `model` assuming that each of sites in `target_labels` is
independent and the entropy is to be computed for that subset of sites only.
"""
if isinstance(target_labels, str):
target_labels = [target_labels]
expanded_design = lexpand(design, num_prior_samples)
trace = pyro.poutine.trace(model).get_trace(expanded_design)
trace.compute_log_prob()
lp = sum(trace.nodes[l]["log_prob"] for l in target_labels)
return -lp.sum(0) / num_prior_samples
def _donsker_varadhan_loss(model, T, observation_labels, target_labels):
"""DV loss: to evaluate directly use `donsker_varadhan_eig` setting `num_steps=0`."""
ewma_log = EwmaLog(alpha=0.90)
def loss_fn(design, num_particles, **kwargs):
try:
pyro.module("T", T)
except AssertionError:
pass
expanded_design = lexpand(design, num_particles)
# Unshuffled data
unshuffled_trace = poutine.trace(model).get_trace(expanded_design)
y_dict = {l: unshuffled_trace.nodes[l]["value"] for l in observation_labels}
# Shuffled data
# Not actually shuffling, resimulate for safety
conditional_model = pyro.condition(model, data=y_dict)
shuffled_trace = poutine.trace(conditional_model).get_trace(expanded_design)
T_joint = T(expanded_design, unshuffled_trace, observation_labels, target_labels)
T_independent = T(expanded_design, shuffled_trace, observation_labels, target_labels)
joint_expectation = T_joint.sum(0)/num_particles
A = T_independent - math.log(num_particles)
s, _ = torch.max(A, dim=0)
independent_expectation = s + ewma_log((A - s).exp().sum(dim=0), s)
loss = joint_expectation - independent_expectation
# Switch sign, sum over batch dimensions for scalar loss
agg_loss = -loss.sum()
return agg_loss, loss
return loss_fn
def _posterior_loss(model, guide, observation_labels, target_labels, analytic_entropy=False):
"""Posterior loss: to evaluate directly use `posterior_eig` setting `num_steps=0`, `eig=False`."""
def loss_fn(design, num_particles, evaluation=False, **kwargs):
expanded_design = lexpand(design, num_particles)
# Sample from p(y, theta | d)
trace = poutine.trace(model).get_trace(expanded_design)
y_dict = {l: trace.nodes[l]["value"] for l in observation_labels}
theta_dict = {l: trace.nodes[l]["value"] for l in target_labels}
# Run through q(theta | y, d)
conditional_guide = pyro.condition(guide, data=theta_dict)
cond_trace = poutine.trace(conditional_guide).get_trace(
y_dict, expanded_design, observation_labels, target_labels)
cond_trace.compute_log_prob()
if evaluation and analytic_entropy:
loss = mean_field_entropy(
guide, [y_dict, expanded_design, observation_labels, target_labels],
whitelist=target_labels).sum(0) / num_particles
agg_loss = loss.sum()
else:
terms = -sum(cond_trace.nodes[l]["log_prob"] for l in target_labels)
agg_loss, loss = _safe_mean_terms(terms)
return agg_loss, loss
return loss_fn
def _marginal_loss(model, guide, observation_labels, target_labels):
"""Marginal loss: to evaluate directly use `marginal_eig` setting `num_steps=0`."""
def loss_fn(design, num_particles, evaluation=False, **kwargs):
expanded_design = lexpand(design, num_particles)
# Sample from p(y | d)
trace = poutine.trace(model).get_trace(expanded_design)
y_dict = {l: trace.nodes[l]["value"] for l in observation_labels}
# Run through q(y | d)
conditional_guide = pyro.condition(guide, data=y_dict)
cond_trace = poutine.trace(conditional_guide).get_trace(
expanded_design, observation_labels, target_labels)
cond_trace.compute_log_prob()
terms = -sum(cond_trace.nodes[l]["log_prob"] for l in observation_labels)
# At eval time, add p(y | theta, d) terms
if evaluation:
trace.compute_log_prob()
terms += sum(trace.nodes[l]["log_prob"] for l in observation_labels)
return _safe_mean_terms(terms)
return loss_fn
def _marginal_likelihood_loss(model, marginal_guide, likelihood_guide, observation_labels, target_labels):
"""Marginal_likelihood loss: to evaluate directly use `marginal_likelihood_eig` setting `num_steps=0`."""
def loss_fn(design, num_particles, evaluation=False, **kwargs):
expanded_design = lexpand(design, num_particles)
# Sample from p(y | d)
trace = poutine.trace(model).get_trace(expanded_design)
y_dict = {l: trace.nodes[l]["value"] for l in observation_labels}
theta_dict = {l: trace.nodes[l]["value"] for l in target_labels}
# Run through q(y | d)
qyd = pyro.condition(marginal_guide, data=y_dict)
marginal_trace = poutine.trace(qyd).get_trace(
expanded_design, observation_labels, target_labels)
marginal_trace.compute_log_prob()
# Run through q(y | theta, d)
qythetad = pyro.condition(likelihood_guide, data=y_dict)
cond_trace = poutine.trace(qythetad).get_trace(
theta_dict, expanded_design, observation_labels, target_labels)
cond_trace.compute_log_prob()
terms = -sum(marginal_trace.nodes[l]["log_prob"] for l in observation_labels)
# At evaluation time, use the right estimator, q(y | theta, d) - q(y | d)
# At training time, use -q(y | theta, d) - q(y | d) so gradients go the same way
if evaluation:
terms += sum(cond_trace.nodes[l]["log_prob"] for l in observation_labels)
else:
terms -= sum(cond_trace.nodes[l]["log_prob"] for l in observation_labels)
return _safe_mean_terms(terms)
return loss_fn
def _lfire_loss(model_marginal, model_conditional, h, observation_labels, target_labels):
"""LFIRE loss: to evaluate directly use `lfire_eig` setting `num_steps=0`."""
def loss_fn(design, num_particles, evaluation=False, **kwargs):
try:
pyro.module("h", h)
except AssertionError:
pass
expanded_design = lexpand(design, num_particles)
model_conditional_trace = poutine.trace(model_conditional).get_trace(expanded_design)
if not evaluation:
model_marginal_trace = poutine.trace(model_marginal).get_trace(expanded_design)
h_joint = h(expanded_design, model_conditional_trace, observation_labels, target_labels)
h_independent = h(expanded_design, model_marginal_trace, observation_labels, target_labels)
terms = torch.nn.functional.softplus(-h_joint) + torch.nn.functional.softplus(h_independent)
return _safe_mean_terms(terms)
else:
h_joint = h(expanded_design, model_conditional_trace, observation_labels, target_labels)
return _safe_mean_terms(h_joint)
return loss_fn
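# The VNMC loss below implements the nested estimator described in the `vnmc_eig` docstring:
# for each of the N outer samples (theta_n, y_n) ~ p(theta, y | d) it draws M inner samples
# theta_mn ~ q(theta | y_n), combines the log importance weights
# log p(theta_mn) + log p(y_n | theta_mn, d) - log q(theta_mn | y_n) with a logsumexp over
# the inner dimension, and at evaluation time adds log p(y_n | theta_n, d) to obtain the
# upper bound on EIG.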
def _vnmc_eig_loss(model, guide, observation_labels, target_labels):
"""VNMC loss: to evaluate directly use `vnmc_eig` setting `num_steps=0`."""
def loss_fn(design, num_particles, evaluation=False, **kwargs):
N, M = num_particles
expanded_design = lexpand(design, N)
# Sample from p(y, theta | d)
trace = poutine.trace(model).get_trace(expanded_design)
y_dict = {l: lexpand(trace.nodes[l]["value"], M) for l in observation_labels}
# Sample M times from q(theta | y, d) for each y
reexpanded_design = lexpand(expanded_design, M)
conditional_guide = pyro.condition(guide, data=y_dict)
guide_trace = poutine.trace(conditional_guide).get_trace(
y_dict, reexpanded_design, observation_labels, target_labels)
theta_y_dict = {l: guide_trace.nodes[l]["value"] for l in target_labels}
theta_y_dict.update(y_dict)
guide_trace.compute_log_prob()
# Re-run that through the model to compute the joint
modelp = pyro.condition(model, data=theta_y_dict)
model_trace = poutine.trace(modelp).get_trace(reexpanded_design)
model_trace.compute_log_prob()
terms = -sum(guide_trace.nodes[l]["log_prob"] for l in target_labels)
terms += sum(model_trace.nodes[l]["log_prob"] for l in target_labels)
terms += sum(model_trace.nodes[l]["log_prob"] for l in observation_labels)
terms = -terms.logsumexp(0) + math.log(M)
# At eval time, add p(y | theta, d) terms
if evaluation:
trace.compute_log_prob()
terms += sum(trace.nodes[l]["log_prob"] for l in observation_labels)
return _safe_mean_terms(terms)
return loss_fn
def _safe_mean_terms(terms):
mask = torch.isnan(terms) | (terms == float('-inf')) | (terms == float('inf'))
if terms.dtype is torch.float32:
nonnan = (~mask).sum(0).float()
elif terms.dtype is torch.float64:
nonnan = (~mask).sum(0).double()
terms[mask] = 0.
loss = terms.sum(0) / nonnan
agg_loss = loss.sum()
return agg_loss, loss
def xexpx(a):
"""Computes `a*exp(a)`.
This function makes the outputs more stable when the inputs of this function converge to :math:`-\\infty`.
:param torch.Tensor a:
:return: Equivalent of `a*torch.exp(a)`.
"""
mask = (a == float('-inf'))
y = a*torch.exp(a)
y[mask] = 0.
return y
class _EwmaLogFn(torch.autograd.Function):
@staticmethod
def forward(ctx, input, ewma):
ctx.save_for_backward(ewma)
return input.log()
@staticmethod
def backward(ctx, grad_output):
ewma, = ctx.saved_tensors
return grad_output / ewma, None
_ewma_log_fn = _EwmaLogFn.apply
class EwmaLog(object):
"""Logarithm function with exponentially weighted moving average
for gradients.
For input `inputs` this function returns :code:`inputs.log()`. However, it
computes the gradient as
:math:`\\frac{\\sum_{t=0}^{T-1} \\alpha^t}{\\sum_{t=0}^{T-1} \\alpha^t x_{T-t}}`
where :math:`x_t` are historical input values passed to this function,
:math:`x_T` being the most recently seen value.
This gradient may help with numerical stability when the sequence of
inputs to the function form a convergent sequence.
"""
def __init__(self, alpha):
self.alpha = alpha
self.ewma = 0.
self.n = 0
self.s = 0.
def __call__(self, inputs, s, dim=0, keepdim=False):
"""Updates the moving average, and returns :code:`inputs.log()`.
"""
self.n += 1
if torch_isnan(self.ewma) or torch_isinf(self.ewma):
ewma = inputs
else:
ewma = inputs * (1. - self.alpha) / (1 - self.alpha**self.n) \
+ torch.exp(self.s - s) * self.ewma \
* (self.alpha - self.alpha**self.n) / (1 - self.alpha**self.n)
self.ewma = ewma.detach()
self.s = s.detach()
return _ewma_log_fn(inputs, ewma)
|
import numpy as np
import os
import sys
import errno
import argparse
import shutil
import os.path as osp
import scipy.io as sio
from sklearn.neighbors import NearestNeighbors
from scipy import sparse
import scipy.sparse as sps
import timeit
from pyflann import FLANN
import multiprocessing
#import random
#random.seed(0)
#np.random.seed(0)
SHARED_VARS = {}
SHARED_array = {}
class Logger(object):
"""
Write console output to external text file.
Code imported from https://github.com/Cysu/open-reid/blob/master/reid/utils/logging.py.
"""
def __init__(self, fpath=None):
self.console = sys.stdout
self.file = None
if fpath is not None:
mkdir_if_missing(os.path.dirname(fpath))
self.file = open(fpath, 'w')
def __del__(self):
self.close()
def __enter__(self):
pass
def __exit__(self, *args):
self.close()
def write(self, msg):
self.console.write(msg)
if self.file is not None:
self.file.write(msg)
def flush(self):
self.console.flush()
if self.file is not None:
self.file.flush()
os.fsync(self.file.fileno())
def close(self):
self.console.close()
if self.file is not None:
self.file.close()
def mkdir_if_missing(directory):
if not osp.exists(directory):
try:
os.makedirs(directory)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def normalizefea(X):
"""
L2 normalize each row of X.
"""
feanorm = np.maximum(1e-14, np.sum(X**2, axis=1))
X_out = X / (feanorm[:, None]**0.5)
return X_out
def get_V_jl(x,l,N,K):
x = x.squeeze()
temp = np.zeros((N,K))
index_cluster = l[x]
temp[(x,index_cluster)]=1
temp = temp.sum(0)
return temp
def get_fair_accuracy(u_V,V_list,l,N,K):
# pdb.set_trace()
V_j_list = np.array([get_V_jl(x,l,N,K) for x in V_list])
balance = np.zeros(K)
J = len(V_list)
for k in range(K):
V_j_list_k = V_j_list[:,k].copy()
balance_temp = np.tile(V_j_list_k,[J,1])
balance_temp = balance_temp.T/np.maximum(balance_temp,1e-20)
mask = np.ones(balance_temp.shape, dtype=bool)
np.fill_diagonal(mask,0)
balance[k] = balance_temp[mask].min()
# approx_j_per_K = N/(K*V_j_list.shape[0])
# error = np.abs(V_j_list - approx_j_per_K)
# error = error.sum()/N
return balance.min(), balance.mean()
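# The proportional fairness error below compares, for each cluster k, the demographic mix
# realised inside the cluster against the overall proportions u_V: it accumulates the
# KL-divergence-style penalty sum_j u_j * (log u_j - log(share of group j in cluster k)),
# so the total is zero only when every cluster reproduces the global group proportions.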
def get_fair_accuracy_proportional(u_V,V_list,l,N,K):
V_j_list = np.array([get_V_jl(x,l,N,K) for x in V_list])
clustered_uV = V_j_list/sum(V_j_list)
# balance = V_j_list/sum(V_j_list)
fairness_error = np.zeros(K)
u_V =np.array(u_V)
for k in range(K):
fairness_error[k] = (-u_V*np.log(np.maximum(clustered_uV[:,k],1e-20))+u_V*np.log(u_V)).sum()
return fairness_error.sum()
def create_affinity(X, knn, scale = None, alg = "annoy", savepath = None, W_path = None):
N,D = X.shape
if W_path is not None:
if W_path.endswith('.mat'):
W = sio.loadmat(W_path)['W']
elif W_path.endswith('.npz'):
W = sparse.load_npz(W_path)
else:
print('Compute Affinity ')
start_time = timeit.default_timer()
if alg == "flann":
print('with Flann')
flann = FLANN()
knnind, dist = flann.nn(X, X, knn, algorithm="kdtree", target_precision=0.9, cores=5)
# knnind = knnind[:,1:]
else:
nbrs = NearestNeighbors(n_neighbors=knn).fit(X)
dist, knnind = nbrs.kneighbors(X)
row = np.repeat(range(N),knn-1)
col = knnind[:,1:].flatten()
if scale is None:
data = np.ones(X.shape[0]*(knn-1))
elif scale is True:
scale = np.median(dist[:,1:])
data = np.exp((-dist[:,1:]**2)/(2 * scale ** 2)).flatten()
else:
data = np.exp((-dist[:,1:]**2)/(2 * scale ** 2)).flatten()
W = sparse.csc_matrix((data, (row, col)), shape=(N, N), dtype=np.float64)
W = (W + W.transpose(copy=True)) /2
elapsed = timeit.default_timer() - start_time
print(elapsed)
if isinstance(savepath,str):
if savepath.endswith('.npz'):
sparse.save_npz(savepath,W)
elif savepath.endswith('.mat'):
sio.savemat(savepath,{'W':W})
return W
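# Illustrative usage sketch (file names are assumptions for the example): build the kNN
# affinity once with a median-scaled Gaussian kernel and cache it for later runs, e.g.
#
#     W = create_affinity(X, knn=10, scale=True, alg="flann", savepath="W.npz")
#     # later runs can reload it: W = create_affinity(X, knn=10, W_path="W.npz")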
### supporting functions to make parallel updates of clusters
def n2m(a):
"""
Return a multiprocessing.Array COPY of a numpy.array, together
with shape, typecode and matrix flag.
"""
if not isinstance(a, np.ndarray): a = np.array(a)
return multiprocessing.Array(a.dtype.char, a.flat, lock=False), tuple(a.shape), a.dtype.char, isinstance(a, np.matrix)
def m2n(buf, shape, typecode, ismatrix=False):
"""
Return a numpy.array VIEW of a multiprocessing.Array given a
handle to the array, the shape, the data typecode, and a boolean
flag indicating whether the result should be cast as a matrix.
"""
a = np.frombuffer(buf, dtype=typecode).reshape(shape)
if ismatrix: a = np.asmatrix(a)
return a
def mpassing(slices):
i,k = slices
Q_s,kernel_s_data,kernel_s_indices,kernel_s_indptr,kernel_s_shape = get_shared_arrays('Q_s','kernel_s_data','kernel_s_indices','kernel_s_indptr','kernel_s_shape')
# kernel_s = sps.csc_matrix((SHARED_array['kernel_s_data'],SHARED_array['kernel_s_indices'],SHARED_array['kernel_s_indptr']), shape=SHARED_array['kernel_s_shape'], copy=False)
kernel_s = sps.csc_matrix((kernel_s_data,kernel_s_indices,kernel_s_indptr), shape=kernel_s_shape, copy=False)
Q_s[i,k] = kernel_s[i].dot(Q_s[:,k])
# return Q_s
def new_shared_array(shape, typecode='d', ismatrix=False):
"""
Allocate a new shared array and return all the details required
to reinterpret it as a numpy array or matrix (same order of
output arguments as n2m)
"""
typecode = np.dtype(typecode).char
return multiprocessing.Array(typecode, int(np.prod(shape)), lock=False), tuple(shape), typecode, ismatrix
def get_shared_arrays(*names):
return [m2n(*SHARED_VARS[name]) for name in names]
def init(*pargs, **kwargs):
SHARED_VARS.update(pargs, **kwargs)
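# Illustrative usage sketch (names are assumptions for the example, not defined here): the
# parent process wraps each array with `n2m` and registers it under a name via `init`, which
# accepts ('name', handle) pairs because dict.update takes an iterable of key/value pairs;
# worker processes then rebuild numpy views with `get_shared_arrays` and update them in
# place, e.g.
#
#     init(('Q_s', n2m(Q)), ('kernel_s_data', n2m(kernel.data)))
#     with multiprocessing.Pool(initializer=init, initargs=tuple(SHARED_VARS.items())) as p:
#         p.map(mpassing, slices)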
####
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['CloudUserArgs', 'CloudUser']
@pulumi.input_type
class CloudUserArgs:
def __init__(__self__, *,
description: Optional[pulumi.Input[str]] = None,
openstack_rc: Optional[pulumi.Input[Mapping[str, Any]]] = None,
project_id: Optional[pulumi.Input[str]] = None,
role_name: Optional[pulumi.Input[str]] = None,
role_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
service_name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a CloudUser resource.
:param pulumi.Input[str] description: A description associated with the user.
:param pulumi.Input[Mapping[str, Any]] openstack_rc: a convenient map representing an openstack_rc file.
Note: no password nor sensitive token is set in this map.
:param pulumi.Input[str] project_id: The id of the public cloud project. If omitted,
the `OVH_PROJECT_ID` environment variable is used. DEPRECATED. Use `service_name` instead.
:param pulumi.Input[str] role_name: The name of a role. See `role_names`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] role_names: A list of role names. Values can be:
- administrator,
- ai_training_operator
- authentication
- backup_operator
- compute_operator
- image_operator
- infrastructure_supervisor
- network_operator
- network_security_operator
- objectstore_operator
- volume_operator
:param pulumi.Input[str] service_name: The id of the public cloud project. Conflicts with `project_id`.
"""
if description is not None:
pulumi.set(__self__, "description", description)
if openstack_rc is not None:
pulumi.set(__self__, "openstack_rc", openstack_rc)
if project_id is not None:
pulumi.set(__self__, "project_id", project_id)
if role_name is not None:
pulumi.set(__self__, "role_name", role_name)
if role_names is not None:
pulumi.set(__self__, "role_names", role_names)
if service_name is not None:
pulumi.set(__self__, "service_name", service_name)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
A description associated with the user.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="openstackRc")
def openstack_rc(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
a convenient map representing an openstack_rc file.
Note: no password nor sensitive token is set in this map.
"""
return pulumi.get(self, "openstack_rc")
@openstack_rc.setter
def openstack_rc(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "openstack_rc", value)
@property
@pulumi.getter(name="projectId")
def project_id(self) -> Optional[pulumi.Input[str]]:
"""
The id of the public cloud project. If omitted,
the `OVH_PROJECT_ID` environment variable is used. DEPRECATED. Use `service_name` instead.
"""
return pulumi.get(self, "project_id")
@project_id.setter
def project_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project_id", value)
@property
@pulumi.getter(name="roleName")
def role_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of a role. See `role_names`.
"""
return pulumi.get(self, "role_name")
@role_name.setter
def role_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "role_name", value)
@property
@pulumi.getter(name="roleNames")
def role_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of role names. Values can be:
- administrator,
- ai_training_operator
- authentication
- backup_operator
- compute_operator
- image_operator
- infrastructure_supervisor
- network_operator
- network_security_operator
- objectstore_operator
- volume_operator
"""
return pulumi.get(self, "role_names")
@role_names.setter
def role_names(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "role_names", value)
@property
@pulumi.getter(name="serviceName")
def service_name(self) -> Optional[pulumi.Input[str]]:
"""
The id of the public cloud project. Conflicts with `project_id`.
"""
return pulumi.get(self, "service_name")
@service_name.setter
def service_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_name", value)
@pulumi.input_type
class _CloudUserState:
def __init__(__self__, *,
creation_date: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
openstack_rc: Optional[pulumi.Input[Mapping[str, Any]]] = None,
password: Optional[pulumi.Input[str]] = None,
project_id: Optional[pulumi.Input[str]] = None,
role_name: Optional[pulumi.Input[str]] = None,
role_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
roles: Optional[pulumi.Input[Sequence[pulumi.Input['CloudUserRoleArgs']]]] = None,
service_name: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None,
username: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering CloudUser resources.
:param pulumi.Input[str] creation_date: the date the user was created.
:param pulumi.Input[str] description: A description associated with the user.
:param pulumi.Input[Mapping[str, Any]] openstack_rc: a convenient map representing an openstack_rc file.
Note: no password nor sensitive token is set in this map.
:param pulumi.Input[str] password: (Sensitive) the password generated for the user. The password can
be used with the Openstack API. This attribute is sensitive and will only be
retrieved once, at creation time.
:param pulumi.Input[str] project_id: The id of the public cloud project. If omitted,
the `OVH_PROJECT_ID` environment variable is used. DEPRECATED. Use `service_name` instead.
:param pulumi.Input[str] role_name: The name of a role. See `role_names`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] role_names: A list of role names. Values can be:
- administrator,
- ai_training_operator
- authentication
- backup_operator
- compute_operator
- image_operator
- infrastructure_supervisor
- network_operator
- network_security_operator
- objectstore_operator
- volume_operator
:param pulumi.Input[Sequence[pulumi.Input['CloudUserRoleArgs']]] roles: A list of roles associated with the user.
:param pulumi.Input[str] service_name: The id of the public cloud project. Conflicts with `project_id`.
:param pulumi.Input[str] status: the status of the user. Should normally be set to 'ok'.
:param pulumi.Input[str] username: the username generated for the user. This username can be used with
the Openstack API.
"""
if creation_date is not None:
pulumi.set(__self__, "creation_date", creation_date)
if description is not None:
pulumi.set(__self__, "description", description)
if openstack_rc is not None:
pulumi.set(__self__, "openstack_rc", openstack_rc)
if password is not None:
pulumi.set(__self__, "password", password)
if project_id is not None:
pulumi.set(__self__, "project_id", project_id)
if role_name is not None:
pulumi.set(__self__, "role_name", role_name)
if role_names is not None:
pulumi.set(__self__, "role_names", role_names)
if roles is not None:
pulumi.set(__self__, "roles", roles)
if service_name is not None:
pulumi.set(__self__, "service_name", service_name)
if status is not None:
pulumi.set(__self__, "status", status)
if username is not None:
pulumi.set(__self__, "username", username)
@property
@pulumi.getter(name="creationDate")
def creation_date(self) -> Optional[pulumi.Input[str]]:
"""
the date the user was created.
"""
return pulumi.get(self, "creation_date")
@creation_date.setter
def creation_date(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "creation_date", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
A description associated with the user.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="openstackRc")
def openstack_rc(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
a convenient map representing an openstack_rc file.
Note: no password nor sensitive token is set in this map.
"""
return pulumi.get(self, "openstack_rc")
@openstack_rc.setter
def openstack_rc(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "openstack_rc", value)
@property
@pulumi.getter
def password(self) -> Optional[pulumi.Input[str]]:
"""
(Sensitive) the password generated for the user. The password can
be used with the Openstack API. This attribute is sensitive and will only be
retrieved once, at creation time.
"""
return pulumi.get(self, "password")
@password.setter
def password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "password", value)
@property
@pulumi.getter(name="projectId")
def project_id(self) -> Optional[pulumi.Input[str]]:
"""
The id of the public cloud project. If omitted,
the `OVH_PROJECT_ID` environment variable is used. DEPRECATED. Use `service_name` instead.
"""
return pulumi.get(self, "project_id")
@project_id.setter
def project_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project_id", value)
@property
@pulumi.getter(name="roleName")
def role_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of a role. See `role_names`.
"""
return pulumi.get(self, "role_name")
@role_name.setter
def role_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "role_name", value)
@property
@pulumi.getter(name="roleNames")
def role_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of role names. Values can be:
- administrator,
- ai_training_operator
- authentication
- backup_operator
- compute_operator
- image_operator
- infrastructure_supervisor
- network_operator
- network_security_operator
- objectstore_operator
- volume_operator
"""
return pulumi.get(self, "role_names")
@role_names.setter
def role_names(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "role_names", value)
@property
@pulumi.getter
def roles(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['CloudUserRoleArgs']]]]:
"""
A list of roles associated with the user.
"""
return pulumi.get(self, "roles")
@roles.setter
def roles(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['CloudUserRoleArgs']]]]):
pulumi.set(self, "roles", value)
@property
@pulumi.getter(name="serviceName")
def service_name(self) -> Optional[pulumi.Input[str]]:
"""
The id of the public cloud project. Conflicts with `project_id`.
"""
return pulumi.get(self, "service_name")
@service_name.setter
def service_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_name", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[str]]:
"""
the status of the user. Should normally be set to 'ok'.
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "status", value)
@property
@pulumi.getter
def username(self) -> Optional[pulumi.Input[str]]:
"""
the username generated for the user. This username can be used with
the Openstack API.
"""
return pulumi.get(self, "username")
@username.setter
def username(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "username", value)
class CloudUser(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
openstack_rc: Optional[pulumi.Input[Mapping[str, Any]]] = None,
project_id: Optional[pulumi.Input[str]] = None,
role_name: Optional[pulumi.Input[str]] = None,
role_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
service_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Creates a user in a public cloud project.
## Example Usage
```python
import pulumi
import pulumi_ovh as ovh
user1 = ovh.CloudUser("user1", project_id="67890")
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: A description associated with the user.
:param pulumi.Input[Mapping[str, Any]] openstack_rc: a convenient map representing an openstack_rc file.
Note: no password nor sensitive token is set in this map.
:param pulumi.Input[str] project_id: The id of the public cloud project. If omitted,
the `OVH_PROJECT_ID` environment variable is used. DEPRECATED. Use `service_name` instead.
:param pulumi.Input[str] role_name: The name of a role. See `role_names`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] role_names: A list of role names. Values can be:
- administrator,
- ai_training_operator
- authentication
- backup_operator
- compute_operator
- image_operator
- infrastructure_supervisor
- network_operator
- network_security_operator
- objectstore_operator
- volume_operator
:param pulumi.Input[str] service_name: The id of the public cloud project. Conflicts with `project_id`.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[CloudUserArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Creates a user in a public cloud project.
## Example Usage
```python
import pulumi
import pulumi_ovh as ovh
user1 = ovh.CloudUser("user1", project_id="67890")
```
:param str resource_name: The name of the resource.
:param CloudUserArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(CloudUserArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
openstack_rc: Optional[pulumi.Input[Mapping[str, Any]]] = None,
project_id: Optional[pulumi.Input[str]] = None,
role_name: Optional[pulumi.Input[str]] = None,
role_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
service_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = CloudUserArgs.__new__(CloudUserArgs)
__props__.__dict__["description"] = description
__props__.__dict__["openstack_rc"] = openstack_rc
__props__.__dict__["project_id"] = project_id
__props__.__dict__["role_name"] = role_name
__props__.__dict__["role_names"] = role_names
__props__.__dict__["service_name"] = service_name
__props__.__dict__["creation_date"] = None
__props__.__dict__["password"] = None
__props__.__dict__["roles"] = None
__props__.__dict__["status"] = None
__props__.__dict__["username"] = None
super(CloudUser, __self__).__init__(
'ovh:index/cloudUser:CloudUser',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
creation_date: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
openstack_rc: Optional[pulumi.Input[Mapping[str, Any]]] = None,
password: Optional[pulumi.Input[str]] = None,
project_id: Optional[pulumi.Input[str]] = None,
role_name: Optional[pulumi.Input[str]] = None,
role_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
roles: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['CloudUserRoleArgs']]]]] = None,
service_name: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None,
username: Optional[pulumi.Input[str]] = None) -> 'CloudUser':
"""
Get an existing CloudUser resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] creation_date: the date the user was created.
:param pulumi.Input[str] description: A description associated with the user.
:param pulumi.Input[Mapping[str, Any]] openstack_rc: a convenient map representing an openstack_rc file.
Note: no password nor sensitive token is set in this map.
:param pulumi.Input[str] password: (Sensitive) the password generated for the user. The password can
be used with the Openstack API. This attribute is sensitive and will only be
retrieved once, at creation time.
:param pulumi.Input[str] project_id: The id of the public cloud project. If omitted,
the `OVH_PROJECT_ID` environment variable is used. DEPRECATED. Use `service_name` instead.
:param pulumi.Input[str] role_name: The name of a role. See `role_names`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] role_names: A list of role names. Values can be:
- administrator,
- ai_training_operator
- authentication
- backup_operator
- compute_operator
- image_operator
- infrastructure_supervisor
- network_operator
- network_security_operator
- objectstore_operator
- volume_operator
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['CloudUserRoleArgs']]]] roles: A list of roles associated with the user.
:param pulumi.Input[str] service_name: The id of the public cloud project. Conflicts with `project_id`.
:param pulumi.Input[str] status: the status of the user. Should normally be set to 'ok'.
:param pulumi.Input[str] username: the username generated for the user. This username can be used with
the Openstack API.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _CloudUserState.__new__(_CloudUserState)
__props__.__dict__["creation_date"] = creation_date
__props__.__dict__["description"] = description
__props__.__dict__["openstack_rc"] = openstack_rc
__props__.__dict__["password"] = password
__props__.__dict__["project_id"] = project_id
__props__.__dict__["role_name"] = role_name
__props__.__dict__["role_names"] = role_names
__props__.__dict__["roles"] = roles
__props__.__dict__["service_name"] = service_name
__props__.__dict__["status"] = status
__props__.__dict__["username"] = username
return CloudUser(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="creationDate")
def creation_date(self) -> pulumi.Output[str]:
"""
the date the user was created.
"""
return pulumi.get(self, "creation_date")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
A description associated with the user.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="openstackRc")
def openstack_rc(self) -> pulumi.Output[Mapping[str, Any]]:
"""
a convenient map representing an openstack_rc file.
Note: no password nor sensitive token is set in this map.
"""
return pulumi.get(self, "openstack_rc")
@property
@pulumi.getter
def password(self) -> pulumi.Output[str]:
"""
(Sensitive) the password generated for the user. The password can
be used with the Openstack API. This attribute is sensitive and will only be
retrieved once, at creation time.
"""
return pulumi.get(self, "password")
@property
@pulumi.getter(name="projectId")
def project_id(self) -> pulumi.Output[Optional[str]]:
"""
The id of the public cloud project. If omitted,
the `OVH_PROJECT_ID` environment variable is used. DEPRECATED. Use `service_name` instead.
"""
return pulumi.get(self, "project_id")
@property
@pulumi.getter(name="roleName")
def role_name(self) -> pulumi.Output[Optional[str]]:
"""
The name of a role. See `role_names`.
"""
return pulumi.get(self, "role_name")
@property
@pulumi.getter(name="roleNames")
def role_names(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
A list of role names. Values can be:
- administrator,
- ai_training_operator
- authentication
- backup_operator
- compute_operator
- image_operator
- infrastructure_supervisor
- network_operator
- network_security_operator
- objectstore_operator
- volume_operator
"""
return pulumi.get(self, "role_names")
@property
@pulumi.getter
def roles(self) -> pulumi.Output[Sequence['outputs.CloudUserRole']]:
"""
A list of roles associated with the user.
"""
return pulumi.get(self, "roles")
@property
@pulumi.getter(name="serviceName")
def service_name(self) -> pulumi.Output[Optional[str]]:
"""
The id of the public cloud project. Conflicts with `project_id`.
"""
return pulumi.get(self, "service_name")
@property
@pulumi.getter
def status(self) -> pulumi.Output[str]:
"""
the status of the user. Should normally be set to 'ok'.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter
def username(self) -> pulumi.Output[str]:
"""
the username generated for the user. This username can be used with
the Openstack API.
"""
return pulumi.get(self, "username")
|
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.style
import matplotlib as mpl
import matplotlib.pyplot as plt
from sklearn.datasets import load_wine
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
X, y = load_wine(return_X_y=True)
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=0.3, random_state=0, stratify=y)
stdsc = StandardScaler()
X_train_std = stdsc.fit_transform(X_train)
X_test_std = stdsc.transform(X_test)
colors = ['blue', 'green', 'red', 'cyan',
'magenta', 'yellow', 'black',
'pink', 'lightgreen', 'lightblue',
'gray', 'indigo', 'orange']
columns = ['Class label', 'Alcohol', 'Malic acid', 'Ash',
'Alcalinity of ash', 'Magnesium', 'Total phenols',
'Flavanoids', 'Nonflavanoid phenols', 'Proanthocyanins',
'Color intensity', 'Hue', 'OD280/OD315 of diluted wines',
'Proline']
weights, params = [], []
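# Sweep the inverse regularisation strength C over 10**-4 .. 10**5: for each value, fit an
# L1-penalised logistic regression and record the coefficients for class index 1, so the
# plot below shows each feature's weight shrinking towards zero as regularisation strengthens
# (smaller C).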
for c in np.arange(-4., 6.):
lr = LogisticRegression(
solver='liblinear',
penalty='l1',
C=10.**c,
random_state=0)
lr.fit(X_train_std, y_train)
weights.append(lr.coef_[1])
params.append(10**c)
weights = np.array(weights)
plt.style.use('ggplot')
mpl.rcParams['image.cmap'] = 'viridis'
mpl.rcParams['font.serif'] = 'Source Han Serif'
mpl.rcParams['font.sans-serif'] = 'Source Han Sans'
fig = plt.figure(figsize=(6, 3.2))
ax = plt.subplot(111)
for column, color in zip(range(weights.shape[1]), colors):
plt.plot(params, weights[:, column],
label=columns[column + 1],
color=color)
plt.axhline(0, color='black', linestyle='--', linewidth=3)
plt.xlim([10**(-5), 10**5])
plt.ylabel('weight coefficient')
plt.xlabel('C')
plt.xscale('log')
plt.legend(loc='upper left')
ax.legend(loc='upper center',
bbox_to_anchor=(1.4, 1),
ncol=1, fancybox=True)
plt.savefig('wine-lr-regularization.png', dpi=300,
bbox_inches='tight', pad_inches=0.2)
|
# -*- coding: utf-8 -*-
#
# __init__.py
#
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# SPDX-License-Identifier: MIT-0
#
#
__author__ = "Rafael M. Koike"
__email__ = "koiker@amazon.com"
__date__ = "2016-11-14"
__version__ = '0.1.5'
|
"""
Given a string, find the length of the longest substring without repeating characters.
Examples:
Given "abcabcbb", the answer is "abc", which the length is 3.
Given "bbbbb", the answer is "b", with the length of 1.
Given "pwwkew", the answer is "wke", with the length of 3.
Note that the answer must be a substring, "pwke" is a subsequence and not a substring.
"""
class Solution(object):
def lengthOfLongestSubstring(self, s):
"""
:type s: str
:rtype: int
"""
length = len(s)
if length <= 1:
return length
# Solution 1
# res = 1
# slow_index = 0
# for i in range(1, length):
# if s[i] in s[slow_index:i]:
# slow_index += s[slow_index:i+1].index(s[i]) + 1
# if res < i - slow_index + 1:
# res = i - slow_index + 1
# return res
# Solution 2
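# Sliding window: `slow_index` marks the start of the current repeat-free window and
# `character_index_mapping` stores the most recent index of each character. When s[i] has
# already appeared inside the window, the window start jumps to just past that previous
# occurrence; otherwise the window grows and `res` tracks the best length seen so far.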
res = 1
character_index_mapping = {}
slow_index = 0
character_index_mapping[s[0]] = 0
for i in range(1, length):
if character_index_mapping.get(s[i], -1) > -1 and character_index_mapping.get(s[i]) >= slow_index:
if res < i - slow_index:
res = i - slow_index
slow_index = character_index_mapping.get(s[i]) + 1
else:
if res < i - slow_index + 1:
res = i - slow_index + 1
character_index_mapping[s[i]] = i
return res
|
from sklearn import datasets
from sklearn import metrics
from sklearn.metrics import classification_report
from sklearn import svm
from sklearn.model_selection import train_test_split
cancer = datasets.load_breast_cancer()
print("Features: ", cancer.feature_names)
print("Labels: ", cancer.target_names)
print("Data shape:", cancer.data.shape)
print(cancer.data[0:5])
print(cancer.target)
X_train, X_test, y_train, y_test = train_test_split(cancer.data, cancer.target, test_size=0.3, random_state=109)
clf = svm.SVC(kernel = 'linear')
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print("Accuracy:", metrics.accuracy_score(y_test, y_pred))
report_svc = classification_report(y_test, y_pred)
print(report_svc)
print("Precision:",metrics.precision_score(y_test, y_pred))
print("Recall:",metrics.recall_score(y_test, y_pred))
print(metrics.confusion_matrix(y_test, y_pred))
|
import numpy as np
import init
import image_service
import image_draw
import perceptron
import noise_generation
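# Network dimensions and training hyperparameters: N input pixels per letter image, H hidden
# neurons, M output classes (one per letter in letter_map), ALPHA/BETA are the learning rates
# for the hidden-to-output and input-to-hidden updates respectively, and D is the error
# threshold at which training stops.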
N = 36
H = 30
M = 5
ALPHA = 1
BETA = 1
D = 0.02
letter_map = {
'K': 0,
'L': 1,
'O': 2,
'T': 3,
'U': 4
}
letter_image_map = {}
letter_vector_map = {}
for letter in letter_map:
letter_image_map[letter] = image_service.get_image_from_file(letter)
for letter in letter_map:
letter_vector_map[letter] = init.get_y_vector(letter_map[letter], 5)
inner_hidden_weights = init.init_weights(H, N)
hidden_threshold = init.init_threshold(H)
hidden_neurons = np.zeros(H)
hidden_outer_weights = init.init_weights(M, H)
outer_threshold = init.init_threshold(M)
outer_neurons = np.zeros(M)
def calculate_hidden_neuron_sum(index, image):
return perceptron.sum_function(inner_hidden_weights[index],
image, hidden_threshold[index])
def calculate_outer_neuron_sum(index):
return perceptron.sum_function(hidden_outer_weights[index], hidden_neurons,
outer_threshold[index])
def calculate_hidden_activations(image):
for index in range(H):
hidden_neurons[index] = perceptron.activation_function(calculate_hidden_neuron_sum(index, image))
def calculate_outer_activations():
for index in range(M):
outer_neurons[index] = perceptron.activation_function(calculate_outer_neuron_sum(index))
def get_image_activations(image):
calculate_hidden_activations(image)
calculate_outer_activations()
mistake_vector = [1, 1, 1, 1, 1]
def recalculate_hidden_outer_weights():
for outer_index in range(M):
for hidden_index in range(H):
hidden_outer_weights[outer_index][hidden_index] += ALPHA * \
outer_neurons[outer_index] * \
(1 - outer_neurons[outer_index]) * \
mistake_vector[outer_index] * \
hidden_neurons[hidden_index]
def recalculate_outer_threshold():
for outer_index in range(M):
outer_threshold[outer_index] += ALPHA * \
outer_neurons[outer_index] * \
(1 - outer_neurons[outer_index]) * \
mistake_vector[outer_index]
def calculate_error_for_hidden_weight(index):
return sum(list(map(lambda i: mistake_vector[i] * outer_neurons[i] * \
(1 - outer_neurons[i]) * hidden_outer_weights[i][index], range(M))))
def recalculate_inner_hidden_weights(letter):
for hidden_index in range(H):
for inner_index in range(N):
inner_hidden_weights[hidden_index][inner_index] += BETA * \
hidden_neurons[hidden_index] * \
(1 - hidden_neurons[hidden_index]) * \
calculate_error_for_hidden_weight(hidden_index) * \
letter_image_map[letter][inner_index]
def recalculate_hidden_threshold():
for hidden_index in range(H):
hidden_threshold[hidden_index] += BETA * \
hidden_neurons[hidden_index] * \
(1 - hidden_neurons[hidden_index]) * \
calculate_error_for_hidden_weight(hidden_index)
def back_propagation(letter):
global mistake_vector
mistake_vector = np.subtract(letter_vector_map[letter], outer_neurons)
recalculate_hidden_outer_weights()
recalculate_outer_threshold()
recalculate_inner_hidden_weights(letter)
recalculate_hidden_threshold()
# for letter in letter_map:
# noise_generation.generate_noise_for_single_image(letter)
for letter in letter_image_map:
image_draw.show_image(letter_image_map[letter])
while abs(np.amax(mistake_vector)) >= D:
print(abs(np.amax(mistake_vector)))
for letter in letter_map:
get_image_activations(letter_image_map[letter])
back_propagation(letter)
print(np.amax(mistake_vector), ' - Teaching finished')
while True:
image = image_service.get_noised_image_from_file(input('\nLetter: '), int(input('Noise: ')))
image_draw.show_image(image)
get_image_activations(image)
print(outer_neurons)
|
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
import launch_ros.actions
from launch.actions import DeclareLaunchArgument
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node
from launch.conditions import IfCondition
def generate_launch_description():
use_sim_time = LaunchConfiguration('use_sim_time', default='false')
rviz_config_dir = os.path.join(
get_package_share_directory('neuronbot2_slam'),
'rviz',
'slam.rviz')
remappings = [('/tf', 'tf'),
('/tf_static', 'tf_static')]
param_substitutions = {
'use_sim_time': use_sim_time,
'scan_topic': 'scan',
'base_frame': 'base_link',
'odom_frame': 'odom',
'map_frame': 'map',
'map_update_interval': 3.0,
'maxUrange': 10.0,
'sigma': 0.05,
'kernelSize': 1,
'lstep': 0.05,
'astep': 0.05,
'iterations': 5,
'lsigma': 0.075,
'ogain': 3.0,
'lskip': 0,
'srr': 0.1,
'srt': 0.2,
'str': 0.1,
'stt': 0.2,
'linearUpdate': 0.3,
'angularUpdate': 3.14,
'temporalUpdate': 5.0,
'resampleThreshold': 0.5,
'particles': 30,
'xmin': -15.0,
'ymin': -15.0,
'xmax': 15.0,
'ymax': 15.0,
'delta': 0.025,
'llsamplerange': 0.01,
'llsamplestep': 0.01,
'lasamplerange': 0.005,
'lasamplestep': 0.005,
}
return LaunchDescription([
DeclareLaunchArgument(
'open_rviz',
default_value='false',
description='open rviz'),
launch_ros.actions.Node(
package='slam_gmapping',
executable='slam_gmapping',
parameters=[param_substitutions],
output='screen'),
Node(
package='rviz2',
executable='rviz2',
name='rviz2',
arguments=['-d', rviz_config_dir],
parameters=[{'use_sim_time': use_sim_time}],
condition=IfCondition(LaunchConfiguration("open_rviz")),
remappings=remappings
),
])
|
class Interval:
type_name = {0:'tick',
1:'m1',
2:'m5'}
name_type = {v:k for k,v in type_name.items()}
names = list(name_type.keys())
def __init__(self, value='m1'):
if value not in self.names:
raise AssertionError('Interval value does not exist')
self.type = self.name_type[value]
def get_name(self):
return self.type_name[self.type]
def get_type(self):
return self.type
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.utilities.calvinlogger import get_actor_logger
from calvin.actor.actor import Actor, manage, condition, calvinlib
_log = get_actor_logger(__name__)
class RandomInteger(Actor):
"""
Produce random integer in range [lower ... upper-1]
Inputs:
trigger : Any token
Outputs:
integer : Random integer in range [lower ... upper-1]
"""
@manage(['lower', 'upper'])
def init(self, lower, upper):
self.lower = lower
self.upper = upper
self.setup()
def setup(self):
self.rng = calvinlib.use("math.random")
def did_migrate(self):
self.setup()
@condition(action_input=['trigger'], action_output=['integer'])
def action(self, trigger):
return self.rng.random_integer(lower=self.lower, upper=self.upper),
action_priority = (action, )
requires = ['math.random']
test_kwargs = {'lower': 1, 'upper': 2}
test_set = [
{
'inports': {'trigger': [True, 1, "a"]},
'outports': {'integer': [1, 1, 1]}
}
]
|
#!/usr/bin/env python3
# std import
import sys
import string
import pkgutil
import configparser
# string-utils import
import string_utils
# project import
from . import logger
from .abcstep import StepStat
def main(config_path):
""" Main function of programme read configuration and run enable step """
config = configparser.ConfigParser(interpolation =
configparser.ExtendedInterpolation())
config.read(config_path)
logger.setup_logging(**config)
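    # Resolve each enabled step to its pysingcells submodule, then to the configured application class, and run it.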
for step_name in config['options']['steps'].split(","):
if step_name in [modname for importer, modname, ispkg in
pkgutil.iter_modules(
sys.modules[__package__].__path__)]:
appli_name = config[step_name]['name']
module_step = __import__(".".join(["pysingcells", step_name]),
fromlist="pysingcells")
if appli_name.lower() in [modname for importer, modname, ispkg in
pkgutil.iter_modules(module_step.__path__)]:
appli_module = __import__(".".join(["pysingcells", step_name,
appli_name.lower()]),
fromlist=".".join(["pysingcells",
step_name]))
appli_class = getattr(appli_module,
snake_case_to_capword(appli_name))
appli_instance = appli_class()
appli_instance.read_configuration(**config)
if appli_instance.check_configuration():
appli_instance.run()
                    if appli_instance.state != StepStat.succes:
                        logger.log.warning(appli_name + " failed during run, see log file")
                        sys.exit(1)
                else:
                    logger.log.warning(appli_name +
                                       " failed its configuration check")
            else:
                logger.log.warning(appli_name +
                                   " isn't available in pysingcells." + step_name)
        else:
            logger.log.warning(step_name + " isn't available in pysingcells")
def snake_case_to_capword(base):
if not string_utils.is_snake_case(base) or base.isupper():
base = "".join(c.upper() if i == 0 else c for i, c in enumerate(base))
return base
return string_utils.snake_case_to_camel(base).title()
if __name__ == "__main__":
main(sys.argv[1])
|
from .app.models import Category, Article
from .weakrefs import IdMapWeakRefsTests
class IdMapStrongRefsTests(IdMapWeakRefsTests):
# derives from tests with weak refs
# all tests should pass except CachedToRegular, where the expected
# result is the contrary
@classmethod
def setUpClass(cls):
Category._meta.use_strong_refs = True
Article._meta.use_strong_refs = True
super(IdMapStrongRefsTests, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(IdMapStrongRefsTests, cls).tearDownClass()
# restore defaults
Category._meta.use_strong_refs = False
Article._meta.use_strong_refs = False
def test_cached_to_regular(self):
# overrides a test in IdMapWeakRefsTests
# the expected result is that the category objects are the same
# indeed, the reference to the articles is not weak anymore and they
# are kept in memory after setUp. They are only erased when calling
# flush
article_list = Article.objects.all().select_related('category')
last_article = article_list[0]
for article in article_list[1:]:
self.assertIs(article.category2, last_article.category2)
last_article = article
|
import datetime
import http.client
import mock
from rdr_service.clock import FakeClock
from rdr_service.code_constants import CONSENT_PERMISSION_YES_CODE, RACE_NONE_OF_THESE_CODE
from rdr_service.dao.biobank_order_dao import BiobankOrderDao
from rdr_service.dao.participant_dao import ParticipantDao
from rdr_service.dao.participant_summary_dao import ParticipantSummaryDao
from rdr_service.model.biobank_order import (
BiobankOrderHistory,
BiobankOrderIdentifierHistory,
BiobankOrderedSampleHistory,
)
from rdr_service.model.participant import Participant
from rdr_service.model.utils import from_client_participant_id, to_client_participant_id
from rdr_service.participant_enums import OrderStatus, UNSET_HPO_ID
from tests.api_tests.test_participant_summary_api import _add_code_answer
from tests.helpers.unittest_base import BaseTestCase
from tests.test_data import load_biobank_order_json, load_measurement_json
TIME_1 = datetime.datetime(2016, 1, 1)
TIME_2 = datetime.datetime(2016, 1, 2)
TIME_3 = datetime.datetime(2016, 1, 3)
TIME_4 = datetime.datetime(2016, 1, 4)
TIME_5 = datetime.datetime(2016, 1, 5, 0, 1)
TIME_6 = datetime.datetime(2015, 1, 1)
class BiobankOrderApiTest(BaseTestCase):
def setUp(self):
super().setUp()
self.participant = Participant(participantId=123, biobankId=555)
self.participant_dao = ParticipantDao()
self.participant_dao.insert(self.participant)
self.summary_dao = ParticipantSummaryDao()
self.bio_dao = BiobankOrderDao()
self.path = "Participant/%s/BiobankOrder" % to_client_participant_id(self.participant.participantId)
self.mayolink_response = {
"orders": {
"order": {
"status": "Queued",
"reference_number": "somebarcodenumber",
"received": "2016-12-01T12:00:00-05:00",
"number": "WEB1ABCD1234",
"patient": {"medical_record_number": "PAT-123-456"},
}
}
}
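        # Patch MayoLinkApi so posting an order returns the canned response above instead of calling the real service.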
mayolinkapi_patcher = mock.patch(
"rdr_service.dao.biobank_order_dao.MayoLinkApi",
**{"return_value.post.return_value": self.mayolink_response}
)
mayolinkapi_patcher.start()
self.addCleanup(mayolinkapi_patcher.stop)
@mock.patch('rdr_service.dao.biobank_order_dao.get_account_origin_id')
def test_create_quest_order(self, quest_origin):
quest_origin.return_value = 'careevolution'
self.summary_dao.insert(self.participant_summary(self.participant))
order_json = load_biobank_order_json(self.participant.participantId, filename="quest_biobank_order_1.json")
result = self.send_post(self.path, order_json)
self.assertEqual(result['id'], 'WEB1ABCD1234')
self.assertEqual(result['collectedInfo']['address'], {'city': 'Little Rock', 'line': ['address1', 'address2'],
'postalCode': '72205-5302', 'state': 'AR'})
@mock.patch('rdr_service.dao.biobank_order_dao.get_account_origin_id')
def test_update_biobank_order_from_different_origin(self, quest_origin):
quest_origin.return_value = 'careevolution'
self.summary_dao.insert(self.participant_summary(self.participant))
order_json = load_biobank_order_json(self.participant.participantId, filename="quest_biobank_order_1.json")
result = self.send_post(self.path, order_json)
self.assertEqual(result['id'], 'WEB1ABCD1234')
quest_origin.return_value = 'hpro'
update_path = self.path + "/" + 'WEB1ABCD1234'
update_json = load_biobank_order_json(self.participant.participantId, filename="biobank_order_2.json")
update_json['identifier'][1]['value'] = 'WEB1ABCD1234'
self.send_put(update_path, request_data=update_json, headers={"If-Match": 'W/"1"'},
expected_status=http.client.BAD_REQUEST)
@mock.patch('rdr_service.dao.biobank_order_dao.get_account_origin_id')
def test_get_orders_by_participant_id(self, quest_origin):
quest_origin.return_value = 'careevolution'
self.summary_dao.insert(self.participant_summary(self.participant))
order_json = load_biobank_order_json(self.participant.participantId, filename="quest_biobank_order_1.json")
self.send_post(self.path, order_json)
quest_origin.return_value = 'hpro'
order_json = load_biobank_order_json(self.participant.participantId, filename="biobank_order_2.json")
self.send_post(self.path, order_json)
get_path = "Participant/%s/BiobankOrder" % to_client_participant_id(self.participant.participantId)
result = self.send_get(get_path)
self.assertEqual(result['total'], 2)
self.assertEqual(len(result['data']), 2)
if result['data'][0]['origin'] == 'careevolution':
self.assertEqual(result['data'][1]['origin'], 'hpro')
else:
self.assertEqual(result['data'][1]['origin'], 'careevolution')
@mock.patch('rdr_service.dao.biobank_order_dao.get_account_origin_id')
def test_get_orders_by_kit_id(self, quest_origin):
quest_origin.return_value = 'careevolution'
self.summary_dao.insert(self.participant_summary(self.participant))
order_json = load_biobank_order_json(self.participant.participantId, filename="quest_biobank_order_1.json")
self.send_post(self.path, order_json)
quest_origin.return_value = 'hpro'
order_json = load_biobank_order_json(self.participant.participantId, filename="biobank_order_2.json")
self.send_post(self.path, order_json)
get_path = "BiobankOrder?kitId=KIT-12345678"
result = self.send_get(get_path)
self.assertEqual(result['total'], 1)
self.assertEqual(len(result['data']), 1)
self.assertEqual(result['data'][0]['origin'], 'careevolution')
@mock.patch('rdr_service.dao.biobank_order_dao.get_account_origin_id')
def test_get_orders_by_time_range(self, quest_origin):
quest_origin.return_value = 'careevolution'
self.summary_dao.insert(self.participant_summary(self.participant))
order_json = load_biobank_order_json(self.participant.participantId, filename="quest_biobank_order_1.json")
self.send_post(self.path, order_json)
quest_origin.return_value = 'hpro'
order_json = load_biobank_order_json(self.participant.participantId, filename="biobank_order_2.json")
self.send_post(self.path, order_json)
p2 = Participant(participantId=456, biobankId=666)
self.participant_dao.insert(p2)
self.summary_dao.insert(self.participant_summary(p2))
order_json = load_biobank_order_json(p2.participantId, filename="biobank_order_2.json")
p2_path = "Participant/%s/BiobankOrder" % to_client_participant_id(p2.participantId)
self.send_post(p2_path, order_json)
p3 = Participant(participantId=789, biobankId=777)
self.participant_dao.insert(p3)
self.summary_dao.insert(self.participant_summary(p3))
order_json = load_biobank_order_json(p3.participantId, filename="biobank_order_2.json")
p3_path = "Participant/%s/BiobankOrder" % to_client_participant_id(p3.participantId)
self.send_post(p3_path, order_json)
p4 = Participant(participantId=1244, biobankId=888)
self.participant_dao.insert(p4)
self.summary_dao.insert(self.participant_summary(p4))
order_json = load_biobank_order_json(p4.participantId, filename="biobank_order_4.json")
p4_path = "Participant/%s/BiobankOrder" % to_client_participant_id(p4.participantId)
self.send_post(p4_path, order_json)
get_path = "BiobankOrder?origin=hpro&startDate=2016-01-04&endDate=2016-01-05&page=1&pageSize=2"
result = self.send_get(get_path)
self.assertEqual(result['total'], 3)
self.assertEqual(len(result['data']), 2)
self.assertIn(result['data'][0]['biobankId'][1:], ('555', '666'))
get_path = "BiobankOrder?origin=hpro&startDate=2016-01-03&endDate=2016-01-04&page=2&pageSize=2"
result = self.send_get(get_path)
self.assertEqual(result['total'], 3)
self.assertEqual(len(result['data']), 1)
get_path = "BiobankOrder?origin=hpro&startDate=2016-01-03&endDate=2016-01-04&page=3&pageSize=2"
result = self.send_get(get_path)
self.assertEqual(result['total'], 3)
self.assertEqual(len(result['data']), 0)
get_path = "BiobankOrder?origin=hpro&startDate=2019-12-03&endDate=2019-12-04&page=1&pageSize=2"
result = self.send_get(get_path)
self.assertEqual(result['total'], 0)
self.assertEqual(len(result['data']), 0)
get_path = "BiobankOrder?origin=careevolution&startDate=2019-12-03&endDate=2019-12-04&page=1&pageSize=2"
result = self.send_get(get_path)
self.assertEqual(result['total'], 1)
self.assertEqual(len(result['data']), 1)
get_path = "BiobankOrder?origin=careevolution&state=AR&page=1&pageSize=2"
result = self.send_get(get_path)
self.assertEqual(result['total'], 1)
self.assertEqual(len(result['data']), 1)
get_path = "BiobankOrder?origin=careevolution&state=TN&page=1&pageSize=2"
result = self.send_get(get_path)
self.assertEqual(result['total'], 0)
self.assertEqual(len(result['data']), 0)
def test_cancel_order(self):
self.summary_dao.insert(self.participant_summary(self.participant))
order_json = load_biobank_order_json(self.participant.participantId, filename="biobank_order_2.json")
result = self.send_post(self.path, order_json)
full_order_json = load_biobank_order_json(self.participant.participantId, filename="biobank_order_1.json")
_strip_fields(result)
_strip_fields(full_order_json)
self.assertEqual(full_order_json, result)
biobank_order_id = result["identifier"][1]["value"]
path = self.path + "/" + biobank_order_id
request_data = {
"amendedReason": "Its all wrong",
"cancelledInfo": {
"author": {"system": "https://www.pmi-ops.org/healthpro-username", "value": "fred@pmi-ops.org"},
"site": {"system": "https://www.pmi-ops.org/site-id", "value": "hpo-site-monroeville"},
},
"status": "cancelled",
}
cancelled_order = self.send_patch(path, request_data=request_data, headers={"If-Match": 'W/"1"'})
get_cancelled_order = self.send_get(path)
get_summary = self.summary_dao.get(self.participant.participantId)
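        # Cancelling the order should clear every biospecimen and sample-order field on the participant summary.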
self.assertEqual(get_summary.biospecimenSourceSiteId, None)
self.assertEqual(get_summary.biospecimenCollectedSiteId, None)
self.assertEqual(get_summary.biospecimenOrderTime, None)
self.assertEqual(get_summary.biospecimenStatus, None)
self.assertEqual(get_summary.biospecimenFinalizedSiteId, None)
self.assertEqual(get_summary.biospecimenProcessedSiteId, None)
self.assertEqual(get_summary.sampleOrderStatus2ED10, None)
self.assertEqual(get_summary.sampleOrderStatus2ED10Time, None)
self.assertEqual(get_summary.sampleStatus2ED10, None)
self.assertEqual(get_summary.sampleStatus2ED10Time, None)
self.assertEqual(get_summary.sampleOrderStatus1PST8, None)
self.assertEqual(get_summary.sampleOrderStatus1PST8Time, None)
self.assertEqual(get_summary.sampleStatus1PST8, None)
self.assertEqual(get_summary.sampleStatus1PST8Time, None)
self.assertEqual(get_summary.sampleOrderStatus1PS08, None)
self.assertEqual(get_summary.sampleOrderStatus1PS08Time, None)
self.assertEqual(get_summary.sampleStatus1PS08, None)
self.assertEqual(get_summary.sampleStatus1PS08Time, None)
self.assertEqual(get_summary.sampleOrderStatus2PST8, None)
self.assertEqual(get_summary.sampleOrderStatus2PST8Time, None)
self.assertEqual(get_summary.sampleStatus2PST8, None)
self.assertEqual(get_summary.sampleStatus2PST8Time, None)
self.assertEqual(get_summary.sampleOrderStatus1PXR2, None)
self.assertEqual(get_summary.sampleOrderStatus1PXR2Time, None)
self.assertEqual(get_summary.sampleStatus1PXR2, None)
self.assertEqual(get_summary.sampleStatus1PXR2Time, None)
self.assertEqual(get_summary.sampleOrderStatus1CFD9, None)
self.assertEqual(get_summary.sampleOrderStatus1CFD9Time, None)
self.assertEqual(get_summary.sampleStatus1CFD9, None)
self.assertEqual(get_summary.sampleStatus1CFD9Time, None)
self.assertEqual(get_summary.sampleOrderStatus1ED02, None)
self.assertEqual(get_summary.sampleOrderStatus1ED02Time, None)
self.assertEqual(get_summary.sampleStatus1ED02, None)
self.assertEqual(get_summary.sampleStatus1ED02Time, None)
self.assertEqual(cancelled_order, get_cancelled_order)
self.assertEqual(get_cancelled_order["status"], "CANCELLED")
self.assertEqual(get_cancelled_order["amendedReason"], "Its all wrong")
self.assertEqual(get_cancelled_order["cancelledInfo"]["author"]["value"], "fred@pmi-ops.org")
self.assertEqual(get_cancelled_order["cancelledInfo"]["site"]["value"], "hpo-site-monroeville")
def test_you_can_not_cancel_a_cancelled_order(self):
self.summary_dao.insert(self.participant_summary(self.participant))
order_json = load_biobank_order_json(self.participant.participantId, filename="biobank_order_2.json")
result = self.send_post(self.path, order_json)
biobank_order_id = result["identifier"][1]["value"]
path = self.path + "/" + biobank_order_id
request_data = {
"amendedReason": "Its all wrong",
"cancelledInfo": {
"author": {"system": "https://www.pmi-ops.org/healthpro-username", "value": "fred@pmi-ops.org"},
"site": {"system": "https://www.pmi-ops.org/site-id", "value": "hpo-site-monroeville"},
},
"status": "cancelled",
}
self.send_patch(path, request_data=request_data, headers={"If-Match": 'W/"1"'})
self.send_patch(
path, request_data=request_data, headers={"If-Match": 'W/"2"'}, expected_status=http.client.BAD_REQUEST
)
def test_cancel_one_order_with_another_good_order(self):
self.summary_dao.insert(self.participant_summary(self.participant))
order_json = load_biobank_order_json(self.participant.participantId, filename="biobank_order_1.json")
order_json2 = load_biobank_order_json(self.participant.participantId, filename="biobank_order_2.json")
order_json2['identifier'][0]['value'] = 'healthpro-order-id-1231234'
order_json2['identifier'][1]['value'] = 'WEB1YLHV1234'
result = self.send_post(self.path, order_json)
self.send_post(self.path, order_json2)
biobank_order_id = result["identifier"][1]["value"]
path = self.path + "/" + biobank_order_id
request_data = {
"amendedReason": "Its all wrong",
"cancelledInfo": {
"author": {"system": "https://www.pmi-ops.org/healthpro-username", "value": "fred@pmi-ops.org"},
"site": {"system": "https://www.pmi-ops.org/site-id", "value": "hpo-site-monroeville"},
},
"status": "cancelled",
}
self.send_patch(path, request_data=request_data, headers={"If-Match": 'W/"1"'})
self.send_patch(
path, request_data=request_data, headers={"If-Match": 'W/"2"'}, expected_status=http.client.BAD_REQUEST
)
get_summary = self.summary_dao.get(self.participant.participantId)
self.assertEqual(get_summary.biospecimenSourceSiteId, 1)
self.assertEqual(get_summary.biospecimenCollectedSiteId, 1)
self.assertEqual(get_summary.biospecimenFinalizedSiteId, 2)
def test_you_can_not_restore_a_not_cancelled_order(self):
self.summary_dao.insert(self.participant_summary(self.participant))
order_json = load_biobank_order_json(self.participant.participantId, filename="biobank_order_2.json")
result = self.send_post(self.path, order_json)
biobank_order_id = result["identifier"][1]["value"]
path = self.path + "/" + biobank_order_id
request_data = {
"amendedReason": "Its all wrong",
"restoredInfo": {
"author": {"system": "https://www.pmi-ops.org/healthpro-username", "value": "fred@pmi-ops.org"},
"site": {"system": "https://www.pmi-ops.org/site-id", "value": "hpo-site-monroeville"},
},
"status": "restored",
}
self.send_patch(
path, request_data=request_data, headers={"If-Match": 'W/"1"'}, expected_status=http.client.BAD_REQUEST
)
def test_restore_an_order(self):
self.summary_dao.insert(self.participant_summary(self.participant))
order_json = load_biobank_order_json(self.participant.participantId, filename="biobank_order_2.json")
result = self.send_post(self.path, order_json)
full_order_json = load_biobank_order_json(self.participant.participantId, filename="biobank_order_1.json")
_strip_fields(result)
_strip_fields(full_order_json)
self.assertEqual(full_order_json, result)
biobank_order_id = result["identifier"][1]["value"]
path = self.path + "/" + biobank_order_id
request_data = {
"amendedReason": "Its all wrong",
"cancelledInfo": {
"author": {"system": "https://www.pmi-ops.org/healthpro-username", "value": "fred@pmi-ops.org"},
"site": {"system": "https://www.pmi-ops.org/site-id", "value": "hpo-site-monroeville"},
},
"status": "cancelled",
}
self.send_patch(path, request_data=request_data, headers={"If-Match": 'W/"1"'})
request_data = {
"amendedReason": "I didnt mean to cancel",
"restoredInfo": {
"author": {"system": "https://www.pmi-ops.org/healthpro-username", "value": "fred@pmi-ops.org"},
"site": {"system": "https://www.pmi-ops.org/site-id", "value": "hpo-site-monroeville"},
},
"status": "restored",
}
self.send_patch(path, request_data=request_data, headers={"If-Match": 'W/"2"'})
restored_order = self.send_get(path)
get_summary = self.summary_dao.get(self.participant.participantId)
self.assertEqual(get_summary.sampleOrderStatus1SST8, OrderStatus.CREATED)
self.assertEqual(get_summary.sampleOrderStatus2ED10, OrderStatus.CREATED)
self.assertEqual(get_summary.sampleOrderStatus1SAL, OrderStatus.CREATED)
self.assertEqual(get_summary.sampleOrderStatus1UR10, OrderStatus.CREATED)
self.assertEqual(get_summary.sampleOrderStatus1CFD9, OrderStatus.FINALIZED)
self.assertEqual(get_summary.sampleOrderStatus1ED02, OrderStatus.FINALIZED)
self.assertEqual(get_summary.sampleOrderStatus2SST8, OrderStatus.FINALIZED)
self.assertEqual(get_summary.sampleOrderStatus2PST8, OrderStatus.FINALIZED)
self.assertEqual(get_summary.biospecimenFinalizedSiteId, 2)
self.assertEqual(restored_order["status"], "UNSET")
self.assertEqual(restored_order["restoredInfo"]["author"]["value"], "fred@pmi-ops.org")
self.assertEqual(restored_order["restoredInfo"]["site"]["value"], "hpo-site-monroeville")
self.assertEqual(restored_order["amendedReason"], "I didnt mean to cancel")
def test_amending_an_order(self):
# pylint: disable=unused-variable
self.summary_dao.insert(self.participant_summary(self.participant))
order_json = load_biobank_order_json(self.participant.participantId, filename="biobank_order_2.json")
result = self.send_post(self.path, order_json)
biobank_order_id = result["identifier"][1]["value"]
path = self.path + "/" + biobank_order_id
request_data = {
"amendedReason": "Its all better",
"amendedInfo": {
"author": {"system": "https://www.pmi-ops.org/healthpro-username", "value": "fred@pmi-ops.org"},
"site": {"system": "https://www.pmi-ops.org/site-id", "value": "hpo-site-bannerphoenix"},
},
}
biobank_order_identifiers = {
"created": "2018-02-21T16:25:12",
"createdInfo": {
"author": {"system": "https://www.pmi-ops.org/healthpro-username", "value": "nobody@pmi-ops.org"},
"site": {"system": "https://www.pmi-ops.org/site-id", "value": "hpo-site-clinic-phoenix"},
},
}
get_order = self.send_get(path)
full_order = get_order.copy()
full_order.update(request_data)
full_order.update(biobank_order_identifiers)
self.assertEqual(len(full_order["samples"]), 16)
del full_order["samples"][0]
self.send_put(path, request_data=full_order, headers={"If-Match": 'W/"1"'})
get_amended_order = self.send_get(path)
get_summary = self.summary_dao.get(self.participant.participantId)
self.assertEqual(get_summary.biospecimenProcessedSiteId, 1)
self.assertEqual(get_summary.biospecimenFinalizedSiteId, 2)
self.assertEqual(get_summary.biospecimenCollectedSiteId, 1)
self.assertEqual(get_summary.sampleOrderStatus2PST8, OrderStatus.FINALIZED)
self.assertEqual(get_summary.sampleOrderStatus1PS08, OrderStatus.FINALIZED)
self.assertEqual(get_summary.sampleOrderStatus1PST8, OrderStatus.FINALIZED)
self.assertEqual(get_summary.sampleOrderStatus1SST8, OrderStatus.CREATED)
self.assertEqual(get_summary.sampleOrderStatus2ED10, OrderStatus.CREATED)
self.assertEqual(len(get_amended_order["samples"]), 15)
self.assertEqual(get_amended_order["meta"], {"versionId": 'W/"2"'})
self.assertEqual(get_amended_order["amendedReason"], "Its all better")
self.assertEqual(get_amended_order["amendedInfo"]["author"]["value"], "fred@pmi-ops.org")
self.assertEqual(get_amended_order["amendedInfo"]["site"]["value"], "hpo-site-bannerphoenix")
self.assertEqual(get_amended_order["createdInfo"]["site"]["value"], "hpo-site-clinic-phoenix")
self.assertEqual(get_amended_order["createdInfo"]["author"]["value"], "nobody@pmi-ops.org")
self.assertEqual(get_amended_order["created"], "2018-02-21T16:25:12")
self.assertEqual(get_amended_order["status"], "AMENDED")
def test_amend_a_restored_order(self):
self.summary_dao.insert(self.participant_summary(self.participant))
order_json = load_biobank_order_json(self.participant.participantId, filename="biobank_order_2.json")
result = self.send_post(self.path, order_json)
full_order_json = load_biobank_order_json(self.participant.participantId, filename="biobank_order_1.json")
_strip_fields(result)
_strip_fields(full_order_json)
biobank_order_id = result["identifier"][1]["value"]
path = self.path + "/" + biobank_order_id
request_data = {
"amendedReason": "Its all wrong",
"cancelledInfo": {
"author": {"system": "https://www.pmi-ops.org/healthpro-username", "value": "fred@pmi-ops.org"},
"site": {"system": "https://www.pmi-ops.org/site-id", "value": "hpo-site-monroeville"},
},
"status": "cancelled",
}
self.send_patch(path, request_data=request_data, headers={"If-Match": 'W/"1"'})
self.send_get(path)
request_data = {
"amendedReason": "I didnt mean to cancel",
"restoredInfo": {
"author": {"system": "https://www.pmi-ops.org/healthpro-username", "value": "fred@pmi-ops.org"},
"site": {"system": "https://www.pmi-ops.org/site-id", "value": "hpo-site-monroeville"},
},
"status": "restored",
}
self.send_patch(path, request_data=request_data, headers={"If-Match": 'W/"2"'})
request_data = {
"amendedReason": "Its all better",
"samples": [
{
"test": "1ED10",
"description": "EDTA 10 mL (1)",
"processingRequired": False,
"collected": "2016-01-04T09:45:49Z",
"finalized": "2016-01-04T10:55:41Z",
},
{
"test": "1PST8",
"description": "Plasma Separator 8 mL",
"collected": "2016-01-04T09:45:49Z",
"processingRequired": True,
"processed": "2016-01-04T10:28:50Z",
"finalized": "2016-01-04T10:55:41Z",
},
],
"amendedInfo": {
"author": {"system": "https://www.pmi-ops.org/healthpro-username", "value": "mike@pmi-ops.org"},
"site": {"system": "https://www.pmi-ops.org/site-id", "value": "hpo-site-monroeville"},
},
}
get_order = self.send_get(path)
full_order = get_order.copy()
full_order.update(request_data)
self.send_put(path, request_data=full_order, headers={"If-Match": 'W/"3"'})
get_amended_order = self.send_get(path)
self.assertEqual(len(get_amended_order["samples"]), 2)
self.assertEqual(get_amended_order["amendedInfo"]["author"]["value"], "mike@pmi-ops.org")
self.assertEqual(get_amended_order["status"], "AMENDED")
self.assertEqual(get_amended_order.get("restoredSiteId"), None)
self.assertEqual(get_amended_order.get("restoredUsername"), None)
self.assertEqual(get_amended_order.get("restoredTime"), None)
self.assertEqual(get_amended_order["meta"], {"versionId": 'W/"4"'})
def test_insert_and_refetch(self):
self.summary_dao.insert(self.participant_summary(self.participant))
self.create_and_verify_created_obj(self.path, load_biobank_order_json(self.participant.participantId))
def test_insert_new_order(self):
self.summary_dao.insert(self.participant_summary(self.participant))
order_json = load_biobank_order_json(self.participant.participantId, filename="biobank_order_2.json")
result = self.send_post(self.path, order_json)
full_order_json = load_biobank_order_json(self.participant.participantId, filename="biobank_order_1.json")
_strip_fields(result)
_strip_fields(full_order_json)
self.assertEqual(full_order_json, result)
# check order origin
biobank_order_id = result["identifier"][1]["value"]
get_path = "Participant/{}/BiobankOrder/{}".format(to_client_participant_id(123), biobank_order_id)
get_result = self.send_get(get_path)
self.assertEqual(get_result["origin"], "example")
def test_biobank_history_on_insert(self):
with self.bio_dao.session() as session:
self.summary_dao.insert(self.participant_summary(self.participant))
order_json = load_biobank_order_json(self.participant.participantId, filename="biobank_order_2.json")
result = self.send_post(self.path, order_json)
load_biobank_order_json(self.participant.participantId, filename="biobank_order_1.json")
order_history = session.query(BiobankOrderHistory).first()
identifier_history = session.query(BiobankOrderIdentifierHistory).first()
sample_history = session.query(BiobankOrderedSampleHistory).first()
all_samples_history = session.query(BiobankOrderedSampleHistory).all()
self.assertEqual(result["id"], order_history.biobankOrderId)
self.assertEqual(identifier_history.biobankOrderId, result["id"])
self.assertEqual(sample_history.biobankOrderId, result["id"])
self.assertEqual(result["meta"]["versionId"], 'W/"1"')
self.assertEqual(order_history.version, 1)
self.assertEqual(len(all_samples_history), 16)
# Test history on updates...
biobank_order_id = result["identifier"][1]["value"]
path = self.path + "/" + biobank_order_id
request_data = {
"amendedReason": "Its all better",
"amendedInfo": {
"author": {"system": "https://www.pmi-ops.org/healthpro-username", "value": "fred@pmi-ops.org"},
"site": {"system": "https://www.pmi-ops.org/site-id", "value": "hpo-site-bannerphoenix"},
},
}
biobank_order_identifiers = {
"created": "2018-02-21T16:25:12",
"createdInfo": {
"author": {"system": "https://www.pmi-ops.org/healthpro-username", "value": "nobody@pmi-ops.org"},
"site": {"system": "https://www.pmi-ops.org/site-id", "value": "hpo-site-clinic-phoenix"},
},
}
get_order = self.send_get(path)
full_order = get_order.copy()
full_order.update(request_data)
full_order.update(biobank_order_identifiers)
self.assertEqual(len(full_order["samples"]), 16)
del full_order["samples"][0]
self.send_put(path, request_data=full_order, headers={"If-Match": 'W/"1"'})
with self.bio_dao.session() as session:
amended_order = self.send_get(path)
second_order_history = session.query(BiobankOrderHistory).filter_by(version=2).first()
second_order_samples = session.query(BiobankOrderedSampleHistory).filter_by(version=2).first()
second_order_identifier = session.query(BiobankOrderIdentifierHistory).filter_by(version=2).first()
self.assertEqual(second_order_history.biobankOrderId, amended_order["id"])
self.assertEqual(second_order_identifier.biobankOrderId, amended_order["id"])
self.assertEqual(second_order_samples.biobankOrderId, amended_order["id"])
# Check that original order hasn't changed in history
original = session.query(BiobankOrderHistory).filter_by(version=1).first()
self.assertEqual(original.asdict(), order_history.asdict())
def test_error_no_summary(self):
order_json = load_biobank_order_json(self.participant.participantId)
self.send_post(self.path, order_json, expected_status=http.client.BAD_REQUEST)
def test_error_missing_required_fields(self):
order_json = load_biobank_order_json(self.participant.participantId)
del order_json["identifier"]
self.send_post(self.path, order_json, expected_status=http.client.BAD_REQUEST)
def test_no_duplicate_test_within_order(self):
order_json = load_biobank_order_json(self.participant.participantId)
order_json["samples"].extend(list(order_json["samples"]))
self.send_post(self.path, order_json, expected_status=http.client.BAD_REQUEST)
def test_auto_pair_updates_participant_and_summary(self):
self.summary_dao.insert(self.participant_summary(self.participant))
# Sanity check: No HPO yet.
p_unpaired = self.participant_dao.get(self.participant.participantId)
self.assertEqual(p_unpaired.hpoId, UNSET_HPO_ID)
self.assertIsNone(p_unpaired.providerLink)
s_unpaired = self.summary_dao.get(self.participant.participantId)
self.assertEqual(s_unpaired.hpoId, UNSET_HPO_ID)
self.send_post(self.path, load_biobank_order_json(self.participant.participantId))
# Some HPO has been set. (ParticipantDao tests cover more detailed cases / specific values.)
p_paired = self.participant_dao.get(self.participant.participantId)
self.assertNotEqual(p_paired.hpoId, UNSET_HPO_ID)
self.assertIsNotNone(p_paired.providerLink)
s_paired = self.summary_dao.get(self.participant.participantId)
self.assertNotEqual(s_paired.hpoId, UNSET_HPO_ID)
self.assertEqual(s_paired.biospecimenCollectedSiteId, s_paired.siteId)
self.assertNotEqual(s_paired.biospecimenCollectedSiteId, s_paired.biospecimenFinalizedSiteId)
self.assertNotEqual(s_paired.siteId, s_paired.physicalMeasurementsCreatedSiteId)
self.assertNotEqual(s_paired.siteId, s_paired.physicalMeasurementsFinalizedSiteId)
def test_not_pairing_at_pm_when_has_bio(self):
self.participant_id = self.create_participant()
_id = int(self.participant_id[1:])
self.path = "Participant/%s/BiobankOrder" % to_client_participant_id(_id)
pid_numeric = from_client_participant_id(self.participant_id)
self.send_consent(self.participant_id)
self.send_post(self.path, load_biobank_order_json(pid_numeric))
participant_paired = self.summary_dao.get(pid_numeric)
self.assertEqual(participant_paired.siteId, participant_paired.biospecimenCollectedSiteId)
self.path = "Participant/%s/PhysicalMeasurements" % to_client_participant_id(pid_numeric)
self._insert_measurements(datetime.datetime.utcnow().isoformat())
self.assertNotEqual(participant_paired.siteId, participant_paired.physicalMeasurementsFinalizedSiteId)
def test_bio_after_cancelled_pm(self):
self.participant_id = self.create_participant()
self.send_consent(self.participant_id)
measurement = load_measurement_json(self.participant_id)
measurement2 = load_measurement_json(self.participant_id)
# send both PM's
pm_path = "Participant/%s/PhysicalMeasurements" % self.participant_id
response = self.send_post(pm_path, measurement)
self.send_post(pm_path, measurement2)
# cancel the 1st PM
pm_path = pm_path + "/" + response["id"]
cancel_info = self.get_restore_or_cancel_info()
self.send_patch(pm_path, cancel_info)
# set up questionnaires to hit the calculate_max_core_sample_time in participant summary
questionnaire_id = self.create_questionnaire("questionnaire3.json")
questionnaire_id_1 = self.create_questionnaire("all_consents_questionnaire.json")
questionnaire_id_2 = self.create_questionnaire("questionnaire4.json")
self._submit_consent_questionnaire_response(
self.participant_id, questionnaire_id_1, CONSENT_PERMISSION_YES_CODE, time=TIME_6
)
self.submit_questionnaire_response(
self.participant_id, questionnaire_id, RACE_NONE_OF_THESE_CODE, None, None, datetime.date(1978, 10, 10)
)
self._submit_empty_questionnaire_response(self.participant_id, questionnaire_id_2)
# send a biobank order
_id = int(self.participant_id[1:])
self.path = "Participant/%s/BiobankOrder" % to_client_participant_id(_id)
pid_numeric = from_client_participant_id(self.participant_id)
self.send_post(self.path, load_biobank_order_json(pid_numeric))
# fetch participant summary
ps = self.send_get("ParticipantSummary?participantId=%s" % _id)
self.assertTrue(ps["entry"][0]["resource"]["physicalMeasurementsFinalizedTime"])
self.assertEqual(ps["entry"][0]["resource"]["physicalMeasurementsFinalizedSite"], "hpo-site-bannerphoenix")
        self.assertIn("biobankId", ps["entry"][0]["resource"])
def _insert_measurements(self, now=None):
measurements_1 = load_measurement_json(self.participant_id, now)
path_1 = "Participant/%s/PhysicalMeasurements" % self.participant_id
self.send_post(path_1, measurements_1)
def _submit_consent_questionnaire_response(
self, participant_id, questionnaire_id, ehr_consent_answer, time=TIME_1
):
code_answers = []
_add_code_answer(code_answers, "ehrConsent", ehr_consent_answer)
qr = self.make_questionnaire_response_json(participant_id, questionnaire_id, code_answers=code_answers)
with FakeClock(time):
self.send_post("Participant/%s/QuestionnaireResponse" % participant_id, qr)
def _submit_empty_questionnaire_response(self, participant_id, questionnaire_id, time=TIME_1):
qr = self.make_questionnaire_response_json(participant_id, questionnaire_id)
with FakeClock(time):
self.send_post("Participant/%s/QuestionnaireResponse" % participant_id, qr)
def _strip_fields(order_json):
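    """Drop server-assigned fields (timestamps, ids, origin, version) so request and response JSON can be compared."""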
if order_json.get("created"):
del order_json["created"]
if order_json.get("id"):
del order_json["id"]
if order_json.get("origin"):
del order_json["origin"]
if order_json.get("version"):
del order_json["version"]
for sample in order_json["samples"]:
if sample.get("collected"):
del sample["collected"]
if sample.get("processed"):
del sample["processed"]
if sample.get("finalized"):
del sample["finalized"]
|
import os
from scipy import io as sio
import numpy as np
import tensorflow as tf
import plotly.graph_objs as go
NUM_POINTS = 31
METER_SCALER = 0.001
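# Pairs of joint indices defining the skeleton's limbs; each pair is drawn as a line segment in the 3D traces.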
CONNECTIONS = ((0, 1), (1, 2), (2, 3), (3, 4), (3, 5), (0, 6), (6, 7), (7, 8),
(8, 9), (8, 10), (0, 11), (11, 12), (12, 13), (13, 14), (14, 15),
(15, 16), (13, 24), (24, 25), (25, 26), (26, 27), (27, 30),
(27, 28), (27, 29), (13, 17), (17, 18), (18, 19), (19, 20),
(20, 21), (20, 22), (20, 23))
BLUE = "rgb(90, 130, 238)"
RED = "rgb(205, 90, 76)"
path = {"tfrecords": "/dataset/chenk/cmu-mocap/tfrecords/"}
def _parse_function(example_proto):
"""Parses raw bytes into tensors."""
features = {
"points3d_raw": tf.FixedLenFeature((), tf.string, default_value=""),
"points2d_raw": tf.FixedLenFeature((), tf.string, default_value=""),
}
parsed_features = tf.parse_single_example(example_proto, features)
output_features = {
"points3d": tf.reshape(
tf.decode_raw(parsed_features["points3d_raw"], tf.float32),
[NUM_POINTS, 3],
),
"points2d": tf.reshape(
tf.decode_raw(parsed_features["points2d_raw"], tf.float32),
[NUM_POINTS, 2],
),
}
# Returns a tuple (features, labels)
return output_features, 0
def train_input_fn(filename, buffer_size, batch_size):
"""An input function for training."""
dataset = tf.data.TFRecordDataset(filename)
dataset = dataset.map(_parse_function)
dataset = dataset.repeat()
dataset = dataset.shuffle(buffer_size)
dataset = dataset.batch(batch_size)
return dataset.make_one_shot_iterator().get_next()
def eval_input_fn(filename, batch_size):
"""An input function for evaluation."""
dataset = tf.data.TFRecordDataset(filename)
dataset = dataset.map(_parse_function)
dataset = dataset.batch(batch_size)
return dataset.make_one_shot_iterator().get_next()
def get_trace3d(points3d, point_color=None, line_color=None, name="PointCloud"):
"""Yields plotly traces for visualization."""
if point_color is None:
point_color = "rgb(30, 20, 160)"
if line_color is None:
line_color = "rgb(30, 20, 160)"
# Trace of points.
trace_of_points = go.Scatter3d(
x=points3d[:, 0],
y=points3d[:, 2],
z=points3d[:, 1],
mode="markers",
name=name,
marker=dict(
symbol="circle",
size=3,
color=point_color))
# Trace of lines.
xlines = []
ylines = []
zlines = []
for line in CONNECTIONS:
for point in line:
xlines.append(points3d[point, 0])
ylines.append(points3d[point, 2])
zlines.append(points3d[point, 1])
xlines.append(None)
ylines.append(None)
zlines.append(None)
trace_of_lines = go.Scatter3d(
x=xlines,
y=ylines,
z=zlines,
mode="lines",
name=name,
line=dict(color=line_color))
return [trace_of_points, trace_of_lines]
def get_figure3d(points3d, gt=None, range_scale=1):
"""Yields plotly fig for visualization"""
traces = get_trace3d(points3d, BLUE, BLUE, "prediction")
if gt is not None:
traces += get_trace3d(gt, RED, RED, "groundtruth")
layout = go.Layout(
scene=dict(
aspectratio=dict(x=0.8,
y=0.8,
z=2),
xaxis=dict(range=(-0.4 * range_scale, 0.4 * range_scale),),
yaxis=dict(range=(-0.4 * range_scale, 0.4 * range_scale),),
zaxis=dict(range=(-1 * range_scale, 1 * range_scale),),),
width=700,
margin=dict(r=20, l=10, b=10, t=10))
return go.Figure(data=traces, layout=layout)
|
import datetime
import threading
import time
import mraa
import InternationalMorseCode as ICM
BASE_TIME_SECONDS = 1.0
TOLERANCE = BASE_TIME_SECONDS / 2.0
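# Morse timing in BASE_TIME_SECONDS units: dot = 1, dash = 3, letter gap = 3, word gap = 7, matched within +/- TOLERANCE.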
# Initialize GPIO settings
def initialize_gpio():
global metronomeLED
metronomeLED = mraa.Gpio(27) #Metronome
metronomeLED.dir(mraa.DIR_OUT)
metronomeLED.write(0)
global button
button = mraa.Gpio(29)
button.dir(mraa.DIR_IN)
# Blink a blue LED on/off (one full cycle per BASE_TIME_SECONDS)
def metronome():
while True:
metronomeLED.write(not metronomeLED.read())
time.sleep(BASE_TIME_SECONDS / 2.0)
#Create a new thread for metronome
def initialize_metronome():
t = threading.Thread(target=metronome)
t.daemon = True
t.start()
last_edge = 0
press = datetime.datetime.now()
release = datetime.datetime.now()
# Intercept a rise or fall on pin 29 (button press/release)
def intercept_morse_code():
global last_edge, press, release, button
while True:
# Button pressed - determine if start of new letter/word
if int(button.read()) == 1 and last_edge == 0:
last_edge = 1
press = datetime.datetime.now()
detect_termination()
# Button released - determine what the input is
elif int(button.read()) == 0 and last_edge == 1:
last_edge = 0
release = datetime.datetime.now()
interpret_input()
#Create a thread to detect button presses.
def initialize_button():
t = threading.Thread(target=intercept_morse_code)
t.daemon = True
t.start()
sequence = ""
letters = []
words = []
# Detect whether most recent button press is start of new letter or word
def detect_termination():
global sequence
if sequence == "":
return
delta = calc_delta_in_sec(release, press)
# Check for start of new letter (gap equal to 3 dots)
if (delta >= ((BASE_TIME_SECONDS * 3) - TOLERANCE)) and (delta <= ((BASE_TIME_SECONDS * 4) + TOLERANCE)):
process_letter()
# Check for start of new word (gap equal to 7 dots - but assume anything > 7 dots is valid too)
elif delta >= ((BASE_TIME_SECONDS * 7) - TOLERANCE):
process_word()
# Process letter
def process_letter():
global sequence
character = ICM.symbols.get(sequence, '')
if character != '':
print("Interpreted sequence " + sequence + " as the letter: " + character)
letters.append(character)
sequence = ""
return True
else:
print('Invalid sequence: ' + sequence + " (deleting current sequence)")
sequence = ""
return False
# Process word
def process_word():
if process_letter():
word = ''.join(letters)
letters[:] = []
if word == "AR":
print("End of transmission. Here's your message: " + ' '.join(words))
print('\nClearing previous transmission. Start a new one now...\n')
words[:] = []
else:
words.append(word)
# Interpret button click (press/release) as dot, dash or unrecognized
def interpret_input():
global sequence
delta = calc_delta_in_sec(press, release)
if (delta >= (BASE_TIME_SECONDS - TOLERANCE)) and (delta <= (BASE_TIME_SECONDS + TOLERANCE)):
sequence += '.'
print(str(delta) + " : Added dot to sequence: " + sequence)
elif (delta >= ((BASE_TIME_SECONDS * 3) - TOLERANCE)) and (delta <= ((BASE_TIME_SECONDS * 3) + TOLERANCE)):
sequence += '-'
print(str(delta) + " : Added dash to sequence: " + sequence)
else:
print(str(delta) + " : Unrecognized input!")
def calc_delta_in_sec(time1, time2):
delta = time2 - time1
return delta.seconds + (delta.microseconds / 1000000.0)
try:
initialize_gpio()
initialize_metronome()
initialize_button()
message = raw_input("\nPress any key to exit.\n")
finally:
    print("Goodbye!")
|
# encoding:utf-8
__author__ = 'shiliang'
__date__ = '2019/2/21 19:28'
import xadmin
from xadmin import views
from .models import EmailVerifyRecord
# Admin management class: with xadmin we inherit from object instead of admin.
class EmailVerifyRecordAdmin(object):
    # Columns to display in the admin list view
list_display = ['code', 'email', 'send_type', 'send_time']
    # Searchable fields (no search on the time field)
search_fields = ['code', 'email', 'send_type']
    # Fields available for list filtering
list_filter = ['code', 'email', 'send_type', 'send_time']
'''
Create the global xadmin settings class and bind it to the views.
'''
class BaseSetting(object):
    # Enable the theme-switching feature
enable_themes = True
use_bootswatch = True
'''
Global xadmin configuration (site title, footer and menu style).
'''
class GlobalSettings(object):
site_title = '文献数据挖掘系统管理后台'
site_footer = 'Copyright 2019 shiliang, Inc. NXU.'
    # Collapse the left menu into an accordion
menu_style = 'accordion'
xadmin.site.register(EmailVerifyRecord, EmailVerifyRecordAdmin)
# Bind the global settings to the base admin view
xadmin.site.register(views.BaseAdminView, BaseSetting)
# Register the header and footer information:
xadmin.site.register(views.CommAdminView, GlobalSettings)
|
from PySide2.QtGui import *
from PySide2.QtCore import *
from PySide2.QtWidgets import *
import traceback
from mcedit_ui.ui_sprite_editor import Ui_SpriteEditor
from mcedit_ui.clickable_graphics_scene import *
from mcedit_ui.custom_graphics_items import *
from mclib.sprite_loading import SpriteLoadingData
from mclib.sprite import Sprite
from mclib.docs import Docs
class SpriteEditorDialog(QDialog):
def __init__(self, main_window):
super().__init__(main_window)
self.ui = Ui_SpriteEditor()
self.ui.setupUi(self)
self.game = main_window.game
self.rom = self.game.rom
self.renderer = main_window.renderer
self.type = -1
self.subtype = -1
self.form = -1
self.sprite_graphics_scene = ClickableGraphicsScene()
self.ui.sprite_graphics_view.setScene(self.sprite_graphics_scene)
self.ui.enemy_list.currentRowChanged.connect(self.enemy_changed)
self.ui.object_list.currentRowChanged.connect(self.object_changed)
self.ui.npc_list.currentRowChanged.connect(self.npc_changed)
self.ui.player_list.currentRowChanged.connect(self.player_changed)
self.ui.type_4s_list.currentRowChanged.connect(self.type_4_changed)
self.ui.player_items_list.currentRowChanged.connect(self.player_item_changed)
self.ui.form_index.activated.connect(self.form_changed)
self.ui.anim_index.activated.connect(self.anim_changed)
self.ui.frame_index.activated.connect(self.frame_changed)
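        # Map each entity type ID to the list widget that displays that category of sprites.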
self.type_to_list_widget = {
3: self.ui.enemy_list,
6: self.ui.object_list,
7: self.ui.npc_list,
1: self.ui.player_list,
4: self.ui.type_4s_list,
8: self.ui.player_items_list,
}
self.type_and_row_index_to_subtype = {}
for type, list_widget in self.type_to_list_widget.items():
self.type_and_row_index_to_subtype[type] = []
subtypes = Docs.get_all_subtypes_for_type("entity", type)
for subtype in subtypes:
self.type_and_row_index_to_subtype[type].append(subtype)
form = -1 # TODO kinda hacky to do it this way
text = "%02X-%02X %s" % (
type, subtype,
Docs.get_name_for_entity("entity", type, subtype, form)
)
list_widget.addItem(text)
self.show()
def enemy_changed(self, row_index):
type = 3
subtype = self.type_and_row_index_to_subtype[type][row_index]
form = 0
self.sprite_changed(type, subtype, form)
def object_changed(self, row_index):
type = 6
subtype = self.type_and_row_index_to_subtype[type][row_index]
form = 0
self.sprite_changed(type, subtype, form)
def npc_changed(self, row_index):
type = 7
subtype = self.type_and_row_index_to_subtype[type][row_index]
form = 0
self.sprite_changed(type, subtype, form)
def player_changed(self, row_index):
type = 1
subtype = self.type_and_row_index_to_subtype[type][row_index]
form = 0
self.sprite_changed(type, subtype, form)
def type_4_changed(self, row_index):
type = 4
subtype = self.type_and_row_index_to_subtype[type][row_index]
form = 0
self.sprite_changed(type, subtype, form)
def player_item_changed(self, row_index):
type = 8
subtype = self.type_and_row_index_to_subtype[type][row_index]
form = 0
self.sprite_changed(type, subtype, form)
def form_changed(self, form):
self.sprite_changed(self.type, self.subtype, form)
def sprite_changed(self, type, subtype, form):
#print(type, subtype, form)
if self.type == type and self.subtype == subtype:
only_form_changed = True
else:
only_form_changed = False
self.type = type
self.subtype = subtype
self.form = form
self.sprite_graphics_scene.clear()
self.ui.anim_index.clear()
self.ui.frame_index.clear()
if not only_form_changed:
self.ui.form_index.clear()
forms = Docs.get_all_forms_for_subtype("entity", self.type, self.subtype)
for other_form in forms:
form_name = Docs.get_name_for_entity_form("entity", self.type, self.subtype, other_form)
self.ui.form_index.addItem("%02X %s" % (other_form, form_name))
self.loading_data = SpriteLoadingData(type, subtype, form, self.rom)
if self.loading_data.has_no_sprite:
self.sprite = None
return
self.sprite = Sprite(self.loading_data.sprite_index, self.rom)
# TODO: how to determine number of anims and frames?
num_frames = 0xFF
num_anims = 0xFF
for i in range(num_frames):
self.ui.frame_index.addItem("%02X" % i)
if self.sprite.animation_list_ptr == 0:
self.frame_changed(0)
else:
for i in range(num_anims):
self.ui.anim_index.addItem("%02X" % i)
self.anim_changed(0)
def anim_changed(self, anim_index):
self.ui.anim_index.setCurrentIndex(anim_index)
try:
anim = self.sprite.get_animation(anim_index)
except Exception as e:
stack_trace = traceback.format_exc()
error_message = "Error getting animation:\n" + str(e) + "\n\n" + stack_trace
QMessageBox.warning(self,
"Error getting animation",
error_message
            )
            return
keyframe = anim.keyframes[0]
# TODO: how to handle the keyframe's h and v flip?
frame_index = keyframe.frame_index
self.frame_changed(frame_index)
def frame_changed(self, frame_index):
self.ui.frame_index.setCurrentIndex(frame_index)
self.sprite_graphics_scene.clear()
try:
offsets = (0, 0)
extra_frame_indexes = []
frame_image, x_off, y_off = self.renderer.render_entity_frame(self.loading_data, frame_index, offsets, extra_frame_indexes)
except Exception as e:
stack_trace = traceback.format_exc()
error_message = "Error rendering frame:\n" + str(e) + "\n\n" + stack_trace
QMessageBox.warning(self,
"Error rendering frame",
error_message
            )
            return
        if frame_image is None:
return
item = GraphicsImageItem(frame_image, x_off, y_off, draw_border=False)
item.setPos(x_off, y_off)
self.sprite_graphics_scene.addItem(item)
|
import os
import pickle
from pathlib import Path
import networkx as nx
import numpy as np
import sys
# sys.path.insert(1, "./transitions_main")
from transitions_main import check
def nodes_func(nodes_file,data_name):
nodes = set()
year_nodes = dict()
nodes_year_labels = dict()
for i, row in enumerate(nodes_file):
node, year, label, count, percent_year = row.split(",")
nodes.add(node) # nodes old id
year_nodes[year] = year_nodes.get(year, set()) | {node} # {year: nodes_old_id}
node_year_id = str(node) + "_" + str(year)
nodes_year_labels[node_year_id] = nodes_year_labels.get(node_year_id, []) + [label] * int(count)
old_to_new_nid = {old_id: i for i, old_id in enumerate(sorted(nodes,key=int))} # {old_id: new_id}
new_to_old_nid = {new_id: old_id for old_id, new_id in old_to_new_nid.items()} # {new_id: old_id}
pickle.dump(old_to_new_nid, open("./dataset/" + data_name + "/pkl/old_to_new_nid.pkl", "wb"))
pickle.dump(new_to_old_nid, open("./dataset/" + data_name + "/pkl/new_to_old_nid.pkl", "wb"))
nodes_id = set(k for k in new_to_old_nid.keys()) # set of new id
year_nodeids = dict() # {year: new node id}
for year, l_nodes in year_nodes.items():
for n in l_nodes:
year_nodeids[year] = year_nodeids.get(year, set()) | set([old_to_new_nid[n]])
year_new_nodeid_labels = dict()
for key, val in nodes_year_labels.items():
old = key.split("_")[0]
n = old_to_new_nid[old]
y = int(key.split("_")[1])
if y not in year_new_nodeid_labels:
year_new_nodeid_labels[y] = {}
year_new_nodeid_labels[y][n] = val
return year_nodeids, old_to_new_nid, nodes_id, year_new_nodeid_labels
def year_id_label_freq(year_new_nodeid_labels, num_of_labels):
count_label = dict()
for year in year_new_nodeid_labels.keys():
if year not in count_label:
count_label[year] = dict()
for node, labels in year_new_nodeid_labels[year].items():
l = [0] * num_of_labels
value, counts = np.unique(labels, return_counts=True)
for val, c in zip(value, counts):
norm_counts = c / counts.sum()
l[int(val)] = norm_counts
count_label[year][node] = l
return count_label
def create_tag_list_by_year(count_label, nodes_id):
l = []
years = sorted(list(count_label.keys()))
for year in years:
y = []
for id in nodes_id:
if id not in count_label[year]:
y.append(-1)
else:
y.append(count_label[year][id])
l.append(y)
return l
def build_graphs(nodes_id, old_to_new_nid, edges_file, years_count,start_year):
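    """Build one graph per year: each graph starts with every node, and edges are added to the graph of their year."""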
initial_g = nx.Graph()
initial_g.add_nodes_from(nodes_id)
g = [initial_g.copy() for _ in range(years_count)]
all_edges_count = 0
for line in edges_file:
spline = line.split(',') # Count right now not as weights, can be added if necessary
year_idx = int(spline[2]) - start_year
if spline[0] not in old_to_new_nid or spline[1] not in old_to_new_nid:
continue
else:
all_edges_count += 1
g[year_idx].add_edge(old_to_new_nid[spline[0]], old_to_new_nid[spline[1]])
return g
def check(file_path,current_path=False):
directory = os.path.dirname(file_path)
if not os.path.exists(directory):
os.makedirs(directory)
print("A new directory was created: "+str(file_path))
else:
print("directory exists: "+str(file_path))
if current_path:
if os.path.isfile(current_path):
Path(current_path).rename(file_path)
print("input files moved to directory:", str(file_path))
elif os.path.isfile(file_path):
print("input file exists in directory:", str(file_path))
else:
print("input file is missing in directory", str(file_path))
return file_path
def main_prep(dataset_name, e_path, n_path,num_of_labels,start_year, all_labeles_are_used='no'):
# n_path = check("../dataset/" + str(dataset_name) + "/input_files/nodes.csv")
# e_path = check("../dataset/" + str(dataset_name) + "/input_files/edges.csv")
nodes_file = open(n_path)
edges_file = open(e_path)
next(nodes_file)
next(edges_file)
year_nodeids, old_to_new_nid, nodes_id, year_new_nodeid_labels = nodes_func(nodes_file, str(dataset_name))
y_id_tag_dist = year_id_label_freq(year_new_nodeid_labels,num_of_labels)
graphs = build_graphs(nodes_id, old_to_new_nid, edges_file, len(year_nodeids), start_year)
labels = create_tag_list_by_year(y_id_tag_dist, nodes_id)
check("./dataset/" + str(dataset_name) + "/pkl/gcn_input/")
for i in range(len(graphs)):
pickle.dump(graphs[i], open("./dataset/" + dataset_name +"/pkl/gcn_input/graph_" + str(i) + ".pkl", "wb"))
pickle.dump(labels[i], open("./dataset/" + dataset_name + "/pkl/gcn_input/labels_" + str(i) + ".pkl", "wb"))
return
if __name__ == "__main__":
    # NOTE: main_prep() expects a dataset name plus edge/node file paths (it opens the files itself),
    # along with dataset-specific num_of_labels and start_year; the values below are illustrative placeholders.
    data_name = 'DBLP'
    num_of_labels = 4    # placeholder: set to the real number of labels
    start_year = 1990    # placeholder: set to the first year in the edges file
    main_prep(data_name, "edges_little.csv", "nodes_little.csv", num_of_labels, start_year)
|
import ray
class _NullLogSpan:
"""A log span context manager that does nothing"""
def __enter__(self):
pass
def __exit__(self, type, value, tb):
pass
NULL_LOG_SPAN = _NullLogSpan()
def profile(event_type, extra_data=None):
"""Profile a span of time so that it appears in the timeline visualization.
Note that this only works in the raylet code path.
This function can be used as follows (both on the driver or within a task).
.. code-block:: python
with ray.profiling.profile("custom event", extra_data={'key': 'val'}):
# Do some computation here.
Optionally, a dictionary can be passed as the "extra_data" argument, and
it can have keys "name" and "cname" if you want to override the default
timeline display text and box color. Other values will appear at the bottom
of the chrome tracing GUI when you click on the box corresponding to this
profile span.
Args:
event_type: A string describing the type of the event.
extra_data: This must be a dictionary mapping strings to strings. This
data will be added to the json objects that are used to populate
the timeline, so if you want to set a particular color, you can
simply set the "cname" attribute to an appropriate color.
Similarly, if you set the "name" attribute, then that will set the
text displayed on the box in the timeline.
Returns:
An object that can profile a span of time via a "with" statement.
"""
worker = ray.worker.global_worker
if worker.mode == ray.worker.LOCAL_MODE:
return NULL_LOG_SPAN
return worker.core_worker.profile_event(
event_type.encode("ascii"), extra_data)
|
#!/usr/bin/env python3
"""転移学習の練習用コード。Food-101を10クラスの不均衡データにしたもの。
train: 25250 -> 250+10*9 = 340 samples
val: 75750 samples
val_acc: 0.582
"""
import pathlib
import typing
import albumentations as A
import numpy as np
import tensorflow as tf
import pytoolkit as tk
num_classes = 10
train_shape = (256, 256, 3)
predict_shape = (256, 256, 3)
batch_size = 16
epochs = 1800
base_lr = 3e-5
data_dir = pathlib.Path("data/food-101")
models_dir = pathlib.Path(f"models/{pathlib.Path(__file__).stem}")
app = tk.cli.App(output_dir=models_dir)
logger = tk.log.get(__name__)
@app.command(logfile=False)
def check():
create_model(100).check(load_data()[0].slice(list(range(10))))
@app.command(use_horovod=True)
def train():
train_set, val_set = load_data()
model = create_model(len(train_set))
evals = model.train(train_set, val_set)
tk.notifications.post_evals(evals)
@app.command(use_horovod=True)
def validate():
train_set, val_set = load_data()
model = create_model(len(train_set)).load()
pred = model.predict(val_set, fold=0)
if tk.hvd.is_master():
tk.evaluations.print_classification(val_set.labels, pred)
@tk.cache.memoize("cache___", prefix="food_ib")
def load_data():
train_set, val_set = tk.datasets.load_trainval_folders(data_dir, swap=True)
indices = np.concatenate(
[
np.where(train_set.labels == 0)[0],
np.where(train_set.labels == 1)[0][:10],
np.where(train_set.labels == 2)[0][:10],
np.where(train_set.labels == 3)[0][:10],
np.where(train_set.labels == 4)[0][:10],
np.where(train_set.labels == 5)[0][:10],
np.where(train_set.labels == 6)[0][:10],
np.where(train_set.labels == 7)[0][:10],
np.where(train_set.labels == 8)[0][:10],
np.where(train_set.labels == 9)[0][:10],
]
)
train_set = train_set.slice(indices)
val_set = val_set.slice(np.where(val_set.labels <= 9)[0])
return train_set, val_set
def create_model(train_size):
return tk.pipeline.KerasModel(
create_network_fn=lambda: create_network(train_size),
score_fn=tk.evaluations.evaluate_classification,
nfold=1,
train_data_loader=MyDataLoader(data_augmentation=True),
val_data_loader=MyDataLoader(),
epochs=epochs,
# callbacks=[tk.callbacks.CosineAnnealing()],
models_dir=models_dir,
model_name_format="model.h5",
skip_if_exists=False,
)
def create_network(train_size):
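    # EfficientNet-B3 backbone + GeM pooling + a zero-initialised linear head; the loss consumes raw logits with label smoothing.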
inputs = x = tf.keras.layers.Input((None, None, 3))
backbone = tk.applications.efficientnet.create_b3(input_tensor=x)
x = backbone.output
x = tk.layers.GeMPooling2D()(x)
x = tf.keras.layers.Dense(
num_classes,
kernel_initializer="zeros",
kernel_regularizer=tf.keras.regularizers.l2(1e-4),
)(x)
model = tf.keras.models.Model(inputs=inputs, outputs=x)
global_batch_size = batch_size * tk.hvd.size() * app.num_replicas_in_sync
learning_rate = tk.schedules.ExponentialDecay(
initial_learning_rate=base_lr * global_batch_size,
decay_steps=-(-train_size // global_batch_size) * epochs,
)
optimizer = tf.keras.optimizers.SGD(
learning_rate=learning_rate, momentum=0.9, nesterov=True
)
def loss(y_true, logits):
return tk.losses.categorical_crossentropy(
y_true, logits, from_logits=True, label_smoothing=0.2
)
tk.models.compile(model, optimizer, loss, ["acc"])
x = tf.keras.layers.Activation(activation="softmax")(x)
pred_model = tf.keras.models.Model(inputs=inputs, outputs=x)
tk.models.compile(pred_model, optimizer, loss, ["acc"])
return model, pred_model
class MyDataLoader(tk.data.DataLoader):
def __init__(self, data_augmentation=False):
super().__init__(
batch_size=batch_size, data_per_sample=2 if data_augmentation else 1
)
self.data_augmentation = data_augmentation
self.aug2: typing.Any = None
if self.data_augmentation:
self.aug1 = A.Compose(
[
tk.image.RandomTransform(
size=train_shape[:2],
base_scale=predict_shape[0] / train_shape[0],
),
tk.image.RandomColorAugmentors(noisy=True),
]
)
self.aug2 = tk.image.RandomErasing()
else:
self.aug1 = tk.image.Resize(size=predict_shape[:2])
self.aug2 = None
def get_data(self, dataset: tk.data.Dataset, index: int):
X, y = dataset.get_data(index)
X = tk.ndimage.load(X)
X = self.aug1(image=X)["image"]
y = tf.keras.utils.to_categorical(y, num_classes)
return X, y
def get_sample(self, data):
if self.data_augmentation:
sample1, sample2 = data
X, y = tk.ndimage.mixup(sample1, sample2, mode="beta")
X = self.aug2(image=X)["image"]
else:
X, y = super().get_sample(data)
X = tk.ndimage.preprocess_tf(X)
return X, y
if __name__ == "__main__":
app.run(default="train")
|
from _md5 import md5
from urllib.parse import urlencode
from jinja2 import environment
from slugify import slugify
environment.DEFAULT_FILTERS['md5'] = lambda s: md5(s.encode('utf-8'))
environment.DEFAULT_FILTERS['hexdigest'] = lambda s: s.hexdigest()
environment.DEFAULT_FILTERS['urlencode'] = urlencode
environment.DEFAULT_FILTERS['slugify'] = slugify
|
from typing import TYPE_CHECKING, List, Literal
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import relationship
from sqlalchemy.sql.expression import cast
from sqlalchemy.sql.schema import Column, ForeignKey
from sqlalchemy.sql.sqltypes import BOOLEAN, INTEGER, SMALLINT
from typing_extensions import TypeAlias
from app.database import BaseModel, CancelableModel, DatedModel
if TYPE_CHECKING:
from app.apis.v1.users.models import User
from ._Project import Project
RoleType: TypeAlias = Literal["admin", "editor", "viewer"]
class ProjectUser(BaseModel, DatedModel, CancelableModel):
"""holds references to users assigned to a project"""
__tablename__ = "project_users"
PROJECT_ROLES: List[RoleType] = ["admin", "editor", "viewer"]
ADMIN = "admin"
EDITOR = "editor"
VIEWER = "viewer"
project_id = Column(INTEGER, ForeignKey("projects.id"))
user_id = Column(INTEGER, ForeignKey("users.id"))
user = relationship("User", foreign_keys=[user_id], uselist=False)
is_active = Column(
BOOLEAN,
nullable=False,
server_default=cast(True, BOOLEAN),
comment="flags the validity of user in the project",
)
role_id = Column(SMALLINT, nullable=False, comment="user's role in this project")
def __init__(self, project: "Project", user: "User", role: RoleType) -> None:
assert role in ProjectUser.PROJECT_ROLES
self.project_id = project.id
self.user_id = user.id
self.role_id = ProjectUser.PROJECT_ROLES.index(role)
@hybrid_property
def role(self) -> RoleType:
return ProjectUser.PROJECT_ROLES[self.role_id]
|
from discord.ext import commands
class Fun(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.cooldown(1, 2, commands.BucketType.guild)
@commands.command()
async def norskeuniversiteter(self, ctx):
await ctx.send('<https://imgur.com/a/uGopaSq>')
@commands.cooldown(1, 2, commands.BucketType.guild)
@commands.command()
async def informatikkuio(self, ctx):
await ctx.send('https://i.imgur.com/ypyK1mi.jpg')
@commands.cooldown(1, 2, commands.BucketType.guild)
@commands.command()
async def informatikkuio2(self, ctx):
await ctx.send('https://i.imgur.com/ZqgZEEA.jpg')
@commands.cooldown(1, 2, commands.BucketType.guild)
@commands.command()
async def informatikkuio3(self, ctx):
await ctx.send('https://i.imgur.com/Gx9DQE5.jpg')
@commands.cooldown(1, 2, commands.BucketType.guild)
@commands.command()
async def uio(self, ctx):
await ctx.send('https://i.imgur.com/188MoIV.jpg')
@commands.cooldown(1, 2, commands.BucketType.guild)
@commands.command()
async def ntnu(self, ctx):
await ctx.send('https://twitter.com/NTNU/status/970667413564993536')
@commands.cooldown(1, 2, commands.BucketType.guild)
@commands.command()
async def ntnu2(self, ctx):
await ctx.send('https://i.imgur.com/h84fknj.jpg')
def setup(bot):
bot.add_cog(Fun(bot))
|
from __future__ import unicode_literals
from geckoboard import Dataset, Field as F
from datetime import date, datetime, timedelta
def test_create_delete(session):
fields = {
'date': F.date('Date', unique=True),
'datetime': F.datetime('Date Time'),
'number': F.number('Number'),
'percentage': F.percentage('Percentage'),
'string': F.string('String'),
'money': F.money('Dollars', 'USD'),
}
result = Dataset.create(session, 'test', fields)
assert result.id == 'test'
assert result.fields == fields
invalid_field = F("invalid", None)
for field in fields.values():
assert field != invalid_field
assert result.delete() is True
def _tmp_data(start_date, count):
def _day_date(day_offset):
return start_date + timedelta(days=day_offset)
return [{
'date': _day_date(offset),
'datetime': datetime(2016, 9, 23),
'number': 22,
'percentage': 0.5,
'string': "test string",
'money': 7.95
} for offset in range(count)]
def test_replace(tmp_dataset):
data = _tmp_data(date(2016, 9, 23), 5)
assert tmp_dataset.replace(data) is True
def test_append(tmp_dataset):
data = _tmp_data(date(2016, 9, 23), 5)
assert tmp_dataset.append(data, delete_by='date') is True
|
from mpi4py import MPI
import numpy as np
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
if rank == 0:
# in real code, this section might
# read in data parameters from a file
numData = 10
    comm.send(numData, dest=1)  # lowercase send/recv pickle generic Python objects
    data = np.linspace(0.0, 3.14, numData)
    comm.Send(data, dest=1)  # uppercase Send/Recv transfer the NumPy buffer directly
elif rank == 1:
numData = comm.recv(source=0)
print('Number of data to receive: ',numData)
data = np.empty(numData, dtype='d') # allocate space to receive the array
comm.Recv(data, source=0)
print('data received: ',data)
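# To exercise both ranks, launch this script under MPI with two processes, e.g.
# (the script name below is a placeholder):
#   mpiexec -n 2 python send_recv_example.py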
|
'''
Function:
define the base model for all models
Author:
Zhenchao Jin
'''
import copy
import torch
import numpy as np
import torch.nn as nn
import torch.distributed as dist
from ...losses import *
from ...backbones import *
from ..base import BuildAssigner
'''base model'''
class BaseModel(nn.Module):
def __init__(self, cfg, **kwargs):
super(BaseModel, self).__init__()
self.cfg = cfg
self.mode = kwargs.get('mode')
assert self.mode in ['TRAIN', 'TEST']
self.norm_cfg, self.act_cfg = cfg['norm_cfg'], cfg['act_cfg']
# build backbone_net
backbone_cfg = copy.deepcopy(cfg['backbone'])
backbone_cfg.update({'norm_cfg': self.norm_cfg})
backbone_net = BuildBackbone(backbone_cfg)
if backbone_cfg['series'] in ['vgg']:
self.backbone_net_stage1 = backbone_net.features
self.backbone_net_stage2 = backbone_net.classifier
else:
raise ValueError('fail to parse backbone series %s' % backbone_cfg['series'])
# build roi extractor
self.roi_extractor = BuildRoILayer(copy.deepcopy(cfg['roi_extractor']))
# build assigner
self.assigner = BuildAssigner(copy.deepcopy(cfg['assigner']))
# build head
head_cfg = copy.deepcopy(cfg['head'])
head_cfg.update({'num_classes': cfg['num_classes']})
self.buildhead(head_cfg)
# freeze norm layer
if cfg.get('is_freeze_norm', True): self.freezenormalization()
'''forward'''
def forward(self, x, proposals, targets=None, losses_cfg=None):
raise NotImplementedError('not to be implemented')
'''build head'''
def buildhead(self, cfg):
raise NotImplementedError('not to be implemented')
'''return all layers with learnable parameters'''
def alllayers(self):
raise NotImplementedError('not to be implemented')
'''freeze normalization'''
def freezenormalization(self):
for module in self.modules():
if type(module) in BuildNormalization(only_get_all_supported=True):
module.eval()
'''calculate the losses'''
def calculatelosses(self, predictions, targets, losses_cfg):
assert (len(predictions) == len(targets)) and (len(targets) == len(losses_cfg))
# calculate loss according to losses_cfg
losses_log_dict = {}
for loss_name, loss_cfg in losses_cfg.items():
losses_log_dict[loss_name] = self.calculateloss(
prediction=predictions[loss_name],
target=targets[loss_name],
loss_cfg=loss_cfg
)
loss = 0
for key, value in losses_log_dict.items():
value = value.mean()
loss += value
losses_log_dict[key] = value
losses_log_dict.update({'total': loss})
# convert losses_log_dict
for key, value in losses_log_dict.items():
if dist.is_available() and dist.is_initialized():
value = value.data.clone()
dist.all_reduce(value.div_(dist.get_world_size()))
losses_log_dict[key] = value.item()
else:
losses_log_dict[key] = torch.Tensor([value.item()]).type_as(loss)
# return the loss and losses_log_dict
return loss, losses_log_dict
'''calculate the loss'''
def calculateloss(self, prediction, target, loss_cfg):
# define the supported losses
supported_losses = {
'celoss': CrossEntropyLoss,
'sigmoidfocalloss': SigmoidFocalLoss,
'binaryceloss': BinaryCrossEntropyLoss,
}
# calculate the loss
loss = 0
for key, value in loss_cfg.items():
assert key in supported_losses, 'unsupport loss type %s...' % key
loss += supported_losses[key](
prediction=prediction,
target=target,
scale_factor=value['scale_factor'],
**value['opts']
)
# return the loss
return loss
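# An illustrative shape for the `losses_cfg` consumed by calculatelosses/calculateloss
# above (the prediction/target names, loss choices and numbers are hypothetical; each
# entry maps a loss type to its scale factor plus extra constructor options):
#
#   losses_cfg = {
#       'loss_cls': {'celoss': {'scale_factor': 1.0, 'opts': {}}},
#       'loss_reg': {'binaryceloss': {'scale_factor': 0.5, 'opts': {}}},
#   }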
|
"""Declare API endpoints with Django RestFramework viewsets."""
from django.apps import apps
from django.shortcuts import get_object_or_404
from rest_framework.decorators import api_view
from rest_framework.response import Response
from .. import defaults, serializers
from ..models import Video
from ..utils.api_utils import validate_signature
class ObjectPkMixin:
"""
Get the object primary key from the URL path.
This is useful to avoid making extra requests using view.get_object() on
a ViewSet when we only need the object's id, which is available in the URL.
"""
def get_object_pk(self):
"""Get the object primary key from the URL path."""
lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
return self.kwargs.get(lookup_url_kwarg)
@api_view(["POST"])
def update_state(request):
"""View handling AWS POST request to update the state of an object by key.
Parameters
----------
request : Type[django.http.request.HttpRequest]
The request on the API endpoint, it should contain a payload with the following fields:
- key: the key of an object in the source bucket as delivered in the upload policy,
- state: state of the upload, should be either "ready" or "error",
- extraParameters: Dict containing arbitrary data sent from AWS Lambda.
Returns
-------
Type[rest_framework.response.Response]
HttpResponse acknowledging the success or failure of the state update operation.
"""
msg = request.body
serializer = serializers.UpdateStateSerializer(data=request.data)
if serializer.is_valid() is not True:
return Response(serializer.errors, status=400)
# Check if the provided signature is valid against any secret in our list
if not validate_signature(request.headers.get("X-Marsha-Signature"), msg):
return Response("Forbidden", status=403)
# Retrieve the elements from the key
key_elements = serializer.get_key_elements()
# Update the object targeted by the "object_id" and "resource_id"
model = apps.get_model(app_label="core", model_name=key_elements["model_name"])
extra_parameters = serializer.validated_data["extraParameters"]
if (
serializer.validated_data["state"] == defaults.READY
and hasattr(model, "extension")
and "extension" not in extra_parameters
):
# The extension is part of the s3 key name and added in this key
# when generated by the initiate upload
extra_parameters["extension"] = key_elements.get("extension")
try:
object_instance = model.objects.get(id=key_elements["object_id"])
except model.DoesNotExist:
return Response({"success": False}, status=404)
object_instance.update_upload_state(
upload_state=serializer.validated_data["state"],
uploaded_on=key_elements.get("uploaded_on")
if serializer.validated_data["state"] == defaults.READY
else None,
**extra_parameters,
)
return Response({"success": True})
@api_view(["POST"])
def recording_slices_manifest(request):
"""View handling AWS POST request to set a manifest on a record slice.
Parameters
----------
request : Type[django.http.request.HttpRequest]
The request on the API endpoint, it should contain a payload with the following fields:
- video_id: the pk of a video.
- harvest_job_id: the id of the harvest job.
- manifest_key: the manifest key of the record slice.
Returns
-------
Type[rest_framework.response.Response]
HttpResponse containing the current harvest status of all recording slices.
"""
# Check if the provided signature is valid against any secret in our list
if not validate_signature(request.headers.get("X-Marsha-Signature"), request.body):
return Response("Forbidden", status=403)
video = get_object_or_404(Video, pk=request.data["video_id"])
video.set_recording_slice_manifest_key(
request.data["harvest_job_id"], request.data["manifest_key"]
)
return Response({"success": True})
@api_view(["POST"])
def recording_slices_state(request):
"""View handling AWS POST request to check each record slice harvest status by video pk.
Parameters
----------
request : Type[django.http.request.HttpRequest]
The request on the API endpoint, it should contain a payload with the following fields:
- video_id: the pk of a video.
Returns
-------
Type[rest_framework.response.Response]
HttpResponse containing the current harvest status of all recording slices.
"""
# Check if the provided signature is valid against any secret in our list
if not validate_signature(request.headers.get("X-Marsha-Signature"), request.body):
return Response("Forbidden", status=403)
video = get_object_or_404(Video, pk=request.data["video_id"])
return Response(video.get_recording_slices_state())
|
import mapnik
m = mapnik.Map(2560,2560)
mapnik.load_map(m, "mapnik.xml")
m.zoom_all()
mapnik.render_to_file(m, "the_image.png")
|
import inspect
from .metric import Metric
from .decoding import Decoding
from .dimensionality import Dimensionality
from .factorization import Factorization
from .generalization import Generalization
from .neural_fits import NeuralFits
from .rdm import RDM
from .sparsity import Sparsity
from .curvature import Curvature
from .trajectory import Trajectory
metrics_dict = {k: v for k, v in globals().items() if inspect.isclass(v) and issubclass(v, Metric) and v != Metric}
### TODO: Instead of creating a metrics_dict, change implementation to getattr(importlib.import_module('nn_analysis.metric.custom_metrics.some_metric'), 'SomeMetric') ###
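# A rough sketch of the importlib-based alternative described in the TODO above
# (the module path and class name below are the hypothetical examples from the TODO,
# not shipped modules):
#
#   import importlib
#
#   def load_metric(module_name: str, class_name: str) -> type:
#       module = importlib.import_module(f"nn_analysis.metric.custom_metrics.{module_name}")
#       return getattr(module, class_name)
#
#   # e.g. load_metric("some_metric", "SomeMetric")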
|
from xml.etree.ElementTree import fromstring, ElementTree
import time, datetime, os
import json
import boto3
import urllib
S3R = boto3.resource('s3')
OUTPUT_BUCKET_NAME = os.environ['OUTPUT_BUCKET_NAME']
print('Loading function')
def lambda_handler(event, context):
bucket = event['Records'][0]['s3']['bucket']['name']
key = urllib.unquote_plus(event['Records'][0]['s3']['object']['key']).decode('utf8')
filename = key.rsplit('.', 1)[0].split('/')[-1] # Without the extension
try:
print('Getting S3 content for bucket:' + bucket)
s3_content = S3R.Bucket(bucket).Object(key).get()["Body"].read()
if "entity" in s3_content.lower() or "system" in s3_content.lower():
# Use error keyword as cloudwatch alert looking for this
raise Exception("Error: Injection attempt for bucket file name:" + bucket)
print('Parsing XML into a dictionary')
xml_list = parse_dmarc_xml_to_list(s3_content, filename)
print('Uploading the list into a JSON file on S3. Key:' + key)
upload_dmarc_json_to_s3(xml_list, filename)
except Exception as e:
print(e)
raise e
def parse_dmarc_xml_to_list(xml_string, filename):
"""
:param xml_string: string with the content of the XML file
:return: List of dict with the records and the metadata in each record
"""
try:
# create element tree object
tree = ElementTree(fromstring(xml_string))
except Exception:
# Some reports contain xml format errors. Killing function nicely so lambda does not retry the function.
# TODO: Dead Letter Queue
# Avoiding word Error so cloud watch alert is not triggered.
print ("Not well format for file name:" + filename)
exit(0)
# get root element
root = tree.getroot()
# Metadata - Only the one interested in
date_parsed = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
org_name = root.findtext("report_metadata/org_name", default="none")
report_id = root.findtext("report_metadata/report_id", default="none")
# records
records_list = []
for record in root.findall("record"):
record_dict = {}
# Add metadata to the record
record_dict.update({"date": date_parsed})
record_dict.update({"org_name": org_name})
record_dict.update({"report_id": report_id})
record_dict.update({"file_name": filename})
record_dict.update({"source_ip": record.findtext("row/source_ip", default="none")})
record_dict.update({"count": record.findtext("row/count", default="none")})
record_dict.update({"disposition": record.findtext("row/policy_evaluated/disposition", default="none")})
record_dict.update({"policy_dkim": record.findtext("row/policy_evaluated/dkim", default="none")})
record_dict.update({"policy_spf": record.findtext("row/policy_evaluated/spf", default="none")})
record_dict.update({"type": record.findtext("row/policy_evaluated/reason/type", default="none")})
record_dict.update({"header_from": record.findtext("identifiers/header_from", default="none")})
record_dict.update({"envelope_from": record.findtext("identifiers/envelope_from", default="none")})
record_dict.update({"envelope_to": record.findtext("identifiers/envelope_to", default="none")})
record_dict.update({"human_result": record.findtext("auth_results/dkim/human_result", default="none")})
record_dict.update({"spf_domain": record.findtext("auth_results/spf/domain", default="none")})
record_dict.update({"spf_result": record.findtext("auth_results/spf/result", default="none")})
record_dict.update({"spf_scope": record.findtext("auth_results/spf/scope", default="none")})
# DKIM can be a list as multiple signing can happen.
dkim_results = []
for dkim in record.findall("auth_results/dkim"):
dkim_dict = {}
dkim_dict.update({"dkim_domain": dkim.findtext("domain", default="none")})
dkim_dict.update({"dkim_result": dkim.findtext("result", default="none")})
dkim_dict.update({"dkim_selector": dkim.findtext("selector", default="none")})
dkim_results.append(dkim_dict)
record_dict.update({"dkim": dkim_results})
records_list.append(record_dict)
return records_list
def upload_dmarc_json_to_s3(recordList, filename):
    print('Temporarily saving the dict into a file')
tmp_file_name = '/tmp/dmarc.json'
g = open(tmp_file_name, 'w')
for record in recordList:
g.write(json.dumps(record) + '\n')
g.close()
# Upload the json file to S3
print('Uploading the JSON into the destination bucket for report id' + filename)
S3R.meta.client.upload_file(tmp_file_name, OUTPUT_BUCKET_NAME, filename + '.json')
|
import re
# order and rank functions are from here
# http://code.activestate.com/recipes/491268-ordering-and-ranking-for-lists/
def order(x, NoneIsLast=True, decreasing=False):
"""
Returns the ordering of the elements of x. The list
[ x[j] for j in order(x) ] is a sorted version of x.
Missing values in x are indicated by None. If NoneIsLast is true,
then missing values are ordered to be at the end.
Otherwise, they are ordered at the beginning.
"""
omitNone = False
if NoneIsLast is None:
NoneIsLast = True
omitNone = True
n = len(x)
    ix = list(range(n))  # materialize as a list so the in-place sort works on Python 3
if None not in x:
ix.sort(reverse=decreasing, key=lambda j: x[j])
else:
# Handle None values properly.
def key(i, x=x):
elem = x[i]
# Valid values are True or False only.
if decreasing == NoneIsLast:
return not(elem is None), elem
else:
return elem is None, elem
        ix = list(range(n))
ix.sort(key=key, reverse=decreasing)
if omitNone:
n = len(x)
for i in range(n-1, -1, -1):
if x[ix[i]] is None:
n -= 1
return ix[:n]
return ix
def rank(x, NoneIsLast=True, decreasing=False, ties="first"):
"""
Returns the ranking of the elements of x. The position of the first
element in the original vector is rank[0] in the sorted vector.
Missing values are indicated by None. Calls the order() function.
Ties are NOT averaged by default. Choices are:
"first" "average" "min" "max" "random" "average"
"""
omitNone = False
if NoneIsLast is None:
NoneIsLast = True
omitNone = True
O = order(x, NoneIsLast=NoneIsLast, decreasing=decreasing)
R = O[:]
n = len(O)
for i in range(n):
R[O[i]] = i
if ties == "first" or ties not in ["first", "average", "min", "max", "random"]:
return R
blocks = []
newblock = []
for i in range(1, n):
if x[O[i]] == x[O[i-1]]:
if i-1 not in newblock:
newblock.append(i-1)
newblock.append(i)
else:
if len(newblock) > 0:
blocks.append(newblock)
newblock = []
if len(newblock) > 0:
blocks.append(newblock)
for i, block in enumerate(blocks):
# Don't process blocks of None values.
if x[O[block[0]]] is None:
continue
if ties == "average":
s = 0.0
for j in block:
s += j
s /= float(len(block))
for j in block:
R[O[j]] = s
elif ties == "min":
s = min(block)
for j in block:
R[O[j]] = s
elif ties == "max":
s = max(block)
for j in block:
R[O[j]] = s
else:
for i, j in enumerate(block):
R[O[j]] = j
if omitNone:
R = [R[j] for j in range(n) if x[j] is not None]
return R
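# Illustrative behaviour of the helpers above (values chosen only for demonstration;
# None is ordered last by default):
#
#   order([30, 10, None, 20])   # -> [1, 3, 0, 2]
#   rank([30, 10, None, 20])    # -> [2, 0, 3, 1]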
def match_rank(query, strings, seq=3):
# create regular expression that (a) matches all letters of query, (b) correct order
# see http://stackoverflow.com/a/2897073/1318686 for more details
el = u'[^{s}]*({s})'
expr = u''.join([el.format(s=re.escape(c)) for c in query])
# create matches
mat = [re.match(expr, s, re.IGNORECASE) if query[0:seq].lower() in s.lower() else None for s in strings]
# position of matched elements
position = [[m.end(i) for i in range(1, m.lastindex+1, 1)] if m is not None else None for m in mat]
# proportion of query that is in sequence
letter_seq = [sum([p-pos[i-1] == 1 for i, p in enumerate(pos)][1::]) if pos is not None else None for pos in position]
# [1-float(sum([j-pos[i-1] == 1 for i, j in enumerate(pos)][1::]))/(len(query)-1) if pos is not None else None for pos in position]
# sum of position for matches
pos_sum = [sum(pos) if pos is not None else None for pos in position]
# rank elements
rank_seq = rank(letter_seq, decreasing=True)
rank_pos = rank(pos_sum)
# return ranked output object
return [(rank_seq[i]+rank_pos[i])/2 if m is not None else None for i, m in enumerate(mat)]
def fuzzy_search(query, elements, key=lambda x: x, rank=True, seq=3):
"""Fuzzy search for query in list of strings, dictionaries, tulpes, or lists
Args:
query: search string
elements: list of strings, dictionaries, tulpes, or lists
key: function to access string element in dictionaries, tulpes, or lists
rank: rank the elements in the return list by quality of match (currently not supported)
seq: minimum sequence of characters to match
Returns:
a ranked list of elements that matches the query
Fuzzy matching with rankning based on quality of match with two criteria
(a) sequence of characters (e.g. for query 'nor', 'nor' is better then 'nxoxr')
(b) earlier matches are better (e.g. for query 'nor', 'nor' is better then 'xnor')
"""
R = match_rank(query, [key(el) for el in elements], seq=seq)
out = [(el, R[i]) for i, el in enumerate(elements) if R[i] is not None]
return [el[0] for el in sorted(out, key=lambda el: el[1])]
# elements = [{'key': u'ZB7K535R', 'author': u'Reskin 2003', 'title': u'Including Mechanisms in Our Models of Ascriptive Inequality: 2002 Presidential Address'}, {'key': u'DBTD3HQS', 'author': u'Igor & Ronald 2008', 'title': u'Die Zunahme der Lohnungleichheit in der Bundesrepublik. Aktuelle Befunde f\xfcr den Zeitraum von 1998 bis 2005'}, {'key': u'BKTCNEGP', 'author': u'Kirk & Sampson 2013', 'title': u'Juvenile Arrest and Collateral Educational Damage in the Transition to Adulthood'}, {'key': u'9AN4SPKT', 'author': u'Turner 2003', 'title': u'The Structure of Sociological Theory'}, {'key': u'9M92EV6S', 'author': u'Bruhns et al. 1999', 'title': u'Die heimliche Revolution'}, {'key': u'25QBTM5P', 'author': u'Durkheim 1997', 'title': u'The Division of Labor in Society'}, {'key': u'MQ3BHTBJ', 'author': u'Marx 1978', 'title': u'Alienation and Social Class'}, {'key': u'7G4BRU45', 'author': u'Marx 1978', 'title': u'The German Ideology: Part I'}, {'key': u'9ANAZXQB', 'author': u'Llorente 2006', 'title': u'Analytical Marxism and the Division of Labor'}]
# query = 'marx'
# fuzzy_search(query, elements, key=lambda x: '%s - %s' % (x['author'], x['title']))
|
class Activity(dict):
    def __init__(self, name, desc, res_gained, res_consumed, optimal_env, duration=0, **kargs):
        self.name = name
        self.desc = desc
        self.res_gained = res_gained
        self.res_consumed = res_consumed
        self.optimal_env = optimal_env
        self.duration = duration
        # Attach any extra keyword arguments as attributes.
        for i in kargs:
            setattr(self, i, kargs[i])

    def finish(self):
        return self.res_gained, self.res_consumed
|
import time
import math
'''
FORMAT:
# Police ID
# First Name
# Last Name
# Current precinct
# Complaint id
# Month received
# Year received
# Month closed
# Year closed
# Command during incident
# Rank Abbreviation at incident
# Rank Abbreviation now
# Rank at incident
# Rank now
# Officer Ethnicity
# Officer Gender
# Officer Age
# Complainant ethnicity
# Complainant Gender
# Complainant Age
# Allegation Type
# Allegation
# Precinct
# Contact Reason
# Outcome
# Board Disposition
'''
f = 'Data/raw.csv'
data = ""
precincts = {}
officers = {}
outcomes = {}
totalCases = 0
def getPrecinct(info):
# print("PRECINCT: " + info)
if info not in precincts:
precincts[info] = 1
else:
precincts[info] += 1
def getOfficerInfo(info):
# print("OFFICER: " + ", ".join(info))
if " ".join(info[1:3]) in officers:
officers[" ".join(info[1:3])] += 1
else:
officers[" ".join(info[1:3])] = 1
def getOutcome(info):
# print("OUTCOME: " + ", ".join(info))
officerResult = info[-1]
if officerResult in outcomes:
outcomes[officerResult] += 1
else:
outcomes[officerResult] = 1
def getIncidentInfo(info):
info = info.split(",")
getOfficerInfo(info[:3] + info[14:17])
getComplaintantInfo(info[17:20])
getOutcome(info[-2:])
getPrecinct(info[22])
# print("\n")
# time.sleep(1)
def getComplaintantInfo(info):
# print("COMPLAINTANT: " + ", ".join(info))
pass
def processData(data):
global totalCases
incidents = data.split("\n")
totalCases = len(incidents)
for incident in incidents:
if incident:
getIncidentInfo(incident)
def getTotalSubstantiated(dictionary):
totalSub = 0
for case in dictionary:
if "Substantiated" in case:
if "Un" in case:
pass
else:
totalSub += dictionary[case]
return totalSub + 1
with open(f,'r') as f:
line = f.readline()
# print(line) # throwaway line for headings
line = f.readline()
while line:
data += line + "\n"
line = f.readline()
processData(data)
sorted_Officers = sorted(officers.items(), key= lambda x: x[1], reverse=True)
sorted_Precincts = sorted(precincts.items(), key= lambda x: x[1], reverse=True)
sorted_Outcomes = sorted(outcomes.items(), key= lambda x: x[1], reverse=True)
for i in sorted_Officers:
print(i[0],i[1])
for i in sorted_Precincts:
print(i[0],i[1])
for i in sorted_Outcomes:
print(i[0] + ":", i[1])
totalSubstantiated = getTotalSubstantiated(outcomes)
print("TOTAL CASES: " + str(totalCases))
print("TOTAL SUBSTANTIATED: " + str(totalSubstantiated))
totalUnsubstantiated = totalCases-totalSubstantiated
print("TOTAL UNSUBSTANTIATED: " + str(totalUnsubstantiated))
percentageSubstantiated = totalSubstantiated/totalCases * 100
percentageUnsubstantiated = totalUnsubstantiated/totalCases * 100
print("% SUBSTANTIATED: " + str(round(percentageSubstantiated,2)))
print("% UNSUBSTANTIATED: " + str(round(percentageUnsubstantiated,2)))
# print(data)
|
import argparse
import os
import shutil
def _parse_args():
parser = argparse.ArgumentParser(description="arg parser")
parser.add_argument(
"--dir", type=str, required=True, help="directory to be cleaned"
)
parser.add_argument(
"--date", type=int, required=True, help="logs before this date will be removed"
)
args = parser.parse_args()
assert args.date >= 20200101 and args.date <= 20201231
return args
def _user_confirms(prompt):
yes = {"yes", "y"}
no = {"no", "n"}
choice = input(prompt).lower()
if choice in yes:
return True
elif choice in no:
return False
else:
print(f"Invalid input: {choice}")
return False
def _log_dates_earlier_than(log_dir, reference_date):
    """Return True if the directory's leading YYYYMMDD date is older than reference_date
    (directory names are expected to look like '20200315_something')."""
    log_date = int(os.path.basename(log_dir).split("_")[0])
    return log_date < reference_date
def clean_log_dir(log_dir, latest_date):
logs_list = sorted(
[
x
for x in os.listdir(log_dir)
if os.path.isdir(os.path.join(log_dir, x))
and _log_dates_earlier_than(x, latest_date)
]
)
print("Following logs will be removed:")
for x in logs_list:
print(os.path.join(log_dir, x))
if not _user_confirms("Continue [y/n]?"):
return
for x in logs_list:
x = os.path.join(log_dir, x)
print(f"Remove {x}")
shutil.rmtree(x)
if __name__ == "__main__":
args = _parse_args()
clean_log_dir(args.dir, args.date)
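# Example invocation (the script name is a placeholder):
#   python clean_logs.py --dir /path/to/logs --date 20200601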
|
# Generated by Django 3.2.9 on 2021-12-03 09:27
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('hotels_app', '0008_auto_20211203_0906'),
]
operations = [
migrations.RemoveField(
model_name='review',
name='reservation',
),
]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 2 16:19:01 2021
@author: ccamargo
"""
import numpy as np
# import scipy.optimize as opti
import xarray as xr
import matplotlib.pyplot as plt
import sys
sys.path.append("/Users/ccamargo/Documents/py_scripts/")
import utils_SL as sl
import utils_SLE_v2 as sle
# from netCDF4 import Dataset
import pandas as pd
import os
import datetime as dt
import cmocean as cm
# from mpl_toolkits.basemap import Basemap
# from matplotlib.gridspec import GridSpec
from cartopy import crs as ccrs#, feature as cfeature
#%% Open dataset made in barystatic_standardize2.py
path='/Volumes/LaCie_NIOZ/data/barystatic/use/'
ds=xr.open_dataset(path+'comb/ALL_datasets_1993-2020_180x360_v2.nc')
# We need to correct PCR-GLOBWB
# the unit is METERS of Equivalent Water Thickness [x1000]
# and not dcm of Equivalent Water Thickness [x100]
file='/Volumes/LaCie_NIOZ/data/barystatic/use/TWS_PCR-GLOBWB_vW5E5_noGLA_180x360.nc'
name='LWS_GWB'
SL_mm=np.array(ds.SL_mm)
SL_mm_y=np.array(ds.SL_mm_y)
SL_EWH=np.array(ds.SL_EWH)
SL_EWH_y=np.array(ds.SL_EWH_y)
# SL_mm (name, time, lat, lon) float64 ...
# SL_EWH (name, time, lat, lon) float64 ...
# SL_mm_y (name, year, lat, lon) float64 ...
# SL_EWH_y (name, year, lat, lon) float64 ...
#%% time vectors
# time range: months 1993-2020.08
time,time2=sl.make_date('01-01-1993',(27*12)+8)
tdec,tdec0=sl.get_dec_time(time2,ns=1e-6)
t=[dt.datetime.utcfromtimestamp(t.astype(int) * 1e-6) for t in time2]
ty=np.array([dt.datetime.utcfromtimestamp(t.astype(int) * 1e-6).timetuple().tm_year
for t in time2])
tm=np.array([dt.datetime.utcfromtimestamp(t.astype(int) * 1e-6).timetuple().tm_mon
for t in time2])
# yearly
timey=np.arange(1993,2021)
idx=np.zeros((len(timey)))
for i,year in enumerate(timey):
idx[i]=np.where(ty==year)[0][0]
#%% TWS GWB
ifile = 24
iname=ifile
print(ds.name[ifile])
if name==ds.name[ifile]:
da=xr.open_dataset(file)
da_y=da.groupby('time.year').mean(dim='time')
name='tws_no_gla'
#% % find time index
time_local=np.array(da.time)
tdec_local,tdec0=sl.get_dec_time(time_local,ns=1e-9)
lon=np.array(da.lon)
lat=np.array(da.lat)
timey_local=np.array(da_y.year)
# find index for yearly:
idx_local=np.zeros((len(timey)))
idx_local.fill('nan')
# idx_local=np.full_like(timey,np.nan)
for i,year in enumerate(timey):
if year<=np.max(timey_local) and \
year>=np.min(timey_local):
idx_local[i]=np.where(timey_local==year)[0][0]
idx_0y=np.array(np.where(np.isfinite(idx_local)))[0,0]
idx_00y=int(idx_local[np.isfinite(idx_local)][0])
idx_1y=np.array(np.where(np.isfinite(idx_local)))[0,-1]+1
# find index for monthly:
ty_local=np.array([dt.datetime.utcfromtimestamp(t.astype(int) * 1e-9).timetuple().tm_year
for t in time_local])
tm_local=np.array([dt.datetime.utcfromtimestamp(t.astype(int) * 1e-9).timetuple().tm_mon
for t in time_local])
idx_local_m=np.zeros((len(ty)))
idx_local_m.fill('nan')
t2_local=[dt.datetime.utcfromtimestamp(t.astype(int) * 1e-9 ) for t in time_local]
for i,iy in enumerate(t):
year=iy.year
if np.any(ty_local==year):
month=iy.month
if np.any(tm_local==month):
# print(year)
# print(month)
jdx=np.array(np.where(year==ty_local))[0]
jdx2=np.array(np.where(month==tm_local))[0]
for jd in jdx2:
if np.any(jdx==jd):
j=jdx[jdx==jd]
idx_local_m[i]=j
#% % Water thickness original (m of water thickness)
data=np.array(da[name])*1000 # m->mm of water thickness
datay=np.array(da_y[name])*1000
# Put data in main array
SL_EWH_y[iname,idx_0y:idx_1y,:,:]=datay[idx_00y:len(timey_local),:,:]
for i,j in enumerate(idx_local_m):
if np.isfinite(j):
j=int(j)
SL_EWH[iname,i,:,:]=data[j,:,:]
# Transform data:
for i in range(len(data)):
data[i,:,:]=sle.EWH_to_height(data[i,:,:]).reshape(180,360)
for i in range(len(datay)):
datay[i,:,:]=sle.EWH_to_height(datay[i,:,:]).reshape(180,360)
# Put transformed data in main array:
SL_mm_y[iname,idx_0y:idx_1y,:,:]=datay[idx_00y:len(timey_local),:,:]
for i,j in enumerate(idx_local_m):
if np.isfinite(j):
j=int(j)
SL_mm[iname,i,:,:]=data[j,:,:]
#%% make and save data array
ds_mask=xr.open_dataset('/Volumes/LaCie_NIOZ/data/barystatic/masks/barystatic_mask2.nc')
ds_mask
#%%
da=xr.Dataset(data_vars={'SL_mm':(('name','time','lat','lon'),SL_mm),
'SL_EWH':(('name','time','lat','lon'),SL_EWH),
'SL_mm_y':(('name','year','lat','lon'),SL_mm_y),
'SL_EWH_y':(('name','year','lat','lon'),SL_EWH_y),
'mask':(('lat','lon'),ds_mask['mask6']),
'mask2':(('lat','lon'),ds_mask['mask12']),
},
coords={'lat':ds.lat,
'lon':ds.lon,
'time':time2,
'tdec':tdec,
'year':timey,
'name':ds.name})
da['SL_mm'].attrs['units']='mm of sea level'
da['SL_mm_y'].attrs['units']='mm of sea level'
da['SL_EWH'].attrs['units']='mm of Equivalent Water Thickness'
da['SL_EWH_y'].attrs['units']='mm of Equivalent Water Thickness'
da['SL_mm'].attrs['long_name']='Monthly ocean mass change in mm of sea level height'
da['SL_EWH'].attrs['long_name']='Monthly ocean mass change in mm of equivalent water thickness'
da['SL_mm_y'].attrs['long_name']='Yearly averages of ocean mass change in mm of sea level height'
da['SL_EWH_y'].attrs['long_name']='Yearly averages of ocean mass change in mm of equivalent water thickness'
da.attrs=ds.attrs
da.attrs['script']='barystatic_standardize_update.py'
da.attrs['date_created']=str(dt.datetime.now())
da.to_netcdf(path+'comb/ALL_datasets_1993-2020_180x360_v3_update.nc')
#%% Selection updated
path='/Volumes/LaCie_NIOZ/data/barystatic/use/comb/'
da=xr.open_dataset(path+'ALL_datasets_1993-2020_180x360_v3_update.nc')
print(da.name)
da=da.sel(name=[#'AIS_IMB', 'AIS_R19_basins',
'GLWS_ZMP', 'GLWS_WGP_gl',
'AIS_300_CSR', 'GIS_300_CSR', 'LWS_CSR', 'GLWS_CSR',
# 'TCWS_CSR',
# 'AIS_CSR', 'GIS_CSR',
'AIS_300_JPL', 'GIS_300_JPL', 'LWS_JPL', 'GLWS_JPL',
# 'TCWS_JPL',
# 'AIS_JPL', 'GIS_JPL',
# 'AIS_proj_CSR', 'GIS_proj_CSR', 'AIS_proj_JPL','GIS_proj_JPL',
# 'GIS_IMB', 'GIS_M19',
'LWS_GWB', 'LWS_WGP_gl',
# 'TCWS_WaterGAP'
])
print(da)
da.to_netcdf(path+'12_input_datasets_buf_1993-2020_180x360_update_v3.nc')
#%%
#%% Selection updated
path='/Volumes/LaCie_NIOZ/data/barystatic/use/comb/'
da=xr.open_dataset(path+'ALL_datasets_1993-2020_180x360_v3_update.nc')
print(da.name)
da=da.sel(name=['AIS_IMB', 'AIS_R19_basins',
'GLWS_ZMP', 'GLWS_WGP_gl',
'AIS_300_CSR', 'GIS_300_CSR', 'LWS_CSR', 'GLWS_CSR',
# 'TCWS_CSR',
# 'AIS_CSR', 'GIS_CSR',
'AIS_300_JPL', 'GIS_300_JPL', 'LWS_JPL', 'GLWS_JPL',
# 'TCWS_JPL',
# 'AIS_JPL', 'GIS_JPL',
# 'AIS_proj_CSR', 'GIS_proj_CSR', 'AIS_proj_JPL','GIS_proj_JPL',
'GIS_IMB', 'GIS_M19',
'LWS_GWB', 'LWS_WGP_gl',
# 'TCWS_WaterGAP'
])
print(da)
da.to_netcdf(path+'16_input_datasets_buf_1993-2020_180x360_update_v3.nc')
|
# -*- coding:utf-8 -*-
#
# Copyright (C) 2019-2020, Maximilian Köhl <koehl@cs.uni-saarland.de>
from __future__ import annotations
import typing as t
def get_subclasses(cls: type, recursive: bool = True) -> t.AbstractSet[type]:
subclasses: t.Set[type] = set()
if recursive:
queue = [cls]
while queue:
element = queue.pop()
subclasses.update(element.__subclasses__())
queue.extend(element.__subclasses__())
else:
subclasses.update(cls.__subclasses__())
return subclasses
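# A small usage sketch (the classes below are hypothetical):
#
#   class Base: ...
#   class A(Base): ...
#   class B(A): ...
#
#   get_subclasses(Base)                   # {A, B}
#   get_subclasses(Base, recursive=False)  # {A}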
|
import argparse
import os
import glob
from KerasRFCN.Model.Model import RFCN_Model
from keras.preprocessing import image
from WiderFace import RFCNNConfig
def loadModel(config, modelPath):
model = RFCN_Model(mode="inference", config=config,
model_dir=os.path.join(ROOT_DIR, "logs"))
if not modelPath:
modelPath = model.find_last()[1]
print("Loading weights from: {}".format(modelPath))
if modelPath and os.path.isfile(modelPath):
# Load trained weights
model.load_weights(modelPath, by_name=True)
else:
raise AssertionError("Model weight file does not exists")
return model
def performPrediction(model, imageDir, saveDir, saveImage=False):
"""
Prediction format:
1. Create a folder same as the folder of image
2. Create a text file with same name as image file (replace extension with txt)
3. Content of file
0_Parade_marchingband_1_20 -> Name of image file minus the extension
2 -> Number of face
541 354 36 46 1.000 -> [x, y, width, height, confidence] of a face
100 242 20 35 0.98 -> [x, y, width, height, confidence] of a face
...
"""
if not os.path.isdir(imageDir):
raise AssertionError("Image directory does not exists")
for directory in os.listdir(imageDir):
curSavDir = os.path.join(saveDir, directory)
if not os.path.isdir(curSavDir):
os.makedirs(curSavDir)
curImgDir = os.path.join(imageDir, directory)
imagePathList = glob.glob(os.path.join(curImgDir, "*.jpg"))
for idx, imagePath in enumerate(imagePathList):
print("-" * 80)
print("Processing image [{}/{}]: {}".format(idx + 1, len(imagePathList), imagePath))
filename, ext = os.path.splitext(os.path.basename(imagePath))
txtFilePath = os.path.join(curSavDir, filename + ".txt")
with open(txtFilePath, "w") as fp:
fp.write(filename)
fp.write("\n")
img = image.img_to_array(image.load_img(imagePath))
prediction = model.detect([img], verbose=0)[0]
faceList = prediction["rois"]
scoreList = prediction["scores"]
noOfFaces = len(faceList)
print("Found {} faces".format(noOfFaces))
fp.write("{}\n".format(noOfFaces))
for face, score in zip(faceList, scoreList):
y1, x1, y2, x2 = face
width = x2 - x1
height = y2 - y1
fp.write("{} {} {} {} {}\n".format(x1, y1, width, height, score))
if __name__ == "__main__":
ROOT_DIR = os.path.dirname(__file__)
parser = argparse.ArgumentParser()
parser.add_argument("--saveDir", default="prediction",
help="Directory where predictions should be stored")
parser.add_argument("--imageDir", required=True,
help="Directory containing the images to be evaluated")
parser.add_argument("--modelPath", default=None,
help="Path to model weights file (h5)")
args = parser.parse_args()
config = RFCNNConfig()
model = loadModel(config, args.modelPath)
if not os.path.isdir(args.saveDir):
os.makedirs(args.saveDir)
performPrediction(model, args.imageDir, args.saveDir)
|
#
# This file is part of CasADi.
#
# CasADi -- A symbolic framework for dynamic optimization.
# Copyright (C) 2010-2014 Joel Andersson, Joris Gillis, Moritz Diehl,
# K.U. Leuven. All rights reserved.
# Copyright (C) 2011-2014 Greg Horn
#
# CasADi is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# CasADi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with CasADi; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
from casadi import *
from casadi.tools import *
#! An SX graph
a = SX.sym("a")
b = SX.sym("b")
c = sin(a**5 + b)
c = c - b/ sqrt(fabs(c))
print(c)
dotdraw(c)
#! An SX
dotdraw(SX.sym("x",Sparsity.lower(3)))
dotdraw(SX.sym("x",Sparsity.lower(3))**2)
#! An MX graph
x = MX.sym("x",Sparsity.lower(2))
y = MX.sym("y",Sparsity.lower(2))
z = MX.sym("z",4,2)
zz = x+y+6
dotdraw(zz)
f = Function("magic", [z,y],[z+x[0,0],x-y])
z,z2 = f(vertcat(x,y),zz.T)
z = z[:2,:] +x + cos(x) - sin(x) / tan(z2)
dotdraw(z)
|
from molecules.ml.unsupervised.point_autoencoder.aae import AAE3d
from molecules.ml.unsupervised.point_autoencoder.hyperparams import AAE3dHyperparams
|
from parallelm.mlops import mlops
from parallelm.mlops.constants import PyHealth
from parallelm.mlops.mlops_exception import MLOpsException
from parallelm.mlops.stats.mlops_stat import MLOpsStat
from parallelm.mlops.stats.mlops_stat_getter import MLOpsStatGetter
from parallelm.mlops.stats.stats_utils import check_list_of_str, check_vec_of_numbers
from parallelm.mlops.stats_category import StatGraphType, StatsMode
from parallelm.protobuf import InfoType_pb2
import copy
class _HistogramOverlapScoreStat(MLOpsStatGetter):
"""
    A container for histogram overlap score data that lets users report a generic score by
    providing the feature names and their values.
    The histogram score is calculated and replaced for every pipeline run.
    :Example:
    >>> hos = _HistogramOverlapScoreStat().name("hos").features(["g1", "g2"]).data([1.1, 2.4])
    >>> mlops.set_stat(hos)
    This example creates and populates a histogram overlap score structure, then reports it to MLOps.
"""
def __init__(self):
self._name = "HistogramOverlapScore"
self._feature_names = []
self._data = []
def name(self, name):
"""
        Set the name of the HistogramOverlapScore. This will be used in the MLOps UI.
:param name: HistogramOverlapScore name
:return: self (the HistogramOverlapScore object)
"""
self._name = name
return self
def features(self, features_names):
"""
Set the feature names
:param features_names: List of names
:return: self
"""
check_list_of_str(features_names, error_prefix="feature names")
self._feature_names = copy.deepcopy(features_names)
return self
def data(self, vec):
"""
Set the data
:param vec: list of data values
:return: self
"""
check_vec_of_numbers(vec, "values for heat graph")
self._data = copy.deepcopy(vec)
return self
def _to_dict(self):
dd = {}
for label, value in zip(self._feature_names, self._data):
dd[PyHealth.HISTOGRAM_OVERLAP_SCORE_PREPENDER + str(label)] = str(value)
return dd
def get_mlops_stat(self, model_id):
if len(self._data) == 0:
raise MLOpsException("There is no data in histogram score")
if len(self._feature_names) == 0:
raise MLOpsException("No columns names were provided")
if len(self._data) != len(self._feature_names):
raise MLOpsException("Number of data point does not match number of columns")
data = self._to_dict()
mlops_stat = MLOpsStat(name=self._name,
stat_table=self._name,
stat_type=InfoType_pb2.HealthCompare,
graph_type=StatGraphType.MULTILINEGRAPH,
mode=StatsMode.TimeSeries,
data=data,
model_id=model_id)
return mlops_stat
|
from models.fast_scnn import get_fast_scnn
import torch
import numpy as np
from torch import nn
import torch.utils.model_zoo as model_zoo
import torch.onnx
model = get_fast_scnn(dataset= 'simulation', aux= False)
model.eval()
x = torch.randn(1, 3, 160, 320, requires_grad=True)
torch_out = model(x)
print(torch_out[0].size())
torch.onnx.export(model, x, 'out_model.onnx', export_params=True, opset_version=12, do_constant_folding=True,
input_names = ['input'], output_names = ['output'])
|
"""Unit test package for easy_selenium."""
|
"""
A handy script for extracting all events within a particular date range
from an ICS file into another ICS file.
@author Derek Ruths (druths@networkdynamics.org)
"""
import argparse
import re
import sys, os
from datetime import datetime
from datetime import timedelta
parser = argparse.ArgumentParser()
parser.add_argument('input_file',help='the input ICS file')
parser.add_argument('schedule_from', help='the start date from which events are extracted (format: YYYYMMDD)')
parser.add_argument('schedule_to', help='the end date up to which events are extracted (format: YYYYMMDD)')
parser.add_argument('output_file',help='the output ICS file')
args = parser.parse_args()
print('Extracting %s ~ %s events from %s into %s' % (args.schedule_from, args.schedule_to, args.input_file, args.output_file))
schedule_from = datetime.strptime(args.schedule_from, '%Y%m%d')
schedule_to = datetime.strptime(args.schedule_to, '%Y%m%d')
range_in_date = [schedule_from + timedelta(days=x) for x in range(0, (schedule_to-schedule_from).days)]
in_fname = args.input_file
out_fname = args.output_file
if os.path.exists(out_fname):
    print('ERROR: output file already exists! As a safety check, this script will not overwrite an ICS file')
exit()
infh = open(in_fname,'r')
outfh = open(out_fname,'w')
# parsing constants
BEGIN_CALENDAR = 'BEGIN:VCALENDAR'
END_CALENDAR = 'END:VCALENDAR'
BEGIN_EVENT = 'BEGIN:VEVENT'
END_EVENT = 'END:VEVENT'
CREATED2017_OPENER = 'CREATED:2017'
in_preamble = True
in_event = False
event_content = None
event_in_2017 = False
event_count = 0
out_event_count = 0
for line in infh:
if in_preamble and line.startswith(BEGIN_EVENT):
in_preamble = False
if in_preamble:
outfh.write(line)
else:
if line.startswith(BEGIN_EVENT):
event_content = []
event_count += 1
event_in_2017 = False
in_event = True
if in_event:
if 'DTSTART' in line:
event_string = re.split(r'DTSTART.*:', line)[1].split('T')[0]
event_date = datetime.strptime(event_string.strip(), '%Y%m%d')
if event_date in range_in_date:
event_in_2017 = True
event_content.append(line)
if line.startswith(END_EVENT):
in_event = False
if event_in_2017:
out_event_count += 1
outfh.write(''.join(event_content))
outfh.write(END_CALENDAR)
outfh.close()
# done!
print('wrote %d of %d events' % (out_event_count, event_count))
|
'''
ANKIT KHANDELWAL
15863
Exercise 7
'''
from math import sin, sqrt, pi
import matplotlib.pyplot as plt
import numpy as np
def q(u):
alpha = pi / (20 * 10 ** -6)
return sin(alpha * u) ** 2
def y(u):
return sqrt(q(u))
w = 200 * 10 ** -6
W = 10 * w
wavelength = 500 * 10 ** -9
f = 1
n = 40
N = 10 * n
xv = np.arange(n)
u = xv * w / n - w / 2
yv = np.zeros(40)
for i in xv:
yv[i] = y(u[i])
extra = np.zeros(N - n)
yv = np.concatenate((yv, extra))
c = np.fft.fft(yv)  # Fourier transform of the (zero-padded) transmission profile
cmod = abs(c) ** 2
Intensity = W ** 2 / N ** 2 * cmod  # diffraction intensity on the screen
Intensity = Intensity[:201]  # keep the non-negative-frequency half of the pattern
Intensity = np.concatenate((np.zeros(200), Intensity))
for i in range(200):
    Intensity[i] = Intensity[-i - 1]  # mirror it to fill in the negative-x half
xvalue = np.linspace(-5, 5, N + 1)
plt.plot(xvalue, Intensity)
plt.title('Diffraction Pattern')
plt.xlabel('Distance (in cm)')
plt.ylabel('Intensity')
plt.show()
|