from dagster import repository
from src.pipelines.ingest import ingest_pipeline
from src.pipelines.populate_dm import populate_dm_pipeline
from src.pipelines.asset_experimentation import asset_experimentation
@repository
def software_releases_repository():
"""
    The repository definition for this new Dagster repository.
For hints on building your Dagster repository, see our documentation overview on Repositories:
https://docs.dagster.io/overview/repositories-workspaces/repositories
"""
pipelines = [ingest_pipeline, populate_dm_pipeline, asset_experimentation]
schedules = [] # [my_hourly_schedule]
sensors = [] # [my_sensor]
return pipelines + schedules + sensors
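
# The schedules and sensors lists above are intentionally empty. A hedged sketch of
# what the commented-out my_hourly_schedule could look like, assuming the legacy
# pipeline-style @schedule API that matches the pipelines imported above:
#
#   from dagster import schedule
#
#   @schedule(cron_schedule="0 * * * *", pipeline_name="ingest_pipeline")
#   def my_hourly_schedule(context):
#       return {}  # run config for each scheduled run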
|
import numpy as np
import torch
def snr(S, N):
'''
    Returns the signal-to-noise ratio (in dB) of signal S with respect to noise N.
@param S: the signal
@param N: the noise
'''
temp = 20 * np.log10(1 + np.linalg.norm(np.squeeze(S), axis=(1, 2)) /
np.linalg.norm(np.squeeze(N), axis=(1, 2)))
# filter inf values
return np.mean(temp[np.invert(np.isinf(temp))])
def evaluate_acc(model, images, labels):
"""Evaluate model's prediction accuracy on given batch of data."""
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total = labels.size(0)
correct = (predicted == labels).sum().item()
    acc = correct / total
return acc
def arr21hot(labels, num_classes):
    """Convert an array of integer class labels into a one-hot encoded matrix."""
one_hot = np.zeros((len(labels), num_classes))
one_hot[np.arange(len(labels)), labels] = 1
return one_hot
def mi(T, Y, num_classes=10):
"""
Computes the mutual information I(T; Y) between predicted T and true labels Y
as I(T;Y) = H(Y) - H(Y|T) = H_Y - H_cond_YgT
@param T: vector with dimensionality (num_instances,)
@param Y: vector with dimensionality (num_instances,)
@param num_classes: number of classes, default=10
"""
Y = Y.detach().cpu().numpy()
T = T.detach().cpu().numpy()
epsilon = 1e-4 # to prevent divide by zero
num_instances = Y.shape[0]
py = np.zeros(num_classes) # p(y)
pt = np.zeros(num_classes) # p(t)
pygt = np.zeros(num_classes) # p(y|t)
H_YgT = np.zeros(num_classes) # H(Y|T)
# Compute H(Y)
for i in range(num_classes):
py[i] = np.sum(Y == i) / float(num_instances)
pt[i] = np.sum(T == i) / float(num_instances)
H_Y = -np.dot( py, np.log2(py + epsilon) ) # H(Y)
# Compute H(Y | T)
for t in range(num_classes):
t_idx = T == t
for y in range(num_classes):
y_idx = Y == y
pygt[y] = np.sum(y_idx[t_idx])
# convert counts to probabilities
c = np.sum(pygt)
if c > 0:
pygt /= c
H_YgT[t] = -np.dot( pygt, np.log2(pygt + epsilon) )
H_cond_YgT = np.dot( pt, H_YgT )
return H_Y - H_cond_YgT
def pygt(labels_o, labels_p, num_classes=10):
"""
Computes the conditional probability p(y|t)
for true label or targeted label y given predicted label t.
@param labels_o: true (original) labels
@param labels_p: predicted labels
@param num_classes: number of classes, default=10
"""
labels_o = labels_o.detach().cpu().numpy()
labels_p = labels_p.detach().cpu().numpy()
epsilon = 1e-4 # to prevent divide by zero
num_instances = labels_o.shape[0]
pygt = np.zeros(( num_classes,num_classes )) # p(y|t)
# Compute p(y|t)
for t in range(num_classes):
t_idx = labels_p == t
for y in range(num_classes):
y_idx = labels_o == y
pygt[y,t] = np.sum(y_idx[t_idx])
# convert counts to probabilities
c = np.sum(pygt[:,t])
if c > 0:
pygt[:,t] /= c
return pygt
def build_targeted_dataset(X_test, Y_test, indices, num_classes, device):
"""
    Build a dataset for targeted attacks: each source image is repeated num_classes-1 times,
    and each copy is assigned a target label that differs from the true label.
:param X_test: clean source images
:param Y_test: true labels for X_test
:param indices: indices of source samples to use
    :param num_classes: number of classes in classification problem
    :param device: torch device on which to place the returned tensors
    """
num_samples = len(indices)
num_target_classes = num_classes - 1
X = X_test[indices]
Y = Y_test[indices]
img_shape = np.array(X.shape[1:])
adv_inputs = np.repeat(X, num_target_classes, axis=0)
true_labels = np.repeat(Y, num_target_classes, axis=0)
adv_inputs = torch.FloatTensor(adv_inputs).to(device)
true_labels = torch.LongTensor(true_labels).to(device)
a = np.repeat([np.arange(num_classes)], len(Y), axis=0)
target_labels = torch.LongTensor(a[a != np.array(Y)[:, None]]).to(device)
return adv_inputs, true_labels, target_labels
def build_targeted_dataset_1hot(X_test, Y_test, indices, num_classes):
"""
    Build a dataset for targeted attacks: each source image is repeated num_classes-1 times,
    and each copy is assigned a target label that differs from the true label.
:param X_test: clean source images
:param Y_test: true labels for X_test, in 1-hot format
:param indices: indices of source samples to use
:param num_classes: number of classes in classification problem
"""
num_samples = len(indices)
num_target_classes = num_classes - 1
X = X_test[indices]
Y = Y_test[indices]
img_shape = np.array(X.shape[1:])
adv_inputs = np.repeat(X, num_target_classes, axis=0)
#dims = tuple(np.hstack((num_samples * num_target_classes, img_shape)))
#adv_inputs = adv_inputs.reshape((dims))
true_labels_1hot = np.repeat(Y, num_target_classes, axis=0)
#dims = tuple(np.hstack((num_samples * num_target_classes, num_classes)))
#true_labels = true_labels.reshape((dims))
    diag = np.eye(num_target_classes)
    target_labels_1hot = np.zeros((1, num_classes))
    for pos in np.argmax(Y, axis=1):
        target_labels_1hot = np.vstack((target_labels_1hot, np.insert(diag, pos, 0, axis=1)))
    target_labels_1hot = target_labels_1hot[1:]
return adv_inputs, true_labels_1hot, target_labels_1hot
def evaluate(sess, training, acc, loss, x_, y_, x_np, y_np, feed=None):
feed_dict = {x_: x_np, y_: y_np, training: False}
if feed is not None:
feed_dict.update(feed)
return sess.run([acc, loss], feed_dict)
# Init result var
def evaluate_model(sess, training, acc, loss, x_, data_x, y_, data_y, batch_size):
nb_examples = data_x.shape[0]
nb_batches = int(np.ceil(float(nb_examples) / batch_size))
#print('nb_batches=%d' % nb_batches)
assert nb_batches * batch_size >= nb_examples
loss_np = 0.
accuracy_np = 0.
for test_batch in range(nb_batches):
start = test_batch * batch_size
end = min(nb_examples, start + batch_size)
cur_batch_size = end - start
batch_xs = data_x[start:end]
batch_ys = data_y[start:end]
cur_acc, cur_loss = evaluate(sess, training, acc, loss,
x_, y_, batch_xs, batch_ys)
accuracy_np += (cur_batch_size * cur_acc)
loss_np += (cur_batch_size * cur_loss)
accuracy_np /= nb_examples
loss_np /= nb_examples
return accuracy_np, loss_np
def zca_whitening_matrix(X):
"""
Function to compute ZCA whitening matrix (aka Mahalanobis whitening).
INPUT: X: [M x N] matrix.
Rows: Variables
Columns: Observations
OUTPUT: ZCAMatrix: [M x M] matrix
"""
# Covariance matrix [column-wise variables]: Sigma = (X-mu)' * (X-mu) / N
sigma = np.cov(X, rowvar=True) # [M x M]
# Singular Value Decomposition. X = U * np.diag(S) * V
U,S,V = np.linalg.svd(sigma)
# U: [M x M] eigenvectors of sigma.
# S: [M x 1] eigenvalues of sigma.
# V: [M x M] transpose of U
# Whitening constant: prevents division by zero
epsilon = 1e-5
# ZCA Whitening matrix: U * Lambda * U'
ZCAMatrix = np.dot(U, np.dot(np.diag(1.0 / np.sqrt(S + epsilon)), U.T)) # [M x M]
return ZCAMatrix
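

if __name__ == "__main__":
    # Minimal self-check sketch (assumed usage; not part of the original module).
    rng = np.random.RandomState(0)
    signal = rng.normal(size=(4, 8, 8))
    noise = 0.1 * rng.normal(size=(4, 8, 8))
    print("SNR (dB):", snr(signal, noise))

    # With perfect predictions over 4 balanced classes, I(T;Y) should be close to
    # H(Y) = 2 bits.
    labels = torch.tensor([0, 1, 2, 3] * 8)
    print("I(T;Y):", mi(labels, labels, num_classes=4))

    print("one-hot:\n", arr21hot([0, 2, 1], num_classes=3))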
|
from django.shortcuts import render, get_object_or_404, redirect
from django.contrib.auth.decorators import login_required
from django.urls import reverse
from django.db.models import Sum, Q
from django.db.models.functions import Coalesce
from django.utils import timezone
from djmoney.money import Money
from decimal import Decimal
from ..models import Transaction, TransactionJournal, Account, Category, Budget, get_default_currency, get_default_value
from ..utilities import set_message_and_redirect, calculate_period, set_message
from ..charts import TransactionChart
from ..forms import TransactionForm, TransactionFilterForm
import datetime
import re
ACCOUNT_REGEX = re.compile(r"(.*)\s-\s(.*)")
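# Account filter values look like "<account type label> - <account name>"; for example,
# ACCOUNT_REGEX.match("Revenue account - Salary") yields the groups
# ("Revenue account", "Salary"). (Illustrative example; the exact label text depends
# on Account.AccountType.)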
@login_required
def transactions(request):
transaction_journals = TransactionJournal.objects.all()
period = get_default_value(key="default_period", default_value="month", user=request.user)
period = calculate_period(periodicity=period, start_date=timezone.localdate())
filter_form = TransactionFilterForm(request.GET or None, initial={"start_date": period["start_date"], "end_date": period["end_date"]})
if filter_form.is_valid():
period["start_date"] = filter_form.cleaned_data["start_date"]
period["end_date"] = filter_form.cleaned_data["end_date"]
if filter_form.cleaned_data["description"] != "":
transaction_journals = transaction_journals.filter(
Q(short_description__icontains=filter_form.cleaned_data["description"])
| Q(description__icontains=filter_form.cleaned_data["description"])
)
if filter_form.cleaned_data["account"] != "":
account_type = Account.AccountType.REVENUE_ACCOUNT
account_name = filter_form.cleaned_data["account"]
            if ACCOUNT_REGEX.match(account_name) is not None:
                for type in Account.AccountType:
                    if type.label == ACCOUNT_REGEX.match(account_name)[1]:
                        account_type = type
                account_name = ACCOUNT_REGEX.match(account_name)[2]
account = Account.objects.get(name=account_name, type=account_type)
transaction_journals = transaction_journals.filter(transactions__account=account)
if filter_form.cleaned_data["category"] != "":
transaction_journals = transaction_journals.filter(category__name__icontains=filter_form.cleaned_data["category"])
if filter_form.cleaned_data["budget"] != "":
transaction_journals = transaction_journals.filter(budget__budget__name__icontains=filter_form.cleaned_data["budget"])
transaction_journals = (
transaction_journals.filter(date__range=(period["start_date"], period["end_date"]))
.prefetch_related("transactions")
.select_related("category")
.order_by("-date")
)
transactions = Transaction.objects.filter(journal__in=transaction_journals).annotate(total=Coalesce(Sum("amount"), Decimal(0)))
charts = {
"income_chart": TransactionChart(
data=transactions.exclude(journal__type=TransactionJournal.TransactionType.TRANSFER), user=request.user, income=True
).generate_json(),
"income_chart_count": len([item for item in transactions if not item.amount.amount < 0]),
"expense_budget_chart": TransactionChart(data=transactions, expenses_budget=True, user=request.user).generate_json(),
"expense_budget_chart_count": len([item for item in transactions if item.amount.amount < 0 and item.journal.budget is not None]),
"expense_category_chart": TransactionChart(data=transactions, expenses_category=True, user=request.user).generate_json(),
"expense_category_chart_count": len([item for item in transactions if item.amount.amount < 0 and item.journal.category is not None]),
}
return render(
request,
"blackbook/transactions/list.html",
{"filter_form": filter_form, "charts": charts, "period": period, "transaction_journals": transaction_journals},
)
@login_required
def add_edit(request, transaction_uuid=None):
initial_data = {}
transaction_journal = TransactionJournal()
initial_data["amount"] = Money(0, get_default_currency(user=request.user))
if transaction_uuid is not None:
transaction_journal = (
TransactionJournal.objects.prefetch_related("transactions", "transactions__account").select_related("category").get(uuid=transaction_uuid)
)
initial_data = {
"amount": abs(transaction_journal.amount),
"short_description": transaction_journal.short_description,
"description": transaction_journal.description,
"type": transaction_journal.type,
"date": transaction_journal.date,
"category": transaction_journal.category.name if transaction_journal.category is not None else None,
"budget": transaction_journal.budget.budget.name if transaction_journal.budget is not None else None,
"source_account": None,
"destination_account": None,
}
if len(transaction_journal.source_accounts) > 0:
initial_data["source_account"] = "{type} - {name}".format(
type=transaction_journal.source_accounts[0]["type"], name=transaction_journal.source_accounts[0]["account"]
)
if len(transaction_journal.destination_accounts) > 0:
initial_data["destination_account"] = "{type} - {name}".format(
type=transaction_journal.destination_accounts[0]["type"], name=transaction_journal.destination_accounts[0]["account"]
)
transaction_form = TransactionForm(request.user, request.POST or None, initial=initial_data)
if request.POST and transaction_form.is_valid():
return_url = reverse("blackbook:dashboard")
if transaction_form.cleaned_data["add_new"]:
return_url = reverse("blackbook:transactions_add")
transaction = {
"short_description": transaction_form.cleaned_data["short_description"],
"description": transaction_form.cleaned_data["description"],
"date": transaction_form.cleaned_data["date"],
"type": transaction_form.cleaned_data["type"],
"category": None,
"budget": None,
"transactions": [],
}
for account_type_key in ["source_account", "destination_account"]:
if transaction_form.cleaned_data[account_type_key] != "":
account_name = transaction_form.cleaned_data[account_type_key]
account_type = Account.AccountType.REVENUE_ACCOUNT
if account_type_key == "destination_account":
account_type = Account.AccountType.EXPENSE_ACCOUNT
if ACCOUNT_REGEX.match(transaction_form.cleaned_data[account_type_key]) is not None:
account_name = ACCOUNT_REGEX.match(transaction_form.cleaned_data[account_type_key])[2]
for type in Account.AccountType:
if type.label == ACCOUNT_REGEX.match(transaction_form.cleaned_data[account_type_key])[1]:
account_type = type
account, account_created = Account.objects.get_or_create(
name=account_name, type=account_type, defaults={"type": account_type, "net_worth": False, "dashboard": False}
)
if account_created:
                    set_message(request, 's|Account "{account.name}" was saved successfully.'.format(account=account))
amount = transaction_form.cleaned_data["amount"]
if account_type_key == "source_account":
amount *= -1
transaction["transactions"].append({"account": account, "amount": amount})
if transaction_form.cleaned_data["category"] != "":
category, created = Category.objects.get_or_create(name=transaction_form.cleaned_data["category"])
transaction["category"] = category
if created:
                set_message(request, 's|Category "{category.name}" was saved successfully.'.format(category=category))
if transaction_form.cleaned_data["budget"] != "":
budget, created = Budget.objects.get_or_create(name=transaction_form.cleaned_data["budget"])
transaction["budget"] = budget.current_period
if created:
                set_message(request, 's|Budget "{budget.name}" was saved successfully.'.format(budget=budget))
if transaction_uuid is None:
transaction_journal = TransactionJournal.create(transactions=transaction)
else:
transaction_journal.update(transactions=transaction)
if transaction_form.cleaned_data["display"]:
return_url = reverse("blackbook:transactions_edit", kwargs={"transaction_uuid": transaction_journal.uuid})
return set_message_and_redirect(
request,
's|Transaction "{short_description}" was saved succesfully.'.format(short_description=transaction_form.cleaned_data["short_description"]),
return_url,
)
return render(
request,
"blackbook/transactions/form.html",
{"transaction_form": transaction_form, "transaction_journal": transaction_journal, "amount": initial_data["amount"]},
)
@login_required
def delete(request):
if request.method == "POST":
journal_entry = TransactionJournal.objects.get(uuid=request.POST.get("transaction_uuid"))
journal_entry.delete()
return set_message_and_redirect(
request,
's|Transaction "{journal_entry.short_description}" was succesfully deleted.'.format(journal_entry=journal_entry),
reverse("blackbook:dashboard"),
)
else:
return set_message_and_redirect(request, "w|You are not allowed to access this page like this.", reverse("blackbook:dashboard"))
|
# -*- coding: utf-8 -*-
"""
HydroView-Flask
~~~~~~
Python Flask version of HydroView with Apache Cassandra as backend.
"""
import logging
import os
import sys
from flask import Flask
from cassandra.cluster import Cluster
from cassandra.query import dict_factory
from cassandra_udts import Averages
from cassandra_udts import Description
from cassandra_udts import Name
from cassandra_udts import Position
from cassandra_udts import Thumbnails
cluster = None
session = None
app = Flask(__name__)
app.config.from_object(os.environ['HYDROVIEW_CONFIG'])
log = logging.getLogger()
log.setLevel(app.config['CASSANDRA_LOGLEVEL'])
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s] %(name)s: %(message)s"))
log.addHandler(handler)
log.info("Running HydroView-Flask using {config} settings".format(config=os.environ['HYDROVIEW_CONFIG']))
def cassandra_connect():
global cluster, session
log.info("Initializing Cassandra cluster")
cluster = Cluster(app.config['HOSTS'], app.config['PORT'])
session = cluster.connect(app.config['KEYSPACE'])
session.row_factory = dict_factory
    session.default_consistency_level = 4  # ConsistencyLevel.QUORUM
log.debug(session.default_consistency_level)
cluster.register_user_type(app.config['KEYSPACE'], 'averages', Averages)
cluster.register_user_type(app.config['KEYSPACE'], 'description', Description)
cluster.register_user_type(app.config['KEYSPACE'], 'name', Name)
cluster.register_user_type(app.config['KEYSPACE'], 'position', Position)
cluster.register_user_type(app.config['KEYSPACE'], 'thumbnails', Thumbnails)
return "Done"
def cassandra_disconnect():
log.info("Disconnecting from Cassandra cluster")
if session is not None:
session.shutdown()
if cluster is not None:
cluster.shutdown()
try:
from uwsgidecorators import postfork
import uwsgi
except ImportError:
# Not in a uWSGI context.
done = cassandra_connect()
from app import views
else:
@postfork
def cassandra_uwsgi_init():
if session is not None:
session.shutdown()
if cluster is not None:
cluster.shutdown()
done = cassandra_connect()
from app import views
uwsgi.atexit = cassandra_disconnect
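
# Usage sketch (hedged): because of dict_factory, every row returned by the session
# behaves like a dict keyed by column name. Assuming the keyspace contains a table
# such as "locations_by_location" (hypothetical name), a view could do something like:
#
#   rows = session.execute("SELECT * FROM locations_by_location LIMIT 5")
#   for row in rows:
#       print(row["name"], row["position"])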
|
class Vector:
def __init__(self, x, y, z):
self.x = float(x)
self.y = float(y)
self.z = float(z)
def _sqrt(self, x):
if x < 0:
raise ValueError('x < 0')
return x**0.5
def __add__(self, vec):
return Vector(self.x+vec.x, self.y+vec.y, self.z+vec.z)
def __iadd__(self, vec):
        return self.__add__(vec)
def __sub__(self, vec):
return Vector(self.x-vec.x, self.y-vec.y, self.z-vec.z)
def __isub__(self, vec):
        return self.__sub__(vec)
def __neg__(self):
return Vector(-self.x, -self.y, -self.z)
def __eq__(self, vec):
return self.x == vec.x and self.y == vec.y and self.z == vec.z
def __mul__(self, fl):
return Vector(self.x * fl, self.y * fl, self.z * fl)
def __imul__(self, fl):
return self.__mul__(fl)
def lenght(self):
return self._sqrt(self.x ** 2 + self.y ** 2 + self.z ** 2)
def tuple(self):
return (self.x, self.y, self.z)
def normalize(self):
lenght = self.lenght()
if lenght == 0:
return self
lenght = 1/lenght
return Vector( self.x * lenght, self.y * lenght, self.z * lenght )
def __repr__(self):
return '({}, {}, {})'.format(self.x, self.y, self.z)
def __str__(self):
return self.__repr__()
def UTIL_ClampVectorToBox(vecInput, clampSize):
    sourceVector = Vector(vecInput.x, vecInput.y, vecInput.z)
    if sourceVector.x > clampSize.x:
        sourceVector.x -= clampSize.x
    elif sourceVector.x < -clampSize.x:
        sourceVector.x += clampSize.x
    else:
        sourceVector.x = 0
    if sourceVector.y > clampSize.y:
        sourceVector.y -= clampSize.y
    elif sourceVector.y < -clampSize.y:
        sourceVector.y += clampSize.y
    else:
        sourceVector.y = 0
    if sourceVector.z > clampSize.z:
        sourceVector.z -= clampSize.z
    elif sourceVector.z < -clampSize.z:
        sourceVector.z += clampSize.z
    else:
        sourceVector.z = 0
    return sourceVector.normalize()
SET_GLOBAL('Vector', Vector)
SET_GLOBAL('UTIL_ClampVectorToBox', UTIL_ClampVectorToBox)
SET_GLOBAL('DotProduct', lambda a, b: a.x * b.x + a.y * b.y + a.z * b.z)
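
# Usage sketch (illustrative comments only; SET_GLOBAL is provided by the embedding
# game-script environment, so this module is not meant to run standalone):
#
#   v = Vector(3, 0, 4)
#   v.lenght()                                                # -> 5.0
#   (v + Vector(1, 1, 1)).tuple()                             # -> (4.0, 1.0, 5.0)
#   UTIL_ClampVectorToBox(Vector(10, 0, 0), Vector(4, 4, 4))  # -> unit vector along +x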
|
# -*- coding: utf-8 -*-
"""
Drift game server management - S3 Functionality
------------------------------------------------
"""
import os
import os.path
from zipfile import ZipFile
import subprocess
import shutil
import time
import socket
import json
from functools import wraps
import sys
import random
import datetime
import boto
import boto.ec2
from boto.s3 import connect_to_region
from boto.s3.connection import OrdinaryCallingFormat
import requests
import config
from logsetup import logger
import dateutil.parser as parser
# This is the S3 bucket name for server builds:
bucket_name = "ncl-teamcity"
def sync_index():
path = config.BUILD_PATH
bucket_name = config.BUILD_BUCKET
file_path = "{path}/index.json".format(path=path)
folder = "config/{path}/".format(path=path)
logger.info("Downloading index.json for %s in %s to %s...", file_path, bucket_name, folder)
try:
conn = connect_to_region(config.S3_REGION_NAME, calling_format=OrdinaryCallingFormat())
except Exception as e:
logger.exception("Fatal error! Could not connect to S3 region '%s': %s", config.S3_REGION_NAME, e)
sys.exit(2)
bucket = conn.get_bucket(bucket_name)
key = bucket.get_key(file_path)
if key is None:
logger.error("Index file '%s' not found on S3" % file_path)
sys.exit(1)
contents = key.get_contents_as_string()
try:
os.makedirs(folder)
except:
pass
local_filename = os.path.join(folder, "index.json")
with open(local_filename, "wb") as f:
f.write(contents)
d = json.loads(contents)
for entry in d["refs"]:
path = entry["build_manifest"]
key = bucket.get_key(path)
if key is None:
logger.error("File '%s' not found on S3" % path)
sys.exit(1)
contents = key.get_contents_as_string()
local_filename = os.path.join(folder, path.split("/")[-1])
with open(local_filename, "wb") as f:
f.write(contents)
def get_manifest(ref):
index_file = get_index()
try:
refitem = [refitem for refitem in index_file["refs"] if refitem["ref"] == ref and refitem["target_platform"] == "WindowsServer"][0]
except IndexError:
logger.warning("Ref '%s' not found in index file", ref)
return None
path = refitem["build_manifest"]
folder = "config/{repo}/".format(repo=config.BUILD_PATH)
local_filename = os.path.join(folder, path.split("/")[-1])
cnt = 0
while 1:
try:
with open(local_filename, "r") as f:
manifest = json.load(f)
break
except Exception as e:
cnt += 1
if cnt < 10:
logger.info("Cannot get manifest from file. Retrying...")
time.sleep(1.0)
else:
logger.error("Unable to get manifest from file '%s'. %s", local_filename, e)
return manifest
def get_index():
folder = "config/{repo}/".format(repo=config.BUILD_PATH)
local_filename = os.path.join(folder, "index.json")
logger.debug("Loading index from '%s'", local_filename)
if not os.path.exists(local_filename):
raise RuntimeError("Repository has not been synced")
return json.load(open(local_filename))
def is_build_installed(build_name, executable_path):
build_path = os.path.join(config.BSD_BATTLESERVER_FOLDER, build_name)
executable_path = os.path.join(build_path, executable_path)
if os.path.exists(executable_path):
logger.debug("Build '%s' is installed", build_name)
return True
else:
logger.info("Build '%s' is not installed", build_name)
if os.path.exists("build_path"):
logger.warning("Folder '%s exists but no .exe found!" % build_path)
return False
def download_build(filename, ignore_if_exists=False):
logger.info("Downloading build %s...", filename)
bucket_name = config.BUILD_BUCKET
conn = connect_to_region(config.S3_REGION_NAME, calling_format=OrdinaryCallingFormat())
bucket = conn.get_bucket(bucket_name)
path = filename#"ue4-builds/{repo}/{filename}".format(repo=repository, filename=filename)
head, tail = os.path.split(path)
dest_path = os.path.abspath(os.path.join(config.BSD_TEMP_FOLDER, tail))
if os.path.exists(dest_path):
if ignore_if_exists:
return dest_path
else:
os.remove(dest_path)
key = bucket.get_key(path)
if not key:
raise RuntimeError("Build '%s' not found on S3" % path)
# Prepare destination folder and file.
if not os.path.exists(config.BSD_TEMP_FOLDER):
os.makedirs(config.BSD_TEMP_FOLDER)
def cb(num_bytes, total):
logger.debug("{:,} bytes of {:,} downloaded".format(num_bytes, total))
with open(dest_path + ".tmp", "wb") as fp:
key.get_file(fp=fp, cb=cb, num_cb=100)
os.rename(dest_path + ".tmp", dest_path)
return dest_path
def cleanup_s3(repository):
"""
Slapped together to clean up old unused builds on S3
"""
MAX_DAYS = 30
bucket_name = config.BUILD_BUCKET
path = "ue4-builds/{path}/WindowsServer/".format(path=config.BUILD_PATH) #! WindowsServer hardcoded
index = get_index()
conn = connect_to_region(config.S3_REGION_NAME, calling_format=OrdinaryCallingFormat())
bucket = conn.get_bucket(bucket_name)
now = datetime.datetime.utcnow()
files = []
for f in bucket.list(prefix=path, delimiter="/"):
dt = parser.parse(f.last_modified).replace(tzinfo=None)
diff = now - dt
filename = f.name.split("/")[-1]
build_number = filename.split(".")[-2]
if diff.days > MAX_DAYS:
for entry in index["refs"]:
if "."+build_number+"." in entry["build_manifest"]:
break
else:
files.append((filename, diff.days, f.name, build_number, dt))
print "Deleting build %s from %s..." % (filename, dt)
f.delete()
files.sort(key=lambda x: x[1], reverse=True)
print "Deleted %s files from S3" % len(files)
|
from django.contrib.auth import views as auth_views
from django.urls import path
from .views import register, profile
urlpatterns = [
path("register/", register, name="register"),
path("profile/", profile, name="profile"),
path(
"login/",
auth_views.LoginView.as_view(template_name="users/users_login.html"),
name="login",
),
path(
"logout/",
auth_views.LogoutView.as_view(template_name="users/users_logout.html"),
name="logout",
),
]
|
# Copyright (C) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, check out LICENSE.md
import math
import os
import pynvml
pynvml.nvmlInit()
def systemGetDriverVersion():
r"""Get Driver Version"""
return pynvml.nvmlSystemGetDriverVersion()
def deviceGetCount():
r"""Get number of devices"""
return pynvml.nvmlDeviceGetCount()
class device(object):
r"""Device used for nvml."""
_nvml_affinity_elements = math.ceil(os.cpu_count() / 64)
def __init__(self, device_idx):
super().__init__()
self.handle = pynvml.nvmlDeviceGetHandleByIndex(device_idx)
def getName(self):
r"""Get obect name"""
return pynvml.nvmlDeviceGetName(self.handle)
def getCpuAffinity(self):
r"""Get CPU affinity"""
affinity_string = ''
for j in pynvml.nvmlDeviceGetCpuAffinity(
self.handle, device._nvml_affinity_elements):
# assume nvml returns list of 64 bit ints
affinity_string = '{:064b}'.format(j) + affinity_string
affinity_list = [int(x) for x in affinity_string]
affinity_list.reverse() # so core 0 is in 0th element of list
return [i for i, e in enumerate(affinity_list) if e != 0]
def set_affinity(gpu_id=None):
r"""Set GPU affinity
Args:
gpu_id (int): Which gpu device.
"""
if gpu_id is None:
gpu_id = int(os.getenv('LOCAL_RANK', 0))
dev = device(gpu_id)
os.sched_setaffinity(0, dev.getCpuAffinity())
# list of ints
# representing the logical cores this process is now affinitied with
return os.sched_getaffinity(0)
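
if __name__ == "__main__":
    # Minimal sketch (assumptions: Linux, since sched_setaffinity is Linux-only; at
    # least one visible NVIDIA GPU; and optionally LOCAL_RANK set by the launcher,
    # otherwise GPU 0 is used).
    print("Driver version:", systemGetDriverVersion())
    cores = set_affinity()
    print("Process pinned to CPU cores:", sorted(cores))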
|
import sys
import numbers
try:
print("read file: \t" + str(sys.argv[1]))
print("write file: \t" + str(sys.argv[2]))
print("reference file: \t" + str(sys.argv[3]))
filenamer = str(sys.argv[1])
filenamew = str(sys.argv[2])
filenameref = str(sys.argv[3])
fr = open(filenamer, 'r')
fref = open(filenameref, 'r')
fw = open(filenamew, 'a')
except IndexError:
print("Error: no Filename")
sys.exit(2)
#experiment file parsing
val_line=[]
inside=False
element = {}
for line in fr:
if (not line.find("==========V=============") and inside):
inside=False
val_line.append(element)
element = {}
if inside:
tmp = line.split()
element[tmp[1]] = float(tmp[0])
#element.append(line.split()[0])
#for s in line.split():
#try:
#if isinstance(float(s), numbers.Real):
# if s.is_digit():
#element.append(s)
#except ValueError: count = count
if (not line.find("==========VVV=============") and not inside):
inside=True
#REF_file inspection
ref_value = {}
inside = False
for line in fref:
if (not line.find("==========V=============") and inside):
inside=False
if inside:
tmp = line.split()
ref_value[tmp[1]] = float(tmp[0])
if (not line.find("==========VVV=============") and not inside):
inside=True
#PRECISION-ARE-AAE
f_prec=[]
f_are=[]
f_aae=[]
for trial in val_line:
trial_keys = trial.keys()
ref_keys = ref_value.keys()
ref_k_value = sorted(ref_value, key=ref_value.get, reverse=True)[0:len(trial)]
intersection_rr = [value for value in ref_k_value if value not in trial_keys]
#print(len(trial), len(intersection_rr), len(val_line))
print("\n--------")
print("Printing the first 5 elements not captured in the real first top-"+str(len(trial))+" ("+str(len(trial))+","+ str(len(intersection_rr))+")")
print("--------")
print("[key \t exact_value \t index_in_topk]")
for value in intersection_rr[0:5]:
        index = 0
        pos = 0
for kvalue in ref_k_value:
if kvalue==value: pos=index
index +=1
print(str(value) + "\t" + str(ref_value[value]) + "\t" + str(pos))
#print(str(ref_k_value))
corrected_keys = 0
keys_counted = 0
relative_error = 0
absolute_error = 0
for key_item in trial_keys:
if key_item in ref_k_value:
corrected_keys = corrected_keys +1
if key_item in ref_keys:
keys_counted = keys_counted + 1
relative_error = relative_error + float(abs(trial[key_item]-ref_value[key_item])/(ref_value[key_item]+0.0))
absolute_error = absolute_error + abs(trial[key_item]-ref_value[key_item])
#print(float(corrected_keys),(len(trial)+0.0))
f_prec.append(float(corrected_keys/(len(trial)+0.0)))
f_are.append(relative_error/keys_counted)
f_aae.append(absolute_error/keys_counted)
print("\n--------")
print("Summary:")
print("--------")
print("Precision: "+str(f_prec))
print("Relative error: "+str(f_are))
print("Absolute error: "+ str(f_aae))
for x,y,k in zip(f_prec, f_are, f_aae):
str_tow = str(x) + " " + str(y) + " " + str(k) + " " + filenamer
#print(str(str_tow))
fw.write(str(str_tow) + '\n')
fr.close()
fref.close()
fw.close()
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_bcrypt import Bcrypt
from flask_login import LoginManager
from urllib.parse import urlparse
from flask_wtf.csrf import CSRFProtect
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'mariadb://root:password@localhost:3306/antilupa'
app.config['SECRET_KEY'] = 'ec9439cfc6c796ae2029594d'
db = SQLAlchemy(app)
bcrypt = Bcrypt(app)
login_manager = LoginManager(app)
login_manager.login_view = "login_page"
login_manager.login_message_category = "info"
csrf = CSRFProtect()
csrf.init_app(app)
def fetch_domain(url):
return urlparse(url).netloc
app.jinja_env.globals.update(fetch_domain=fetch_domain)
emojisource = "all logos <a href=https://www.freepik.com/vectors/heart>Heart vector created by rawpixel.com - www.freepik.com</a>"
from antilupa import routes
|
# Generated by Django 2.2.5 on 2019-11-19 14:57
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("workflow_handler", "0002_auto_20191119_1457"),
]
operations = [
migrations.AlterField(
model_name="task",
name="completed",
field=models.DateTimeField(null=True),
),
migrations.AlterField(
model_name="task",
name="completed_by",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="user_handler.User",
),
),
]
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 - 2020 -- Lars Heuer
# All rights reserved.
#
# License: BSD License
#
"""\
Test against issue #16.
<https://github.com/heuer/segno/issues/16>
"""
from __future__ import unicode_literals, absolute_import
import segno
def test_boost_error_automatic():
qr = segno.make_qr('ABCDEF')
assert '1-H' == qr.designator
def test_boost_error_automatic_disabled():
qr = segno.make_qr('ABCDEF', boost_error=False)
assert '1-L' == qr.designator
def test_boost_error_automatic_arg_error():
qr = segno.make_qr('ABCDEF', error='l')
assert '1-H' == qr.designator
def test_boost_error_disabled_arg_error():
qr = segno.make_qr('ABCDEF', error='l', boost_error=False)
assert '1-L' == qr.designator
def test_boost_error_m1():
qr = segno.make('01234')
assert qr.is_micro
assert 'M1' == qr.version
assert qr.error is None
def test_boost_error_micro():
qr = segno.make('A', error='l')
assert qr.is_micro
assert 'M2' == qr.version
assert 'M' == qr.error
def test_boost_error_micro_boost_disabled():
qr = segno.make('A', error='l', boost_error=False)
assert qr.is_micro
assert 'M2' == qr.version
assert 'L' == qr.error
def test_boost_error_m3():
qr = segno.make('A', error='l', version='M3')
assert qr.is_micro
assert 'M3' == qr.version
assert 'M' == qr.error
def test_boost_error_m3_boost_disabled():
qr = segno.make('A', error='l', version='M3', boost_error=False)
assert qr.is_micro
assert 'M3' == qr.version
assert 'L' == qr.error
def test_boost_error_m4():
qr = segno.make('A', error='l', version='M4')
assert qr.is_micro
assert 'M4' == qr.version
assert 'Q' == qr.error
def test_boost_error_m4_boost_disabled():
qr = segno.make('A', error='l', version='M4', boost_error=False)
assert qr.is_micro
assert 'M4' == qr.version
assert 'L' == qr.error
if __name__ == '__main__':
import pytest
pytest.main([__file__])
|
"""
Know What You Don’t Know: Unanswerable Questions for SQuAD
https://arxiv.org/pdf/1806.03822.pdf
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset,
consisting of questions posed by crowdworkers on a set of Wikipedia articles,
where the answer to every question is a segment of text, or span, from the
corresponding reading passage, or the question might be unanswerable.
SQuAD2.0 combines the 100,000 questions in SQuAD1.1 with over 50,000 unanswerable
questions written adversarially by crowdworkers to look similar to answerable ones.
To do well on SQuAD2.0, systems must not only answer questions when possible, but
also determine when no answer is supported by the paragraph and abstain from answering.
Homepage: https://rajpurkar.github.io/SQuAD-explorer/
"""
import datasets
from math import exp
from lm_eval.base import rf
from lm_eval.metrics import f1_score, mean
from .common import HFTask
from functools import partial
from packaging import version
_CITATION = """
@misc{rajpurkar2018know,
title={Know What You Don't Know: Unanswerable Questions for SQuAD},
author={Pranav Rajpurkar and Robin Jia and Percy Liang},
year={2018},
eprint={1806.03822},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
def _squad_metric(predictions, references):
squad_metric = datasets.load_metric("squad_v2")
return squad_metric.compute(predictions=predictions, references=references)
def _squad_agg(key, items):
predictions, references = zip(*items)
return _squad_metric(predictions=predictions, references=references)[key]
class SQuAD2(HFTask):
VERSION = 1
DATASET_PATH = "squad_v2"
DATASET_NAME = None
# HF changed squad on us so we have to make sure we aren't running the old one
assert version.parse(datasets.__version__) >= version.parse("1.11.0"), "datasets v1.11.0 or later required for SQuAD"
def has_training_docs(self):
return True
def has_validation_docs(self):
return True
def has_test_docs(self):
return False
def training_docs(self):
return self.data["train"]
def validation_docs(self):
return self.data["validation"]
def doc_to_text(self, doc):
return 'Title: ' + doc['title'] + '\n\n' + 'Background: ' + doc['context'] + '\n\n' + 'Question: ' + doc['question'] + '\n\n' + 'Answer:'
def doc_to_target(self, doc):
answer_list = doc['answers']['text']
if len(answer_list) > 0:
answer = answer_list[0]
else:
answer = 'unanswerable'
return " " + answer
def construct_requests(self, doc, ctx):
""" Uses RequestFactory to construct Requests and returns an iterable of
Requests which will be sent to the LM.
:param doc:
The document as returned from training_docs, validation_docs, or test_docs.
:param ctx: str
The context string, generated by fewshot_context. This includes the natural
language description, as well as the few shot examples, and the question
part of the document for `doc`.
"""
continuation = rf.greedy_until(ctx, ['\n'])
is_unanswerable = rf.loglikelihood(ctx, " " + "unanswerable")
return continuation, is_unanswerable
def process_results(self, doc, results):
"""Take a single document and the LM results and evaluates, returning a
dict where keys are the names of submetrics and values are the values of
the metric for that one document
:param doc:
The document as returned from training_docs, validation_docs, or test_docs.
:param results:
The results of the requests created in construct_requests.
"""
continuation, (logprob_unanswerable, _) = results
no_answer_probability = exp(logprob_unanswerable)
predictions = {
'id': doc['id'],
'prediction_text': continuation,
'no_answer_probability': no_answer_probability,
}
references = {
'id': doc['id'],
'answers': doc['answers'],
}
return {
'exact': (predictions, references), # Exact match (the normalized answer exactly match the gold answer)
'f1': (predictions, references), # The F-score of predicted tokens versus the gold answer
'HasAns_exact': (predictions, references), # Exact match (the normalized answer exactly match the gold answer)
'HasAns_f1': (predictions, references), # The F-score of predicted tokens versus the gold answer
'NoAns_exact': (predictions, references), # Exact match (the normalized answer exactly match the gold answer)
'NoAns_f1': (predictions, references), # The F-score of predicted tokens versus the gold answer
'best_exact': (predictions, references), # Best exact match (with varying threshold)
'best_f1': (predictions, references), # Best F1 (with varying threshold)
}
def aggregation(self):
"""
:returns: {str: [float] -> float}
A dictionary where keys are the names of submetrics and values are
functions that aggregate a list of metrics
"""
return {
'exact': partial(_squad_agg, 'exact'), # Exact match (the normalized answer exactly match the gold answer)
'f1': partial(_squad_agg, 'f1'), # The F-score of predicted tokens versus the gold answer
'HasAns_exact': partial(_squad_agg, 'HasAns_exact'), # Exact match (the normalized answer exactly match the gold answer)
'HasAns_f1': partial(_squad_agg, 'HasAns_f1'), # The F-score of predicted tokens versus the gold answer
'NoAns_exact': partial(_squad_agg, 'NoAns_exact'), # Exact match (the normalized answer exactly match the gold answer)
'NoAns_f1': partial(_squad_agg, 'NoAns_f1'), # The F-score of predicted tokens versus the gold answer
'best_exact': partial(_squad_agg, 'best_exact'), # Best exact match (with varying threshold)
'best_f1': partial(_squad_agg, 'best_f1'), # Best F1 (with varying threshold)
}
def higher_is_better(self):
"""
:returns: {str: bool}
A dictionary where keys are the names of submetrics and values are
whether a higher value of the submetric is better
"""
return {
'exact': True, # Exact match (the normalized answer exactly match the gold answer)
'f1': True, # The F-score of predicted tokens versus the gold answer
'HasAns_exact': True, # Exact match (the normalized answer exactly match the gold answer)
'HasAns_f1': True, # The F-score of predicted tokens versus the gold answer
'NoAns_exact': True, # Exact match (the normalized answer exactly match the gold answer)
'NoAns_f1': True, # The F-score of predicted tokens versus the gold answer
'best_exact': True, # Best exact match (with varying threshold)
'best_f1': True, # Best F1 (with varying threshold)
}
|
ImporteAPagar = float(input("How much are you paying? "))
importeCoste = float(input("How much does it cost? "))
importeDevolucion = ImporteAPagar-importeCoste
# print(importe)
# bill and coin denominations, with their type in singular
tipos = (
(500, "billete"),
(200, "billete"),
(100, "billete"),
(50, "billete"),
(20, "billete"),
(10, "billete"),
(5, "billete"),
(2, "moneda"),
(1, "moneda")
)
centimos = (
(0.5, "moneda"),
(0.2, "moneda"),
(0.1, "moneda"),
(0.05, "moneda"),
(0.02, "moneda"),
(0.01, "moneda")
)
while importeDevolucion>0.4:
if importeDevolucion >= 1:
for tipo in tipos:
valor = tipo[0]
descripcion = tipo[1]
            # helper that appends a plural "s" when the count is greater than 1
def s(valor, text): return valor > 1 and text+"s" or text
if importeDevolucion/valor > 1:
                # the double slash rounds the division down (same as dropping the decimals)
                print(int(importeDevolucion / valor), s((importeDevolucion / valor), descripcion), valor)
                #print((b / valor), s((importe / valor), descripcion), valor)
                print((importeDevolucion // valor), s((importeDevolucion / valor), descripcion), valor)
                # keep the remainder of the division
importeDevolucion = importeDevolucion % valor
print(importeDevolucion)
else:
print("el importe restante es: ", importeDevolucion)
for centimo in centimos:
valor = centimo[0]
descripcion = centimo[1]
            # helper that appends a plural "s" when the count is greater than 1
def s(valor, text): return valor > 1 and text+"s" or text
if importeDevolucion/valor > 1:
                b = abs(importeDevolucion) - abs(int(importeDevolucion))  # decimal part
print((b / valor), s((b / valor), descripcion), valor)
                # keep the remainder of the division
importeDevolucion = b % valor
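
# A compact alternative sketch of the same greedy change-making idea (an assumed
# helper, not part of the original script): working in integer cents avoids the
# floating-point drift that the loop above has to work around.
def greedy_change(amount_eur,
                  denominations_eur=(500, 200, 100, 50, 20, 10, 5, 2, 1,
                                     0.5, 0.2, 0.1, 0.05, 0.02, 0.01)):
    remaining = round(amount_eur * 100)  # total to return, in cents
    breakdown = {}
    for denom in denominations_eur:
        count, remaining = divmod(remaining, round(denom * 100))
        if count:
            breakdown[denom] = count
    # e.g. greedy_change(3.87) -> {2: 1, 1: 1, 0.5: 1, 0.2: 1, 0.1: 1, 0.05: 1, 0.02: 1}
    return breakdown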
|
#!/usr/bin/env python
#------------------------------------------------------------------------------
# Copyright 2014 Esri
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#==============================================================================
#Name: RemapIDsOnServices.py
#
#Purpose: Replace portal ids stored on ArcGIS Server services with new ids
#
#Prerequisites: - ArcGIS Service must have already been published.
# - Portal items must have already been published.
#
#==============================================================================
import sys, os, traceback, datetime, ast, copy, json, time
import urlparse
from portalpy import Portal
# Add "Root folder"\SupportFiles to sys path inorder to import
# modules in subfolder
sys.path.append(os.path.join(os.path.dirname(
os.path.dirname(os.path.dirname(sys.argv[0]))), "SupportFiles"))
from AGSRestFunctions import getServiceList
from AGSRestFunctions import getServiceInfo
from AGSRestFunctions import editServiceInfo
scriptName = sys.argv[0]
exitErrCode = 1
debug = False
sectionBreak = '=' * 175
sectionBreak1 = '-' * 175
doUpdateService = True
doDeleteItems = True
def check_args():
# ---------------------------------------------------------------------
# Check arguments
# ---------------------------------------------------------------------
    if len(sys.argv) != 6:
print '\n' + scriptName + ' <Server_FullyQualifiedDomainName> <Server_Port> <User_Name> <Password> <Use_SSL: Yes|No>'
print '\nWhere:'
print '\n\t<Server_FullyQualifiedDomainName> (required): the fully qualified domain name of the ArcGIS Server/Portal for ArcGIS machine.'
print '\n\t<Server_Port> (required): the port number of the ArcGIS Server (specify # if no port).'
print '\n\t<User_Name> (required): ArcGIS Server/Portal for ArcGIS site administrator.'
print '\n\t<Password> (required): Password for ArcGIS Server/Portal for ArcGIS site administrator user.'
print '\n\t<Use_SSL: Yes|No> (required) Flag indicating if ArcGIS Server requires HTTPS.'
return None
else:
# Set variables from parameter values
server = sys.argv[1]
port = sys.argv[2]
adminuser = sys.argv[3]
password = sys.argv[4]
useSSL = sys.argv[5]
if port.strip() == '#':
port = None
if useSSL.strip().lower() in ['yes', 'ye', 'y']:
useSSL = True
else:
useSSL = False
return server, port, adminuser, password, useSSL
def getPortalURLItems(portal):
url_items = None
# Get all portal items not owned by logged in portal user
items = portal.search(['id','type','url','title','owner'], q='-owner:"' + \
portal.logged_in_user()['username'] + '"')
if items:
url_items = {}
for item in items:
url = item.get('url')
if url:
# Remove http/s protocol from url
urlparts = urlparse.urlparse(url)
url_items[url.replace('{}://'.format(urlparts.scheme), '')] = item.get('id')
return url_items
def getServiceSearchString(service, servicePortalItem):
# 'Build' search string
replaceURLEndpointTypes = ['FeatureServer', 'NAServer', 'MobileServer', 'SchematicsServer']
servicePortalItemType = servicePortalItem['type']
serviceType = service.split('.')[1]
serviceSearchElements = service.replace('//', '/').replace('.', '/').split('/')
    if serviceType != servicePortalItemType:
if servicePortalItemType in replaceURLEndpointTypes:
# Replace last element
serviceSearchElements[-1:] = [servicePortalItemType]
else:
# Append portal type
serviceSearchElements.append(servicePortalItemType)
serviceSearchStr = '/'.join(serviceSearchElements)
return serviceSearchStr
def findPortalItemID(server, serviceSearchStr, url_items):
new_id = None
for item_url, item_id in url_items.iteritems():
if item_url.lower().startswith(server.lower()):
if item_url.lower().endswith(serviceSearchStr.lower()):
new_id = item_id
return new_id
def parseService(service):
# Parse folder and service nameType
folder = None
serviceNameType = None
parsedService = service.split('//')
if len(parsedService) == 1:
serviceNameType = parsedService[0]
else:
folder = parsedService[0]
serviceNameType = parsedService[1]
return folder, serviceNameType
def main():
totalSuccess = True
# -------------------------------------------------
# Check arguments
# -------------------------------------------------
results = check_args()
if not results:
sys.exit(exitErrCode)
server, port, adminuser, password, useSSL = results
if debug:
print server, port, adminuser, password, useSSL
print
print '=' * 100
print ' Remap portal ids stored within ArcGIS Server services'
print '=' * 100
print
try:
# -------------------------------------------------
# Get portal items with URLs
# -------------------------------------------------
if useSSL:
protocol = 'https'
else:
protocol = 'http'
# Create portal object
portal_address = '{}://{}:7443/arcgis'.format(protocol, server)
portal = Portal(portal_address, adminuser, password)
if not portal:
raise Exception('ERROR: Could not create "portal" object.')
print '\n- Retrieving portal item information from portal...'
portal_url_items = getPortalURLItems(portal)
if not portal_url_items:
raise Exception('ERROR: There are no URL portal items. Have you published the portal content?')
# -------------------------------------------------
# Get all services that exist on server
# -------------------------------------------------
print '\n- Retrieving list of ArcGIS Server services...'
allServices = getServiceList(server, port, adminuser, password)
# Remove certain services from collection
excludeServices = ['SampleWorldCities.MapServer']
services = [service for service in allServices if service not in excludeServices]
if len(services) == 0:
raise Exception('ERROR: There are no user published ArcGIS Server services. Have you published the ArcGIS Server services?')
# -------------------------------------------------
# Update portal item ids with service portal properties json
# -------------------------------------------------
portalItemIDsToDelete = []
print '\n- Remap portal ids on each ArcGIS Server service...\n'
totalNumIDsNotFound = 0
for service in services:
time.sleep(0.5)
print '\t' + ('-' * 75)
print '\tService: ' + service
folder, serviceNameType = parseService(service)
numIDsFoundForService = 0
# Get the service info
info = getServiceInfo(server, port, adminuser, password, folder, serviceNameType)
# Get the service portal properties json and update the item ids
print '\n\t- Retrieving information about associated portal items stored in the server JSON...'
servicePortalPropsOrig = info.get('portalProperties')
if not servicePortalPropsOrig:
raise Exception('ERROR: The service ' + service + ' does not ' +
'have any portal properties ("portalProperties" JSON key/value). ' +
'Did you federate the server?')
if servicePortalPropsOrig:
servicePortalProps = copy.deepcopy(servicePortalPropsOrig)
servicePortalItemsOrig = servicePortalProps.get('portalItems')
servicePortalItems = copy.deepcopy(servicePortalItemsOrig)
if not servicePortalItems:
totalSuccess = False
print '\n\t**** ERROR: this service does not have any associated portal items.'
continue
if servicePortalItems:
print '\n\t- Associated portal items...'
for servicePortalItem in servicePortalItems:
orig_id = servicePortalItem['itemID']
# Get service search string
serviceSearchStr = getServiceSearchString(service, servicePortalItem)
print '\n\t - ' + serviceSearchStr + ': original item id = ' + orig_id
# Get new portal item id
new_id = findPortalItemID(server, serviceSearchStr, portal_url_items)
if new_id:
                            if new_id != orig_id:
numIDsFoundForService = numIDsFoundForService + 1
servicePortalItem['itemID'] = new_id
portalItemIDsToDelete.append(orig_id)
print '\t\tFound new item id - ' + new_id
else:
print '\t\tItem IDs match, not processing.'
else:
print '\n\t**** WARNING: new item id not found.'
servicePortalProps['portalItems'] = servicePortalItems
info['portalProperties'] = servicePortalProps
if doUpdateService:
print '\n\n\t- Updating portal item information stored within service JSON (service will be restarted automatically)...'
if numIDsFoundForService == 0:
print '\n\t**** WARNING: there were no new ids found for this service so there is no need to update the service JSON info.'
continue
success, status = editServiceInfo(server, port, adminuser, password, folder, serviceNameType, info)
if success:
print '\t\tDone.'
else:
totalSuccess = False
print '**** ERROR: Update of service was not successful.'
print 'status: ' + str(status)
if doDeleteItems:
print
#print '=' * 100
print '\n\n-Deleting portal items that were remapped to original portal item...'
if len(portalItemIDsToDelete) == 0:
print '\n**** ERROR: No portal items to delete; which means there were no portal items '
print '\t owned by ' + portal.logged_in_user()['username'] + ' that were remapped to original portal item.\n'
# Get list of all portal ids so we can verify that the portal item exists before we delete
portal_items = portal.search(['id'])
time.sleep(5)
for portalItemID in portalItemIDsToDelete:
time.sleep(2)
itemFound = False
print ' -Deleting id ' + portalItemID + '...'
# Delete if item exists
for portal_item in portal_items:
if portal_item['id'] == portalItemID:
itemFound = True
results = portal.delete_item(portalItemID, portal.logged_in_user()['username'])
if results:
print '\tDone.'
else:
totalSuccess = False
print '**** ERROR: Deletion of service was not successful.'
if not itemFound:
print '\tItem ' + portalItemID + ' does not exist. Skipping...'
except:
totalSuccess = False
# Get the traceback object
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
# Concatenate information together concerning the error into a message string
pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + "\nError Info:\n" + str(sys.exc_info()[1])
# Print Python error messages for use in Python / Python Window
print
print "***** ERROR ENCOUNTERED *****"
print pymsg + "\n"
finally:
print
print
if totalSuccess:
print "Remap of portal item ids on services was completed successfully."
sys.exit(0)
else:
print "ERROR: Remap of portal item ids on services was _NOT_ completed successfully."
sys.exit(1)
if __name__ == "__main__":
main()
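
# Example invocation (hypothetical host name and credentials):
#   python RemapIDsOnServices.py gis.example.com 6443 siteadmin MyP@ssword Yes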
|
import os
import unittest
from supervised.tuner.mljar_tuner import MljarTuner
class TunerTest(unittest.TestCase):
def test_key_params(self):
params1 = {
"preprocessing": {"p1": 1, "p2": 2},
"learner": {"p1": 1, "p2": 2},
"validation_strategy": {},
}
params2 = {
"preprocessing": {"p1": 1, "p2": 2},
"learner": {"p2": 2, "p1": 1},
"validation_strategy": {},
}
key1 = MljarTuner.get_params_key(params1)
key2 = MljarTuner.get_params_key(params2)
self.assertEqual(key1, key2)
|
"""Representation base class."""
from abc import ABC, abstractmethod
from copy import deepcopy
import logging
import numpy as np
from rlpy.tools import bin2state, closestDiscretization, hasFunction, id2vec, vec2id
import scipy.sparse as sp
from .value_learner import ValueLearner
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = [
"Alborz Geramifard",
"Robert H. Klein",
"Christoph Dann",
"William Dabney",
"Jonathan P. How",
]
__license__ = "BSD 3-Clause"
__author__ = "Alborz Geramifard"
class Hashable(ABC):
"""
    A mix-in class for a hashable representation.
"""
@abstractmethod
def state_hash(self, s):
"""
Returns a hash value of the state
"""
pass
class Enumerable(Hashable, ABC):
"""
    A mix-in class for an enumerable representation.
"""
@abstractmethod
def state_id(self, s):
"""
Returns a 0-indexed state id corresponding to the state.
"""
pass
def state_hash(self, s):
return self.state_id(s)
class Representation(ValueLearner, ABC):
"""
The Representation is the :py:class:`~rlpy.agents.agent.Agent`'s model of the
value function associated with a :py:class:`~rlpy.domains.domain.Domain`.
As the Agent interacts with the Domain, it receives updates in the form of
state, action, reward, next state, next action. \n
The Agent passes these quantities to its Representation, which is
responsible for maintaining the value function usually in some
lower-dimensional feature space.
agents can later query the Representation for the value of being in a state
*V(s)* or the value of taking an action in a particular state
( known as the Q-function, *Q(s,a)* ).
.. note::
Throughout the framework, ``phi`` refers to the vector of features;
``phi`` or ``phi_s`` is thus the vector of feature functions evaluated
at the state *s*. phi_s_a appends ``|A| - 1`` copies of ``phi_s``, such
        that ``|phi_s_a| = |A| * |phi|``, where ``|A|`` is the size of the action
        space and ``|phi|`` is the number of features. Each of these blocks
corresponds to a state-action pair; all blocks except for the selected
action ``a`` are set to 0.
The Representation class is a base class that provides the basic framework
for all representations. It provides the methods and attributes
that allow child classes to interact with the Agent and Domain classes
within the RLPy library. \n
All new representation implementations should inherit from this class.
.. note::
At present, it is assumed that the Linear Function approximator
family of representations is being used.
"""
#: True if the number of features may change during execution.
IS_DYNAMIC = False
def __init__(self, domain, features_num, seed=1, discretization=20):
"""
:param domain: the problem :py:class:`~rlpy.domains.domain.Domain` to learn.
        :param features_num: Number of features in the representation.
:param discretization: Number of bins used for each continuous dimension.
For discrete dimensions, this parameter is ignored.
"""
super().__init__(domain.num_actions, features_num)
# A dictionary used to cache expected results of step().
# Used for planning algorithms
self.expected_step_cached = {}
self.set_bins_per_dim(domain, discretization)
self.domain = domain
self.state_space_dims = domain.state_space_dims
self.discretization = discretization
#: Number of aggregated states based on the discretization.
        #: If the representation is adaptive, set to the best resolution possible
self.num_states_total = np.prod(self.bins_per_dim.astype(np.uint64))
self.logger = logging.getLogger(
"rlpy.representations." + self.__class__.__name__
)
self.random_state = np.random.RandomState(seed=seed)
def set_seed(self, seed):
"""
Set the random seed.
Any stochastic behavior in __init__() is broken out into this function
        so that if the random seed is later changed (e.g., by the Experiment),
other member variables and functions are updated accordingly.
"""
self.random_state.seed(seed)
def V(self, s, terminal, p_actions, phi_s=None):
if phi_s is None:
phi_s = self.phi(s, terminal)
return super().V(s, terminal, p_actions, phi_s)
def Qs(self, s, terminal, phi_s=None):
if phi_s is None:
phi_s = self.phi(s, terminal)
return super().Qs(s, terminal, phi_s)
def Q(self, s, terminal, a, phi_s=None):
""" Returns the learned value of a state-action pair, *Q(s,a)*.
:param s: The queried state in the state-action pair.
:param terminal: Whether or not *s* is a terminal state
:param a: The queried action in the state-action pair.
:param phi_s: (optional) The feature vector evaluated at state s.
If the feature vector phi(s) has already been cached,
pass it here as input so that it need not be computed again.
:return: (float) the value of the state-action pair (s,a), Q(s,a).
"""
if len(self.weight_vec) > 0:
phi_sa, i, j = self.phi_sa(s, terminal, a, phi_s, snippet=True)
return np.dot(phi_sa, self.weight_vec[i:j])
else:
return 0.0
def phi(self, s, terminal):
"""
Returns :py:meth:`~rlpy.representations.representation.phi_non_terminal`
for a given representation, or a zero feature vector in a terminal state.
:param s: The state for which to compute the feature vector
        :return: numpy array, the feature vector evaluated at state *s*.
.. note::
If state *s* is terminal the feature vector is returned as zeros!
This prevents the learning algorithm from wrongfully associating
the end of one episode with the start of the next (e.g., thinking
that reaching the terminal state causes it to teleport back to the
start state s0).
"""
if terminal or self.features_num == 0:
return np.zeros(self.features_num, "bool")
else:
return self.phi_non_terminal(s)
def phi_sa(self, s, terminal, a, phi_s=None, snippet=False):
"""
Returns the feature vector corresponding to a state-action pair.
We use the copy paste technique (Lagoudakis & Parr 2003).
Essentially, we append the phi(s) vector to itself *|A|* times, where
*|A|* is the size of the action space.
We zero the feature values of all of these blocks except the one
corresponding to the actionID *a*.
When ``snippet == False`` we construct and return the full, sparse phi_sa.
When ``snippet == True``, we return the tuple (phi_s, index1, index2)
where index1 and index2 are the indices defining the ends of the phi_s
block which WOULD be nonzero if we were to construct the full phi_sa.
:param s: The queried state in the state-action pair.
:param terminal: Whether or not *s* is a terminal state
:param a: The queried action in the state-action pair.
:param phi_s: (optional) The feature vector evaluated at state s.
If the feature vector phi(s) has already been cached,
pass it here as input so that it need not be computed again.
:param snippet: if ``True``, do not return a single phi_sa vector,
but instead a tuple of the components needed to create it.
See return value below.
:return: If ``snippet==False``, return the enormous phi_sa vector
constructed by the copy-paste method.
If ``snippet==True``, do not construct phi_sa, only return
a tuple (phi_s, index1, index2) as described above.
"""
if phi_s is None:
phi_s = self.phi(s, terminal)
if snippet is True:
return phi_s, a * self.features_num, (a + 1) * self.features_num
phi_sa = np.zeros((self.num_actions, self.features_num), dtype=phi_s.dtype)
if self.features_num == 0:
return phi_sa.reshape(-1)
phi_sa[a] = phi_s
return phi_sa.reshape(-1)
def _hash_state(self, s):
"""
Returns a unique id for a given state.
Essentially, enumerate all possible states and return the ID associated
with *s*.
Under the hood: first, discretize continuous dimensions into bins
as necessary. Then map the binstate to an integer.
"""
ds = self.bin_state(s)
return vec2id(ds, self.bins_per_dim)
def set_bins_per_dim(self, domain, discretization):
"""
Set the number of bins for each dimension of the domain.
        Continuous dimensions will be sliced using the ``discretization`` parameter.
:param domain: the problem :py:class:`~rlpy.domains.domain.Domain` to learn
:param discretization: The number of bins a continuous
domain should be sliced into.
"""
#: Number of possible states per dimension [1-by-dim]
self.bins_per_dim = np.zeros(domain.state_space_dims, np.uint16)
#: Width of bins in each dimension
self.binwidth_per_dim = np.zeros(domain.state_space_dims)
statespace_width = domain.statespace_width
for d in range(domain.state_space_dims):
if d in domain.continuous_dims:
self.bins_per_dim[d] = discretization
else:
self.bins_per_dim[d] = statespace_width[d]
self.binwidth_per_dim[d] = statespace_width[d] / self.bins_per_dim[d]
def bin_state(self, s):
"""
Returns a vector where each element is the zero-indexed bin number
corresponding with the given state.
(See :py:meth:`~rlpy.representations.representation._hash_state`)
Note that this vector will have the same dimensionality as *s*.
(Note: This method is binary compact; the negative case of binary features is
excluded from feature activation.
For example, if the domain has a light and the light is off, no feature
will be added. This is because the very *absence* of the feature
        itself corresponds to the light being off.)
"""
s = np.atleast_1d(s)
limits = self.domain.statespace_limits
assert np.all(s >= limits[:, 0])
assert np.all(s <= limits[:, 1])
width = limits[:, 1] - limits[:, 0]
diff = s - limits[:, 0]
bs = (diff * self.bins_per_dim / width).astype("uint32")
m = bs == self.bins_per_dim
bs[m] = self.bins_per_dim[m] - 1
return bs
def pre_discover(self, s, terminal, a, sn, terminaln):
"""
Identifies and adds ("discovers") new features for this adaptive
representation BEFORE having obtained the TD-Error.
For example, see :py:class:`~rlpy.representations.IncrementalTabular`.
In that class, a new feature is added anytime a novel state is observed.
.. note::
For adaptive representations that require access to TD-Error to
determine which features to add next,
use :py:meth:`~rlpy.representations.representation.post_discover`
instead.
:param s: The state
:param terminal: boolean, whether or not *s* is a terminal state.
:param a: The action
:param sn: The next state
:param terminaln: boolean, whether or not *sn* is a terminal state.
:return: The number of new features added to the representation
"""
return 0
def post_discover(self, s, terminal, a, td_error, phi_s):
"""
Identifies and adds ("discovers") new features for this adaptive
representation AFTER having obtained the TD-Error.
For example, see :py:class:`~rlpy.representations.ifdd.iFDD`.
In that class, a new feature is added based on regions of high TD-Error.
.. note::
For adaptive representations that do not require access to TD-Error
to determine which features to add next, you may
use :py:meth:`~rlpy.representations.representation.pre_discover`
instead.
:param s: The state
:param terminal: boolean, whether or not *s* is a terminal state.
:param a: The action
:param td_error: The temporal difference error at this transition.
:param phi_s: The feature vector evaluated at state *s*.
:return: The number of new features added to the representation
"""
return 0
def best_action(self, s, terminal, p_actions, phi_s=None):
"""
Returns the best action at a given state.
If there are multiple best actions, this method selects one of them
uniformly randomly.
If *phi_s* [the feature vector at state *s*] is given, it is used to
speed up code by preventing re-computation within this function.
See :py:meth:`~rlpy.representations.representation.best_actions`
:param s: The given state
:param terminal: Whether or not the state *s* is a terminal one.
:param phi_s: (optional) the feature vector at state (s).
:return: The best action at the given state.
"""
bestA = self.best_actions(s, terminal, p_actions, phi_s)
if isinstance(bestA, int):
return bestA
elif len(bestA) > 1:
return self.random_state.choice(bestA)
# return bestA[0]
else:
return bestA[0]
@abstractmethod
def phi_non_terminal(self, s):
""" *Abstract Method* \n
Returns the feature vector evaluated at state *s* for non-terminal
states; see
function :py:meth:`~rlpy.representations.representation.phi`
for the general case.
:param s: The given state
:return: The feature vector evaluated at state *s*.
"""
pass
def activeInitialFeatures(self, s):
"""
Returns the index of active initial features based on bins in each
dimension.
:param s: The state
:return: The active initial features of this representation
(before expansion)
"""
bs = self.bin_state(s)
shifts = np.hstack((0, np.cumsum(self.bins_per_dim)[:-1]))
index = bs + shifts
return index.astype("uint32")
def batch_phi_sa(self, all_phi_s, all_actions, use_sparse=False):
"""
Builds the feature vector for a series of state-action pairs (s,a)
using the copy-paste method.
.. note::
See :py:meth:`~rlpy.representations.representation.phi_sa`
for more information.
:param all_phi_s: The feature vectors evaluated at a series of states.
Has dimension *p* x *n*, where *p* is the number of states
(indexed by row), and *n* is the number of features.
:param all_actions: The set of actions corresponding to each feature.
Dimension *p* x *1*, where *p* is the number of states included
in this batch.
:param use_sparse: Determines whether or not to use sparse matrix
libraries provided with numpy.
        :return: all_phi_s_a, of dimension *p* x (*n* * *|A|*)
"""
p, n = all_phi_s.shape
a_num = self.num_actions
if use_sparse:
phi_s_a = sp.lil_matrix((p, n * a_num), dtype=all_phi_s.dtype)
else:
phi_s_a = np.zeros((p, n * a_num), dtype=all_phi_s.dtype)
for i in range(a_num):
rows = np.where(all_actions == i)[0]
if len(rows):
phi_s_a[rows, i * n : (i + 1) * n] = all_phi_s[rows, :]
return phi_s_a
def batch_best_action(self, all_s, all_phi_s, action_mask=None, use_sparse=True):
"""
Accepts a batch of states, returns the best action associated with each.
.. note::
See :py:meth:`~rlpy.representations.representation.best_action`
:param all_s: An array of all the states to consider.
:param all_phi_s: The feature vectors evaluated at a series of states.
Has dimension *p* x *n*, where *p* is the number of states
(indexed by row), and *n* is the number of features.
:param action_mask: (optional) a *p* x *|A|* mask on the possible
actions to consider, where *|A|* is the size of the action space.
The mask is a binary 2-d array, where 1 indicates an active mask
(action is unavailable) while 0 indicates a possible action.
        :param use_sparse: Determines whether or not to use sparse matrix
libraries provided with numpy.
:return: An array of the best action associated with each state.
"""
p, n = all_phi_s.shape
a_num = self.num_actions
if action_mask is None:
action_mask = np.ones((p, a_num))
for i, s in enumerate(all_s):
action_mask[i, self.domain.possible_actions(s)] = 0
a_num = self.num_actions
if use_sparse:
# all_phi_s_a will be ap-by-an
all_phi_s_a = sp.kron(np.eye(a_num, a_num), all_phi_s)
all_q_s_a = all_phi_s_a * self.weight.reshape(-1, 1)
else:
# all_phi_s_a will be ap-by-an
all_phi_s_a = np.kron(np.eye(a_num, a_num), all_phi_s)
all_q_s_a = np.dot(all_phi_s_a, self.weight.reshape(-1, 1))
        all_q_s_a = all_q_s_a.reshape((a_num, -1)).T  # p-by-a
all_q_s_a = np.ma.masked_array(all_q_s_a, mask=action_mask)
best_action = np.argmax(all_q_s_a, axis=1)
# Calculate the corresponding phi_s_a
phi_s_a = self.batch_phi_sa(all_phi_s, best_action, use_sparse)
return best_action, phi_s_a, action_mask
@abstractmethod
def feature_type(self):
"""
Return the data type for the underlying features (eg 'float').
"""
pass
def q_look_ahead(self, s, a, ns_samples, policy=None):
"""
Returns the state action value, Q(s,a), by performing one step
look-ahead on the domain.
.. note::
For an example of how this function works, see
`Line 8 of Figure 4.3 <http://webdocs.cs.ualberta.ca/~sutton/book/ebook/node43.html>`_
in Sutton and Barto 1998.
If the domain does not define ``expected_step()``, this function uses
``ns_samples`` samples to estimate the one_step look-ahead.
If a policy is passed (used in the policy evaluation), it is used to
generate the action for the next state.
Otherwise the best action is selected.
.. note::
This function should not be called in any RL algorithms unless
the underlying domain is an approximation of the true model.
:param s: The given state
:param a: The given action
:param ns_samples: The number of samples used to estimate the one_step look-ahead.
:param policy: (optional) Used to select the action in the next state
            (*after* taking action a) when estimating the one_step look-ahead.
If ``policy == None``, the best action will be selected.
:return: The one-step lookahead state-action value, Q(s,a).
"""
# Hash new state for the incremental tabular case
self.continuous_state_starting_samples = 10
if hasFunction(self, "addState"):
self.addState(s)
if hasFunction(self.domain, "expected_step"):
            return self._q_from_expected_step(s, a, policy)
else:
return self._q_from_sampling(s, a, policy, ns_samples)
def qs_look_ahead(self, s, ns_samples, policy=None):
"""
Returns an array of actions and their associated values Q(s,a),
by performing one step look-ahead on the domain for each of them.
.. note::
For an example of how this function works, see
`Line 8 of Figure 4.3 <http://webdocs.cs.ualberta.ca/~sutton/book/ebook/node43.html>`_
in Sutton and Barto 1998.
If the domain does not define ``expected_step()``, this function uses
``ns_samples`` samples to estimate the one_step look-ahead.
If a policy is passed (used in the policy evaluation), it is used to
generate the action for the next state.
Otherwise the best action is selected.
.. note::
This function should not be called in any RL algorithms unless
the underlying domain is an approximation of the true model.
:param s: The given state
:param ns_samples: The number of samples used to estimate the one_step look-ahead.
:param policy: (optional) Used to select the action in the next state
            (*after* taking action a) when estimating the one_step look-ahead.
If ``policy == None``, the best action will be selected.
:return: an array of length `|A|` containing the *Q(s,a)* for each
possible *a*, where `|A|` is the number of possible actions from state *s*
"""
actions = self.domain.possible_actions(s)
Qs = np.array([self.q_look_ahead(s, a, ns_samples, policy) for a in actions])
return Qs, actions
    def _q_from_expected_step(self, s, a, policy):
p, r, ns, t, p_actions = self.domain.expected_step(s, a)
Q = 0
discount = self.domain.discount_factor
if policy is None:
Q = sum(
[
p[j, 0] * (r[j, 0] + discount * self.V(ns[j], t[j], p_actions[j]))
for j in range(len(p))
]
)
else:
for j in range(len(p)):
# For some domains such as blocks world, you may want to apply
# bellman backup to impossible states which may not have
# any possible actions.
                # This if statement makes sure that there exists at least
                # one action in the next state, so that the Bellman backup with
                # the fixed policy is valid.
p_actions = self.domain.possible_actions(ns[j])
if len(p_actions) == 0:
continue
na = policy.pi(ns[j], t[j], p_actions)
Q += p[j, 0] * (r[j, 0] + discount * self.Q(ns[j], t[j], na))
return Q
def _q_from_sampling(self, s, a, policy, ns_samples):
# See if they are in cache:
key = tuple(np.hstack((s, [a])))
cacheHit = self.expected_step_cached.get(key)
if cacheHit is None:
# Not found in cache => Calculate and store in cache
# If continuous domain, sample <continuous_state_starting_samples>
            # points within each discretized grid and sample
# <ns_samples>/<continuous_state_starting_samples> for each starting
# state.
# Otherwise take <ns_samples> for the state.
# First put s in the middle of the grid:
# shout(self,s)
s = self.stateInTheMiddleOfGrid(s)
# print "After:", shout(self,s)
if len(self.domain.continuous_dims):
next_states = np.empty((ns_samples, self.domain.state_space_dims))
rewards = np.empty(ns_samples)
                # next states per sampled initial state
ns_samples_ = ns_samples // self.continuous_state_starting_samples
for i in range(self.continuous_state_starting_samples):
# sample a random state within the grid corresponding
# to input s
new_s = s.copy()
for d in range(self.domain.state_space_dims):
w = self.binwidth_per_dim[d]
# Sample each dimension of the new_s within the
# cell
new_s[d] = (self.random_state.rand() - 0.5) * w + s[d]
                        # If the dimension is discrete, cast the
                        # sampled value to an int
if d not in self.domain.continuous_dims:
new_s[d] = int(new_s[d])
ns, r = self.domain.sample_step(new_s, a, ns_samples_)
next_states[i * ns_samples_ : (i + 1) * ns_samples_, :] = ns
rewards[i * ns_samples_ : (i + 1) * ns_samples_] = r
else:
next_states, rewards = self.domain.sample_step(s, a, ns_samples)
self.expected_step_cached[key] = [next_states, rewards]
else:
next_states, rewards = cacheHit
discount = self.domain.discount_factor
if policy is None:
Q = np.mean(
[
rewards[i] + discount * self.V(next_states[i, :])
for i in range(ns_samples)
]
)
else:
Q = np.mean(
[
rewards[i]
+ discount * self.Q(next_states[i, :], policy.pi(next_states[i, :]))
for i in range(ns_samples)
]
)
return Q
def stateID2state(self, s_id):
"""
        Returns the state vector corresponding to a state_id.
If dimensions are continuous it returns the state representing the
middle of the bin (each dimension is discretized according to
        ``representation.discretization``).
:param s_id: The id of the state, often calculated using the
            ``_hash_state`` method
:return: The state *s* corresponding to the integer *s_id*.
"""
# Find the bin number on each dimension
s = np.array(id2vec(s_id, self.bins_per_dim))
# Find the value corresponding to each bin number
for d in range(self.domain.state_space_dims):
s[d] = bin2state(
s[d], self.bins_per_dim[d], self.domain.statespace_limits[d, :]
)
if len(self.domain.continuous_dims) == 0:
s = s.astype(int)
return s
def stateInTheMiddleOfGrid(self, s):
"""
Accepts a continuous state *s*, bins it into the discretized domain,
and returns the state of the nearest gridpoint.
Essentially, we snap *s* to the nearest gridpoint and return that
gridpoint state.
        For continuous MDPs this plays a major role in improving the speed
through caching of next samples.
:param s: The given state
:return: The nearest state *s* which is captured by the discretization.
"""
s_normalized = s.copy()
for d in range(self.domain.state_space_dims):
s_normalized[d] = closestDiscretization(
s[d], self.bins_per_dim[d], self.domain.statespace_limits[d, :]
)
return s_normalized
def episode_terminated(self):
pass
def feature_learning_rate(self):
"""
:return: An array or scalar used to adapt the learning rate of each
feature individually.
"""
return 1.0
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in list(self.__dict__.items()):
if k == "logger":
continue
setattr(result, k, deepcopy(v, memo))
return result
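

# A small self-contained sketch of two mechanics described in the docstrings
# above: (1) the "copy-paste" construction used by ``phi_sa`` and (2) the
# uniform binning used by ``bin_state``. The numbers are made-up examples.
if __name__ == "__main__":
    # (1) copy-paste feature vector: |A| = 3 actions, 4 features, chosen action a = 1.
    # Only the block belonging to the chosen action stays non-zero.
    phi_s = np.array([1.0, 0.0, 0.5, 0.0])
    num_actions, a = 3, 1
    blocks = np.zeros((num_actions, phi_s.size))
    blocks[a] = phi_s
    print(blocks.reshape(-1))  # zeros everywhere except the block for action 1

    # (2) uniform binning of a 1-d state in [0, 1) with 4 bins, mirroring bin_state().
    limits = np.array([[0.0, 1.0]])
    bins_per_dim = np.array([4])
    s = np.array([0.62])
    bs = ((s - limits[:, 0]) * bins_per_dim / (limits[:, 1] - limits[:, 0])).astype("uint32")
    print(bs)  # [2]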
|
# -*- coding: utf-8 -*-
#
# Copyright 2017-2021 - Swiss Data Science Center (SDSC)
# A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and
# Eidgenössische Technische Hochschule Zürich (ETHZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Renku save commands."""
from functools import reduce
from uuid import uuid4
import git
from renku.core import errors
from renku.core.management import LocalClient
from renku.core.management.command_builder import inject
from renku.core.management.command_builder.command import Command
from renku.core.utils import communication
from renku.core.utils.git import add_to_git
from renku.core.utils.scm import git_unicode_unescape
@inject.autoparams()
def _save_and_push(client: LocalClient, message=None, remote=None, paths=None):
"""Save and push local changes."""
client.setup_credential_helper()
if not paths:
paths = client.dirty_paths
else:
staged = client.repo.index.diff("HEAD")
if staged:
staged = {git_unicode_unescape(p.a_path) for p in staged}
not_passed = staged - set(paths)
if not_passed:
raise errors.RenkuSaveError(
"These files are in the git staging area, but weren't passed to renku save. Unstage them or pass"
+ " them explicitly: \n"
+ "\n".join(not_passed)
)
if paths:
client.track_paths_in_storage(*paths)
return repo_sync(client.repo, message, remote, paths)
def save_and_push_command():
"""Command to save and push."""
return Command().command(_save_and_push)
def repo_sync(repo, message=None, remote=None, paths=None):
"""Commit and push paths."""
origin = None
saved_paths = []
# get branch that's pushed
if repo.active_branch.tracking_branch():
ref = repo.active_branch.tracking_branch().name
pushed_branch = ref.split("/")[-1]
else:
pushed_branch = repo.active_branch.name
if remote:
# get/setup supplied remote for pushing
if repo.remotes:
existing = next((r for r in repo.remotes if r.url == remote), None)
if not existing:
existing = next((r for r in repo.remotes if r.name == remote), None)
origin = next((r for r in repo.remotes if r.name == "origin"), None)
if existing:
origin = existing
elif origin:
pushed_branch = uuid4().hex
origin = repo.create_remote(pushed_branch, remote)
if not origin:
origin = repo.create_remote("origin", remote)
elif not repo.active_branch.tracking_branch():
# No remote set on branch, push to available remote if only a single
# one is available
if len(repo.remotes) == 1:
origin = repo.remotes[0]
else:
raise errors.ConfigurationError("No remote has been set up for the current branch")
else:
# get remote that's set up to track the local branch
origin = repo.remotes[repo.active_branch.tracking_branch().remote_name]
if paths:
# commit uncommitted changes
try:
staged_files = (
{git_unicode_unescape(d.a_path) for d in repo.index.diff("HEAD")} if repo.head.is_valid() else set()
)
path_to_save = set(paths) - staged_files
if path_to_save:
add_to_git(repo.git, *path_to_save)
saved_paths = [d.b_path for d in repo.index.diff("HEAD")]
if not message:
# Show saved files in message
max_len = 100
message = "Saved changes to: "
paths_with_lens = reduce(
lambda c, x: c + [(x, c[-1][1] + len(x))], saved_paths, [(None, len(message))]
)[1:]
# limit first line to max_len characters
message += " ".join(p if l < max_len else "\n\t" + p for p, l in paths_with_lens)
repo.index.commit(message)
except git.exc.GitCommandError as e:
raise errors.GitError("Cannot commit changes") from e
try:
# NOTE: Push local changes to remote branch.
merge_conflict = False
if origin.refs and repo.active_branch.tracking_branch() and repo.active_branch.tracking_branch() in origin.refs:
origin.fetch()
try:
origin.pull(repo.active_branch)
except git.exc.GitCommandError:
# NOTE: Couldn't pull, probably due to conflicts, try a merge.
# NOTE: the error sadly doesn't tell any details.
unmerged_blobs = repo.index.unmerged_blobs().values()
conflicts = (stage != 0 for blobs in unmerged_blobs for stage, _ in blobs)
if any(conflicts):
merge_conflict = True
if communication.confirm(
"There were conflicts when updating the local data with remote changes,"
" do you want to resolve them (if not, a new remote branch will be created)?",
warning=True,
):
repo.git.mergetool("-g")
repo.git.commit("--no-edit")
merge_conflict = False
else:
repo.head.reset(index=True, working_tree=True)
else:
raise
result = None
failed_push = None
if not merge_conflict:
result = origin.push(repo.active_branch)
failed_push = [
push_info
for push_info in result
if push_info.flags & git.PushInfo.ERROR
or push_info.flags & git.PushInfo.REJECTED
or push_info.flags & git.PushInfo.REMOTE_REJECTED
or push_info.flags & git.PushInfo.REMOTE_FAILURE
]
if merge_conflict or (result and "[remote rejected] (pre-receive hook declined)" in result[0].summary):
# NOTE: Push to new remote branch if original one is protected and reset the cache.
old_pushed_branch = pushed_branch
old_active_branch = repo.active_branch
pushed_branch = uuid4().hex
try:
repo.create_head(pushed_branch)
result = repo.remote().push(pushed_branch)
failed_push = [
push_info
for push_info in result
if push_info.flags & git.PushInfo.ERROR
or push_info.flags & git.PushInfo.REJECTED
or push_info.flags & git.PushInfo.REMOTE_REJECTED
or push_info.flags & git.PushInfo.REMOTE_FAILURE
]
finally:
# Reset cache
repo.git.checkout(old_active_branch)
ref = f"{origin}/{old_pushed_branch}"
repo.index.reset(commit=ref, head=True, working_tree=True)
if result and failed_push:
# NOTE: Couldn't push for some reason
msg = "\n".join(info.summary for info in failed_push)
raise errors.GitError(f"Couldn't push changes. Reason:\n{msg}")
except git.exc.GitCommandError as e:
raise errors.GitError("Cannot push changes") from e
return saved_paths, pushed_branch
|
# Output C #defines for errors into wiredtiger.in and the associated error
# message code in strerror.c.
import re, textwrap
import api_data
from dist import compare_srcfile
# Update the #defines in the wiredtiger.in file.
tmp_file = '__tmp'
tfile = open(tmp_file, 'w')
skip = 0
for line in open('../src/include/wiredtiger.in', 'r'):
if not skip:
tfile.write(line)
if line.count('Error return section: END'):
tfile.write(line)
skip = 0
elif line.count('Error return section: BEGIN'):
tfile.write(' */\n')
skip = 1
# We don't want our error returns to conflict with any other
# package, so use an uncommon range, specifically, -31,800 to
# -31,999.
v = -31800
for err in api_data.errors:
if 'undoc' in err.flags:
tfile.write('/*! @cond internal */\n')
tfile.write('/*!%s.%s */\n' %
(('\n * ' if err.long_desc else ' ') +
err.desc[0].upper() + err.desc[1:],
''.join('\n * ' + l for l in textwrap.wrap(
textwrap.dedent(err.long_desc).strip(), 77)) +
'\n' if err.long_desc else ''))
tfile.write('#define\t%s\t%d\n' % (err.name, v))
v -= 1
if 'undoc' in err.flags:
tfile.write('/*! @endcond */\n')
tfile.write('/*\n')
tfile.close()
compare_srcfile(tmp_file, '../src/include/wiredtiger.in')
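
# For reference, each entry emitted above takes roughly this shape (the name and
# description are illustrative, not actual WiredTiger error codes):
#
#   /*! Example error description. */
#   #define WT_EXAMPLE_ERROR    -31800
#
# with the value decreasing by one per error so that all codes stay inside the
# reserved -31,800 .. -31,999 range noted in the comment above.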
# Output the wiredtiger_strerror code.
tmp_file = '__tmp'
tfile = open(tmp_file, 'w')
tfile.write('''/* DO NOT EDIT: automatically built by dist/api_err.py. */
#include "wt_internal.h"
/*
* wiredtiger_strerror --
* Return a string for any error value.
*/
const char *
wiredtiger_strerror(int error)
{
static char errbuf[64];
char *p;
if (error == 0)
return ("Successful return: 0");
switch (error) {
''')
for err in api_data.errors:
tfile.write('\tcase ' + err.name + ':\n')
tfile.write('\t\treturn ("' + err.name + ': ' + err.desc + '");\n')
tfile.write('''\
default:
if (error > 0 && (p = strerror(error)) != NULL)
return (p);
break;
}
/*
* !!!
* Not thread-safe, but this is never supposed to happen.
*/
(void)snprintf(errbuf, sizeof(errbuf), "Unknown error: %d", error);
return (errbuf);
}
''')
tfile.close()
compare_srcfile(tmp_file, '../src/conn/api_strerror.c')
# Update the error documentation block.
doc = '../src/docs/error-handling.dox'
tmp_file = '__tmp'
tfile = open(tmp_file, 'w')
skip = 0
for line in open(doc, 'r'):
if not skip:
tfile.write(line)
if line.count('IGNORE_BUILT_BY_API_ERR_END'):
tfile.write(line)
skip = 0
elif line.count('IGNORE_BUILT_BY_API_ERR_BEGIN'):
tfile.write('@endif\n\n')
skip = 1
for err in api_data.errors:
if 'undoc' in err.flags:
continue
tfile.write(
'@par <code>' + err.name.upper() + '</code>\n' +
" ".join(err.long_desc.split()) + '\n\n')
tfile.close()
compare_srcfile(tmp_file, doc)
|
'''
@author: mtmoncur
@Link: https://github.com/mtmoncur/deepracer_env
@License: MIT
'''
def reward_function(params):
"""
    Available options:
all_wheels_on_track (bool)
True if car is on track, False otherwise
x (float)
x coordinate in meters
y (float)
y coordinate in meters
distance_from_center (float)
distance from car center to track center in meters
is_left_of_center (bool)
        True if car is left of track center, False otherwise
heading (float)
range of [0,360), this is the angle in degrees between
the car's direction and the x-axis
progress (float)
range of [0,100], this is the percentage of the track completed
steps (int)
number of steps taken in the environment. This resets every time
a new episode begins, and currently the maximum episode length is 200
speed (float)
current speed of car in meters per second
steering_angle (float)
range of about [-30,30], this is the angle at which the wheels are
turning
track_width (float)
the track width in meters
"""
if params['all_wheels_on_track']:
return 1.0
else:
return 0.0
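

# A minimal sanity check of the reward function above, using a hypothetical
# params dict containing only the key this function actually reads.
if __name__ == "__main__":
    print(reward_function({'all_wheels_on_track': True}))   # 1.0
    print(reward_function({'all_wheels_on_track': False}))  # 0.0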
|
from sqlalchemy import Column, Table, Integer, String, create_engine
from sqlalchemy import ForeignKey
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, sessionmaker
Base = declarative_base()
Session = sessionmaker()
class Post(Base):
__tablename__ = 'post'
__table_args__ = {'sqlite_autoincrement': True}
post_id = Column(Integer, primary_key=True)
post_value = Column(String)
author_id = Column(Integer, ForeignKey('author.author_id'))
class Author(Base):
__tablename__ = 'author'
__table_args__ = {'sqlite_autoincrement': True}
author_id = Column(Integer, primary_key=True)
author_name = Column(String)
posts = relationship(Post, backref='author')
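

# A minimal usage sketch (assumption: an in-memory SQLite database is enough for
# demonstration). It binds the Base/Session declared above to an engine and
# exercises the Post <-> Author relationship via the 'author' backref.
if __name__ == "__main__":
    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    Session.configure(bind=engine)
    session = Session()

    author = Author(author_name='Ada')
    author.posts.append(Post(post_value='Hello, world'))
    session.add(author)
    session.commit()

    post = session.query(Post).first()
    print(post.post_value, '-', post.author.author_name)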
|
#!/usr/bin/env python3
import sys
print("Hello world!")
sys.exit(0)
|
import matplotlib.pyplot as plt
import numpy as np
import os
plt.style.use(['science','ieee','grid', 'no-latex'])
def savefig(x, y, dirname="./fig", name="test.svg"):
fig, ax = plt.subplots( nrows=1, ncols=1 ) # create figure & 1 axis
ax.plot(x, y)
    os.makedirs(dirname, exist_ok=True)  # make sure the output directory exists
    filepath = os.path.join(dirname, name)
    fig.savefig(filepath, facecolor='w', edgecolor='none')  # save the figure to file
plt.close(fig)
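

# A minimal usage sketch (assumption: the SciencePlots package that provides the
# 'science' and 'ieee' styles is installed; the plt.style.use call above already
# requires it).
if __name__ == "__main__":
    x = np.linspace(0, 2 * np.pi, 200)
    savefig(x, np.sin(x), dirname="./fig", name="sine.svg")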
|
import os
from ctypes import *
import platform
STATUS_CALLBACK = CFUNCTYPE(None, c_int, c_char_p)
STATUS_CALLBACK_COUNT = CFUNCTYPE(None, c_int, c_int, POINTER(c_uint32))
class ASG8005:
_instance = None
m_CountCount = 0
py_callback = {}
def __new__(cls, *args, **kw):
if cls._instance is None:
            cls._instance = object.__new__(cls)
return cls._instance
def __init__(self):
wd = os.path.abspath(os.path.dirname(__file__))
arch = platform.architecture()[0]
dll_path = ""
if arch == '64bit':
dll_path = os.path.join(wd, 'ASGDLL_x64.dll')
print("--- USE ASGDLL_x64.dll ---")
else:
dll_path = os.path.join(wd, 'ASGDLL_x86.dll')
print("--- USE ASGDLL_x86.dll ---")
if os.path.isfile(dll_path):
self.__dll = CDLL(dll_path)
else:
raise Exception("can not found dll")
# dev
self.__dll.open.restype = c_int
self.__dll.close_usb.restype = c_int
self.__dll.monitorDeviceStatus.restype = c_int
# pub
self.__dll.setCallbackFunc.restype = c_int
self.__dll.setCallbackFunc_int.restype = c_int
self.__dll.getDllInfomation.restype = c_char_p
self.__dll.start_download.restype = c_int
self.__dll.stop_download.restype = c_int
# asg
self.__dll.pulse_download.argtypes = [POINTER(POINTER(c_double)), POINTER(c_int)]
self.__dll.pulse_download.restype = c_int
self.__dll.trigger_download.restype = c_int
# count
self.__dll.set_counter_repeat.restype = c_int
self.__dll.set_counter_repeat.argtypes = [c_int]
self.__dll.isCountContinu.restype = c_int
self.__dll.isCountContinu.argtypes = [c_int]
self.__dll.countTimeStep.restype = c_int
self.__dll.countTimeStep.argtypes = [c_int]
self.__dll.countConfig.argtypes = [c_int, c_int]
self.__dll.counter_download.restype = c_int
self.__dll.counter_download.argtypes = [POINTER(c_int), c_int]
# dev
def connect(self):
return self.__dll.open()
def close_device(self):
return self.__dll.close_usb()
def get_monitor_status(self):
return self.__dll.monitorDeviceStatus()
# pub
def set_callback(self, func):
if type(func) == STATUS_CALLBACK:
return self.__dll.setCallbackFunc(func)
else:
return False
def set_callback_count(self, func):
if type(func) == STATUS_CALLBACK_COUNT:
return self.__dll.setCallbackFunc_int(func)
else:
return False
def get_device_info(self):
return str(self.__dll.getDllInfomation())
def start(self, count=1):
return self.__dll.start_download(count)
def stop(self):
return self.__dll.stop_download()
# asg
def checkdata(self, asg_data, length):
channelLen = [0, 0, 0, 0, 0, 0, 0, 0]
for i in length:
if i % 2 != 0 or i < 2:
return bool(False)
for i in range(len(asg_data)):
if len(asg_data[i]) != length[i]:
return bool(False)
if len(asg_data[i]) == 2:
if ((asg_data[i][0] < 7.5) and (asg_data[i][0] != 0)) or (asg_data[i][0] > 26000000000.0) \
or ((asg_data[i][1] < 10) and (asg_data[i][1] != 0)) or (asg_data[i][1] > 26000000000.0):
return bool(False)
continue
for j in range(0, len(asg_data[i]) - 1, 2):
aint = int(asg_data[i][j] * 1000000)
bint = int(asg_data[i][j + 1] * 1000000)
afloat = int(asg_data[i][j] * 100) * 10000
bfloat = int(asg_data[i][j + 1] * 100) * 10000
if (aint != afloat or bint != bfloat) or (aint % 50000 != 0 or bint % 50000 != 0):
return bool(False)
if j == 0:
if ((asg_data[i][0] < 7.5) and (asg_data[i][0] != 0)) or (asg_data[i][0] > 26000000000.0) or (
asg_data[i][1] < 10) or (asg_data[i][1] > 26000000000.0):
return bool(False)
elif j == len(asg_data[i]) - 2:
if (asg_data[i][j] < 7.5) or (asg_data[i][j] > 26000000000.0) or (
(asg_data[i][j + 1] < 10) and (asg_data[i][j + 1] != 0) or (
asg_data[i][j + 1] > 26000000000.0)):
return bool(False)
else:
if (asg_data[i][j] < 7.5) or (asg_data[i][j] > 26000000000.0) or (asg_data[i][j + 1] < 10) or (
asg_data[i][j + 1] > 26000000000.0):
return bool(False)
channelLen[i] += (asg_data[i][j] + asg_data[i][j + 1])
for i in range(8):
if channelLen[i] > 5200000000:
return bool(False)
return bool(True)
def download_ASG_pulse_data(self, asg_data, length):
if True != self.checkdata(asg_data, length):
exit(" ASG Data error !")
c_length = (c_int * 8)(*tuple(length))
        max_len = max(length)  # the widest channel determines the ctypes array width
        c_asg_data = (c_double * max_len * 8)(*(tuple(i) for i in asg_data))
c_asg_data = (POINTER(c_double) * len(c_asg_data))(*c_asg_data)
return self.__dll.pulse_download(c_asg_data, c_length)
def ASG_trigger_download(self):
return self.__dll.trigger_download()
# count
def ASG_set_counter_repeat(self, repeat):
repeat = repeat * 2
return self.__dll.set_counter_repeat(c_int(repeat))
def ASG_isCountContinu(self, isContinu):
return self.__dll.isCountContinu(c_int(isContinu))
def ASG_countTimeStep(self, timeStep):
return self.__dll.countTimeStep(c_int(timeStep))
def ASG_countConfig(self, isCountEnable, asgConfig=0xff):
return self.__dll.countConfig(c_int(asgConfig), c_int(isCountEnable))
def checkCountData(self, countData, length):
countLength = 0
for i in range(0, length, 2):
if (countData[i] < 20) or (countData[i] % 5 != 0) or (countData[i] != int(countData[i])):
return bool(False)
if (countData[i + 1] < 5) or (countData[i + 1] % 5 != 0) or (countData[i + 1] != int(countData[i + 1])):
return bool(False)
countLength += (countData[i] + countData[i + 1])
if countLength < 1500:
return bool(False)
return bool(True)
def ASG_counter_download(self, count_data, length):
if True != self.checkCountData(count_data, length):
exit(" Count Data error !")
        self.m_CountCount = 1
count_data = (c_int * len(count_data))(*tuple(count_data))
return self.__dll.counter_download(count_data, length)
|
"""
Sequential model
@author Aaric
@version 0.5.0-SNAPSHOT
"""
import matplotlib.pyplot as plt
import numpy as np
from keras.layers import Dense
from keras.models import Sequential
# Training data
x_data = np.random.random(100)
y_data_noise = np.random.normal(0, 0.01, x_data.shape)
y_data = x_data * 0.1 + 0.2 + y_data_noise
# Sequential model
model = Sequential()
# Add a fully connected (Dense) layer
model.add(Dense(units=1, input_dim=1))
# Mean squared error loss, SGD optimizer
model.compile(optimizer="sgd", loss="mse")
# Train for 3001 batches
for count in range(3001):
loss = model.train_on_batch(x_data, y_data)
if count % 500 == 0:
print("loss: {0}".format(loss))
# Print the learned weight and bias
w, b = model.layers[0].get_weights()
print("w: {0}, b: {1}".format(w, b))
# Predict on the training inputs
y_rst = model.predict(x_data)
# print("y_rst: {0}".format(y_rst))
# Visualization
plt.scatter(x_data, y_data)
plt.plot(x_data, y_rst, "red", lw=3)
plt.show()
|
# -*- coding: utf-8 -*-
"""
@date: 2020/10/19 7:39 PM
@file: __init__.py.py
@author: zj
@description:
"""
"""
See [Why torchvision doesn’t use opencv?](https://discuss.pytorch.org/t/why-torchvision-doesnt-use-opencv/24311)
Uses [jbohnslav/opencv_transforms](https://github.com/jbohnslav/opencv_transforms) in place of the torchvision transforms implementation
"""
from .compose import Compose
from .to_pil_image import ToPILImage
from .to_tensor import ToTensor
from .normalize import Normalize
from .resize import Resize
from .random_rotation import RandomRotation
from .random_erasing import RandomErasing
from .random_horizontal_flip import RandomHorizontalFlip
from .color_jitter import ColorJitter
from .scale_jitter import ScaleJitter
from .center_crop import CenterCrop
from .random_crop import RandomCrop
from .three_crop import ThreeCrop
|
import kopf
import riasc_operator.project # noqa: F401
import riasc_operator.time_sync # noqa: F401
def main():
kopf.configure(
verbose=True
)
kopf.run(
clusterwide=True,
liveness_endpoint='http://0.0.0.0:8080'
)
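

# Hypothetical entry point (assumption: this module is launched directly; a
# packaged operator would more likely expose main() via a console-script
# entry point instead).
if __name__ == '__main__':
    main()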
|
import cv2
import numpy as np
with open("yolov3.txt", 'r') as f:
classes = [line.strip() for line in f.readlines()]
colors = np.random.uniform(0, 255, size=(len(classes), 3))
net = cv2.dnn.readNet("yolov3.weights", "yolov3.cfg")
cap = cv2.VideoCapture(0)
scale = 0.00392  # ~1/255: rescales pixel values to [0, 1] for the network
conf_threshold = 0.5
nms_threshold = 0.4
def get_output_layers(net):
layer_names = net.getLayerNames()
output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
return output_layers
def create_bounding_boxes(outs,Width, Height):
boxes = []
class_ids = []
confidences = []
for out in outs:
for detection in out:
scores = detection[5:]
class_id = np.argmax(scores)
confidence = scores[class_id]
if confidence > conf_threshold:
center_x = int(detection[0] * Width)
center_y = int(detection[1] * Height)
w = int(detection[2] * Width)
h = int(detection[3] * Height)
x = center_x - w / 2
y = center_y - h / 2
class_ids.append(class_id)
confidences.append(float(confidence))
boxes.append([x, y, w, h])
return boxes, class_ids, confidences
def draw_bounding_boxes(img, class_id, confidence, box):
x = round(box[0])
y = round(box[1])
w = round(box[2])
h =round(box[3])
x_plus_w = x+w
y_plus_h = y+h
label = str(classes[class_id])
color = colors[class_id]
cv2.rectangle(img, (x,y), (x_plus_w,y_plus_h), color, 2)
cv2.putText(img, label, (x-10,y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
def Yolo(image):
try:
Width = image.shape[1]
Height = image.shape[0]
blob = cv2.dnn.blobFromImage(image, scale, (416,416), (0,0,0), True, crop=False)
net.setInput(blob)
outs = net.forward(get_output_layers(net))
boxes, class_ids, confidences = create_bounding_boxes(outs, Width, Height)
indices = cv2.dnn.NMSBoxes(boxes, confidences, conf_threshold, nms_threshold)
        for i in indices:
            # cv2.dnn.NMSBoxes returns nested indices in older OpenCV versions
            # and a flat array in newer ones; handle both.
            i = int(i[0]) if not np.isscalar(i) else int(i)
box = boxes[i]
draw_bounding_boxes(image, class_ids[i], confidences[i], box)
except Exception as e:
print('Failed dnn: '+ str(e))
return image
while True:
ret, frame = cap.read()
image = Yolo(frame)
cv2.imshow('frame',image)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
|
from django.db import models
import datetime
import django
def accept():
return(
(True, 'Sim'),
(False, 'Nao'),
)
# Create your models here.
class Linguagem(models.Model):
lingua = models.CharField('Lingua',max_length=30, default="Português",unique=True)
def __str__(self):
return self.lingua
class Jornal(models.Model):
nome = models.CharField('Nome do Jornal', max_length=50,unique=True)
email= models.CharField('e-Mail do Jornal', max_length=100)
site=models.CharField('site do Jornal', max_length=100)
def __str__(self):
        return self.nome
class Categoria(models.Model):
categoria= models.CharField("Categoria",max_length=50,unique=True)
def __str__(self):
return self.categoria
class Tema(models.Model):
tema=models.CharField("Tema",max_length=60,unique=True)
def __str__(self):
return self.tema
class Envolvidos(models.Model):
nome=models.CharField("Nome",max_length=200,unique=True)
def __str__(self):
return self.nome
class Tipo_midia(models.Model):
tipo = models.CharField('Tipo de Mídia',max_length=254,help_text="Nome da produção", unique=True)
class Formato(models.Model):
formato=models.CharField("Formato",max_length=6,unique=True)
midia = models.ForeignKey(Tipo_midia, on_delete=models.PROTECT)
class Producao(models.Model):
titulo=models.CharField('Título',max_length=254,help_text="Nome da produção",unique=True)
descricao=models.TextField('Descrição', blank=True)
data = models.DateField('Data do Documento', default=django.utils.timezone.now, help_text="Use formato dd/mm/AAAA")
accepted = models.BooleanField('Accept file', choices=accept(), default=False)
file = models.FileField(upload_to='producoes/', blank=True) #, validators=[validate_producao]
url = models.URLField('URL para Documento', blank=True)
formato = models.ForeignKey(Formato, on_delete=models.PROTECT)
jornal = models.ForeignKey(Jornal, on_delete=models.PROTECT,blank=True)
linguagem = models.ForeignKey(Linguagem,on_delete= models.PROTECT)
categoria = models.ForeignKey(Categoria,on_delete= models.SET('Não categorizado'))
def aprove(self):
self.accepted=True
def __str__(self):
return self.titulo
class Tema_producao(models.Model):
tema = models.ForeignKey(Tema, on_delete=models.PROTECT)
producao = models.ForeignKey(Producao, on_delete=models.PROTECT)
class Envolvidos_producao(models.Model):
role = models.CharField('Roles',max_length=300,help_text="Papeis no desenvolvimento da produção")
producao = models.ForeignKey(Producao, on_delete=models.PROTECT)
envolvido = models.ForeignKey(Envolvidos, on_delete=models.PROTECT)
|
import h5py
import pickle
import numpy as np
from numba import jit
from torch.utils.data import Dataset
class BindspaceSingleProbeDataset(Dataset):
""" Given an probe index, return the embedding of all / some kmers in that probe
(this step is transformer dependent)
h5_filepath (string): path to the h5 file storing the encoded probes as ints
index_to_code_filepath (string): path to the pkl file that stores the encoder / decoder
dicts for the probes.
"""
def __init__(self, h5_filepath, dataset='training', transform=None):
path_dict = {'training': ('/train/data/train_data','/train/labels/train_labels'),
'validation': ('/valid/data/valid_data','/valid/labels/valid_labels')}
self.data_path, self.label_path = path_dict[dataset]
self.embedding_dims = 300
self.h5f = h5py.File(h5_filepath, 'r', libver='latest', swmr=True)
self.num_peaks, self.probes_per_peak, self.kmers_per_probe = self.h5f[self.data_path].shape
# pass through index_to_code filepath here so transformer can get what we need
self.transform = transform
TF_overlaps = [s.encode('utf-8') for s in ["CEBPB","CEBPG", "CREB3L1", "CTCF",
"CUX1","ELK1","ETV1","FOXJ2","KLF13",
"KLF16","MAFK","MAX","MGA","NR2C2",
"NR2F1","NR2F6","NRF1","PKNOX1","ZNF143"]]
TF_colnames = self.h5f[self.label_path].attrs['column_names']
self.TF_mask_array = np.array([n in TF_overlaps for n in TF_colnames])
def __getitem__(self, index):
peak = index // self.probes_per_peak
        probe = index % self.probes_per_peak
features = self.h5f[self.data_path][peak,probe]
labels = self.h5f[self.label_path][peak]
if self.transform is not None:
features = self.transform(features)
labels = labels[self.TF_mask_array]
return features, labels
def __len__(self):
return self.num_peaks * self.probes_per_peak
def close(self):
self.h5f.close()
class ProbeDecodingTransformer(object):
""" Decode the probes into the not-quite average of the embeddings of the probe """
def __init__(self, *args, **kwargs):
self.decode_to_vec = kwargs['decoder']
self.encode_to_kmer = kwargs['encoder']
self.embedding_dim = kwargs['dim']
def __call__(self, probe):
return self.decode_probe(probe)
@jit
    def decode_probe(self, probe):
        ''' Each element in the probe 1-D array of ints encodes an 8-mer that
        matches 16 different WC kmers, and each of those has a code in BindSpace,
        so the placement of one probe (row) aggregates those kmer codes. '''
        probe_ints = probe.astype(int)
        p = max(probe_ints.shape)
        point = np.zeros((p, self.embedding_dim))
for i,elem in enumerate(probe_ints):
wc_kmers = self.encode_to_kmer[elem]
for kmer in wc_kmers:
point[i,:] += self.decode_to_vec[kmer]
point[i,:] = point[i,:] * (1 / np.sqrt(len(wc_kmers)))
        return np.sum(point, axis=0) * (1 / np.sqrt(p))  # aggregate kmer rows into one probe vector
class BindspaceProbeDataset(Dataset):
""" Operate on the pseudo-probes from the K562 dataset. """
def __init__(self, h5_filepath, dataset='training', transform=None):
path_dict = {'training': ('/data/training/train_data','/labels/training/train_labels'),
'validation': ('/data/validation/valid_data','/labels/validation/valid_labels')}
self.data_path, self.label_path = path_dict[dataset]
self.embedding_dims = 300
self.h5f = h5py.File(h5_filepath, 'r', libver='latest', swmr=True)
self.num_peaks, self.rasterized_length = self.h5f[self.data_path].shape
self.probes_per_peak = self.rasterized_length // self.embedding_dims
self.num_entries = self.num_peaks * self.probes_per_peak
self.transform = transform
TF_overlaps = [s.encode('utf-8') for s in ["CEBPB","CEBPG", "CREB3L1", "CTCF",
"CUX1","ELK1","ETV1","FOXJ2","KLF13",
"KLF16","MAFK","MAX","MGA","NR2C2",
"NR2F1","NR2F6","NRF1","PKNOX1","ZNF143"]]
TF_colnames = self.h5f[self.label_path].attrs['column_names']
self.TF_mask_array = np.array([n in TF_overlaps for n in TF_colnames])
def __getitem__(self, index):
peak = index // self.probes_per_peak
        probe = index % self.probes_per_peak
start = probe * self.embedding_dims
stop = (probe + 1) * self.embedding_dims
features = self.h5f[self.data_path][peak][start:stop]
labels = self.h5f[self.label_path][peak]
if self.transform is not None:
features = self.transform(features)
labels = labels[self.TF_mask_array]
return features, labels
def __len__(self):
return self.num_entries
def close(self):
self.h5f.close()
class Embedded_k562_ATAC_train_dataset(Dataset):
""" Load up Han's embedded k562 ATAC data for training """
def __init__(self, h5_filepath, transform=None):
self.embedding_dims = 300
self.h5f = h5py.File(h5_filepath, 'r', libver='latest', swmr=True)
self.num_entries, self.rasterized_length = self.h5f['/data/training/train_data'].shape
self.transform = transform
TF_overlaps = [s.encode('utf-8') for s in ["CEBPB","CEBPG", "CREB3L1", "CTCF",
"CUX1","ELK1","ETV1","FOXJ2","KLF13",
"KLF16","MAFK","MAX","MGA","NR2C2",
"NR2F1","NR2F6","NRF1","PKNOX1","ZNF143"]]
TF_colnames = self.h5f['/labels/training/train_labels'].attrs['column_names']
self.TF_mask_array = np.array([n in TF_overlaps for n in TF_colnames])
def __getitem__(self, index):
features = self.h5f['/data/training/train_data'][index]
labels = self.h5f['/labels/training/train_labels'][index]
if self.transform is not None:
features = self.transform(features)
labels = labels[self.TF_mask_array]
return features, labels
def __len__(self):
return self.num_entries
def close(self):
self.h5f.close()
class Embedded_k562_ATAC_validation_dataset(Dataset):
""" Load up Han's embedded k562 ATAC data for validation """
def __init__(self, h5_filepath, transform=None, TF_overlaps=None):
self.embedding_dims = 300
self.h5f = h5py.File(h5_filepath, 'r', libver='latest', swmr=True)
self.num_entries, self.rasterized_length = self.h5f['/data/validation/valid_data'].shape
self.transform = transform
if not TF_overlaps:
TF_overlaps = [s.encode('utf-8') for s in ["CEBPB","CEBPG", "CREB3L1", "CTCF",
"CUX1","ELK1","ETV1","FOXJ2","KLF13",
"KLF16","MAFK","MAX","MGA","NR2C2",
"NR2F1","NR2F6","NRF1","PKNOX1","ZNF143"]]
else:
TF_overlaps = [s.encode('utf-8') for s in TF_overlaps]
TF_colnames = self.h5f['/labels/training/train_labels'].attrs['column_names']
self.TF_mask_array = np.array([n in TF_overlaps for n in TF_colnames])
def __getitem__(self, index):
features = self.h5f['/data/validation/valid_data'][index]
labels = self.h5f['/labels/validation/valid_labels'][index]
if self.transform is not None:
features = self.transform(features)
labels = labels[self.TF_mask_array]
return features, labels
def __len__(self):
return self.num_entries
def close(self):
self.h5f.close()
class ATAC_Train_Dataset(Dataset):
""" Load the training data set. Multiple workers need to use
forked processes, so stateful stuff from the input is not passed on (maybe??)"""
def __init__(self, h5_filepath, transform=None):
self.h5f = h5py.File(h5_filepath, 'r', libver='latest', swmr=True)
self.num_entries = self.h5f['/data/train_in'].shape[0]
self.transform = transform
def __getitem__(self, index):
features = self.h5f['/data/train_in'][index]
label = self.h5f['/labels/train_out'][index]
if self.transform is not None:
features = self.transform(features)
return features, label
def __len__(self):
return self.num_entries
def close(self):
self.h5f.close()
class ATAC_Test_Dataset(Dataset):
""" Load the test data set. """
def __init__(self, h5_filepath, transform=None):
self.h5f = h5py.File(h5_filepath, 'r', libver='latest', swmr=True)
self.num_entries = self.h5f["/data/test_in"].shape[0]
self.transform = transform
def __getitem__(self, index):
features = self.h5f["/data/test_in"][index]
label = self.h5f["/labels/test_out"][index]
if self.transform is not None:
features = self.transform(features)
return features, label
def __len__(self):
return self.num_entries
def close(self):
self.h5f.close()
class ATAC_Valid_Dataset(Dataset):
""" Load the test data set. """
def __init__(self, h5_filepath, transform=None):
self.h5f = h5py.File(h5_filepath, 'r', libver='latest', swmr=True)
self.num_entries = self.h5f["/data/valid_in"].shape[0]
self.transform = transform
def __getitem__(self, index):
features = self.h5f["/data/valid_in"][index]
label = self.h5f["/labels/valid_out"][index]
if self.transform is not None:
features = self.transform(features)
return features, label
def __len__(self):
return self.num_entries
def close(self):
self.h5f.close()
class DNase_Train_Dataset(Dataset):
def __init__(self, h5_filepath, transform=None):
self.h5f = h5py.File(h5_filepath, 'r')
self.num_entries = self.h5f['/train_in'].shape[0]
self.transform = transform
def __getitem__(self, index):
features = self.h5f['/train_in'][index]
label = self.h5f['/train_out'][index]
if self.transform is not None:
features = self.transform(features)
return features, label
def __len__(self):
return self.num_entries
def close(self):
self.h5f.close()
class DNase_Valid_Dataset(Dataset):
def __init__(self, h5_filepath, transform=None):
self.h5f = h5py.File(h5_filepath, 'r')
self.num_entries = self.h5f['/valid_in'].shape[0]
self.transform = transform
def __getitem__(self, index):
features = self.h5f['/valid_in'][index]
label = self.h5f['/valid_out'][index]
if self.transform is not None:
features = self.transform(features)
return features, label
def __len__(self):
return self.num_entries
def close(self):
self.h5f.close()
class ProbeReshapeTransformer(object):
""" Reshape the 300 dimensional probe embedding in a PyTorch friendly
Tensor shape """
def __init__(self, *args, **kwargs):
self.probe_dim = 300
def __call__(self, probe):
return probe.reshape((1,self.probe_dim))
class EmbeddingReshapeTransformer(object):
""" Reshapes the rasterized embedded ATAC-seq windows using the sequence length
and dimensional embedding. """
def __init__(self, embedding_dim, sequence_length):
assert isinstance(embedding_dim, int)
assert isinstance(sequence_length, int)
self.embedding_dim = embedding_dim
self.sequence_length = sequence_length
def __call__(self, embedded_rasterized):
rasterized_length = max(embedded_rasterized.shape)
return embedded_rasterized.reshape((1,rasterized_length // self.embedding_dim, self.embedding_dim))
class SubsequenceTransformer(object):
""" extract and sub-sample a sequence of given length after
accounting for padding. """
def __init__(self, output_size):
assert isinstance(output_size, int)
self.output_size = output_size
def get_subsequence(self, sequence):
''' Helper function for subsampling '''
_, _, cols = sequence.shape
start = np.random.randint(0, cols - self.output_size) if cols - self.output_size > 0 else 0
end = start + self.output_size
subseq = sequence[:,:,start:end]
return subseq
def __call__(self, sequence):
''' Get the subsequences '''
assert(sequence.shape[-1] >= self.output_size)
# trim the padding from peaks if there is any
if -1.0 in sequence:
pad_start = np.argmin(sequence)
return self.get_subsequence(sequence[:,:,0:pad_start])
else:
return self.get_subsequence(sequence)
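

# A small self-contained sketch of the two shape-oriented transformers above,
# using random arrays in place of the HDF5 inputs (which are not available here).
if __name__ == "__main__":
    embedding_dim, seq_len = 300, 10
    rasterized = np.random.rand(embedding_dim * seq_len)

    reshaper = EmbeddingReshapeTransformer(embedding_dim, seq_len)
    print(reshaper(rasterized).shape)  # (1, 10, 300)

    subsampler = SubsequenceTransformer(output_size=128)
    padded = np.random.rand(1, 4, 256)
    print(subsampler(padded).shape)  # (1, 4, 128)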
|
from src.GraphInterface import GraphInterface
from src.Node import Node
class DiGraph(GraphInterface):
def __init__(self):
self.Nodes= {}
self.Edges= {}
self.mc = 0
def v_size(self) -> int:
        return len(self.Nodes)
def e_size(self) -> int:
        return sum(len(v) for v in self.Edges.values())
def get_mc(self) -> int:
return self.mc
def add_edge(self, id1: int, id2: int, weight: float) -> bool:
if id1 in self.Nodes.keys() and id2 in self.Nodes.keys():
self.Edges[id1][id2]=weight
self.mc = self.mc + 1
return True
else: return False
def add_node(self, node_id: int, pos: tuple = None) -> bool:
self.Nodes[node_id]=Node(node_id,pos)
self.Edges[node_id]={}
self.mc=self.mc+1
return True
def remove_node(self, node_id: int) -> bool:
        if node_id in self.Nodes:
del self.Nodes[node_id]
del self.Edges[node_id]
listToRemove=[]
for src, dest in self.Edges.items():
for key in dest.keys():
if key==node_id:
listToRemove.append((src,key))
for t in listToRemove:
self.remove_edge(t[0],t[1])
self.mc = self.mc + 1
return True
else: return False
def remove_edge(self, node_id1: int, node_id2: int) -> bool:
        if node_id1 in self.Edges and node_id2 in self.Edges[node_id1]:
del self.Edges[node_id1][node_id2]
self.mc = self.mc + 1
return True
else: return False
def __str__(self):
return f"Nodes:{self.Nodes}\nEdges:{self.Edges}"
|
from __future__ import division
import sys
import csv
import numpy as np
import pandas as pd
from pandas import DataFrame
from classifier import *
from sklearn.metrics import confusion_matrix, roc_curve, auc
import matplotlib.pyplot as plt
from sklearn import tree
def run_analysis(data_sets, labels):
print "ROC::run_analysis()"
#print_data(data_sets, labels)
pre_process = False
if(pre_process):
#pre-process data, incl. feature selection
feature_names = ['LIMIT_BAL', 'SEX', 'EDUCATION', 'MARRIAGE', 'AGE', 'PAY_0', 'PAY_2', 'PAY_3', 'PAY_4', 'PAY_5', 'PAY_6', 'BILL_AMT1', 'BILL_AMT2', 'BILL_AMT3', 'BILL_AMT4', 'BILL_AMT5', 'BILL_AMT6', 'PAY_AMT1', 'PAY_AMT2', 'PAY_AMT3', 'PAY_AMT4', 'PAY_AMT5', 'PAY_AMT6', 'RATIO_1', 'RATIO_2']
data_sets = feature_selection(data_sets)
else:
feature_names = ['LIMIT_BAL', 'SEX', 'EDUCATION', 'MARRIAGE', 'AGE', 'PAY_0', 'PAY_2', 'PAY_3', 'PAY_4', 'PAY_5', 'PAY_6', 'BILL_AMT1', 'BILL_AMT2', 'BILL_AMT3', 'BILL_AMT4', 'BILL_AMT5', 'BILL_AMT6', 'PAY_AMT1', 'PAY_AMT2', 'PAY_AMT3', 'PAY_AMT4', 'PAY_AMT5', 'PAY_AMT6']
#finish preprocessing
Train_data_sets = data_sets.head(15000)
Train_data_labels = labels.head(15000)
Test_data_sets = data_sets.tail(15000)
Test_data_labels = labels.tail(15000)
print_count = False
if (print_count):
s = Test_data_labels["Y"]
count_default = s.value_counts()
print count_default
Train_data_labels = np.ravel(Train_data_labels)
Test_data_labels = np.ravel(Test_data_labels)
#DT
DT_classifier = build_DT_classifier(Train_data_sets, Train_data_labels)
#print Train_data_sets.head(10)
DT_predicted = predict_test_data(Test_data_sets, DT_classifier)
DT_probas = DT_classifier.predict_proba(Test_data_sets)
print_tree = False
if(print_tree):
#feature_names = list(data_sets.columns.values)
tree.export_graphviz(DT_classifier, class_names = ["No Default", "Yes Default"], feature_names = feature_names, max_depth = 2, out_file='tree.dot')
#KNN
KNN_classifier = build_KNN_classifier(Train_data_sets, Train_data_labels)
KNN_predicted = predict_test_data(Test_data_sets, KNN_classifier)
knn_probas = KNN_classifier.predict_proba(Test_data_sets)
#LR
LR_classifier = build_LR_classifier(Train_data_sets, Train_data_labels)
LR_predicted = predict_test_data(Test_data_sets, LR_classifier)
LR_probas = LR_classifier.predict_proba(Test_data_sets)
#DA
DA_classifier = build_DA_classifier(Train_data_sets, Train_data_labels)
DA_predicted = predict_test_data(Test_data_sets, DA_classifier)
DA_probas = DA_classifier.predict_proba(Test_data_sets)
#NB
NB_classifier = build_NB_classifier(Train_data_sets, Train_data_labels)
NB_predicted = predict_test_data(Test_data_sets, NB_classifier)
NB_probas = NB_classifier.predict_proba(Test_data_sets)
print_error_rates = False
if(print_error_rates):
print_error_rate("KNN", KNN_predicted, Test_data_labels)
print_error_rate("LR", LR_predicted, Test_data_labels)
print_error_rate("DA", DA_predicted, Test_data_labels)
print_error_rate("DT", DT_predicted, Test_data_labels)
print_error_rate("NB", NB_predicted, Test_data_labels)
#ROC analysis
run_ROC_analysis = False
if(run_ROC_analysis):
build_roc_curve(Test_data_labels, knn_probas, LR_probas, DA_probas, DT_probas, NB_probas)
def feature_selection(data_sets):
print "ROC::feature_selection()"
data_sets["percent_max_Sept"] = (data_sets["X12"] / data_sets["X1"]) * 100
data_sets["percent_max_Aug"] = (data_sets["X13"] / data_sets["X1"]) * 100
pd.set_option('display.max_columns', None)
#print data_sets.head(5)
return data_sets
#print data_sets.head(10)
def cardinality(labels):
yes = 0
no = 0
for label in labels:
if (label == 0):
no += 1
else:
yes += 1
print "total yes: " + str(yes)
print "total no: " + str(no)
print "n: " + str(yes + no)
percent_default = yes / (yes+no)
print "percentage defaults: " + str(percent_default)
def build_roc_curve(labels, knn_probas, LR_probas, DA_probas, DT_probas, NB_probas):
knn_fpr, knn_tpr, knn_thresholds = roc_curve(labels, knn_probas[:, 1])
knn_roc_auc = auc(knn_fpr, knn_tpr)
knn_output=('KNN AUC = %0.4f'% knn_roc_auc)
print knn_output
LR_fpr, LR_tpr, LR_thresholds = roc_curve(labels, LR_probas[:, 1])
LR_roc_auc = auc(LR_fpr, LR_tpr)
LR_output=('LR AUC = %0.4f'% LR_roc_auc)
print LR_output
DA_fpr, DA_tpr, DA_thresholds = roc_curve(labels, DA_probas[:, 1])
DA_roc_auc = auc(DA_fpr, DA_tpr)
DA_output=('DA AUC = %0.4f'% DA_roc_auc)
print DA_output
DT_fpr, DT_tpr, DT_thresholds = roc_curve(labels, DT_probas[:, 1])
DT_roc_auc = auc(DT_fpr, DT_tpr)
DT_output=('DT AUC = %0.4f'% DT_roc_auc)
print DT_output
NB_fpr, NB_tpr, NB_thresholds = roc_curve(labels, NB_probas[:, 1])
NB_roc_auc = auc(NB_fpr, NB_tpr)
NB_output=('NB AUC = %0.4f'% NB_roc_auc)
print NB_output
plot_on = True
if(plot_on):
#setup plot
plt.plot(NB_fpr, NB_tpr, label='Naive Bayesian')
plt.plot(DA_fpr, DA_tpr, label='Discriminant Analysis')
plt.plot(LR_fpr, LR_tpr, label='LogRegression')
plt.plot(DT_fpr, DT_tpr, label='Classification tree')
plt.plot(knn_fpr, knn_tpr, label='KNN')
plt.axis([-.1, 1, 0, 1.1])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver Operating Characteristic')
plt.legend(loc="lower right")
plt.show()
def calc_confusion_matrix(model, predicted, labels):
cm = confusion_matrix(labels, predicted, labels = [0,1])
print model + " confusion_matrix: "
print cm
print "---"
def print_data(data_sets, labels):
pd.set_option('display.max_columns', None)
data_sets["Y"] = labels
print data_sets.tail(5)
def print_error_rate(model, predicted, labels):
error_rate = error_measure(predicted, labels)
print model + " error rate: ", error_rate
|
'''
run multiple tasks with a command
usage:
1. modify the function define_task()
2. call run_tasks()
'''
import os
import json
import subprocess
from glob import glob
from os.path import dirname
from time import sleep
used_devices = []
def exec_cmd(cmd):
r = os.popen(cmd)
msg = r.read()
r.close()
if 'json' in cmd:
return json.loads(msg)
else:
return msg
def get_device(msg):
for gpu in msg['gpus']:
index = gpu['index']
state = len(gpu['processes'])
if state == 0 and index not in used_devices:
return index
return -1
def run_one_task(cmd, device, log_file):
log_file_path = 'logs/{}.log'.format(log_file)
log_path = dirname(log_file_path)
if not os.path.exists(log_path):
os.mkdir(log_path)
complete_cmd = 'CUDA_VISIBLE_DEVICES={} nohup {} > logs/{}.log 2>&1 &'.format(device, cmd, log_file)
print(complete_cmd)
used_devices.append(device)
subprocess.Popen(complete_cmd, shell=True)
def run_tasks(task_pool, prefix='', sleep_time=1):
number = 0
while len(task_pool) > 0:
device = get_device(
exec_cmd('/home/huchi/anaconda3/envs/openprompt/bin/gpustat --json'))
current_task_name = task_pool[0].split()[-1].split('/')[-1]
if len(task_pool) == 0:
exit(0)
if device == -1:
print('GPUs are busy...')
sleep(sleep_time)
continue
elif not os.path.exists('logs/{}.log'.format(current_task_name)):
if len(task_pool) > 0:
run_one_task(task_pool[0], device, current_task_name)
number += 1
if 'search' not in task_pool[0]:
sleep(sleep_time)
task_pool.pop(0)
continue
else:
exit(0)
else:
task_pool.pop(0)
print('This task is done...')
def define_task():
tasks = []
base_cmd = '/home/huchi/anaconda3/envs/openprompt/bin/python -u experiments/cli.py '
for f in glob('experiments/*.yaml'):
cmd = base_cmd
cmd += '--config_yaml {}'.format(f)
tasks.append(cmd)
return tasks
tasks = define_task()
run_tasks(tasks, '', 1)
|
from django.urls import include, re_path
from softdelete.views import *
urlpatterns = [
re_path(r'^changeset/(?P<changeset_pk>\d+?)/undelete/$',
ChangeSetUpdate.as_view(),
name="softdelete.changeset.undelete"),
re_path(r'^changeset/(?P<changeset_pk>\d+?)/$',
ChangeSetDetail.as_view(),
name="softdelete.changeset.view"),
re_path(r'^changeset/$',
ChangeSetList.as_view(),
name="softdelete.changeset.list"),
]
import sys
if 'test' in sys.argv:
import django
from django.contrib import admin
admin.autodiscover()
if django.VERSION[0] >= 2:
from django.urls import path
urlpatterns.append(path('admin/', admin.site.urls))
urlpatterns.append(path('accounts/', include('django.contrib.auth.urls')))
else:
urlpatterns.append(re_path(r'^admin/', include(admin.site.urls)))
urlpatterns.append(re_path(r'^accounts/', include('django.contrib.auth.urls')))
|
####################
# ES-DOC CIM Questionnaire
# Copyright (c) 2017 ES-DOC. All rights reserved.
#
# University of Colorado, Boulder
# http://cires.colorado.edu/
#
# This project is distributed according to the terms of the MIT license [http://www.opensource.org/licenses/MIT].
####################
from django.conf import settings
from django.core.files.storage import FileSystemStorage
from django.core.exceptions import ValidationError as DjangoValidationError
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.db import models
from django.db.models.signals import post_delete
from django.forms import CharField
from jsonschema.exceptions import ValidationError as JSONValidationError
from jsonschema.validators import validate as json_validate
import contextlib
import json
import os
import re
import types
from Q.questionnaire.q_utils import Version, EnumeratedType, EnumeratedTypeList, sort_sequence_by_key
from Q.questionnaire.q_constants import *
##############################################################
# some clever ways of dealing w/ unsaved relationship fields #
##############################################################
@contextlib.contextmanager
def allow_unsaved_fk(model_class, field_names):
""""
temporarily allows the fk "model_class.field_name" to point to a model not yet saved in the db
that used to be the default behavior in Django <= 1.6
(see https://www.caktusgroup.com/blog/2015/07/28/using-unsaved-related-models-sample-data-django-18/)
"""
assert isinstance(field_names, list), "allow_unsaved_fk takes a list of fields, not a single field"
field_saved_values = {}
for field_name in field_names:
model_field = model_class._meta.get_field(field_name)
field_saved_values[model_field] = model_field.allow_unsaved_instance_assignment
model_field.allow_unsaved_instance_assignment = True
yield
for field, saved_value in field_saved_values.iteritems():
field.allow_unsaved_instance_assignment = saved_value
class QUnsavedManager(models.Manager):
"""
a manager to cope w/ UNSAVED models being used in m2m fields (actually, it is usually used w/ the reverse of fk fields)
(this is not meant to be possible in Django)
The manager accomplishes this by storing the would-be field content in an instance variable;
in the case of unsaved models, this is purely done to get around Django ickiness
in the case of saved models, this is done so that QuerySets are never cloned (which would overwrite in-progress data)
a side-effect of this technique is that the output of this manager is not chainable;
but the Q doesn't use standard Django methods for saving models (instead serializing from JSON), so I don't really care
"""
def get_cached_qs_name(self):
"""
overwrite this as needed for different types of managers
:return: a unique name to represent the cached queryset of saved & unsaved instances
"""
return "_cached_{0}".format(
self.field_name
)
def get_real_field_manager(self):
"""
overwrite this as needed for different types of managers
:return: the _real_ model manager used by this field
"""
field_name = self.field_name
return getattr(self.instance, field_name)
def count(self):
return len(self.get_query_set())
def all(self):
return self.get_query_set()
def get(self, *args, **kwargs):
filtered_qs = self.filter_potentially_unsaved(*args, **kwargs)
n_filtered_qs = len(filtered_qs)
if n_filtered_qs == 0:
msg = "{0} matching query does not exist".format(self.model)
raise ObjectDoesNotExist(msg)
elif n_filtered_qs > 1:
msg = "get() returned more than 1 {0} -- it returned {1}!".format(self.model, n_filtered_qs)
raise MultipleObjectsReturned(msg)
else:
return filtered_qs[0]
def order_by(self, key, **kwargs):
cached_qs = self.get_query_set()
sorted_qs = sorted(
cached_qs,
key=lambda o: getattr(o, key),
reverse=kwargs.pop("reverse", False),
)
return sorted_qs
def get_query_set(self):
instance = self.instance
cached_qs_name = self.get_cached_qs_name()
if not hasattr(instance, cached_qs_name):
field_manager = self.get_real_field_manager()
saved_qs = field_manager.all()
unsaved_qs = []
cached_qs = list(saved_qs) + unsaved_qs
setattr(instance, cached_qs_name, cached_qs)
return getattr(instance, cached_qs_name)
# unlike the above fns, I cannot simply overload the 'add' or 'remove' fns
# b/c managers are created dynamically in "django.db.models.fields.related.py#create_foreign_related_manager"
# Django is annoying
def add_potentially_unsaved(self, *objs):
instance = self.instance
cached_qs = self.get_query_set()
objs = list(objs)
unsaved_objs = [o for o in objs if
o.pk is None or instance.pk is None] # (unsaved can refer to either the models to add or the model to add to)
saved_objs = [o for o in objs if o not in unsaved_objs]
for obj in objs:
if not isinstance(obj, self.model):
raise TypeError("'%s' instance expected, got %r" % (self.model._meta.object_name, obj))
if obj not in cached_qs:
cached_qs.append(obj)
setattr(
instance,
self.get_cached_qs_name(),
cached_qs,
)
# even though I am not saving models w/ these custom managers in the normal Django way,
# I go ahead and add what I can using normal Django methods (just to avoid any confusion later)...
if saved_objs:
self.add(*saved_objs)
def remove_potentially_unsaved(self, *objs):
instance = self.instance
cached_qs = self.get_query_set()
objs = list(objs)
unsaved_objs = [o for o in objs if
o.pk is None or instance.pk is None] # (unsaved can refer to either the models to add or the model to add to)
saved_objs = [o for o in objs if o not in unsaved_objs]
for obj in objs:
cached_qs.remove(obj)
setattr(
instance,
self.get_cached_qs_name(),
cached_qs,
)
# even though I am not saving models w/ these custom managers in the normal Django way,
# I go ahead and remove what I can using normal Django methods (just to avoid any confusion later)...
if saved_objs:
self.remove(*saved_objs)
# and I have to define filter separately, b/c the parent 'filter' fn is used internally by other code
def filter_potentially_unsaved(self, *args, **kwargs):
cached_qs = self.get_query_set()
filtered_qs = filter(
lambda o: all([getattr(o, key) == value for key, value in kwargs.items()]),
cached_qs
)
return filtered_qs
class QUnsavedRelatedManager(QUnsavedManager):
use_for_related_fields = True
def get_cached_qs_name(self):
"""
overwritten from parent manager class
        :return: a unique name based on the related field name
"""
related_field = self.model.get_field(self.field_name).related
return "_cached_{0}".format(
related_field.name
)
def get_real_field_manager(self):
"""
overwritten from parent manager class
:return:
"""
related_field = self.model.get_field(self.field_name).related
return getattr(self.instance, related_field.name)
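# A rough usage sketch (the model and field names below are hypothetical; the exact
# wiring depends on how the Q's realization models attach these managers): the goal
# of the two manager classes above is that reverse-fk style access keeps working
# before either side has been saved, e.g.
#
#   parent = SomeParentRealization()                    # unsaved
#   child = SomeChildRealization(name="child")          # unsaved
#   parent.children.add_potentially_unsaved(child)      # no db access needed
#   parent.children.get(name="child")                   # -> child, from the cached qs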
#######################################
# the types of fields used by the CIM #
#######################################
class QPropertyType(EnumeratedType):
def __str__(self):
return "{0}".format(self.get_type())
QPropertyTypes = EnumeratedTypeList([
QPropertyType("ATOMIC", "Atomic"),
QPropertyType("RELATIONSHIP", "Relationship"),
QPropertyType("ENUMERATION", "Enumeration"),
])
class QAtomicType(EnumeratedType):
def __str__(self):
return "{0}".format(self.get_type())
# TODO: GET DATE/DATETIME/TIME TO RENDER NICELY IN BOOTSTRAP...
QAtomicTypes = EnumeratedTypeList([
QAtomicType("DEFAULT", "Character Field (default)"),
# QAtomicType("STRING", "Character Field (default)"),
QAtomicType("TEXT", "Text Field (large block of text as opposed to a small string)"),
QAtomicType("BOOLEAN", "Boolean Field"),
QAtomicType("INTEGER", "Integer Field"),
QAtomicType("DECIMAL", "Decimal Field"),
QAtomicType("URL", "URL Field"),
QAtomicType("EMAIL", "Email Field"),
QAtomicType("DATE", "Date Field"),
QAtomicType("DATETIME", "Date Time Field"),
QAtomicType("TIME", "Time Field"),
])
from django.forms.widgets import *
from djng.forms.widgets import *
ATOMIC_PROPERTY_MAP = {
# maps the above QAtomicTypes to their corresponding widget classes,
# to be used by "forms.forms_realizations.QPropertyRealizationForm#customize"
QAtomicTypes.DEFAULT.get_type(): [TextInput, {}],
QAtomicTypes.TEXT.get_type(): [Textarea, {"rows": 4}],
QAtomicTypes.BOOLEAN.get_type(): [CheckboxInput, {}],
QAtomicTypes.INTEGER.get_type(): [NumberInput, {}],
QAtomicTypes.DECIMAL.get_type(): [NumberInput, {}],
QAtomicTypes.URL.get_type(): [URLInput, {}],
QAtomicTypes.EMAIL.get_type(): [EmailInput, {}],
# TODO: GET THESE WORKING IN BOOTSTRAP / DJANGULAR
QAtomicTypes.DATE.get_type(): [TextInput, {}],
QAtomicTypes.DATETIME.get_type(): [TextInput, {}],
QAtomicTypes.TIME.get_type(): [TextInput, {}],
}
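# A minimal sketch of how the map above can be consumed (the form-customization hook
# named in the comment is assumed elsewhere, not shown here): look up the widget class
# and its extra attributes for a given atomic type and instantiate the widget.
#
#   widget_class, widget_attrs = ATOMIC_PROPERTY_MAP[QAtomicTypes.TEXT.get_type()]
#   widget = widget_class(attrs=widget_attrs)  # -> Textarea(attrs={"rows": 4})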
class QNillableType(EnumeratedType):
def __str__(self):
return self.get_name()
QNillableTypes = EnumeratedTypeList([
QNillableType(nil_reason[0].upper(), "{0}:{1}".format(NIL_PREFIX, nil_reason[0]), nil_reason[1])
for nil_reason in NIL_REASONS
])
##################################
# field for storing JSON content #
##################################
class QJSONField(models.TextField):
"""
encodes JSON in a text field
optionally validates against a JSON Schema
which gets passed in as a callable (so I can bind the schema at run-time rather than hard-coding it beforehand)
"""
def __init__(self, *args, **kwargs):
self.json_schema_fn = kwargs.pop("schema", None)
if self.json_schema_fn:
assert callable(self.json_schema_fn)
super(QJSONField, self).__init__(*args, **kwargs)
def to_python(self, value):
"""
db to code; text to JSON object
"""
if value is None:
return None
try:
# sometimes it's not _clean_ JSON,
# (for example, fixtures pollute these strings w/ unicode garbage)
# so clean it up here...
clean_value = re.sub(r"(u')(.*?)(')", r'"\2"', value)
json_content = json.loads(clean_value)
if self.json_schema_fn:
json_validate(json_content, self.json_schema_fn())
return json_content
except ValueError:
msg = "Malformed content used in {0}: '{1}'.".format(
self.__class__.__name__,
clean_value
)
raise DjangoValidationError(msg)
except JSONValidationError as e:
msg = "Content used in {0} does not conform to schema: {1}".format(
self.__class__.__name__,
e.message
)
raise DjangoValidationError(msg)
def get_prep_value(self, value):
"""
code to db; JSON to text
"""
if value is None:
return None
try:
if self.json_schema_fn:
json_validate(value, self.json_schema_fn())
return json.dumps(value)
except ValueError:
msg = "Malformed content used in {0}: '{1}'.".format(
self.__class__.__name__,
value
)
raise DjangoValidationError(msg)
except JSONValidationError as e:
msg = "Content used in {0} does not conform to schema: {1}".format(
self.__class__.__name__,
e.message
)
raise DjangoValidationError(msg)
def from_db_value(self, value, expression, connection, context):
"""
does the same thing as "to_python",
it's just called in different situations b/c of a quirk w/ Django 1.8
(see https://docs.djangoproject.com/en/1.8/howto/custom-model-fields/)
"""
return self.to_python(value)
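# A hypothetical model declaration using QJSONField (the model and schema below are
# illustrative only); the schema is passed as a callable so it is resolved when the
# field is cleaned rather than hard-coded at import time:
#
#   def get_my_schema():
#       return {"type": "object", "properties": {"name": {"type": "string"}}}
#
#   class MyModel(models.Model):
#       content = QJSONField(blank=True, null=True, schema=get_my_schema)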
######################
# enumeration fields #
######################
# this is mostly the same as a QJSONField
# w/ some additional fns displaying things nicely - and interactively - in the form
ENUMERATION_OTHER_PREFIX = "other"
ENUMERATION_OTHER_CHOICE = "---OTHER---"
ENUMERATION_OTHER_PLACEHOLDER = "Please enter a custom value"
ENUMERATION_OTHER_DOCUMENTATION = "<em>Select this option to add a custom value for this property.</em>"
from django.forms.fields import MultipleChoiceField
class QEnumerationFormField(MultipleChoiceField):
# TODO: I WOULD RATHER JUST RELY ON QEnumerationFormField BEING SETUP CORRECTLY BY QEnumerationField BELOW
# TODO: BUT THE CODE IN "QPropertyRealization.__init__" DOESN'T SEEM TO WORK,
# TODO: SO I UPDATE THE FORM FIELD DIRECTLY IN "QPropertyRealizationForm.customize"
# TODO: SEE THE COMMENTS THERE FOR MORE INFO
def __init__(self, *args, **kwargs):
is_multiple = kwargs.pop("is_multiple", False)
complete_choices = kwargs.pop("complete_choices", [])
choices = [(c.get("value"), c.get("value")) for c in complete_choices]
kwargs["choices"] = choices
super(QEnumerationFormField, self).__init__(*args, **kwargs)
self._complete_choices = complete_choices
self._is_multiple = is_multiple
@property
def is_multiple(self):
# (need to pass a string b/c this will be used as the argument to an NG directive)
return json.dumps(self._is_multiple)
@property
def complete_choices(self):
# (need to pass a string b/c this will be used as the argument to an NG directive)
return json.dumps(self._complete_choices)
class QEnumerationField(QJSONField):
def __init__(self, *args, **kwargs):
super(QEnumerationField, self).__init__(*args, **kwargs)
self._complete_choices = []
self._is_multiple = False
@property
def is_multiple(self):
return self._is_multiple
@is_multiple.setter
def is_multiple(self, is_multiple):
self._is_multiple = is_multiple
@property
def complete_choices(self):
return sort_sequence_by_key(
self._complete_choices,
"order"
)
@complete_choices.setter
def complete_choices(self, complete_choices):
self._complete_choices = complete_choices
def formfield(self, **kwargs):
new_kwargs = {
"label": self.verbose_name,
"is_multiple": self.is_multiple,
"complete_choices": self.complete_choices,
}
new_kwargs.update(kwargs)
return QEnumerationFormField(**new_kwargs)
###############
# file fields #
###############
class OverwriteStorage(FileSystemStorage):
def get_available_name(self, name):
"""Returns a filename that's free on the target storage system, and
available for new content to be written to.
Found at http://djangosnippets.org/snippets/976/
This file storage solves overwrite on upload problem. Another
proposed solution was to override the save method on the model
like so (from https://code.djangoproject.com/ticket/11663):
def save(self, *args, **kwargs):
try:
this = MyModelName.objects.get(id=self.id)
if this.MyImageFieldName != self.MyImageFieldName:
this.MyImageFieldName.delete()
except: pass
super(MyModelName, self).save(*args, **kwargs)
"""
# If the filename already exists, remove it as if it was a true file system
if self.exists(name):
file_path = os.path.join(settings.MEDIA_ROOT, name)
os.remove(file_path)
return name
class QFileField(models.FileField):
"""
just like a standard Django FileField,
except it uses the above OverwriteStorage class,
and it deletes the file when the corresponding class instance is deleted
(so long as no other class members are using it)
"""
default_help_text = "Note that files with the same names will be overwritten"
def __init__(self, *args, **kwargs):
"""
ensure that OverwriteStorage is used,
and provide help_text (if none was specified)
:param args:
:param kwargs:
:return:
"""
help_text = kwargs.pop("help_text", self.default_help_text)
kwargs.update({
"storage": OverwriteStorage(),
"help_text": help_text
})
super(QFileField, self).__init__(*args, **kwargs)
def contribute_to_class(self, cls, name, **kwargs):
"""
attach the "post_delete" signal of the model class
to the "delete_file" fn of the field class
:param cls:
:param name:
:return: None
"""
super(QFileField, self).contribute_to_class(cls, name, **kwargs)
post_delete.connect(self.delete_file, sender=cls)
def delete_file(self, sender, **kwargs):
"""
        delete the file iff no other class instances point to it
:param sender:
:return: None
"""
instance = kwargs.pop("instance")
instance_field_name = self.name
instance_field = getattr(instance, instance_field_name)
filter_parameters = {
instance_field_name: instance_field.name,
}
other_instances_with_same_file = sender.objects.filter(**filter_parameters)
if not len(other_instances_with_same_file):
# if there are no other instances w/ the same file...
# delete the file...
instance_field.delete(save=False) # save=False prevents model from re-saving itself
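# A hypothetical usage sketch (the model name is illustrative): because QFileField
# plugs in OverwriteStorage and hooks post_delete, re-uploading a file with the same
# name replaces the stored copy, and deleting the last model instance that references
# it removes the file from disk.
#
#   class MyDocument(models.Model):
#       upload = QFileField(upload_to="documents", blank=True)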
##################
# version fields #
##################
class QVersionFormField(CharField):
def clean(self, value):
        # check string format (only numbers and the '.' character)
if not re.match(r'^([0-9]\.?)+$', value):
msg = "Versions must be of the format 'major.minor.patch'"
raise DjangoValidationError(msg)
return value
class QVersionField(models.IntegerField):
# TODO: models w/ this field have to call refresh_from_db if set manually
# TODO: (ie: if set in tests)
def formfield(self, **kwargs):
default_kwargs = {
"form_class": QVersionFormField,
}
default_kwargs.update(kwargs)
return super(QVersionField, self).formfield(**default_kwargs)
def to_python(self, value):
"""
db to code; int to Version
"""
if isinstance(value, Version):
return value
if isinstance(value, basestring):
return Version(value)
if value is None:
return None
return Version(Version.int_to_string(value))
def get_prep_value(self, value):
"""
code to db; Version to int
"""
if isinstance(value, basestring):
return Version.string_to_int(value)
if value is None:
return None
return int(value)
def from_db_value(self, value, expression, connection, context):
"""
does the same thing as "to_python",
it's just called in different situations b/c of a quirk w/ Django 1.8
(see https://docs.djangoproject.com/en/1.8/howto/custom-model-fields/)
"""
return self.to_python(value)
def contribute_to_class(self, cls, name, **kwargs):
"""
adds "get/<field_name>_major/minor/patch" fns to the class
:param cls:
:param name:
:param kwargs:
:return:
"""
super(QVersionField, self).contribute_to_class(cls, name, **kwargs)
def _get_major(instance, field_name=name):
"""
notice how I pass the name of the field from the parent "contribute_to_class" fn;
this lets me access it from the instance
:param instance:
:param field_name:
:return:
"""
version_value = getattr(instance, field_name)
return version_value.major()
def _get_minor(instance, field_name=name):
"""
notice how I pass the name of the field from the parent "contribute_to_class" fn;
this lets me access it from the instance
:param instance:
:param field_name:
:return:
"""
version_value = getattr(instance, field_name)
return version_value.minor()
def _get_patch(instance, field_name=name):
"""
notice how I pass the name of the field from the parent "contribute_to_class" fn;
this lets me access it from the instance
:param instance:
:param field_name:
:return:
"""
version_value = getattr(instance, field_name)
return version_value.patch()
get_major_fn_name = u"get_{0}_major".format(name)
get_minor_fn_name = u"get_{0}_minor".format(name)
get_patch_fn_name = u"get_{0}_patch".format(name)
setattr(cls, get_major_fn_name, types.MethodType(_get_major, None, cls))
setattr(cls, get_minor_fn_name, types.MethodType(_get_minor, None, cls))
setattr(cls, get_patch_fn_name, types.MethodType(_get_patch, None, cls))
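# A hypothetical usage sketch (the model name is illustrative): contribute_to_class
# above attaches "get_<field_name>_major/minor/patch" helpers to the model class, so
# a model declaring `version = QVersionField()` would expose:
#
#   instance = MyVersionedModel.objects.create(version="1.2.3")
#   instance.refresh_from_db()    # per the TODO above, needed when the value was set manually
#   instance.get_version_major()  # -> 1
#   instance.get_version_minor()  # -> 2
#   instance.get_version_patch()  # -> 3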
|
import sys
from loguru import logger
from flexget import options
from flexget.event import event
from flexget.plugin import plugins
from flexget.terminal import console
logger = logger.bind(name='doc')
def trim(docstring):
if not docstring:
return ''
# Convert tabs to spaces (following the normal Python rules)
# and split into a list of lines:
lines = docstring.expandtabs().splitlines()
# Determine minimum indentation (first line doesn't count):
indent = sys.maxsize
for line in lines[1:]:
stripped = line.lstrip()
if stripped:
indent = min(indent, len(line) - len(stripped))
# Remove indentation (first line is special):
trimmed = [lines[0].strip()]
if indent < sys.maxsize:
for line in lines[1:]:
trimmed.append(line[indent:].rstrip())
# Strip off trailing and leading blank lines:
while trimmed and not trimmed[-1]:
trimmed.pop()
while trimmed and not trimmed[0]:
trimmed.pop(0)
# Return a single string:
return '\n'.join(trimmed)
def print_doc(manager, options):
plugin_name = options.doc
plugin = plugins.get(plugin_name, None)
if plugin:
if not plugin.instance.__doc__:
console('Plugin %s does not have documentation' % plugin_name)
else:
console('')
console(trim(plugin.instance.__doc__))
console('')
else:
console('Could not find plugin %s' % plugin_name)
@event('options.register')
def register_parser_arguments():
parser = options.register_command('doc', print_doc, help='display plugin documentation')
parser.add_argument('doc', metavar='<plugin name>', help='name of plugin to show docs for')
|
# -*- coding: utf-8 -*-
__version__ = "0.9.9.2"
from .orm import Model, SoftDeletes, Collection, accessor, mutator, scope
from .database_manager import DatabaseManager
from .query.expression import QueryExpression
from .schema import Schema
from .pagination import Paginator, LengthAwarePaginator
import pendulum
pendulum.FormattableMixing._to_string_format = "%Y-%m-%d %H:%M:%S"
|
from torch.nn.parameter import Parameter
import torch
from torch.autograd import Function
from torch import tensor, nn
import math
import torch.nn.functional as F
import time
def test(a,b,cmp,cname=None):
if cname is None: cname=cmp.__name__
assert cmp(a,b),f"{cname}:\n{a}\n{b}"
def near(a,b): return torch.allclose(a, b, rtol=1e-3, atol=1e-5)
def test_near(a,b): test(a,b,near)
class convolutionFunction(Function):
@staticmethod
def forward(context,input,weight,bias,padding,stride,mask):
start = time.time()
context.padding = padding
context.stride = stride
context.mask = mask
weight = weight*mask
N,C,h,w = input.shape
out_channels,_,hf,wf = weight.shape
output_size = (h-hf+2*padding)//stride + 1
unfolded_input = torch._C._nn.im2col(input, (hf,wf),(1,1),(padding,padding),(stride,stride))
unfolded_weight = weight.view(out_channels,-1)
out = unfolded_weight @ unfolded_input
out = out.view(N,out_channels,output_size,output_size)
out = out[:,:,:,:] + bias[None,:,None,None]
context.save_for_backward(input,weight,bias,unfolded_input)
end = time.time()
return out
@staticmethod
def backward(context,grad_output):
input,weight,bias,unfolded_input = context.saved_tensors
stride = context.stride
padding = context.padding
mask = context.mask
n,_,h,w = input.shape
f,_,k,_ = weight.shape
grad_bias = grad_output.sum((0,2,3))
X_col = unfolded_input.permute(1,0,2)
X_col = X_col.reshape(X_col.shape[0],-1)
dout_reshaped = grad_output.permute(1, 0, 2, 3).reshape(f, -1)
dW = dout_reshaped @ X_col.T
grad_weight = dW.view(weight.shape)
weight = weight*mask
weight = weight.reshape(f,-1)
unfolded_grad_output = grad_output.permute(1,0,2,3)
unfolded_grad_output = unfolded_grad_output.reshape(f,-1)
dx = (weight.T)@unfolded_grad_output
dx = dx.T
dx = dx.reshape(n,-1,dx.shape[1])
dx = dx.permute(0,2,1)
grad_input = torch._C._nn.col2im(dx,(h,w),(k,k),(1,1),(padding,padding),(stride,stride))
return grad_input,grad_weight,grad_bias,None,None,None
class myconv2d(nn.Conv2d):
def __init__(self,in_channels,out_channels,kernel_size,padding,stride,mask=1,*kargs,**kwargs):
        # forward the standard nn.Conv2d arguments by keyword; the mask is kept on this subclass only
        super(myconv2d, self).__init__(in_channels, out_channels, kernel_size, *kargs, stride=stride, padding=padding, **kwargs)
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.weight = Parameter(torch.Tensor(out_channels,in_channels,kernel_size,kernel_size).cuda()) #cuda
nn.init.kaiming_uniform_(self.weight, a=math.sqrt(3))
self.bias = Parameter(torch.zeros(out_channels).cuda())
self.padding = padding
self.stride = stride
self.mask = mask
def forward(self,input):
return convolutionFunction().apply(input, self.weight,self.bias,self.padding,self.stride,self.mask)
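# A minimal sanity check, not part of the original module: with an all-ones mask the
# custom autograd convolution above should match F.conv2d on the same weights. This
# assumes a CUDA device is available, since myconv2d allocates its parameters on the GPU.
if __name__ == "__main__":
    if torch.cuda.is_available():
        x = torch.randn(2, 3, 8, 8, device="cuda")
        mask = torch.ones(4, 3, 3, 3, device="cuda")
        conv = myconv2d(3, 4, 3, padding=1, stride=1, mask=mask)
        out = conv(x)
        ref = F.conv2d(x, conv.weight, conv.bias, stride=1, padding=1)
        test_near(out, ref)  # uses the allclose helper defined at the top of this file
        print("masked convolution matches F.conv2d:", tuple(out.shape))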
|
from dataclasses import dataclass
from typing import Dict, Optional, Tuple
import torch
from transformers.file_utils import ModelOutput
@dataclass
class ReOutput(ModelOutput):
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
entities: Optional[Dict] = None
relations: Optional[Dict] = None
pred_relations: Optional[Dict] = None
|
from sqlalchemy import Table, Column, Integer, String, BigInteger, Boolean, ForeignKeyConstraint
from cassiopeia.data import Platform
from cassiopeia.dto.spectator import CurrentGameInfoDto
from cassiopeia.dto.common import DtoObject
from .common import metadata, SQLBaseObject, map_object, foreignkey_options
class CurrentGameParticipantDto(DtoObject):
pass
class SQLCurrentGameParticipant(SQLBaseObject):
_dto_type = CurrentGameParticipantDto
_table = Table("current_game_participant", metadata,
Column("current_game_platformId", String(7), primary_key=True),
Column("current_game_gameId", BigInteger, primary_key=True),
Column("participantId", Integer, primary_key=True),
Column("teamId", Integer),
Column("spell1Id", Integer),
Column("spell2Id", Integer),
Column("championId", Integer),
Column("profileIconId", Integer),
Column("summonerName", String(30)),
Column("bot", Boolean),
Column("summonerId", String(63)),
ForeignKeyConstraint(
["current_game_platformId", "current_game_gameId"],
["current_game.platformId", "current_game.gameId"]
))
map_object(SQLCurrentGameParticipant)
class CurrentGameBanDto(DtoObject):
pass
class SQLCurrentGameBan(SQLBaseObject):
_dto_type = CurrentGameBanDto
_table = Table("current_game_ban", metadata,
Column("current_game_platformId", String(7), primary_key=True),
Column("current_game_gameId", BigInteger, primary_key=True),
Column("pickTurn", Integer, primary_key=True),
Column("teamId", Integer),
Column("championId", Integer),
ForeignKeyConstraint(
["current_game_platformId", "current_game_gameId"],
["current_game.platformId", "current_game.gameId"],
**foreignkey_options
))
map_object(SQLCurrentGameBan)
class SQLCurrentGameInfo(SQLBaseObject):
_dto_type = CurrentGameInfoDto
_table = Table("current_game", metadata,
Column("platformId", String(7), primary_key=True),
Column("gameId", BigInteger, primary_key=True),
Column("gameStartTime", BigInteger),
Column("gameMode", String(10)),
Column("mapId", Integer),
Column("gameType", String(12)),
Column("gameQueueConfigId", Integer),
Column("gameLength", Integer),
Column("encryptionKey", String(32)),
Column("featured", Boolean),
Column("lastUpdate", BigInteger))
_relationships = {"bannedChampions": (SQLCurrentGameBan, {}), "participants": (SQLCurrentGameParticipant, {})}
def __init__(self, featured=False, **kwargs):
i = 1
for participant in kwargs["participants"]:
participant["participantId"] = i
i += 1
kwargs["encryptionKey"] = kwargs["observers"]["encryptionKey"]
kwargs["featured"] = featured
super().__init__(**kwargs)
def to_dto(self):
dto = super().to_dto()
dto["observers"] = {"encryptionKey": dto["encryptionKey"]}
dto["region"] = Platform(dto.pop("platformId")).region.value
return dto
map_object(SQLCurrentGameInfo)
|
"""
@file
@brief Exceptions raised during the installation of a module
"""
class MissingPackageOnPyPiException(Exception):
"""
    raised when a package is not found on PyPI
"""
pass
class MissingInstalledPackageException(Exception):
"""
raised when a package is not installed
"""
pass
class AnnoyingPackageException(Exception):
"""
    raised when a package is not on PyPI
"""
pass
class MissingVersionOnPyPiException(Exception):
"""
    raised when a version is missing on PyPI
"""
pass
class MissingVersionWheelException(Exception):
"""
raised when a version is missing as a wheel
"""
pass
class MissingWheelException(Exception):
"""
raised when a wheel is missing
"""
pass
class MissingReferenceException(Exception):
"""
raised when a module is not referenced by this package
"""
pass
class InstallError(Exception):
"""
raised when a package cannot be installed
"""
pass
class DownloadError(Exception):
"""
raised when a package cannot be downloaded
"""
pass
class ConfigurationError(Exception):
"""
    raised when something is wrong with the current configuration
"""
pass
class UpdatePipError(Exception):
"""
    raised when pip cannot be updated or reinstalled
"""
pass
class RunCmdError(Exception):
"""
raised when a command line cannot be run
"""
pass
class WrongVersionError(Exception):
"""
    raised when a version cannot be interpreted
"""
pass
class WrongWheelException(Exception):
"""
raised when the downloaded wheel seems wrong
"""
pass
class UnavailableCustomBuildError(Exception):
"""
    raised when a module does not have a custom build
"""
pass
|
from catboost import CatBoostClassifier
def get_CatBoostClassifier(iterations=1000, learning_rate=0.01, min_data_in_leaf=30, eval_metric='AUC', cat_features=None):
return CatBoostClassifier(
iterations=iterations,
        learning_rate=learning_rate,
min_data_in_leaf=min_data_in_leaf,
eval_metric=eval_metric,
cat_features=cat_features
)
|
SIMULTANEOUS_THREADS = 150
TIMEOUT = 7
WHOIS_THREADS = 10
|
from rotkehlchen.accounting.structures.balance import Balance
from rotkehlchen.assets.asset import EthereumToken
from rotkehlchen.assets.utils import symbol_to_asset_or_token
from rotkehlchen.chain.ethereum.interfaces.ammswap.types import EventType, LiquidityPoolEvent
from rotkehlchen.chain.ethereum.modules.balancer.types import BalancerBPTEventType, BalancerEvent
from rotkehlchen.chain.ethereum.trades import AMMSwap
from rotkehlchen.chain.ethereum.types import string_to_ethereum_address
from rotkehlchen.constants.assets import A_ETH, A_EUR, A_USD
from rotkehlchen.constants.misc import ZERO
from rotkehlchen.exchanges.data_structures import Trade
from rotkehlchen.fval import FVal
from rotkehlchen.types import (
ApiKey,
ApiSecret,
AssetAmount,
ChecksumEthAddress,
Fee,
Location,
Price,
Timestamp,
TradeType,
)
def test_associated_locations(database):
"""Test that locations imported in different places are correctly stored in database"""
# Add trades from different locations
trades = [Trade(
timestamp=Timestamp(1595833195),
location=Location.CRYPTOCOM,
base_asset=A_ETH,
quote_asset=A_EUR,
trade_type=TradeType.BUY,
amount=AssetAmount(FVal('1.0')),
rate=Price(FVal('281.14')),
fee=Fee(ZERO),
fee_currency=A_USD,
link='',
notes='',
), Trade(
timestamp=Timestamp(1587825824),
location=Location.CRYPTOCOM,
base_asset=A_ETH,
quote_asset=A_EUR,
trade_type=TradeType.BUY,
amount=AssetAmount(FVal('50.0')),
rate=Price(FVal('3.521')),
fee=Fee(ZERO),
fee_currency=A_USD,
link='',
notes='',
), Trade(
timestamp=Timestamp(1596014214),
location=Location.BLOCKFI,
base_asset=A_ETH,
quote_asset=A_EUR,
trade_type=TradeType.BUY,
amount=AssetAmount(FVal('50.0')),
rate=Price(FVal('3.521')),
fee=Fee(ZERO),
fee_currency=A_USD,
link='',
notes='',
), Trade(
timestamp=Timestamp(1565888464),
location=Location.NEXO,
base_asset=A_ETH,
quote_asset=A_EUR,
trade_type=TradeType.BUY,
amount=AssetAmount(FVal('50.0')),
rate=Price(FVal('3.521')),
fee=Fee(ZERO),
fee_currency=A_USD,
link='',
notes='',
), Trade(
timestamp=Timestamp(1596014214),
location=Location.NEXO,
base_asset=A_ETH,
quote_asset=A_EUR,
trade_type=TradeType.BUY,
amount=AssetAmount(FVal('50.0')),
rate=Price(FVal('3.521')),
fee=Fee(ZERO),
fee_currency=A_USD,
link='',
notes='',
), Trade(
timestamp=Timestamp(1612051199),
location=Location.BLOCKFI,
base_asset=symbol_to_asset_or_token('USDC'),
quote_asset=symbol_to_asset_or_token('LTC'),
trade_type=TradeType.BUY,
amount=AssetAmount(FVal('6404.6')),
rate=Price(FVal('151.6283999982779809352223797')),
fee=None,
fee_currency=None,
link='',
notes='One Time',
), Trade(
timestamp=Timestamp(1595833195),
location=Location.POLONIEX,
base_asset=A_ETH,
quote_asset=A_EUR,
trade_type=TradeType.BUY,
amount=AssetAmount(FVal('1.0')),
rate=Price(FVal('281.14')),
fee=Fee(ZERO),
fee_currency=A_USD,
link='',
notes='',
), Trade(
timestamp=Timestamp(1596429934),
location=Location.COINBASE,
base_asset=A_ETH,
quote_asset=A_EUR,
trade_type=TradeType.BUY,
amount=AssetAmount(FVal('0.00061475')),
rate=Price(FVal('309.0687271248474989833265555')),
fee=Fee(ZERO),
fee_currency=A_USD,
link='',
notes='',
), Trade(
timestamp=Timestamp(1596429934),
location=Location.EXTERNAL,
base_asset=A_ETH,
quote_asset=A_EUR,
trade_type=TradeType.BUY,
amount=AssetAmount(FVal('1')),
rate=Price(FVal('320')),
fee=Fee(ZERO),
fee_currency=A_USD,
link='',
notes='',
)]
# Add multiple entries for same exchange + connected exchange
database.add_trades(trades)
kraken_api_key1 = ApiKey('kraken_api_key')
kraken_api_secret1 = ApiSecret(b'kraken_api_secret')
kraken_api_key2 = ApiKey('kraken_api_key2')
kraken_api_secret2 = ApiSecret(b'kraken_api_secret2')
binance_api_key = ApiKey('binance_api_key')
binance_api_secret = ApiSecret(b'binance_api_secret')
# add mock kraken and binance
database.add_exchange('kraken1', Location.KRAKEN, kraken_api_key1, kraken_api_secret1)
database.add_exchange('kraken2', Location.KRAKEN, kraken_api_key2, kraken_api_secret2)
database.add_exchange('binance', Location.BINANCE, binance_api_key, binance_api_secret)
# Add uniswap and sushiswap events
database.add_amm_events([
LiquidityPoolEvent(
tx_hash='0x47ea26957ce09e84a51b51dfdab6a4ac1c3672a372eef77b15ef7677174ac847',
log_index=23,
address=ChecksumEthAddress('0x3163Bb273E8D9960Ce003fD542bF26b4C529f515'),
timestamp=Timestamp(1590011534),
event_type=EventType.MINT_SUSHISWAP,
pool_address=ChecksumEthAddress('0xa2107FA5B38d9bbd2C461D6EDf11B11A50F6b974'),
token0=EthereumToken('0x514910771AF9Ca656af840dff83E8264EcF986CA'),
token1=EthereumToken('0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2'),
amount0=FVal('3.313676003468974932'),
amount1=FVal('0.064189269269768657'),
usd_price=FVal('26.94433946158740371839009166230438'),
lp_amount=FVal('0.460858304063739927'),
),
])
database.add_amm_swaps([
AMMSwap(
tx_hash='0xa54bf4c68d435e3c8f432fd7e62b7f8aca497a831a3d3fca305a954484ddd7b2',
log_index=208,
address=ChecksumEthAddress('0xa2107FA5B38d9bbd2C461D6EDf11B11A50F6b974'),
from_address=string_to_ethereum_address('0xd9e1cE17f2641f24aE83637ab66a2cca9C378B9F'),
to_address=string_to_ethereum_address('0xC9cB53B48A2f3A9e75982685644c1870F1405CCb'),
timestamp=Timestamp(1609301469),
location=Location.UNISWAP,
token0=EthereumToken('0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2'),
token1=EthereumToken('0xdAC17F958D2ee523a2206206994597C13D831ec7'),
amount0_in=AssetAmount(FVal('2.6455727132446468')),
amount1_in=AssetAmount(ZERO),
amount0_out=AssetAmount(ZERO),
amount1_out=AssetAmount(FVal('1936.810111')),
),
])
database.add_balancer_events([
BalancerEvent(
tx_hash='0xa54bf4c68d435e3c8f432fd7e62b7f8aca497a831a3d3fca305a954484ddd7b3',
log_index=23,
address=ChecksumEthAddress('0xa2107FA5B38d9bbd2C461D6EDf11B11A50F6b974'),
timestamp=Timestamp(1609301469),
event_type=BalancerBPTEventType.MINT,
pool_address_token=EthereumToken('0x514910771AF9Ca656af840dff83E8264EcF986CA'),
lp_balance=Balance(amount=FVal(2), usd_value=FVal(3)),
amounts=[
AssetAmount(FVal(1)),
AssetAmount(FVal(2)),
],
),
])
expected_locations = {
Location.KRAKEN,
Location.BINANCE,
Location.BLOCKFI,
Location.NEXO,
Location.CRYPTOCOM,
Location.POLONIEX,
Location.COINBASE,
Location.EXTERNAL,
Location.SUSHISWAP,
Location.UNISWAP,
Location.BALANCER,
}
assert set(database.get_associated_locations()) == expected_locations
|
import os
from setuptools import setup
from setuptools import find_packages
# Utility function to read the README file.
# From http://packages.python.org/an_example_pypi_project/setuptools.html.
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup( name = "NodeBox for OpenGL",
version = "1.7",
description = "NodeBox for OpenGL (NOGL) is a free, cross-platform library "
"for generating 2D animations with Python programming code.",
long_description = read("README.txt"),
keywords = "2d graphics sound physics games multimedia",
license = "BSD",
author = "Tom De Smedt",
url = "http://www.cityinabottle.org/nodebox/",
packages = find_packages(),
package_data = {"nodebox.gui": ["theme/*"]},
install_requires = ["pyglet",],
classifiers = [
"Development Status :: 4 - Beta",
"Environment :: MacOS X",
"Environment :: Win32 (MS Windows)",
"Environment :: X11 Applications",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"License :: OSI Approved :: BSD License",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python",
"Topic :: Artistic Software",
"Topic :: Games/Entertainment",
"Topic :: Multimedia :: Graphics",
"Topic :: Scientific/Engineering :: Visualization",
"Topic :: Software Development :: Libraries :: Python Modules",
],
)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import time
import codecs
import subprocess
#import contextlib
#with contextlib.redirect_stdout(None):
# import pygame.mixer
#https://stackoverflow.com/questions/49871252/saving-text-to-speech-python
import win32com.client as wincl
import win32api
def qPlay(tempFile=None, sync=True):
if not tempFile is None:
#if os.name != 'nt':
# pygame.mixer.init()
# pygame.mixer.music.load(tempFile)
# pygame.mixer.music.play()
# if sync == True:
# while pygame.mixer.music.get_busy():
# time.sleep(0.1)
# pygame.mixer.music.stop()
#else:
cmd = ['sox', tempFile, '-d', '-q']
#cmd = ['sox', '-v', '3', tempFile, '-d', '-q', 'gain', '-n']
#cmd = ['sox', '-v', '3', tempFile, '-b', '8', '-u', '-r', '8000', '-c', '1', '-d', '-q', 'gain', '-n']
#cmd = ['sox', '-v', '3', tempFile, '-r', '8000', '-c', '1', '-d', '-q', 'gain', '-n']
p=subprocess.Popen(cmd)
if sync == True:
p.wait()
if __name__ == '__main__':
lng = 'ja-JP'
txtFile = 'temp/temp_msg.txt'
tmpFile = 'temp/temp_voice.wav' #Azure, HOYA
#tmpFile = 'temp/temp_voice.mp3' #Google, Watson
if len(sys.argv)>=2:
lng = sys.argv[1]
if len(sys.argv)>=3:
txtFile = sys.argv[2]
if len(sys.argv)>=4:
tmpFile = sys.argv[3]
if lng=='ja':
lng = 'ja-JP'
if lng=='en':
lng = 'en-US'
print('')
print('speech_output_win32.py')
print(' 1)language = ' + lng)
print(' 2)txtFile = ' + txtFile)
#print(' 3)tmpFile = ' + tmpFile)
txt = ''
rt = codecs.open(txtFile, 'r', 'shift_jis')
for t in rt:
txt = (txt + ' ' + str(t)).strip()
    rt.close()
rt = None
if os.path.exists(tmpFile):
os.remove(tmpFile)
try:
print(' ' + txt)
# MS Windows
t = '<speak version="1.0" xmlns="http://www.w3.org/2001/10/synthesis" xml:lang="en-US">'
t += '<voice xml:lang="' + lng + '" gender="female">'
t += txt
t += '</voice></speak>'
engine = wincl.Dispatch('SAPI.SpVoice')
#engine.Speak(t)
stream = wincl.Dispatch("SAPI.SpFileStream")
stream.open(tmpFile, 3, False)
#for speaker in engine.GetAudioOutputs():
# print(speaker.GetDescription())
engine.AudioOutputStream = stream
engine.Speak(t)
stream.close()
except:
print(' Error!', sys.exc_info()[0])
sys.exit()
if os.path.exists(tmpFile):
qPlay(tmpFile)
|
import sklearn
from sklearn.neural_network import MLPClassifier
X = [[0, 1], [1, 0], [1, 1]]
Y = [1, 1, 0]
clf = MLPClassifier(
solver="lbfgs", alpha=1e-3, hidden_layer_sizes=(5, 2), random_state=1
)
clf.fit(X, Y)
clf.predict([[2, 2], [0, 0], [-1, -2]])
print(clf.score([[2, 2], [0, 0], [-1, -2]], [1, 0, 0]))
for coef in clf.coefs_:
print(coef)
|
# Lesson 18 - 03-12-2019
# Given the following list, write a method that returns each of these items individually,
# with a header stating the position those items occupy inside the main list:
# Example:
# ############# position 0 ##################
# Agua
# mamão
# ############# position 1 ##################
# banana
# limão
# Rule: the range() function may not be used, and at most 2 print() calls are allowed
lista = [
['mamão','abacaxi','laranja','uva','pera','maçã','vergamota'],
['skol','kaiser','sol','schin','brahma','itaipava','bavaria'],
['alface crespa', 'alface lisa','rucula','almerão','repolho','salsinha',],
['rizoto','macarronada','polenta','guizado','dobradinha','revirado','pure'],
['feijão', 'erviha', 'lentilha','vagem','feijão branco','gão de bico','soja'],
['agua','cachoeira','rio','lagoa','sanga','brejo','laguna'],
['vento','ciclone','tufão','furacão','brisa','minuano','zefiro'],
['carro','moto','vespa','caminhão','sprinter','kombi','fusca'],
['calça','camisa','japona','jaqueta','camiseta','bone','regata']
]
def lista_iten(lista):
    numero = 0
    for lista_pequena in lista:
        print(f"############# Position: {numero} ################")
        numero = numero + 1
        for abioluz in lista_pequena:
            print(abioluz)
lista_iten(lista)
|
def min(*args, **kwargs):
    key = kwargs.get("key")
    temp = args if len(args) > 1 else args[0]
    return sorted(temp, key=key)[0]
def max(*args, **kwargs):
    key = kwargs.get("key")
    temp = args if len(args) > 1 else args[0]
    return sorted(temp, key=key, reverse=True)[0]
if __name__ == '__main__':
#These "asserts" using only for self-checking and not necessary for auto-testing
assert max(3, 2) == 3, "Simple case max"
assert min(3, 2) == 2, "Simple case min"
assert max([1, 2, 0, 3, 4]) == 4, "From a list"
assert min("hello") == "e", "From string"
assert max(2.2, 5.6, 5.9, key=int) == 5.6, "Two maximal items"
assert min([[1, 2], [3, 4], [9, 0]], key=lambda x: x[1]) == [9, 0], "lambda key"
print("Coding complete? Click 'Check' to review your tests and earn cool rewards!")
|
import numpy as np
from vg.compat import v2 as vg
__all__ = [
"transform_matrix_for_non_uniform_scale",
"transform_matrix_for_rotation",
"transform_matrix_for_translation",
"transform_matrix_for_uniform_scale",
]
def _convert_33_to_44(matrix):
"""
Transform from:
array([[1., 2., 3.],
[2., 3., 4.],
[5., 6., 7.]])
to:
array([[1., 2., 3., 0.],
[2., 3., 4., 0.],
[5., 6., 7., 0.],
[0., 0., 0., 1.]])
"""
vg.shape.check(locals(), "matrix", (3, 3))
result = np.pad(matrix, ((0, 1), (0, 1)), mode="constant")
result[3][3] = 1
return result
def transform_matrix_for_rotation(rotation, ret_inverse_matrix=False):
"""
Create a transformation matrix from the given 3x3 rotation matrix or a
Rodrigues vector.
With `ret_inverse_matrix=True`, also returns a matrix which provides
the reverse transform.
"""
from ._rodrigues import rodrigues_vector_to_rotation_matrix
if rotation.shape == (3, 3):
forward3 = rotation
else:
vg.shape.check(locals(), "rotation", (3,))
forward3 = rodrigues_vector_to_rotation_matrix(rotation)
forward = _convert_33_to_44(forward3)
if not ret_inverse_matrix:
return forward
# The inverse of a rotation matrix is its transpose.
inverse = forward.T
return forward, inverse
def transform_matrix_for_translation(translation, ret_inverse_matrix=False):
"""
Create a transformation matrix which translates by the provided
displacement vector.
Forward:
[[ 1, 0, 0, v_0 ],
[ 0, 1, 0, v_1 ],
[ 0, 0, 1, v_2 ],
[ 0, 0, 0, 1 ]]
Reverse:
[[ 1, 0, 0, -v_0 ],
[ 0, 1, 0, -v_1 ],
[ 0, 0, 1, -v_2 ],
[ 0, 0, 0, 1 ]]
Args:
        translation (np.arraylike): The displacement vector, with shape `(3,)`.
        ret_inverse_matrix (bool): When `True`, also returns the inverse transform.
"""
vg.shape.check(locals(), "translation", (3,))
forward = np.eye(4)
forward[:, -1][:-1] = translation
if not ret_inverse_matrix:
return forward
inverse = np.eye(4)
inverse[:, -1][:-1] = -translation
return forward, inverse
def transform_matrix_for_non_uniform_scale(
x_factor, y_factor, z_factor, allow_flipping=False, ret_inverse_matrix=False
):
"""
Create a transformation matrix that scales by the given factors along
`x`, `y`, and `z`.
Forward:
[[ s_0, 0, 0, 0 ],
[ 0, s_1, 0, 0 ],
[ 0, 0, s_2, 0 ],
[ 0, 0, 0, 1 ]]
Reverse:
[[ 1/s_0, 0, 0, 0 ],
[ 0, 1/s_1, 0, 0 ],
[ 0, 0, 1/s_2, 0 ],
[ 0, 0, 0, 1 ]]
Args:
x_factor (float): The scale factor to be applied along the `x` axis,
which should be positive.
y_factor (float): The scale factor to be applied along the `y` axis,
which should be positive.
z_factor (float): The scale factor to be applied along the `z` axis,
which should be positive.
allow_flipping (bool): When `True`, allows scale factors to be
positive or negative, though not zero.
ret_inverse_matrix (bool): When `True`, also returns a matrix which
provides the inverse transform.
"""
if x_factor == 0 or y_factor == 0 or z_factor == 0:
raise ValueError("Scale factors should be nonzero")
if not allow_flipping and (x_factor < 0 or y_factor < 0 or z_factor < 0):
raise ValueError("Scale factors should be greater than zero")
scale = np.array([x_factor, y_factor, z_factor])
forward = _convert_33_to_44(np.diag(scale))
if not ret_inverse_matrix:
return forward
inverse = _convert_33_to_44(np.diag(1.0 / scale))
return forward, inverse
def transform_matrix_for_uniform_scale(
scale_factor, allow_flipping=False, ret_inverse_matrix=False
):
"""
Create a transformation matrix that scales by the given factor.
Forward:
[[ s_0, 0, 0, 0 ],
[ 0, s_1, 0, 0 ],
[ 0, 0, s_2, 0 ],
[ 0, 0, 0, 1 ]]
Reverse:
[[ 1/s_0, 0, 0, 0 ],
[ 0, 1/s_1, 0, 0 ],
[ 0, 0, 1/s_2, 0 ],
[ 0, 0, 0, 1 ]]
Args:
        scale_factor (float): The scale factor, which should be nonzero.
        allow_flipping (bool): When `True`, allows a negative (flipping) scale factor.
ret_inverse_matrix (bool): When `True`, also returns a matrix which
provides the inverse transform.
"""
if scale_factor == 0:
raise ValueError("Scale factor should be nonzero")
if not allow_flipping and scale_factor < 0:
raise ValueError("Scale factor should be greater than zero")
return transform_matrix_for_non_uniform_scale(
scale_factor,
scale_factor,
scale_factor,
allow_flipping=allow_flipping,
ret_inverse_matrix=ret_inverse_matrix,
)
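# A small usage sketch, not part of the library API: compose a uniform scale with a
# translation and apply the result to a point in homogeneous coordinates.
if __name__ == "__main__":
    translate = transform_matrix_for_translation(np.array([1.0, 2.0, 3.0]))
    scale = transform_matrix_for_uniform_scale(2.0)
    transform = translate @ scale  # scale first, then translate
    point = np.array([1.0, 1.0, 1.0, 1.0])  # homogeneous coordinates
    print(transform @ point)  # -> [3. 4. 5. 1.]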
|
"""
## IR Markers message id position:
[3]----[4]
| |
| |
[2]----[1]
[1] (1,1,0)
[2] (-1,1,0)
[3] (-1,-1,0)
    [4] (1,-1,0)
Gate width is 0.3 m
## Left Camera info:
height: 768
width: 1024
distortion_model: "plum_bob"
D: [0.0, 0.0, 0.0, 0.0, 0.0]
K: [548.4088134765625, 0.0, 512.0, 0.0, 548.4088134765625, 384.0, 0.0, 0.0, 1.0]
R: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
P: [548.4088134765625, 0.0, 512.0, 0.0, 0.0, 548.4088134765625, 384.0, 0.0, 0.0, 0.0, 1.0, 0.0]
"""
import numpy as np
import cv2
import math
from geometry_msgs.msg import Point, Pose, PoseArray
class MarkersEstimator:
def __init__(self):
# From camera info K and D
self.mtx = np.array([ [548.4088134765625, 0.0, 512.0], [0.0, 548.4088134765625, 384.0], [0.0, 0.0, 1.0]], dtype = "double")
self.dist = np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype = "double")
self.gate_w = 30
def estimate_gate_markers(self, gate):
#print(gate)
#pts = np.empty(shape=(0,1))
pts = []
for g in gate:
pts.append(gate[g])
#print(pts)
# Find the rotation and translation vectors.
if len(pts) == 4:
pts = np.array(pts, dtype="double").reshape(4, 1, 2) #it needs to be reshaped for Ransac
# print(pts)
            objp = np.array([(self.gate_w, self.gate_w, 0),
                             (-self.gate_w, self.gate_w, 0),
                             (-self.gate_w, -self.gate_w, 0),
                             (self.gate_w, -self.gate_w, 0)], dtype="double").reshape(4, 1, 3)  # it needs to be reshaped for Ransac
# print(objp)
rval, rvec, tvec, inliers = cv2.solvePnPRansac(objp, pts, self.mtx, self.dist)
#rvecs, tvecs, inliers = cv2.solvePnP(objp, pts, self.mtx, self.dist)
if rval is False:
return None
rpy = rvec2rpy_ros(rvec)
pos = tvec2point_ros(tvec.flatten())
return {'rpy' : rpy,
'position' : pos}
def tvec2point_ros(tvec):
pos = (tvec.item(2) / 100, -tvec.item(0) / 100, -tvec.item(1) / 100) # z, -x, -y
return pos
'''
/** this conversion uses conventions as described on page:
* https://www.euclideanspace.com/maths/geometry/rotations/euler/index.htm
* Coordinate System: right hand
* Positive angle: right hand
* Order of euler angles: heading first, then attitude, then bank
* matrix row column ordering:
* [m00 m01 m02]
* [m10 m11 m12]
* [m20 m21 m22]*/
'''
def rvec2rpy_ros(rvec):
m, _ = cv2.Rodrigues(rvec)
# // Assuming the angles are in radians.
if (m[1, 0] > 0.998): # // singularity at north pole
yaw = math.atan2(m[0, 2], m[2, 2])
        roll = math.pi / 2
pitch = 0
elif m[1, 0] < -0.998: # // singularity at south pole
yaw = math.atan2(m[0, 2], m[2, 2])
        roll = -math.pi / 2
pitch = 0
else:
yaw = -math.atan2(-m[2, 0], m[0, 0]) + math.pi
pitch = math.atan2(m[2, 2], m[1, 2]) + math.pi / 2 # math.atan2(-m[1, 2], m[1, 1])
roll = -math.asin(m[1, 0])
return roll, pitch, yaw
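# An offline sanity check, not part of the original module (no ROS runtime is needed,
# although the geometry_msgs import above must still resolve): project the four gate
# corners from an assumed camera pose with cv2.projectPoints and feed the resulting
# pixel coordinates back into the estimator.
if __name__ == "__main__":
    estimator = MarkersEstimator()
    w = estimator.gate_w
    corners = np.array([(w, w, 0), (-w, w, 0), (-w, -w, 0), (w, -w, 0)], dtype="double")
    rvec = np.zeros(3)
    tvec = np.array([0.0, 0.0, 300.0])  # 3 m in front of the camera (units are cm)
    pixels, _ = cv2.projectPoints(corners, rvec, tvec, estimator.mtx, estimator.dist)
    gate = {marker_id + 1: tuple(px.ravel()) for marker_id, px in enumerate(pixels)}
    print(estimator.estimate_gate_markers(gate))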
|
# Numpy Exercises - 06
#*******************
import numpy as np
arr=np.zeros(10)
arr[5]=1
print(arr)
|
""" Module to access the Webhooks endpoints """
# pylint: disable=too-many-lines,too-many-locals,too-many-public-methods,too-few-public-methods
from typing import Any, Dict, List, Optional, Union
from pydantic import BaseModel
from ...models import (
CreateIncomingWebhookJsonBody,
CreateOutgoingWebhookJsonBody,
IncomingWebhook,
OutgoingWebhook,
StatusOK,
UpdateIncomingWebhookJsonBody,
UpdateOutgoingWebhookJsonBody,
)
from ..base import ApiBaseClass
class WebhooksApi(ApiBaseClass):
"""Endpoints for creating, getting and updating webhooks."""
def get_incoming_webhooks(
self,
*,
page: Optional[int] = 0,
per_page: Optional[int] = 60,
team_id: Optional[str] = None,
) -> List[IncomingWebhook]:
"""List incoming webhooks
Get a page of a list of incoming webhooks. Optionally filter for a
specific team using query parameters.
Permissions:
`manage_webhooks` for the system or `manage_webhooks` for
the specific team.
Api Reference:
`GetIncomingWebhooks <https://api.mattermost.com/#operation/GetIncomingWebhooks>`_
"""
url = "/hooks/incoming"
params: Dict[str, Any] = {
"page": page,
"per_page": per_page,
"team_id": team_id,
}
params = {k: v for k, v in params.items() if v is not None}
request_kwargs = {
"url": url,
"params": params,
}
# pylint: disable-next=protected-access
with self.client._get_httpx_client() as httpx_client:
response = httpx_client.get(
**request_kwargs,
)
if self.skip_response_parsing:
return response
if response.status_code == 200:
response200 = []
_response200 = response.json()
for response200_item_data in _response200:
response200_item = IncomingWebhook.parse_obj(response200_item_data)
response200.append(response200_item)
return response200
return response
def create_incoming_webhook(
self,
*,
json_body: Union[CreateIncomingWebhookJsonBody, Dict],
) -> IncomingWebhook:
"""Create an incoming webhook
Create an incoming webhook for a channel.
`manage_others_incoming_webhooks` for the team the webhook is in if the
user is different than the requester.
Permissions:
`manage_webhooks` for the team the webhook is in.
Api Reference:
`CreateIncomingWebhook <https://api.mattermost.com/#operation/CreateIncomingWebhook>`_
"""
url = "/hooks/incoming"
if isinstance(json_body, BaseModel):
json_json_body = json_body.dict(exclude_unset=True)
else:
json_json_body = json_body
request_kwargs = {
"url": url,
"json": json_json_body,
}
# pylint: disable-next=protected-access
with self.client._get_httpx_client() as httpx_client:
response = httpx_client.post(
**request_kwargs,
)
if self.skip_response_parsing:
return response
if response.status_code == 201:
response201 = IncomingWebhook.parse_obj(response.json())
return response201
return response
def get_incoming_webhook(
self,
hook_id: str,
) -> IncomingWebhook:
"""Get an incoming webhook
Get an incoming webhook given the hook id.
Permissions:
`manage_webhooks` for system or `manage_webhooks` for the
specific team or `manage_webhooks` for the channel.
Api Reference:
`GetIncomingWebhook <https://api.mattermost.com/#operation/GetIncomingWebhook>`_
"""
url = f"/hooks/incoming/{hook_id}"
request_kwargs = {
"url": url,
}
# pylint: disable-next=protected-access
with self.client._get_httpx_client() as httpx_client:
response = httpx_client.get(
**request_kwargs,
)
if self.skip_response_parsing:
return response
if response.status_code == 200:
response200 = IncomingWebhook.parse_obj(response.json())
return response200
return response
def update_incoming_webhook(
self,
hook_id: str,
*,
json_body: Union[UpdateIncomingWebhookJsonBody, Dict],
) -> IncomingWebhook:
"""Update an incoming webhook
Update an incoming webhook given the hook id.
Permissions:
`manage_webhooks` for system or `manage_webhooks` for the
specific team or `manage_webhooks` for the channel.
Api Reference:
`UpdateIncomingWebhook <https://api.mattermost.com/#operation/UpdateIncomingWebhook>`_
"""
url = f"/hooks/incoming/{hook_id}"
if isinstance(json_body, BaseModel):
json_json_body = json_body.dict(exclude_unset=True)
else:
json_json_body = json_body
request_kwargs = {
"url": url,
"json": json_json_body,
}
# pylint: disable-next=protected-access
with self.client._get_httpx_client() as httpx_client:
response = httpx_client.put(
**request_kwargs,
)
if self.skip_response_parsing:
return response
if response.status_code == 200:
response200 = IncomingWebhook.parse_obj(response.json())
return response200
return response
def delete_incoming_webhook(
self,
hook_id: str,
) -> StatusOK:
"""Delete an incoming webhook
Delete an incoming webhook given the hook id.
Permissions:
`manage_webhooks` for system or `manage_webhooks` for the
specific team or `manage_webhooks` for the channel.
Api Reference:
`DeleteIncomingWebhook <https://api.mattermost.com/#operation/DeleteIncomingWebhook>`_
"""
url = f"/hooks/incoming/{hook_id}"
request_kwargs = {
"url": url,
}
# pylint: disable-next=protected-access
with self.client._get_httpx_client() as httpx_client:
response = httpx_client.delete(
**request_kwargs,
)
if self.skip_response_parsing:
return response
if response.status_code == 200:
response200 = StatusOK.parse_obj(response.json())
return response200
return response
def get_outgoing_webhooks(
self,
*,
page: Optional[int] = 0,
per_page: Optional[int] = 60,
team_id: Optional[str] = None,
channel_id: Optional[str] = None,
) -> List[OutgoingWebhook]:
"""List outgoing webhooks
Get a page of a list of outgoing webhooks. Optionally filter for a
specific team or channel using query parameters.
Permissions:
`manage_webhooks` for the system or `manage_webhooks` for
the specific team/channel.
Api Reference:
`GetOutgoingWebhooks <https://api.mattermost.com/#operation/GetOutgoingWebhooks>`_
"""
url = "/hooks/outgoing"
params: Dict[str, Any] = {
"page": page,
"per_page": per_page,
"team_id": team_id,
"channel_id": channel_id,
}
params = {k: v for k, v in params.items() if v is not None}
request_kwargs = {
"url": url,
"params": params,
}
# pylint: disable-next=protected-access
with self.client._get_httpx_client() as httpx_client:
response = httpx_client.get(
**request_kwargs,
)
if self.skip_response_parsing:
return response
if response.status_code == 200:
response200 = []
_response200 = response.json()
for response200_item_data in _response200:
response200_item = OutgoingWebhook.parse_obj(response200_item_data)
response200.append(response200_item)
return response200
return response
def create_outgoing_webhook(
self,
*,
json_body: Union[CreateOutgoingWebhookJsonBody, Dict],
) -> OutgoingWebhook:
"""Create an outgoing webhook
Create an outgoing webhook for a team.
`manage_others_outgoing_webhooks` for the team the webhook is in if the
user is different than the requester.
Permissions:
`manage_webhooks` for the team the webhook is in.
Api Reference:
`CreateOutgoingWebhook <https://api.mattermost.com/#operation/CreateOutgoingWebhook>`_
"""
url = "/hooks/outgoing"
if isinstance(json_body, BaseModel):
json_json_body = json_body.dict(exclude_unset=True)
else:
json_json_body = json_body
request_kwargs = {
"url": url,
"json": json_json_body,
}
# pylint: disable-next=protected-access
with self.client._get_httpx_client() as httpx_client:
response = httpx_client.post(
**request_kwargs,
)
if self.skip_response_parsing:
return response
if response.status_code == 201:
response201 = OutgoingWebhook.parse_obj(response.json())
return response201
return response
def get_outgoing_webhook(
self,
hook_id: str,
) -> OutgoingWebhook:
"""Get an outgoing webhook
Get an outgoing webhook given the hook id.
Permissions:
`manage_webhooks` for system or `manage_webhooks` for the
specific team or `manage_webhooks` for the channel.
Api Reference:
`GetOutgoingWebhook <https://api.mattermost.com/#operation/GetOutgoingWebhook>`_
"""
url = f"/hooks/outgoing/{hook_id}"
request_kwargs = {
"url": url,
}
# pylint: disable-next=protected-access
with self.client._get_httpx_client() as httpx_client:
response = httpx_client.get(
**request_kwargs,
)
if self.skip_response_parsing:
return response
if response.status_code == 200:
response200 = OutgoingWebhook.parse_obj(response.json())
return response200
return response
def update_outgoing_webhook(
self,
hook_id: str,
*,
json_body: Union[UpdateOutgoingWebhookJsonBody, Dict],
) -> OutgoingWebhook:
"""Update an outgoing webhook
Update an outgoing webhook given the hook id.
Permissions:
`manage_webhooks` for system or `manage_webhooks` for the
specific team or `manage_webhooks` for the channel.
Api Reference:
`UpdateOutgoingWebhook <https://api.mattermost.com/#operation/UpdateOutgoingWebhook>`_
"""
url = f"/hooks/outgoing/{hook_id}"
if isinstance(json_body, BaseModel):
json_json_body = json_body.dict(exclude_unset=True)
else:
json_json_body = json_body
request_kwargs = {
"url": url,
"json": json_json_body,
}
# pylint: disable-next=protected-access
with self.client._get_httpx_client() as httpx_client:
response = httpx_client.put(
**request_kwargs,
)
if self.skip_response_parsing:
return response
if response.status_code == 200:
response200 = OutgoingWebhook.parse_obj(response.json())
return response200
return response
def delete_outgoing_webhook(
self,
hook_id: str,
) -> StatusOK:
"""Delete an outgoing webhook
Delete an outgoing webhook given the hook id.
Permissions:
`manage_webhooks` for system or `manage_webhooks` for the
specific team or `manage_webhooks` for the channel.
Api Reference:
`DeleteOutgoingWebhook <https://api.mattermost.com/#operation/DeleteOutgoingWebhook>`_
"""
url = f"/hooks/outgoing/{hook_id}"
request_kwargs = {
"url": url,
}
# pylint: disable-next=protected-access
with self.client._get_httpx_client() as httpx_client:
response = httpx_client.delete(
**request_kwargs,
)
if self.skip_response_parsing:
return response
if response.status_code == 200:
response200 = StatusOK.parse_obj(response.json())
return response200
return response
def regen_outgoing_hook_token(
self,
hook_id: str,
) -> StatusOK:
"""Regenerate the token for the outgoing webhook.
Regenerate the token for the outgoing webhook.
Permissions:
`manage_webhooks` for system or `manage_webhooks` for the
specific team or `manage_webhooks` for the channel.
Api Reference:
`RegenOutgoingHookToken <https://api.mattermost.com/#operation/RegenOutgoingHookToken>`_
"""
url = f"/hooks/outgoing/{hook_id}/regen_token"
request_kwargs = {
"url": url,
}
# pylint: disable-next=protected-access
with self.client._get_httpx_client() as httpx_client:
response = httpx_client.post(
**request_kwargs,
)
if self.skip_response_parsing:
return response
if response.status_code == 200:
response200 = StatusOK.parse_obj(response.json())
return response200
return response
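    # Hypothetical usage sketch (not part of the generated client; the names
    # `client` and `webhooks` below are assumptions about how this endpoint
    # group is exposed). A round trip through the incoming-webhook methods
    # above might look like:
    #
    #     hook = client.webhooks.get_incoming_webhook(hook_id)
    #     hook = client.webhooks.update_incoming_webhook(hook_id, json_body={"display_name": "ci-bot"})
    #     client.webhooks.delete_incoming_webhook(hook_id)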
|
from pathlib import Path
import pandas as pd
from cacp.util import to_latex
def process_comparison_result_winners_for_metric(metric: str, result_dir: Path) -> pd.DataFrame:
"""
Processes comparison results, finds winners for metric.
:param metric: comparison metric {auc, accuracy, precision, recall, f1}
:param result_dir: results directory
    :return: DataFrame with winners for the metric
"""
df = pd.read_csv(result_dir.joinpath('comparison.csv'))
algorithms = df['algorithm'].unique()
places = [i for i in range(min(len(algorithms), 3))]
winner_dir = result_dir.joinpath('winner').joinpath(metric)
winner_dir.mkdir(exist_ok=True, parents=True)
def count_places(place=0):
count = {a: 0 for a in algorithms}
names = {a: [] for a in algorithms}
for dataset, df_d in df.groupby(['dataset']):
df_d_a_m = df_d.groupby(['algorithm']).mean().sort_values(by=[metric], ascending=False)
best = df_d_a_m.iloc[place]
count[best.name] += 1
names[best.name].append(dataset)
return count, names
counts = []
for c, n in [count_places(i) for i in places]:
counts.append(c)
rows = []
for algorithm in algorithms:
row = [algorithm]
for p in places:
row.append(counts[p][algorithm])
rows.append(row)
columns = ['algorithm'] + ['1st', '2nd', '3rd'][: len(places)]
df_r = pd.DataFrame(columns=columns, data=rows)
df_r = df_r.sort_values(by=['1st'], ascending=False)
df_r.reset_index(drop=True, inplace=True)
df_r.index += 1
df_r.to_csv(winner_dir.joinpath('comparison_result.csv'), index=True)
winner_dir.joinpath('comparison_result.tex').open('w').write(
to_latex(
df_r,
caption=f'Ranking of compared algorithms for {metric}',
label=f'tab:places_{metric}',
)
)
return df_r
def process_comparison_result_winners(result_dir: Path):
"""
Processes comparison results, finds winners.
:param result_dir: results directory
"""
auc_wins = process_comparison_result_winners_for_metric('auc', result_dir).sort_values(by=['algorithm'])
acc_wins = process_comparison_result_winners_for_metric('accuracy', result_dir).sort_values(by=['algorithm'])
wins_df = auc_wins[['algorithm']].copy()
    for c in auc_wins.columns[1:]:
wins_df['auc ' + c] = auc_wins[c].values
for c in acc_wins.columns[1:]:
wins_df['accuracy ' + c] = acc_wins[c].values
winner_dir = result_dir.joinpath('winner')
winner_dir.mkdir(exist_ok=True, parents=True)
wins_df = wins_df.sort_values(by=wins_df.columns[1:].values.tolist(), ascending=False)
wins_df.reset_index(drop=True, inplace=True)
wins_df.index += 1
wins_df.to_csv(winner_dir.joinpath('comparison.csv'), index=True)
winner_dir.joinpath('comparison.tex').open('w').write(
to_latex(
wins_df,
caption='Ranking of compared algorithms',
label='tab:places',
)
)
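# Minimal usage sketch (the 'results' path is an assumption): the functions above
# expect a `comparison.csv` inside `result_dir` and write ranking CSV/LaTeX files
# into `result_dir/winner`.
if __name__ == '__main__':
    process_comparison_result_winners(Path('results'))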
|
# coding: utf-8
"""
grafeas.proto
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1beta1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ProvenanceCommand(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'env': 'list[str]',
'args': 'list[str]',
'dir': 'str',
'id': 'str',
'wait_for': 'list[str]'
}
attribute_map = {
'name': 'name',
'env': 'env',
'args': 'args',
'dir': 'dir',
'id': 'id',
'wait_for': 'waitFor'
}
def __init__(self, name=None, env=None, args=None, dir=None, id=None, wait_for=None): # noqa: E501
"""ProvenanceCommand - a model defined in Swagger""" # noqa: E501
self._name = None
self._env = None
self._args = None
self._dir = None
self._id = None
self._wait_for = None
self.discriminator = None
if name is not None:
self.name = name
if env is not None:
self.env = env
if args is not None:
self.args = args
if dir is not None:
self.dir = dir
if id is not None:
self.id = id
if wait_for is not None:
self.wait_for = wait_for
@property
def name(self):
"""Gets the name of this ProvenanceCommand. # noqa: E501
Required. Name of the command, as presented on the command line, or if the command is packaged as a Docker container, as presented to `docker pull`. # noqa: E501
:return: The name of this ProvenanceCommand. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ProvenanceCommand.
Required. Name of the command, as presented on the command line, or if the command is packaged as a Docker container, as presented to `docker pull`. # noqa: E501
:param name: The name of this ProvenanceCommand. # noqa: E501
:type: str
"""
self._name = name
@property
def env(self):
"""Gets the env of this ProvenanceCommand. # noqa: E501
Environment variables set before running this command. # noqa: E501
:return: The env of this ProvenanceCommand. # noqa: E501
:rtype: list[str]
"""
return self._env
@env.setter
def env(self, env):
"""Sets the env of this ProvenanceCommand.
Environment variables set before running this command. # noqa: E501
:param env: The env of this ProvenanceCommand. # noqa: E501
:type: list[str]
"""
self._env = env
@property
def args(self):
"""Gets the args of this ProvenanceCommand. # noqa: E501
Command-line arguments used when executing this command. # noqa: E501
:return: The args of this ProvenanceCommand. # noqa: E501
:rtype: list[str]
"""
return self._args
@args.setter
def args(self, args):
"""Sets the args of this ProvenanceCommand.
Command-line arguments used when executing this command. # noqa: E501
:param args: The args of this ProvenanceCommand. # noqa: E501
:type: list[str]
"""
self._args = args
@property
def dir(self):
"""Gets the dir of this ProvenanceCommand. # noqa: E501
Working directory (relative to project source root) used when running this command. # noqa: E501
:return: The dir of this ProvenanceCommand. # noqa: E501
:rtype: str
"""
return self._dir
@dir.setter
def dir(self, dir):
"""Sets the dir of this ProvenanceCommand.
Working directory (relative to project source root) used when running this command. # noqa: E501
:param dir: The dir of this ProvenanceCommand. # noqa: E501
:type: str
"""
self._dir = dir
@property
def id(self):
"""Gets the id of this ProvenanceCommand. # noqa: E501
Optional unique identifier for this command, used in wait_for to reference this command as a dependency. # noqa: E501
:return: The id of this ProvenanceCommand. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this ProvenanceCommand.
Optional unique identifier for this command, used in wait_for to reference this command as a dependency. # noqa: E501
:param id: The id of this ProvenanceCommand. # noqa: E501
:type: str
"""
self._id = id
@property
def wait_for(self):
"""Gets the wait_for of this ProvenanceCommand. # noqa: E501
The ID(s) of the command(s) that this command depends on. # noqa: E501
:return: The wait_for of this ProvenanceCommand. # noqa: E501
:rtype: list[str]
"""
return self._wait_for
@wait_for.setter
def wait_for(self, wait_for):
"""Sets the wait_for of this ProvenanceCommand.
The ID(s) of the command(s) that this command depends on. # noqa: E501
:param wait_for: The wait_for of this ProvenanceCommand. # noqa: E501
:type: list[str]
"""
self._wait_for = wait_for
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ProvenanceCommand, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ProvenanceCommand):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
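# Small usage sketch (values are illustrative only): build a command, round-trip it
# through to_dict(), and compare two instances for equality.
if __name__ == "__main__":
    cmd = ProvenanceCommand(name="gcr.io/builder", args=["build", "."], id="step-1", wait_for=["step-0"])
    print(cmd.to_dict())
    print(cmd == ProvenanceCommand(name="gcr.io/builder", args=["build", "."], id="step-1", wait_for=["step-0"]))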
|
#Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#SPDX-License-Identifier: MIT-0 (For details, see https://github.com/awsdocs/amazon-rekognition-developer-guide/blob/master/LICENSE-SAMPLECODE.)
import boto3
if __name__ == "__main__":
sourceFile='source.jpg'
targetFile='target.jpg'
client=boto3.client('rekognition')
imageSource=open(sourceFile,'rb')
imageTarget=open(targetFile,'rb')
response=client.compare_faces(SimilarityThreshold=70,
SourceImage={'Bytes': imageSource.read()},
TargetImage={'Bytes': imageTarget.read()})
for faceMatch in response['FaceMatches']:
        position = faceMatch['Face']['BoundingBox']
        similarity = str(faceMatch['Similarity'])
        print('The face at ' +
              str(position['Left']) + ' ' +
              str(position['Top']) +
              ' matches with ' + similarity + '% similarity')
imageSource.close()
imageTarget.close()
|
# coding=utf-8
"""
Controller model for the ENAS algorithm.
"""
import tensorflow as tf
import numpy as np
def build_controller_model(num_node, hidden_size, controller_temperature, controller_tanh_constant):
"""
    Args:
        num_node: total number of nodes
        hidden_size: LSTM output dimension
        controller_temperature: temperature used to soften the logits
        controller_tanh_constant: scaling constant applied to tanh(logits)
    Returns:
        The controller Keras model. Its outputs are the sampled link matrix
        [B, num_node, num_node], the per-step cross-entropy losses
        [B, num_node-1], and the link probabilities [B, num_node-1, num_node].
"""
input_tensor = tf.keras.Input(shape=[1], name="input_tensor", dtype=tf.float32) # [B,1]
batch_size = tf.shape(input_tensor)[0]
link_embedding_layer = tf.keras.layers.Embedding(input_dim=num_node - 1, output_dim=hidden_size,
name="link_embedding_layer")
link_lstm_layer = tf.keras.layers.LSTM(hidden_size, return_sequences=False, return_state=True, trainable=True,
recurrent_activation=None, name="link_lstm")
init_link_input = tf.keras.layers.Dense(hidden_size, use_bias=False, activation=None, trainable=True,
name="init_link_inputs")
    # Additive attention layers
link_atten_w_1 = tf.keras.layers.Dense(hidden_size, use_bias=False, activation=None, trainable=True, name="w_1")
link_atten_w_2 = tf.keras.layers.Dense(hidden_size, use_bias=False, activation=None, trainable=True, name="w_2")
link_atten_w_a = tf.keras.layers.Dense(1, use_bias=False, activation=None, trainable=True, name="w_a")
    # Initialize the input embedding
init_link_embedding = init_link_input(input_tensor) # [B, link_embedding_dim]
    all_h = [tf.broadcast_to(tf.zeros(shape=[1, hidden_size]),
                             shape=[batch_size, hidden_size])]  # outputs of the link LSTM layer [j, B, hidden_size]
all_h_w = [tf.broadcast_to(tf.zeros(shape=[1, hidden_size]),
shape=[batch_size, hidden_size])]
    all_links = [tf.broadcast_to(tf.zeros(shape=[1, num_node]),
                                 shape=[batch_size, num_node])]  # generated link vectors [B, 1, num_node]; stacked into [B, num_node, num_node] at the end
    all_ce_loss = []  # per-step cross-entropy losses, later stacked to [B, num_node-1]
all_prob = [] # [B,num_node-1(stack axis), num_node]
lstm_input = tf.expand_dims(init_link_embedding, 1) # [B,1, link_embedding_dim]
lstm_state = None
for j in range(2, num_node + 1):
_, link_c, link_h = link_lstm_layer(lstm_input,
initial_state=lstm_state) # [B, link_embedding_dim]
lstm_state = [link_c, link_h]
all_h.append(link_h) # [j, B, link_embedding_dim]
all_h_w.append(link_atten_w_1(link_h))
query = link_atten_w_2(link_h)
key = tf.transpose(tf.stack(all_h_w[:-1], axis=0), perm=[1, 0, 2]) # [B,j-1, link_embedding_dim]
query = tf.reshape(query, [batch_size, 1, hidden_size]) # [B,1, link_embedding_dim]
query = tf.nn.tanh(query + key) # [B,j-1, link_embedding_dim]
logits = link_atten_w_a(query) # [B,j-1, 1]
logits = logits / controller_temperature
logits = controller_tanh_constant * tf.nn.tanh(logits)
        logits = tf.squeeze(logits, -1)  # [B, j-1] logits over candidate predecessor nodes
prob = tf.pad(logits, [[0, 0], [0, num_node - j + 1]]) # [B, num_node]
all_prob.append(prob)
        # Sample a predecessor node id from the distribution and look up its embedding
input_node_id = tf.squeeze(tf.random.categorical(logits, 1), axis=[-1]) # [B]
link = tf.one_hot(input_node_id, depth=num_node) # [B,num_node]
link_embedding = link_embedding_layer(tf.expand_dims(input_node_id, -1)) # [B,1,link_embedding_dim]
        # Compute the cross-entropy loss for the sampled node
ce_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,
labels=tf.stop_gradient(input_node_id),
name=f"controller_ce_{j}") # [B]
all_links.append(link)
all_ce_loss.append(ce_loss)
lstm_input = link_embedding # [B, 1, link_embedding_dim]
all_prob = tf.stack(all_prob, 1) # [B, num_node-1, num_node]
all_links = tf.stack(all_links, 1)
all_ce_loss = tf.stack(all_ce_loss, axis=-1) # [B,num_node-1]
model = tf.keras.Model(inputs=[input_tensor],
outputs=[all_links, all_ce_loss, all_prob])
return model
|
# Copyright 2021 Dakewe Biotech Corporation. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import argparse
import os
import shutil
from PIL import Image
from tqdm import tqdm
def main() -> None:
image_dir = f"{args.output_dir}/train"
if os.path.exists(image_dir):
shutil.rmtree(image_dir)
os.makedirs(image_dir)
file_names = os.listdir(args.inputs_dir)
for file_name in tqdm(file_names, total=len(file_names)):
# Use PIL to read high-resolution image
image = Image.open(f"{args.inputs_dir}/{file_name}")
for pos_x in range(0, image.size[0] - args.image_size + 1, args.step):
for pos_y in range(0, image.size[1] - args.image_size + 1, args.step):
                # Crop box is (left, upper, right, lower)
crop_image = image.crop([pos_x, pos_y, pos_x + args.image_size, pos_y + args.image_size])
# Save all images
crop_image.save(f"{image_dir}/{file_name.split('.')[-2]}_{pos_x}_{pos_y}.{file_name.split('.')[-1]}")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Prepare database scripts.")
parser.add_argument("--inputs_dir", type=str, default="T91/original", help="Path to input image directory. (Default: `T91/original`)")
parser.add_argument("--output_dir", type=str, default="T91/SRCNN", help="Path to generator image directory. (Default: `T91/SRCNN`)")
parser.add_argument("--image_size", type=int, default=33, help="Low-resolution image size from raw image. (Default: 33)")
parser.add_argument("--step", type=int, default=14, help="Crop image similar to sliding window. (Default: 14)")
args = parser.parse_args()
main()
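# Example invocation (the script filename is an assumption; the flags and their
# defaults come from the argparse definition above):
#
#   python prepare_dataset.py --inputs_dir T91/original --output_dir T91/SRCNN --image_size 33 --step 14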
|
"""
Start active players for a range of dates
"""
import os
import sys
import yahooscraper as ys
from datetime import datetime, date, timedelta
from urllib.parse import urljoin
from utils import *
# Command-line args
DATE_LIMIT = date.today() + timedelta(days=365)
NUM_DAYS_DEFAULT = 1
NUM_DAYS_MAX = 100
OPTIONAL_ARGS.extend([
'<date (default: today, max: %s)>' % DATE_LIMIT.strftime('%Y-%m-%d'),
'<num_days (default: %d, max: %d)>' % (NUM_DAYS_DEFAULT, NUM_DAYS_MAX)
])
# Error messages
START_PLAYERS_ERROR_MSG = 'Failed to start players'
def start_active_players(session, league_id, team_id, start_date=None):
"""
Start active players and output results
"""
# Load team page
team_url = ys.fantasy.team.url('nba', league_id, team_id, start_date)
response = session.get(team_url)
# Press "Start Active Players" button
start_path = ys.fantasy.team.start_active_players_path(response.text)
start_url = urljoin(response.url, start_path)
response = session.get(start_url)
# If unsuccessful, report failure
formatted_date = ys.fantasy.team.date(response.text)
    if not (200 <= response.status_code < 300):
        print('- %s: Failed to start active players' % formatted_date)
        return
# Report success and highlight available bench players
print('- %s: Started active players' % formatted_date)
alternates = ys.fantasy.team.alternates(response.text)
for player in alternates:
print(' - Alternate: %s (%s) [%s]' % (
player['name'],
player['details'],
player['opponent']))
def main():
username = os.getenv(USERNAME_ENV)
password = os.getenv(PASSWORD_ENV)
credentials_missing = username is None or password is None
num_args_incorrect = len(sys.argv) not in required_num_args()
if credentials_missing or num_args_incorrect:
usage()
league_id = sys.argv[1]
team_id = sys.argv[2]
start_date = date_from_argv(3, DATE_LIMIT)
num_days = int_from_argv(3 if start_date is None else 4, NUM_DAYS_MAX)
if start_date is None:
start_date = date.today()
if num_days is None:
num_days = NUM_DAYS_DEFAULT
try:
session = ys.login.authenticated_session(username, password)
except:
sys.exit(LOGIN_ERROR_MSG)
try:
output_team_info(session, league_id, team_id)
for _ in range(num_days):
start_active_players(session, league_id, team_id, start_date)
start_date = start_date + timedelta(days=1)
except:
sys.exit(START_PLAYERS_ERROR_MSG)
if __name__ == '__main__':
main()
|
"""
IO Module
===========
SCOUT's IO module is for reading and writing different volumetric image formats and preparing chunked arrays for
processing. SCOUT makes use of TIFF and Zarr file formats throughout the analysis pipeline, and this module
is meant to consolidate these side-effecting IO operations.
"""
import tifffile
import multiprocessing
import numpy as np
import zarr
from tqdm import tqdm
from numcodecs import Blosc
from scout.utils import tifs_in_dir
def imread(path):
"""
Reads TIFF file into a numpy array in memory.
Parameters
-----------
path : str
Path to TIFF image to open
Returns
--------
image : ndarray
Image array
"""
return tifffile.imread(files=path)
def imsave(path, data, compress=1):
"""
Saves numpy array as a TIFF image.
Parameters
-----------
path : str
Path to TIFF image to create / overwrite
data : ndarray
Image data array
compress : int
Level of lossless TIFF compression (0-9)
"""
tifffile.imsave(file=path, data=data, compress=compress)
def imread_parallel(paths, nb_workers):
"""
Reads TIFF files into a numpy array in memory.
Parameters
------------
paths : list
A list of TIFF paths to read (order is preserved)
nb_workers : int
Number of parallel processes to use in reading images
Returns
--------
data : ndarray
Image data
"""
img = imread(paths[0])
with multiprocessing.Pool(nb_workers) as pool:
data = list(tqdm(pool.imap(imread, paths), total=len(paths)))
return np.asarray(data, dtype=img.dtype)
def imread_folder(path, nb_workers):
"""
Finds all TIFF images in a folder and loads them into a single array.
**Note:** all images must be the same shape to be able to stack them.
Parameters
----------
path : str
Path to directory with TIFF images in alphabetical order
nb_workers : int
Number of parallel processes to use in reading images
Returns
-------
data : ndarray
Image array
"""
paths, _ = tifs_in_dir(path)
data = imread_parallel(paths, nb_workers)
return data
def open(path, nested=True, mode="a"):
"""
Opens a persistent Zarr array or NestedDirectoryStore located at `path`.
Parameters
----------
path : str
Path to Zarr array or NestedDirectoryStore
nested : bool
Flag to indicate if path is for flat Zarr array or NestedDirectoryStore
mode : str
Read / write permissions mode
Returns
-------
arr : zarr Array
Reference to open Zarr array
"""
if nested:
store = zarr.NestedDirectoryStore(path)
return zarr.open(store, mode=mode)
else:
return zarr.open(path, mode=mode)
def new_zarr(path, shape, chunks, dtype, in_memory=False, **kwargs):
"""
Create new Zarr NestedDirectoryStore at `path`.
**NOTE:** Persistent Zarr arrays are stored on disk. To avoid data loss, be careful when calling `new_zarr`
on a path with an existing array.
Parameters
----------
path : str
Path to new zarr array
shape : tuple
Overall shape of the zarr array
chunks : tuple
Shape of each chunk for the zarr array
dtype : str
        Data type of the zarr array
    in_memory : bool
        If True, create an in-memory array instead of a NestedDirectoryStore on disk
    kwargs : dict
        Keyword args to pass to zarr.open()
Returns
-------
arr : zarr Array
Reference to open zarr array
"""
compressor = Blosc(cname="zstd", clevel=1, shuffle=Blosc.BITSHUFFLE)
if in_memory:
z_arr_out = zarr.zeros(
shape=shape, chunks=chunks, dtype=dtype, compressor=compressor, **kwargs
)
else:
store = zarr.NestedDirectoryStore(path)
z_arr_out = zarr.open(
store,
mode="w",
shape=shape,
chunks=chunks,
dtype=dtype,
compressor=compressor,
**kwargs
)
return z_arr_out
def new_zarr_like(path, arr, **kwargs):
"""
Creates a new zarr array like `arr`.
Parameters
----------
path : str
Path to new zarr array
arr : zarr Array
Reference to template zarr array
kwargs : dict
        Keyword args to pass to zarr.open()
Returns
-------
new_arr : zarr Array
Reference to new zarr array
"""
return new_zarr(path, arr.shape, arr.chunks, arr.dtype, **kwargs)
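# Minimal usage sketch (the path is illustrative): create a small chunked Zarr array
# on disk with `new_zarr`, write to it, then reopen it read-only through `open` above.
if __name__ == "__main__":
    z = new_zarr("example.zarr", shape=(64, 64, 64), chunks=(32, 32, 32), dtype="uint16")
    z[:32] = 1
    z_read = open("example.zarr", nested=True, mode="r")
    print(z_read.shape, z_read.chunks, int(z_read[0, 0, 0]))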
|
'''
Python Basics with Numpy
This is a brief introduction to Python.
The script uses Python 3.
The script below is to get familiar with:
1. Be able to use iPython Notebooks
2. Be able to use numpy functions and numpy matrix/vector operations
3. Understand the concept of "broadcasting"
4. Be able to vectorize code
Let's get started!
'''
#==============================================================================================================
#1 - Building basic functions with numpy
#==============================================================================================================
'''
Numpy is the main package for scientific computing in Python. It is maintained by a large community (www.numpy.org). Several key numpy functions such as np.exp, np.log, and np.reshape are used every day in ML and DL.
'''
#------------------------------------------
# 1.1 - sigmoid function, np.exp()
#------------------------------------------
'''Before using np.exp(), we will use math.exp() to implement the sigmoid function. We will then see why np.exp() is preferable to math.exp().
Exercise: Build a function that returns the sigmoid of a real number x. Use math.exp(x) for the exponential function.
Reminder: sigmoid(x) = 1 / (1 + exp(-x)) is sometimes also known as the logistic function. It is a non-linear function used not only in Machine Learning (Logistic Regression), but also in Deep Learning.
To refer to a function belonging to a specific package we could call it using package_name.function().
'''
import math
def basic_sigmoid(x):
"""
Compute sigmoid of x.
Arguments:
x -- A scalar
Return:
s -- sigmoid(x)
"""
s = 1 / (1 + math.exp(-x))
return s
#basic_sigmoid(3) #0.9525741268224334
'''
Actually, we rarely use the "math" library in deep learning because the inputs of the functions are real numbers. In deep learning we mostly use matrices and vectors. This is why numpy is more useful.
### One reason why we use "numpy" instead of "math" in Deep Learning ###
'''
#Uncomment and run this line to see the error
#x = [1, 2, 3]
#basic_sigmoid(x)
'''
In fact, if x = (x1, x2, ..., xn) is a row vector then np.exp(x) will apply the exponential function to every element of x. The output will thus be: np.exp(x) = (exp(x1), exp(x2), ..., exp(xn)).
'''
import numpy as np
# example of np.exp
x = np.array([1, 2, 3])
print(np.exp(x)) # result is (exp(1), exp(2), exp(3))
'''
Furthermore, if x is a vector, then a Python operation such as s = x + 3 or s = 1/x will output s as a vector of the same size as x.
'''
# example of vector operation
x = np.array([1, 2, 3])
print (x + 3)
#[4 5 6]
'''Any time we need more info on a numpy function, look at the official documentation or write np.exp? (for example) to get quick access to the documentation.
'''
import numpy as np # this means we can access numpy functions by writing np.function() instead of numpy.function()
def sigmoid(x):
"""
Compute the sigmoid of x
Arguments:
x -- A scalar or numpy array of any size
Return:
s -- sigmoid(x)
"""
s = 1 / (1 + np.exp(-x) )
return s
#x = np.array([1, 2, 3])
#sigmoid(x)
#------------------------------------------
# 1.2 - Sigmoid gradient
#------------------------------------------
'''
we will need to compute gradients to optimize loss functions using backpropagation. Let's code our first gradient function.
Exercise: Implement the function sigmoid_grad() to compute the gradient of the sigmoid function with respect to its input x.
The formula is:
sigmoid_derivative(x) = σ'(x) = σ(x) * (1 − σ(x))
'''
def sigmoid_derivative(x):
"""
Compute the gradient (also called the slope or derivative) of the sigmoid function with respect to its input x.
we can store the output of the sigmoid function into variables and then use it to calculate the gradient.
Arguments:
x -- A scalar or numpy array
Return:
    ds -- the computed gradient.
"""
s = sigmoid(x)
ds = s*(1-s)
return ds
#x = np.array([1, 2, 3])
#print ("sigmoid_derivative(x) = " + str(sigmoid_derivative(x)))
#------------------------------------------
# 1.3 - Reshaping arrays
#------------------------------------------
'''
Two common numpy functions used in deep learning are np.shape and np.reshape().
X.shape is used to get the shape (dimension) of a matrix/vector X.
X.reshape(...) is used to reshape X into some other dimension.
For example, in computer science, an image is represented by a 3D array of shape (length, height, depth=3). However, when we read an image as the input of an algorithm we convert it to a vector of shape (length*height*3, 1). In other words, we "unroll", or reshape, the 3D array into a 1D vector.
Let us implement image2vector() that takes an input of shape (length, height, 3) and returns a vector of shape (length*height*3, 1). For example, if we would like to reshape an array v of shape (a, b, c) into a vector of shape (a*b,c) we would do:
v = v.reshape((v.shape[0]*v.shape[1], v.shape[2])) # v.shape[0] = a ; v.shape[1] = b ; v.shape[2] = c
'''
def image2vector(image):
"""
Argument:
image -- a numpy array of shape (length, height, depth)
Returns:
v -- a vector of shape (length*height*depth, 1)
"""
v = image.reshape(image.shape[0]*image.shape[1]*image.shape[2],1)
return v
# This is a 3 by 3 by 2 array, typically images will be (num_px_x, num_px_y,3) where 3 represents the RGB values
image = np.array([[[ 0.67826139, 0.29380381],
[ 0.90714982, 0.52835647],
[ 0.4215251 , 0.45017551]],
[[ 0.92814219, 0.96677647],
[ 0.85304703, 0.52351845],
[ 0.19981397, 0.27417313]],
[[ 0.60659855, 0.00533165],
[ 0.10820313, 0.49978937],
[ 0.34144279, 0.94630077]]])
print ("image2vector(image) = " + str(image2vector(image)))
#------------------------------------------
# 1.4 - Normalizing rows
#------------------------------------------
'''
Another common technique we use in Machine Learning and Deep Learning is to normalize our data. It often leads to a better performance because gradient descent converges faster after normalization. Here, by normalization we mean changing x to x/∥x∥ (dividing each row vector of x by its norm).
we can divide matrices of different sizes and it works fine: this is called broadcasting
Let us implement normalizeRows() to normalize the rows of a matrix. After applying this function to an input matrix x, each row of x should be a vector of unit length (meaning length 1).
'''
def normalizeRows(x):
"""
Implement a function that normalizes each row of the matrix x (to have unit length).
Argument:
x -- A numpy matrix of shape (n, m)
Returns:
x -- The normalized (by row) numpy matrix. we are allowed to modify x.
"""
# Compute x_norm as the norm 2 of x. Use np.linalg.norm(..., ord = 2, axis = ..., keepdims = True)
x_norm = np.linalg.norm(x,axis=1,keepdims=True)
# Divide x by its norm.
x = x / x_norm
return x
x = np.array([
[0, 3, 4],
[1, 6, 4]])
print("normalizeRows(x) = " + str(normalizeRows(x)))
'''
Note: In normalizeRows(), we can try to print the shapes of x_norm and x, and then rerun the assessment. We'll find out that they have different shapes. This is normal given that x_norm takes the norm of each row of x. So x_norm has the same number of rows but only 1 column. So how did it work when we divided x by x_norm? This is called broadcasting, and we'll talk about it now!
'''
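# A quick, optional check of the shapes mentioned in the note above:
# x_norm has shape (2, 1) and broadcasts against x of shape (2, 3).
x_check = np.array([
    [0, 3, 4],
    [1, 6, 4]])
x_check_norm = np.linalg.norm(x_check, axis=1, keepdims=True)
print("x.shape =", x_check.shape, ", x_norm.shape =", x_check_norm.shape, ", (x/x_norm).shape =", (x_check / x_check_norm).shape)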
#------------------------------------------
# 1.5 - Broadcasting and the softmax function
#------------------------------------------
'''
A very important concept to understand in numpy is "broadcasting". It is very useful for performing mathematical operations between arrays of different shapes. For the full details on broadcasting, we can read the official broadcasting documentation.
Let us implement a softmax function using numpy. We can think of softmax as a normalizing function used when our algorithm needs to classify two or more classes.
Read more about this at https://en.wikipedia.org/wiki/Softmax_function
'''
def softmax(x):
"""Calculates the softmax for each row of the input x.
    Our code should work for a row vector and also for matrices of shape (n, m).
Argument:
x -- A numpy matrix of shape (n,m)
Returns:
s -- A numpy matrix equal to the softmax of x, of shape (n,m)
"""
# Apply exp() element-wise to x. Use np.exp(...).
x_exp = np.exp(x)
# Create a vector x_sum that sums each row of x_exp. Use np.sum(..., axis = 1, keepdims = True).
x_sum = np.sum(x_exp,axis=1,keepdims=True)
# Compute softmax(x) by dividing x_exp by x_sum. It should automatically use numpy broadcasting.
s = x_exp/x_sum
return s
x = np.array([
[9, 2, 5, 0, 0],
[7, 5, 0, 0 ,0]])
print("softmax(x) = " + str(softmax(x)))
'''
If we print the shapes of x_exp, x_sum and s above and rerun the assessment cell, we will see that x_sum is of shape (2,1) while x_exp and s are of shape (2,5). x_exp/x_sum works due to python broadcasting.
'''
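# Optional shape check for the note above: x_exp and s are (2, 5) while x_sum is (2, 1).
x_exp_check = np.exp(x)
x_sum_check = np.sum(x_exp_check, axis=1, keepdims=True)
print(x_exp_check.shape, x_sum_check.shape, (x_exp_check / x_sum_check).shape)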
#==============================================================================================================
# 2) Vectorization
#==============================================================================================================
'''
In deep learning, we deal with very large datasets. Hence, a non-computationally-optimal function can become a huge bottleneck in our algorithm and can result in a model that takes ages to run. To make sure that our code is computationally efficient, we will use vectorization. Below are a few examples that demonstrate this.
'''
import time
x1 = [9, 2, 5, 0, 0, 7, 5, 0, 0, 0, 9, 2, 5, 0, 0]
x2 = [9, 2, 2, 9, 0, 9, 2, 5, 0, 0, 9, 2, 5, 0, 0]
### CLASSIC DOT PRODUCT OF VECTORS IMPLEMENTATION ###
tic = time.process_time()
dot = 0
for i in range(len(x1)):
dot+= x1[i]*x2[i]
toc = time.process_time()
print ("dot = " + str(dot) + "\n ----- Computation time = " + str(1000*(toc - tic)) + "ms")
### CLASSIC OUTER PRODUCT IMPLEMENTATION ###
tic = time.process_time()
outer = np.zeros((len(x1),len(x2))) # we create a len(x1)*len(x2) matrix with only zeros
for i in range(len(x1)):
for j in range(len(x2)):
outer[i,j] = x1[i]*x2[j]
toc = time.process_time()
print ("outer = " + str(outer) + "\n ----- Computation time = " + str(1000*(toc - tic)) + "ms")
### CLASSIC ELEMENTWISE IMPLEMENTATION ###
tic = time.process_time()
mul = np.zeros(len(x1))
for i in range(len(x1)):
mul[i] = x1[i]*x2[i]
toc = time.process_time()
print ("elementwise multiplication = " + str(mul) + "\n ----- Computation time = " + str(1000*(toc - tic)) + "ms")
### CLASSIC GENERAL DOT PRODUCT IMPLEMENTATION ###
W = np.random.rand(3,len(x1)) # Random 3*len(x1) numpy array
tic = time.process_time()
gdot = np.zeros(W.shape[0])
for i in range(W.shape[0]):
for j in range(len(x1)):
gdot[i] += W[i,j]*x1[j]
toc = time.process_time()
print ("gdot = " + str(gdot) + "\n ----- Computation time = " + str(1000*(toc - tic)) + "ms")
x1 = [9, 2, 5, 0, 0, 7, 5, 0, 0, 0, 9, 2, 5, 0, 0]
x2 = [9, 2, 2, 9, 0, 9, 2, 5, 0, 0, 9, 2, 5, 0, 0]
### VECTORIZED DOT PRODUCT OF VECTORS ###
tic = time.process_time()
dot = np.dot(x1,x2)
toc = time.process_time()
print ("dot = " + str(dot) + "\n ----- Computation time = " + str(1000*(toc - tic)) + "ms")
### VECTORIZED OUTER PRODUCT ###
tic = time.process_time()
outer = np.outer(x1,x2)
toc = time.process_time()
print ("outer = " + str(outer) + "\n ----- Computation time = " + str(1000*(toc - tic)) + "ms")
### VECTORIZED ELEMENTWISE MULTIPLICATION ###
tic = time.process_time()
mul = np.multiply(x1,x2)
toc = time.process_time()
print ("elementwise multiplication = " + str(mul) + "\n ----- Computation time = " + str(1000*(toc - tic)) + "ms")
### VECTORIZED GENERAL DOT PRODUCT ###
tic = time.process_time()
dot = np.dot(W,x1)
toc = time.process_time()
print ("gdot = " + str(dot) + "\n ----- Computation time = " + str(1000*(toc - tic)) + "ms")
'''
As you may have noticed, the vectorized implementation is much cleaner and more efficient. For bigger vectors/matrices, the differences in running time become even bigger.
Note that np.dot() performs a matrix-matrix or matrix-vector multiplication. This is different from np.multiply() and the * operator (which is equivalent to .* in Matlab/Octave), which performs an element-wise multiplication.
'''
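# Small illustration of the note above: np.dot is an inner product, while
# np.multiply and the * operator work element-wise.
a_demo = np.array([1, 2, 3])
b_demo = np.array([4, 5, 6])
print(np.dot(a_demo, b_demo))       # 32 -> scalar inner product
print(np.multiply(a_demo, b_demo))  # [ 4 10 18] -> element-wise
print(a_demo * b_demo)              # same as np.multiply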
#------------------------------------------
# 2.1 Implement the L1 and L2 loss functions
#------------------------------------------
'''
Let us implement the numpy vectorized version of the L1 loss. We may find the function abs(x) (absolute value of x) useful.
The loss is used to evaluate the performance of our model. The bigger the loss is, the more different our predictions (ŷ) are from the true values (y). In deep learning, we use optimization algorithms like Gradient Descent to train our model and to minimize the cost.
L1 loss is defined as:
L1(ŷ, y) = sum_{i=0}^{m} |y(i) − ŷ(i)|
'''
def L1(yhat, y):
"""
Arguments:
yhat -- vector of size m (predicted labels)
y -- vector of size m (true labels)
Returns:
loss -- the value of the L1 loss function defined above
"""
loss = np.sum(abs(yhat-y))
return loss
yhat = np.array([.9, 0.2, 0.1, .4, .9])
y = np.array([1, 0, 0, 1, 1])
print("L1 = " + str(L1(yhat,y)))
'''
Let us also implement the numpy vectorized version of the L2 loss. There are several ways of implementing the L2 loss, but we may find the function np.dot() useful. As a reminder, if x = [x1, x2, ..., xn], then np.dot(x, x) = sum_{j=0}^{n} x_j^2.
L2 loss is defined as
L2(ŷ, y) = sum_{i=0}^{m} (y(i) − ŷ(i))^2
'''
def L2(yhat, y):
"""
Arguments:
yhat -- vector of size m (predicted labels)
y -- vector of size m (true labels)
Returns:
loss -- the value of the L2 loss function defined above
"""
loss = np.sum(np.dot(yhat-y,yhat-y))
#Also possible loss = np.sum((yhat-y)**2)
return loss
yhat = np.array([.9, 0.2, 0.1, .4, .9])
y = np.array([1, 0, 0, 1, 1])
print("L2 = " + str(L2(yhat,y)))
# BONUS : A quick reshape hack is to use -1 as the parameter
a = np.random.rand(3,2,2,3)
b = a.reshape(3,-1).T
'''
a.reshape(3, -1) has shape (3, 12); the number of columns (12) is calculated automatically. The transpose then makes the shape of b equal to (12, 3).
'''
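# Verifying the note above: reshape(3, -1) infers 12 columns, and the transpose
# makes the final shape of b equal to (12, 3).
print(a.shape, a.reshape(3, -1).shape, b.shape)  # (3, 2, 2, 3) (3, 12) (12, 3)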
|
#!/usr/bin/env python3
# encoding: utf-8
import flask
from werkzeug.exceptions import NotFound, InternalServerError
app = flask.Flask(__name__)
@app.route('/cats/<breed>')
def cats(breed):
"""wonky code ahead!"""
return
@app.errorhandler(404)
def not_found(e):
# shame that i have to do it like this, instead of just returning
# like e.status_code or smth
if isinstance(e, NotFound):
status_code = 404
elif isinstance(e, InternalServerError):
status_code = 500
else:
raise e
return 'ya dun guffed ({})'.format(status_code), status_code
# looks like you have to separate them :(
@app.errorhandler(500)
def internal_server_error(e):
    return 'ya dun guffed ({})'.format(500), 500
if __name__ == '__main__':
app.run()
|
def v(kod):
    # Recursively extend the prefix `kod` until it has length n,
    # printing every binary string of length n in lexicographic order.
    if n == len(kod):
        print(kod)
        return
    v(kod + '0')
    v(kod + '1')
n = int(input())
v('')
|
# search.py
# ---------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
"""
In search.py, you will implement generic search algorithms which are called by
Pacman agents (in searchAgents.py).
"""
import util
class SearchProblem:
"""
This class outlines the structure of a search problem, but doesn't implement
any of the methods (in object-oriented terminology: an abstract class).
You do not need to change anything in this class, ever.
"""
def getStartState(self):
"""
Returns the start state for the search problem.
"""
util.raiseNotDefined()
def isGoalState(self, state):
"""
state: Search state
Returns True if and only if the state is a valid goal state.
"""
util.raiseNotDefined()
def getSuccessors(self, state):
"""
state: Search state
For a given state, this should return a list of triples, (successor,
action, stepCost), where 'successor' is a successor to the current
state, 'action' is the action required to get there, and 'stepCost' is
the incremental cost of expanding to that successor.
"""
util.raiseNotDefined()
def getCostOfActions(self, actions):
"""
actions: A list of actions to take
This method returns the total cost of a particular sequence of actions.
The sequence must be composed of legal moves.
"""
util.raiseNotDefined()
def tinyMazeSearch(problem):
"""
Returns a sequence of moves that solves tinyMaze. For any other maze, the
sequence of moves will be incorrect, so only use this for tinyMaze.
"""
from game import Directions
s = Directions.SOUTH
w = Directions.WEST
return [s, s, w, s, w, w, s, w]
def depthFirstSearch(problem):
"""
Search the deepest nodes in the search tree first.
ANS Problem 1: Exploration is what I expected. Pacman does not explore all nodes; it finds the first path to the deepest node. This is a least cost solution.
"""
nv = [] # Initialize empty array for visited nodes
s = util.Stack() # Initialize LIFO stack for fringe
s.push((problem.getStartState(), ())) # Add initial location to stack
while not s.isEmpty(): # Loop until stack is empty
cn = s.pop() # Current node in search state
cs, cp = cn[0], cn[1] # current state, current plan
if problem.isGoalState(cs): # checks if current state is goal state
return list(cp) # returns plan if true
if not cs in nv: # Executes only if pacman hasn't already been to this state before
nv.append(cs) # Adds node to list of visited nodes
for path in problem.getSuccessors(cs): # all paths known for current state
np = list(cp) # grab new plan from current plan
np.append(path[1]) # add path to the new plan
nn = (path[0], tuple(np)) # new node is the child
if not path[0] in nv: # if the new node isn't already visited
s.push(nn) # Push new node to stack
util.raiseNotDefined() # If no solution is found, throw an error
def breadthFirstSearch(problem):
"""
Search the shallowest nodes in the search tree first.
ANS Problem 2: BFS provides least cost solution."""
nv = [] # Initialize empty array for visited nodes
s = util.Queue() # Initialize queue for fringe
    s.push((problem.getStartState(), ()))          # Add initial location to queue
    while not s.isEmpty():                         # Loop until queue is empty
cn = s.pop() # Current node in search state
cs, cp = cn[0], cn[1] # current state, current plan
if problem.isGoalState(cs): # checks if current state is goal state
return list(cp) # returns plan if true
if not cs in nv: # Executes only if pacman hasn't already been to this state before
nv.append(cs) # Adds node to list of visited nodes
for path in problem.getSuccessors(cs): # all paths known for current state
np = list(cp) # grab new plan from current plan
np.append(path[1]) # add path to the new plan
nn = (path[0], tuple(np)) # new node is the child
if not path[0] in nv: # if the new node isn't already visited
                    s.push(nn)                     # Push new node to queue
util.raiseNotDefined() # If no solution is found, throw an error
def uniformCostSearch(problem):
"""Search the node of least total cost first."""
nv = [] # Initialize empty array for visited nodes
s = util.PriorityQueue() # Initialize priority queue for fringe
    s.push((problem.getStartState(), ()), 0)       # Add initial location to priority queue with cost of 0
    while not s.isEmpty():                         # Loop until priority queue is empty
cn = s.pop() # Current node in search state
cs, cp = cn[0], cn[1] # current state, current plan
if problem.isGoalState(cs): # checks if current state is goal state
return list(cp) # returns plan if true
if not cs in nv: # Executes only if pacman hasn't already been to this state before
nv.append(cs) # Adds node to list of visited nodes
for path in problem.getSuccessors(cs): # all paths known for current state
np = list(cp) # grab new plan from current plan
np.append(path[1]) # add path to the new plan
nn = (path[0], tuple(np)) # new node is the child
if not path[0] in nv: # if the new node isn't already visited
                    s.push(nn, problem.getCostOfActions(np))  # Push new node to priority queue with associated cost
util.raiseNotDefined() # If no solution is found, throw an error
def nullHeuristic(state, problem=None):
"""
A heuristic function estimates the cost from the current state to the nearest
goal in the provided SearchProblem. This heuristic is trivial.
"""
return 0
def aStarSearch(problem, heuristic=nullHeuristic):
"""
Search the node that has the lowest combined cost and heuristic first.
    ANS Problem 4: For an open maze, A* search is optimal. It moves to the goal state in the least moves possible.
"""
nv = [] # Initialize empty array for visited nodes
s = util.PriorityQueue() # Initialize priority queue for fringe
    s.push((problem.getStartState(), ()), 0)       # Add initial location to priority queue with cost of 0
    while not s.isEmpty():                         # Loop until priority queue is empty
cn = s.pop() # Current node in search state
cs, cp = cn[0], cn[1] # current state, current plan
if problem.isGoalState(cs): # checks if current state is goal state
return list(cp) # returns plan if true
if not cs in nv: # Executes only if pacman hasn't already been to this state before
nv.append(cs) # Adds node to list of visited nodes
for path in problem.getSuccessors(cs): # all paths known for current state
np = list(cp) # grab new plan from current plan
np.append(path[1]) # add path to the new plan
nn = (path[0], tuple(np)) # new node is the child
if not path[0] in nv: # if the new node isn't already visited
                    s.push(nn, problem.getCostOfActions(np) + (heuristic(path[0], problem)))  # Push new node to priority queue with path cost plus heuristic
util.raiseNotDefined() # If no solution is found, throw an error
# Abbreviations
bfs = breadthFirstSearch
dfs = depthFirstSearch
astar = aStarSearch
ucs = uniformCostSearch
|
#print('hello!!')
num1 = 20
num2 = 10
def multiply(num1,num2):
product = num1*num2
return product
#print (multiply(num1, num2))
#for i in range(1,11):
# print (i)
def evenindex1() -> str:
word = input('enter the word - ')
print (word)
output = ''
for i in range(len(word)):
if i%2 == 0:
output = output+word[i]
return output
#print (evenindex1())
def evenindex2() -> str:
word = input('enter the word - ')
print (word)
output = ''
    for i in range(0, len(word), 2):
output = output + word[i]
return output
def evenindex3() -> str:
word = input('enter the word -')
print(word)
output = ''
#x= list(word)
    for i in word[::2]:
output = output + i
return output
def inputword() -> str:
output = input('enter the word -')
print(output)
return output
def remove_first_nchars(n):
word = inputword()
    output = word[n:]
return output
def remove_last_nchars(n):
word = inputword()
    output = word[:len(word) - n]
return output
#print(remove_first_nchars(4))
def text_to_arrays(text):
wronginputarrayelements = ['[',']',',']
#print(text[0]+'--'+ text[-1])
if text[0] in wronginputarrayelements:
text = text[1:]
#print(text)
if text[-1] in wronginputarrayelements:
text = text[:-1]
#print(text)
#print('see'+text)
array = list(map(str.strip, text.strip().split(',')))
print('array is', array)
return array
def workwitharrays(what):
input_array = input('input array - ')
array = text_to_arrays(input_array)
print(*array)
if what == 'wordcount':
wordcheck = input('word check -')
output1 = input_array.count(wordcheck)
output2 =0
        for i in range(len(input_array) - len(wordcheck) + 1):
#print(output2)
#print(input_array[i:i+len(wordcheck)])
output2 += input_array[i:i+len(wordcheck)] == wordcheck
return output1, output2
if what == 'divisible':
import numpy
divisibleby = input('divisible by number -')
divisibleby = int(divisibleby)
testarray = []
for each in array:
if each.isnumeric(): testarray.append(each)
newarray = list(map(int, testarray))
newarray = numpy.array(newarray)
print(newarray)
        output = newarray / divisibleby
if what == 'first_last_same':
if array[0]==array[-1]:
print('first',str(array[0]) , 'and last element', str(array[-1]), 'of the array-' , *array , 'are same')
output = 'first',str(array[0]) , 'and last element', str(array[-1]), 'of the array-' , *array , 'are same'
else:
print('first',str(array[0]) , 'and last element', str(array[-1]), 'of the array-' , *array , 'are different')
output = 'first',str(array[0]) , 'and last element', str(array[-1]), 'of the array-' , *array , 'are different'
return output
#text1 = '[10, 20, 30, 40, 10]'
#text2 = 'Emma is good developer. Emma is a writer'
#print(workwitharrays('wordcount'))
def patterns(type):
    if type == 'trianglenumbers':
        n = input('length of triangle-')
        intn = int(n)
        output = ''
        for i in range(1, intn + 1):
            line = (str(i) + ' ') * i
            print(line)
            output += line + '\n'
        return output
if type=='palindrome':
n = input('check palindrome number-')
intn = int(n)
revn = n[::-1]
if intn == int(revn):
output = 'yeah, its a palindrome'
else:
output = 'Nope, try a different number'
return output
if type =='revnum':
n=input('Enter the number to be reversed with spaces in between-')
revn = n[::-1]
output = ' '.join(revn)
return output
#print(patterns('revnum'))
def tax():
income = int(input('Enter the income in Dollars-'))
taxable = income - 10000
tax=0
if income<= 10000:
output = tax
if taxable>=10000:
tax = ((taxable - 10000)*20/100)+(10000*10/100)
elif taxable<10000 and taxable> 0:
tax= taxable*10/100
output = tax
return output
print(tax())
|
import random
import pathlib
import os
from shutil import copyfile
import argparse
import glob
from pathlib import Path
FILE = Path(__file__).resolve()
ROOT = FILE.parents[0]
#
def copy_files(files, input_images_path, input_labels_path, output_images_path, output_label_path):
for image_name in files:
val_name = os.path.splitext(image_name)[0] + '.txt'
output_image_file_path = os.path.join(output_images_path, image_name)
input_image_file_path = os.path.join(input_images_path, image_name)
output_label_file_path = os.path.join(output_label_path, val_name)
input_label_file_path = os.path.join(input_labels_path, val_name)
copyfile(input_image_file_path, output_image_file_path)
copyfile(input_label_file_path, output_label_file_path)
def run(input_dir=ROOT / 'data/images', output_dir=ROOT / 'output'):
print(output_dir)
output_trains_images = output_dir / 'trains/images'
output_valids_images = output_dir / 'valids/images'
output_trains_labels = output_dir / 'trains/labels'
output_valids_labels = output_dir / 'valids/labels'
# make dir first
print(output_trains_images)
pathlib.Path(output_trains_images).mkdir(parents=True, exist_ok=True)
pathlib.Path(output_valids_images).mkdir(parents=True, exist_ok=True)
pathlib.Path(output_trains_labels).mkdir(parents=True, exist_ok=True)
pathlib.Path(output_valids_labels).mkdir(parents=True, exist_ok=True)
images_path = Path(input_dir).resolve()
labels_path = images_path.parents[0] / 'labels'
images_names = os.listdir(str(images_path))
random.shuffle(images_names)
train_len = int(len(images_names) * 0.8)
train_images = images_names[:train_len]
val_images = images_names[train_len:]
copy_files(train_images, images_path, labels_path, output_trains_images, output_trains_labels)
copy_files(val_images, images_path, labels_path, output_valids_images, output_valids_labels)
def parse_opt():
parser = argparse.ArgumentParser()
parser.add_argument('--input-dir', type=str, default=ROOT / 'data/images', help='yolo images folder')
parser.add_argument('--output-dir', type=str, default=ROOT / 'output', help='output dir')
opt = parser.parse_args()
print(opt)
return opt
def main():
opt = parse_opt()
run(**vars(opt))
if __name__ == "__main__":
main()
|
print("Hello EFO")
print("")
print("Final")
|
from collections import defaultdict, OrderedDict, Counter
from datetime import datetime
from functools import wraps
from html.parser import HTMLParser
from operator import attrgetter
from random import randrange, shuffle
from shutil import rmtree, make_archive
from string import ascii_letters, ascii_uppercase, digits
from subprocess import PIPE, CalledProcessError, SubprocessError, TimeoutExpired
from sys import stdin, stdout, stderr
from textwrap import dedent
import argparse
import contextlib
import os.path
import re
import tempfile
import yaml
import zipfile
from argcomplete import autocomplete
from jinja2 import Template
from natsort import natsorted
from ..black_magic import *
from .contest_details import *
from .details import *
from .formats import *
from .passwords import *
from .programs import *
from .seating import *
from .testscripts import *
from .utils import *
class CommandError(Exception): ...
VERSION = "0.2"
##########################################
# TODO use the 'logging' library
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=cformat_text(dedent('''\
Programming contest utilities.
Two main use cases are the following:
- For one-off scripting tasks, e.g., testing a solution against a bunch of data.
- (for problems) [*[kg gen]*], [*[kg test]*], [*[kg run]*], [*[kg subtasks]*], [*[kg compile]*]
- (for contests) [*[kg seating]*], [*[kg passwords]*]
- (others) [*[kg convert]*], [*[kg convert-sequence]*]
- For developing problems/contests from scratch (writing generators, validators, checkers, etc.)
- (for problems) [*[kg init]*], [*[kg make]*], [*[kg gen]*], [*[kg test]*], [*[kg run]*], [*[kg compile]*]
- (for contests) [*[kg contest]*]
See the individual --help texts for each command, e.g., [*[kg init --help]*].
''')))
parser.add_argument('--krazy', action='store_true', help="Go krazy. (Don't use unless drunk)")
# TODO add 'verbose' option here
subparsers = parser.add_subparsers(
help='which operation to perform',
dest='main_command',
metavar='{konvert,konvert-sequence,subtasks,gen,test,run,make,joke,init,kompile,kontest,seating,passwords}')
subparsers.required = True
##########################################
# convert one format to another
convert_p = subparsers.add_parser('konvert',
aliases=['convert'],
formatter_class=argparse.RawDescriptionHelpFormatter,
help='Convert test data from one format to another',
description=cformat_text(dedent('''\
Convert test data from one contest/judge system format to another.
$ [*[kg convert --from [src_fmt] [src_folder] --to [dest_fmt] [dest_folder]]*]
For example,
$ [*[kg convert --from polygon path/to/polygon-package --to hackerrank path/to/hr/i-o-folders]*]
$ [*[kg convert --from hackerrank path/to/hr/i-o-folders --to polygon path/to/polygon-package]*]
'polygon' and 'hackerrank' can be abbreviated as 'pg' and 'hr', respectively. There is also the
'kompgen'/'kg' format, which is the format used when creating a problem from scratch using KompGen.
A few details on the "formats":
- Polygon I/O pairs look like: tests/* and tests/*.a
- HackerRank I/O pairs look like: input/input*.txt and output/output*.txt
- KompGen I/O pairs look like: tests/*.in and tests/*.ans
You can think of "kg convert" as similar to two calls to "kg convert-sequence", one for the input
files, and another for the output files, with some additional validity checks (e.g., for HackerRank,
input/inputFOO.txt is rejected) and reindexing (e.g., Polygon starts at "1", e.g., tests/1, but
HackerRank starts at "00", e.g., input/input00.txt).
''')))
convert_p.add_argument('--from', nargs=2, help='source format and location', dest='fr',
metavar=('FROM_FORMAT', 'FROM_FOLDER'), required=True)
convert_p.add_argument('--to', nargs=2, help='destination format and location',
metavar=('TO_FORMAT', 'TO_FOLDER'), required=True)
@set_handler(convert_p)
def kg_convert(format_, args):
if args.main_command == 'convert':
info_print("You spelled 'konvert' incorrectly. I'll let it slide for now.", file=stderr)
convert_formats(args.fr, args.to)
def convert_formats(src, dest, *, src_kwargs={}, dest_kwargs={}):
sformat, sloc = src
dformat, dloc = dest
src_format = get_format(argparse.Namespace(format=sformat, loc=sloc, input=None, output=None), read='io', **src_kwargs)
dest_format = get_format(argparse.Namespace(format=dformat, loc=dloc, input=None, output=None), write='io', **dest_kwargs)
copied_i = []
copied_o = []
info_print("Copying now...")
for (srci, srco), (dsti, dsto) in zip(src_format.thru_io(), dest_format.thru_expected_io()):
copy_file(srci, dsti)
copy_file(srco, dsto)
copied_i.append(dsti)
copied_o.append(dsto)
succ_print("Copied", len(copied_i) + len(copied_o), "files")
return copied_i, copied_o
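# Usage sketch (editor note): convert_formats() is the programmatic counterpart of "kg convert";
# the format/location pairs below mirror the CLI example in the help text above and are illustrative.
#   convert_formats(('polygon', 'path/to/polygon-package'), ('hackerrank', 'path/to/hr/i-o-folders'))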
##########################################
# convert one file sequence to another
convert2_p = subparsers.add_parser('konvert-sequence',
aliases=['convert-sequence'],
formatter_class=argparse.RawDescriptionHelpFormatter,
help='Convert a file sequence with a certain pattern to another',
description=cformat_text(dedent('''\
Convert a file sequence with a certain pattern to another.
$ [*[kg convert-sequence --from [source_pattern] --to [target_pattern]]*]
This converts every file matched by [source_pattern] into a file that matches [target_pattern].
The output files will be inferred from the corresponding input files. "*" in patterns are
wildcards, and they will be matched automatically.
For example,
$ [*[kg convert-sequence --from "input/input*.txt" --to "tests/0*.in"]*]
will convert input/input00.txt to tests/000.in, input/input11.txt to tests/011.in, etc.
Quotes are required (at least on Linux), otherwise bash will replace it with the
actual matched filenames. (not sure about Windows)
There can even be multiple "*"s in --to and --from. The only requirement is that they have an equal
number of "*"s. Parts matched by "*"s will be transferred to the corresponding "*" in the other
pattern.
''')))
convert2_p.add_argument('--from', help='source file pattern', dest='fr', required=True)
convert2_p.add_argument('--to', help='destination file pattern', required=True)
@set_handler(convert2_p)
def kg_convert2(format_, args):
if args.main_command == 'convert-sequence':
info_print("You spelled 'konvert-sequence' incorrectly. I'll let it slide for now.", file=stderr)
convert_sequence(args.fr, args.to)
def convert_sequence(src, dest):
format_ = get_format(argparse.Namespace(format=None, loc=None, input=src, output=dest), read='i', write='o')
copied = 0
info_print("Copying now...")
for srcf, destf in format_.thru_io():
copy_file(srcf, destf)
copied += 1
succ_print("Copied", copied, "files")
##########################################
# detect subtasks
subtasks_p = subparsers.add_parser('subtasks',
formatter_class=argparse.RawDescriptionHelpFormatter,
help='Detect the subtasks of input files',
description=cformat_text(dedent('''\
Detect the subtasks of input files, for problems with subtasks.
You need either a detector program or a validator program.
$ [*[kg subtasks -i [input_pattern] -f [detector_program]]*]
This prints all subtasks of every file. [detector_program] must be a program that takes an input
from stdin and prints the distinct subtasks in which it is valid, as separate tokens in stdout.
$ [*[kg subtasks -i [input_pattern] -vf [validator_program]]*]
This is a bit slower but simpler. [validator_program] must be a program that takes an input from
stdin and the subtask name as the first command line argument, and exits with code 0 iff the input
is valid from that subtask.
This is useful if you want to automatically know which subtask each file belongs to; sometimes, a
file you generated may be intended for a subtask but actually violates the constraints, so this lets
you detect those cases.
For example,
$ [*[kg subtasks -i "tests/*.in" -f Detector.java]*]
$ [*[kg subtasks -i "tests/*.in" -vf validator.cpp]*]
Quotes are required (at least on Linux), otherwise bash will replace it with the
actual matched filenames. (not sure about Windows)
The programming language of the detector/validator is inferred from the extension. You can also pass
a full command using -c or -vc, for example,
$ [*[kg subtasks -i "tests/*.in" -c pypy3 detector.py]*]
$ [*[kg subtasks -i "tests/*.in" -vc runhaskell validator.hs]*]
You can also run this for just one file, e.g.,
$ [*[kg subtasks -i data/sample.in -f Detector.java]*]
There can even be multiple "*"s in -i.
If you wrote your problem using "kg init", then you may omit "-i", "-f" and "-vf"; they will default
to the KompGen format ("tests/*.in"), and other details will be parsed from details.json, so
"[*[kg subtasks]*]" without options would just work. (You can still pass them of course.)
If your command (-c or -vc) requires leading dashes, then the argument parser might interpret them as
options to "kg subtasks" itself. To work around this, prepend "___" (triple underscore) to each part
containing a "-". The "___" will be ignored. For example,
$ [*[kg subtasks -i "tests/*.in" -vc java ___-Xss128m Validator]*]
''')))
subtasks_p.add_argument('-F', '--format', '--fmt', help='format of data')
subtasks_p.add_argument('-l', '--loc', default='.', help='location to run commands on')
subtasks_p.add_argument('-d', '--details', help=argparse.SUPPRESS)
subtasks_p.add_argument('-i', '--input', help='input file pattern')
subtasks_p.add_argument('-o', '--output', help='output file pattern')
subtasks_p.add_argument('-c', '--command', nargs='+', help='detector command')
subtasks_p.add_argument('-f', '--file', help='detector file')
subtasks_p.add_argument('-s', '--subtasks', default=[], nargs='+', help='list of subtasks')
subtasks_p.add_argument('-vc', '--validator-command', nargs='+', help='validator command')
subtasks_p.add_argument('-vf', '--validator-file', help='validator file')
# TODO support "compiler through validator"
@set_handler(subtasks_p)
def kg_subtasks(format_, args):
if not args.format: args.format = format_
format_ = get_format(args, read='i')
details = Details.from_format_loc(args.format, args.details, relpath=args.loc)
subtasks = args.subtasks or list(map(str, details.valid_subtasks))
detector = _get_subtask_detector_from_args(args, purpose='subtask computation', details=details)
compute_subtasks(subtasks, detector, format=format_, include_test_groups=True)
def _get_subtask_detector_from_args(args, *, purpose, details=None):
if details is None:
details = Details.from_format_loc(args.format, args.details, relpath=args.loc)
# build detector
validator = None
detector = Program.from_args(args.file, args.command)
if not detector: # try validator
validator = Program.from_args(args.validator_file, args.validator_command)
detector = detector_from_validator(validator)
assert (not detector) == (not validator)
# try detector from details
if not detector: detector = details.subtask_detector
# can't build any detector!
if not detector: raise CommandError(f"Missing detector/validator (for {purpose})")
# find subtask list
if validator and not args.subtasks: # subtask list required for detectors from validator
raise CommandError(f"Missing subtask list (for {purpose})")
return detector
def _collect_subtasks(input_subs):
@wraps(input_subs)
def _input_subs(subtasks, *args, **kwargs):
subtset = set(subtasks)
# iterate through inputs, run our detector against them
subtasks_of = OrderedDict()
all_subtasks = set()
files_of_subtask = {sub: set() for sub in subtset}
test_groups = {}
for input_, subs in input_subs(subtasks, *args, **kwargs):
subtasks_of[input_] = set(subs)
if not subtasks_of[input_]:
raise CommandError(f"No subtasks found for {input_}")
if subtset and not (subtasks_of[input_] <= subtset):
raise CommandError("Found invalid subtasks! "
+ ' '.join(map(repr, natsorted(subtasks_of[input_] - subtset))))
all_subtasks |= subtasks_of[input_]
for sub in subtasks_of[input_]: files_of_subtask[sub].add(input_)
info_print(f"Subtasks found for {input_}:", end=' ')
key_print(*natsorted(subtasks_of[input_]))
test_groups[' '.join(natsorted(subtasks_of[input_]))] = set(subtasks_of[input_])
info_print("Distinct subtasks found:", end=' ')
key_print(*natsorted(all_subtasks))
if subtset:
assert all_subtasks <= subtset
if all_subtasks != subtset:
warn_print('Warning: Some subtasks not found:', *natsorted(subtset - all_subtasks), file=stderr)
info_print("Subtask dependencies:")
depends_on = {sub: {sub} for sub in subtset}
for sub in natsorted(subtset):
if files_of_subtask[sub]:
deps = [dep for dep in natsorted(subtset) if dep != sub and files_of_subtask[dep] <= files_of_subtask[sub]]
print(info_text("Subtask"), key_text(sub), info_text("contains the ff subtasks:"), key_text(*deps))
for dep in deps: depends_on[dep].add(sub)
if kwargs.get('include_test_groups'):
representing = {}
represented_by = {}
for sub in natsorted(all_subtasks):
candidates = {key: group for key, group in test_groups.items() if sub in group and group <= depends_on[sub]}
if candidates:
try:
group_key = next(key
for key, group in candidates.items()
if all(other <= group for other in candidates.values()))
if group_key in represented_by:
del representing[represented_by[group_key]]
representing[sub] = group_key
represented_by[group_key] = sub
except StopIteration:
pass
info_print("Test groups:")
for group_key in natsorted(test_groups):
if group_key in represented_by:
print(key_text(group_key), info_text("AKA subtask"), key_text(represented_by[group_key]))
else:
key_print(group_key)
for sub in natsorted(all_subtasks):
if sub in representing:
print(info_text("Subtask"), key_text(sub), info_text("is represented by test group:"),
key_text(representing[sub]))
else:
warn_print('Warning: No test group represents subtask', sub, file=stderr)
info_print("Test group dependencies:")
for key, group in natsorted(test_groups.items()):
deps = [depkey for depkey, dep in natsorted(test_groups.items()) if dep != group and group < dep]
if key in represented_by:
print(info_text("Test group"), key_text(key), info_text("AKA subtask"), key_text(represented_by[key]),
info_text("contains the ff subtask groups:"))
else:
print(info_text("Test group"), key_text(key), info_text("contains the ff test groups:"))
for depkey in deps:
if depkey in represented_by:
print(key_text(depkey), info_text("AKA subtask"), key_text(represented_by[depkey]))
else:
print(key_text(depkey))
return subtasks_of, all_subtasks
return _input_subs
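# Note (editor): functions wrapped with @_collect_subtasks below are generators that yield
# (input_file, subtask_set) pairs; the wrapper consumes them, prints the per-file and aggregate
# reports (plus test groups when include_test_groups=True), and returns (subtasks_of, all_subtasks),
# i.e., an input -> subtask-set mapping together with the union of all subtasks seen.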
@_collect_subtasks
def extract_subtasks(subtasks, subtasks_files, *, format=None, inputs=None):
if not inputs: inputs = []
thru_expected_inputs = format.thru_expected_inputs() if format else None
def get_expected_input(i):
while i >= len(inputs):
if not thru_expected_inputs: raise CommandError("Missing format or input")
inputs.append(next(thru_expected_inputs))
return inputs[i]
input_ids = set()
for lf, rg, subs in subtasks_files:
for index in range(lf, rg + 1):
if index in input_ids: raise CommandError(f"File {index} appears multiple times in subtasks_files")
input_ids.add(index)
yield get_expected_input(index), {*map(str, subs)}
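# Note (editor): extract_subtasks() above reads precomputed "subtasks_files" data, a sequence of
# (low, high, subs) triples meaning "files low..high (inclusive, 0-based indices into the expected
# input list) belong to subtasks 'subs'"; e.g., (0, 2, [1, 2]) assigns files 0, 1 and 2 to
# subtasks 1 and 2.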
@_collect_subtasks
def compute_subtasks(subtasks, detector, *, format=None, relpath=None, include_test_groups):
subtset = set(subtasks)
# iterate through inputs, run our detector against them
detector.do_compile()
for input_ in format.thru_inputs():
with open(input_) as f:
try:
result = detector.do_run(*subtasks, stdin=f, stdout=PIPE, check=True)
except CalledProcessError as cpe:
err_print(f"The detector raised an error for {input_}", file=stderr)
raise CommandError(f"The detector raised an error for {input_}") from cpe
yield input_, set(result.stdout.decode('utf-8').split())
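# Usage sketch (editor note): this is the call that "kg subtasks" ultimately makes (see kg_subtasks
# above); the detector is a Program that prints the matching subtasks of whatever is fed to its stdin.
#   subtasks_of, all_subtasks = compute_subtasks(['1', '2', '3'], detector, format=format_, include_test_groups=True)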
##########################################
# generate output data
gen_p = subparsers.add_parser('gen',
formatter_class=argparse.RawDescriptionHelpFormatter,
help='Run a program against several files as input, and generate an output file for each',
description=cformat_text(dedent('''\
Run a program against several files as input, and generate an output file for each.
A common use is to generate output data from the input data using the solution program.
$ [*[kg gen -i [input_pattern] -o [output_pattern] -f [program]]*]
This generates the files [output_pattern] by running [program] for every file in [input_pattern].
[program] must be a program that takes an input from stdin and prints the output in stdout.
The output files will be inferred from the corresponding input files. "*" in patterns are
wildcards, and they will be matched automatically.
For example,
$ [*[kg gen -i "tests/*.in" -o "tests/*.ans" -f Solution.java]*]
Here, input files "tests/*.in" will be converted to output files "tests/*.ans", with the part in
the "*" carrying over. For example, "tests/005.in" corresponds to "tests/005.ans".
Quotes are required (at least on Linux), otherwise bash will replace it with the actual matched
filenames. (not sure about Windows)
The programming language of the program is inferred from the extension. You can also pass a full
command using -c, for example,
$ [*[kg gen -i "tests/*.in" -o "tests/*.ans" -c pypy3 solution.py]*]
You can also run this for just one file, e.g.,
$ [*[kg gen -i data/sample.in -o data/temp.txt -f solution.cpp]*]
There can even be multiple "*"s in -i and -o. The only requirement is that they have an equal
number of "*"s. Parts matched by "*"s will be transferred to the corresponding "*" in the other
pattern.
If you wrote your problem using "kg init", then you may omit "-i", "-o" and "-f"; they will
default to the KompGen format ("tests/*.in" and "tests/*.ans"), and other details will be parsed
from details.json, so for example, "[*[kg gen]*]" without options would just work. (You can still pass
them of course.)
If your command (-c) requires leading dashes, then the argument parser might interpret them as
options to "kg gen" itself. To work around this, prepend "___" (triple underscore) to each part
containing a "-". The "___" will be ignored. For example,
$ [*[kg gen -c java ___-Xss128m Solution]*]
''')))
gen_p.add_argument('-F', '--format', '--fmt', help='format of data')
gen_p.add_argument('-l', '--loc', default='.', help='location to run commands on')
gen_p.add_argument('-d', '--details', help=argparse.SUPPRESS)
gen_p.add_argument('-i', '--input', help='input file pattern')
gen_p.add_argument('-o', '--output', help='output file pattern')
gen_p.add_argument('-c', '--command', nargs='+', help='solution/data_maker command')
gen_p.add_argument('-f', '--file', help='solution/data_maker file')
gen_p.add_argument('-jc', '--judge-command', nargs='+', help='judge command')
gen_p.add_argument('-jf', '--judge-file', help='judge file')
# TODO Add "clear matched" option, but explicitly ask if delete them?
@set_handler(gen_p)
def kg_gen(format_, args):
if not args.format: args.format = format_
format_ = get_format(args, read='i', write='o')
details = Details.from_format_loc(args.format, args.details, relpath=args.loc)
judge_data_maker = Program.from_args(args.file, args.command)
model_solution = None
if not judge_data_maker:
model_solution = details.model_solution
judge_data_maker = details.judge_data_maker
judge = Program.from_args(args.judge_file, args.judge_command) or details.checker
if not judge: raise CommandError("Missing judge")
generate_outputs(format_, judge_data_maker, model_solution=model_solution,
judge=judge, interactor=details.interactor)
def generate_outputs(format_, data_maker, *, model_solution=None, judge=None, interactor=None):
if not data_maker: raise CommandError("Missing solution")
data_maker.do_compile()
if judge: judge.do_compile()
if model_solution and model_solution != data_maker: model_solution.do_compile()
if interactor: interactor.do_compile()
data_maker_name = 'model_solution' if model_solution == data_maker else 'data_maker'
for input_, output_ in format_.thru_io():
touch_container(output_)
print(info_text('WRITING', input_, '-->'), key_text(output_))
try:
if data_maker.attributes.get('interacts') and interactor:
results = data_maker.do_interact(interactor, time=True, check=True,
interactor_args=[input_, output_],
interactor_kwargs=dict(time=True, check=True),
)
else:
with open(input_) as inp, open(output_, 'w') as outp:
data_maker.do_run(stdin=inp, stdout=outp, time=True, check=True)
except InteractorException as ie:
err_print(f"The interactor raised an error with the {data_maker_name} for {input_}", file=stderr)
raise CommandError(f"The interactor raised an error with the {data_maker_name} for {input_}") from ie
except SubprocessError as se:
err_print(f"The {data_maker_name} raised an error for {input_}", file=stderr)
raise CommandError(f"The {data_maker_name} raised an error for {input_}") from se
if judge and model_solution:
@contextlib.contextmanager # so that the file isn't closed
def model_output():
if model_solution == data_maker:
yield output_
else:
with tempfile.NamedTemporaryFile(delete=False) as tmp:
info_print(f"Running model solution on {input_}")
try:
if interactor:
results = model_solution.do_interact(interactor, time=True, check=True,
interactor_args=[input_, tmp.name],
interactor_kwargs=dict(time=True, check=True),
)
else:
with open(input_) as inp:
model_solution.do_run(stdin=inp, stdout=tmp, time=True, check=True)
except InteractorException as ie:
err_print(f"The interactor raised an error with the model_solution for {input_}", file=stderr)
raise CommandError(f"The interactor raised an error with the model_solution for {input_}") from ie
except SubprocessError as se:
err_print(f"The interaction raised an error for {input_}", file=stderr)
raise CommandError(f"The interaction raised an error for {input_}") from se
yield tmp.name
with model_output() as model_out:
try:
judge.do_run(*map(os.path.abspath, (input_, model_out, output_)), check=True)
except CalledProcessError as cpe:
err_print(f"The judge did not accept {output_}", file=stderr)
raise CommandError(f"The judge did not accept {output_}") from cpe
##########################################
# test against output data
test_p = subparsers.add_parser('test',
formatter_class=argparse.RawDescriptionHelpFormatter,
help='Test a program against given input and output files',
description=cformat_text(dedent('''\
Test a program against given input and output files.
$ [*[kg test -i [input_pattern] -o [output_pattern] -f [solution_program]]*]
This runs [solution_program] for every file in [input_pattern], and compares it against the
corresponding files in [output_pattern]. [solution_program] must be a program that takes an
input from stdin and prints the output in stdout.
The output files will be inferred from the corresponding input files. "*" in patterns are
wildcards, and they will be matched automatically.
For example,
$ [*[kg test -i "tests/*.in" -o "tests/*.ans" -f Solution.java]*]
Here, input files "tests/*.in" will be matched with output files "tests/*.ans", with the part in
the "*" carrying over. For example, "tests/005.in" corresponds to "tests/005.ans".
Quotes are required (at least on Linux), otherwise bash will replace it with the
actual matched filenames. (not sure about Windows)
The programming language of the program is inferred from the extension. You can also pass a full
command using -c, for example,
$ [*[kg test -i "tests/*.in" -o "tests/*.ans" -c pypy3 solution.py]*]
You can also run this for just one file, e.g.,
$ [*[kg test -i data/sample.in -o data/temp.txt -f solution.cpp]*]
There can even be multiple "*"s in -i and -o. The only requirement is that they have an equal
number of "*"s. Parts matched by "*"s will be transferred to the corresponding "*" in the other
pattern.
If your program has a custom checker file, you may pass it via the -jf ("judge file") option.
For example,
$ [*[kg test -i "tests/*.in" -o "tests/*.ans" -f Solution.java -jf checker.py]*]
Here, checker.py takes three command line arguments "input_path", "output_path" and "judge_path",
and exits with 0 iff the answer is correct. It may print anything in stdout/stderr.
You may also pass a full checker command via -jc, similar to -c for the solution file.
If you wrote your problem using "kg init", then you may omit "-i", "-o", "-f" and "-jf"; they will
default to the KompGen format ("tests/*.in" and "tests/*.ans"), and other details will be parsed
from details.json, so for example, "[*[kg test]*]" without options would just work. (You can still pass
them of course.)
If your command (-c or -jc) requires leading dashes, then the argument parser might interpret
them as options to "kg test" itself. To work around this, prepend "___" (triple underscore) to each
part containing a "-". The "___" will be ignored. For example,
$ [*[kg test -c java ___-Xss128m Solution -jc java ___-Xss128m Checker]*]
''')))
test_p.add_argument('-F', '--format', '--fmt', help='format of data')
test_p.add_argument('-l', '--loc', default='.', help='location to run commands on')
test_p.add_argument('-d', '--details', help=argparse.SUPPRESS)
test_p.add_argument('-i', '--input', help='input file pattern')
test_p.add_argument('-o', '--output', help='output file pattern')
test_p.add_argument('-c', '--command', nargs='+', help='solution command')
test_p.add_argument('-f', '--file', help='solution file')
test_p.add_argument('-jc', '--judge-command', nargs='+', help='judge command')
test_p.add_argument('-jf', '--judge-file', help='judge file')
test_p.add_argument('-js', '--judge-strict-args', action='store_true',
help="whether the checker is strict and doesn't work if "
"extra arguments are given to it")
test_p.add_argument('-s', '--subtasks', default=[], nargs='+', help='list of subtasks')
test_p.add_argument('-vc', '--validator-command', nargs='+', help='validator command, for subtask grading')
test_p.add_argument('-vf', '--validator-file', help='validator file, for subtask grading')
test_p.add_argument('-ic', '--interactor-command', nargs='+', help='interactor command, if the problem is interactive')
test_p.add_argument('-if', '--interactor-file', help='interactor file, if the problem is interactive')
test_p.add_argument('-tl', '--time-limit', type=float, help="the problem's time limit (or -1 for no limit); "
"the code will be terminated if it exceeds 4x this time")
@set_handler(test_p)
def kg_test(format_, args):
if not args.format: args.format = format_
format_ = get_format(args, read='io')
details = Details.from_format_loc(args.format, args.details, relpath=args.loc)
solution = Program.from_args(args.file, args.command) or details.model_solution
if not solution: raise CommandError("Missing solution")
judge = Program.from_args(args.judge_file, args.judge_command) or details.checker
if not judge: raise CommandError("Missing judge")
interactor = Program.from_args(args.interactor_file, args.interactor_command) or details.interactor
time_limit = args.time_limit
if time_limit is None: time_limit = details.time_limit
if time_limit == -1: time_limit = float('inf')
print(info_text('Using problem time limit:'), key_text(time_limit), info_text('sec.'))
judge_strict_args = args.judge_strict_args
solution.do_compile()
judge.do_compile()
if interactor: interactor.do_compile()
scoresheet = {}
for index, (input_, output_) in enumerate(format_.thru_io()):
def get_score():
nonlocal judge_strict_args
get_score.running_time = None
with tempfile.NamedTemporaryFile(delete=False) as tmp:
with tempfile.NamedTemporaryFile(delete=False) as result_tmp:
info_print("\nFile", str(index).rjust(3), 'CHECKING AGAINST', input_)
interactor_res = None
try:
if interactor:
solution_res, interactor_res = solution.do_interact(interactor,
time=True, check=True,
interactor_args=(input_, tmp.name),
interactor_kwargs=dict(check=False),
time_limit=time_limit,
)
else:
with open(input_) as inp:
solution_res = solution.do_run(
stdin=inp,
stdout=tmp,
time=True,
check=True,
time_limit=time_limit,
)
except TimeoutExpired:
pass
except CalledProcessError:
err_print('The solution issued a runtime error...')
return False, 0
finally:
# save the running time now, since we're monkeying around...
if hasattr(solution, 'last_running_time'):
get_score.running_time = solution.last_running_time
# Check if the interactor issues WA by itself. Don't invoke the judge
if getattr(interactor_res, 'returncode', 0):
err_print('The interactor did not accept the interaction...')
return False, 0
def run_judge():
jargs = list(map(os.path.abspath, (input_, tmp.name, output_)))
if not judge_strict_args:
jargs += [result_tmp.name, '-c', solution.filename, '-t', str(index), '-v']
return judge.do_run(*jargs, check=False).returncode
info_print("Checking the output...")
returncode = run_judge()
if returncode == 3 and not judge_strict_args: # try again but assume the judge is strict
info_print("The error above might just be because of testlib... trying to judge again")
judge_strict_args = True
returncode = run_judge()
correct = returncode == 0
try:
with open(result_tmp.name) as result_tmp_file:
score = json.load(result_tmp_file)['score']
except Exception as exc:
score = 1 if correct else 0 # can't read score. use binary scoring
if get_score.running_time is None:
warn_print("Warning: The running time cannot be extracted from this run.")
elif get_score.running_time > time_limit:
err_print(f"The solution exceeded the time limit of {time_limit:.3f}sec; "
f"it didn't finish after {get_score.running_time:.3f}sec...")
if score > 0: info_print(f"It would have gotten a score of {score} otherwise...")
return False, 0
return correct, score
correct, score = get_score()
scoresheet[index] = {
'input': input_,
'correct': correct,
'score': score,
'running_time': get_score.running_time,
}
if correct:
succ_print("File", str(index).rjust(3), 'correct')
else:
err_print("File", str(index).rjust(3), 'WRONG' + '!'*11)
if not 0 <= score <= 1:
warn_print(f"Warning: The score '{score}' is invalid; it must be in the interval [0, 1].")
def abbreviate_indices(indices):
if not indices: return 'none'
return compress_t_sequence(','.join(map(str, sorted(indices))))
def print_file_list(description, indices):
if indices:
info_print(f"{len(indices):3} file(s) {description}:", abbreviate_indices(indices))
else:
info_print(f"{len(indices):3} file(s) {description}")
def write_raw_summary():
""" print the raw files gotten correct and wrong """
corrects = [index for index, score_row in sorted(scoresheet.items()) if score_row['correct']]
wrongs = [index for index, score_row in sorted(scoresheet.items()) if not score_row['correct']]
        # use a list (not a lazy filter object) so the emptiness check below actually works
        running_times = [score_row['running_time'] for score_row in scoresheet.values()
                         if score_row['running_time'] is not None]
        max_time = max(running_times) if running_times else None
decor_print()
decor_print('.'*42)
beginfo_print('SUMMARY:')
print_file_list('gotten correct', corrects)
print_file_list('gotten wrong ', wrongs)
(succ_print if len(corrects) == len(scoresheet) else err_print)(len(corrects), end=' ')
(succ_print if len(corrects) == len(scoresheet) else info_print)(f'out of {len(scoresheet)} files correct')
if max_time is None:
warn_print('Warning: No running time was extracted from any run')
else:
info_print(f'Max running time: {max_time:.2f}sec')
decor_print('.'*42)
@memoize
def get_all_subtask_details():
print()
info_print('Obtaining subtask info...')
subtasks = args.subtasks or list(map(str, details.valid_subtasks))
if os.path.isfile(details.subtasks_files):
inputs = [score_row['input'] for index, score_row in sorted(scoresheet.items())]
subtasks_of, all_subtasks = extract_subtasks(subtasks, details.load_subtasks_files(), inputs=inputs)
else:
detector = _get_subtask_detector_from_args(args, purpose='subtask scoring', details=details)
subtasks_of, all_subtasks = compute_subtasks(subtasks, detector, format=format_, include_test_groups=False)
def get_max_score(sub):
max_score = details.valid_subtasks[int(sub)].score if isinstance(details.valid_subtasks, dict) else 1
if max_score is None: max_score = 1
return max_score
# normal grading
all_subtasks = {sub: {
'weight': get_max_score(sub),
'indices': [],
'scores': [],
'running_times': [],
} for sub in all_subtasks}
for index, score_row in sorted(scoresheet.items()):
for sub in subtasks_of[score_row['input']]:
all_subtasks[sub]['indices'].append(index)
all_subtasks[sub]['scores'].append(score_row['score'])
all_subtasks[sub]['running_times'].append(score_row['running_time'])
# compute scores per subtask using the per-subtask scoring policy
for sub, sub_details in all_subtasks.items():
if details.scoring_per_subtask == '!min':
                sub_details['score'] = min(sub_details['scores'])
elif details.scoring_per_subtask == '!ave':
sub_details['score'] = sum(sub_details['scores']) / len(sub_details['scores'])
else:
raise ValueError(f"Unknown/Unsupported per-subtask scoring policy: {details.scoring_per_subtask}")
sub_details['weighted_score'] = sub_details['weight'] * sub_details['score']
sub_details['max_running_time'] = max(sub_details['running_times']) if sub_details['running_times'] else None
return all_subtasks
def get_score_for(group_scores):
if details.scoring_overall == '!sum':
return sum(weight * score for weight, score in group_scores)
if details.scoring_overall == '!ave':
return sum(weight * score for weight, score in group_scores) / sum(weight for weight, score in group_scores)
if details.scoring_overall == '!min':
return min(weight * score for weight, score in group_scores)
raise ValueError(f"Unknown/Unsupported overall scoring policy: {details.scoring_overall}")
write_raw_summary()
if format_.name and details.valid_subtasks:
# groups are subtasks
group_scores = [(sub_details['weight'], sub_details['score'])
for sub, sub_details in natsorted(get_all_subtask_details().items())
]
else:
# groups are individual files
group_scores = [(details.scoring_default_weight, score_row['score'])
for index, score_row in sorted(scoresheet.items())
]
scoring_result = get_score_for(group_scores)
max_scoring_result = get_score_for([(weight, 1) for weight, score in group_scores])
# print the subtask grades
if format_.name and details.valid_subtasks:
# print the raw summary again (because get_subtasks has huge output)
write_raw_summary()
beginfo_print('SUBTASK REPORT:')
for sub, sub_details in natsorted(get_all_subtask_details().items()):
score = sub_details['weighted_score']
weight = sub_details['weight']
max_running_time = sub_details['max_running_time']
print(
info_text("Subtask ="),
key_text(str(sub).rjust(4)),
info_text(": Score = "),
(
succ_text if score >= weight else
info_text if score > 0 else
err_text
)(f"{score:8.3f}"),
info_text(f" out of {weight:8.3f}"),
(
warn_text(" No running time was extracted")
if max_running_time is None else
info_text(f" w/ max running time: {max_running_time:.2f}sec")
), sep='')
if not 0 <= score <= weight:
warn_print(f"Warning: The score {score} is invalid: "
f"it must be in the interval [0, {weight}]")
# print the overall score
print()
print(info_text("Total Score =",
(succ_text if scoring_result >= max_scoring_result else
info_text if scoring_result > 0 else
err_text)(f"{scoring_result:8.3f}"),),
info_text(f" out of {max_scoring_result:8.3f}"),
sep='')
info_print(f'using the scoring policy {details.logical_scoring}')
##########################################
# just run the solution
run_p = subparsers.add_parser('run',
formatter_class=argparse.RawDescriptionHelpFormatter,
help='Run a program against input files (and print to stdout)',
description=cformat_text(dedent('''\
Run a program against a set of input files, and print the result to stdout.
$ [*[kg run -i [input_pattern] -f [solution_program]]*]
                This runs [solution_program] for every file in [input_pattern], and simply forwards
                everything it writes to stdout and stderr.
For example,
$ [*[kg run -i "tests/*.in" -f Solution.java]*]
Quotes are required (at least on Linux), otherwise bash will replace it with the
actual matched filenames. (not sure about Windows)
The programming language of the program is inferred from the extension. You can also pass a full
command using -c, for example,
$ [*[kg run -i "tests/*.in" -c pypy3 solution.py]*]
You can also run this for just one file, e.g.,
$ [*[kg run -i data/sample.in -f solution.cpp]*]
There can even be multiple "*"s in -i.
This is useful, for example, if you want to validate a bunch of test files:
$ [*[kg run -i "tests/*.in" -f Validator.java]*]
If you wrote your problem using "kg init", then you may omit "-i" and "-f"; they will default to
the KompGen format ("tests/*.in"), and other details will be parsed from details.json, so
"[*[kg run]*]" without options would just work. (You can still pass them of course.)
If your command (-c) requires leading dashes, then the argument parser might interpret them as
options to "kg run" itself. To work around this, prepend "___" (triple underscore) to each part
containing a "-". The "___" will be ignored. For example,
$ [*[kg run -c java ___-Xss128m MyProgram]*]
''')))
run_p.add_argument('-F', '--format', '--fmt', help='format of data')
run_p.add_argument('-l', '--loc', default='.', help='location to run commands on')
run_p.add_argument('-d', '--details', help=argparse.SUPPRESS)
run_p.add_argument('-i', '--input', help='input file pattern')
run_p.add_argument('-o', '--output', help='output file pattern')
run_p.add_argument('-c', '--command', nargs='+', help='solution command')
run_p.add_argument('-f', '--file', help='solution file')
@set_handler(run_p, stderr)
def kg_run(format_, args):
if not args.format: args.format = format_
format_ = get_format(args, read='i')
details = Details.from_format_loc(args.format, args.details, relpath=args.loc)
solution = Program.from_args(args.file, args.command) or details.model_solution
if not solution: raise CommandError("Missing solution")
solution.do_compile()
for input_ in format_.thru_inputs():
with open(input_) as inp:
info_print('RUNNING FOR', input_, file=stderr)
try:
solution.do_run(stdin=inp, time=True, check=True)
except CalledProcessError:
err_print('The program issued a runtime error...', file=stderr)
##########################################
# make everything !!!
make_p = subparsers.add_parser('make',
formatter_class=argparse.RawDescriptionHelpFormatter,
help='Create all test data (input+output files) and validate',
description=cformat_text(dedent('''\
Create all test data (input+output files), detect subtasks, and perform checks/validations.
This command is intended for problems created using "kg init". In that case, it will parse the
relevant information from the details.json file.
This generates input files from testscript and generator files:
$ [*[kg make inputs]*]
$ [*[kg make inputs --validation]*] # if you want validation
This generates output files from input files (similar to "kg gen"):
$ [*[kg make outputs]*]
$ [*[kg make outputs --checks]*] # if you want to run the checker
This detects the subtasks (similar to "kg subtasks") and writes it to the "subtasks_files" file
in JSON format:
$ [*[kg make subtasks]*]
More usage examples:
$ [*[kg make all]*] # does all of the above.
$ [*[kg make inputs outputs --checks]*] # only inputs and outputs, no validation, with checker.
Other combinations are also allowed.
You will probably want to run "kg make all" after finalizing all files---generators, validator,
checker, etc.---and make sure it finishes without errors. (unless this takes too long...)
''')))
make_p.add_argument('makes', nargs='+', help='what to make. (all, inputs, etc.)')
make_p.add_argument('-l', '--loc', default='.', help='location to run commands on')
make_p.add_argument('-d', '--details', help=argparse.SUPPRESS)
make_p.add_argument('-V', '--validation', action='store_true', help="Validate the input files against the validators")
make_p.add_argument('-C', '--checks', action='store_true', help="Check the output file against the checker")
@set_handler(make_p)
def _kg_make(format_, args):
if not is_same_format(format_, 'kg'):
raise CommandError(f"You can't use '{format_}' format to 'make'.")
details = Details.from_format_loc(format_, args.details, relpath=args.loc)
kg_make(args.makes, args.loc, format_, details, validation=args.validation, checks=args.checks)
def kg_make(omakes, loc, format_, details, validation=False, checks=False):
makes = set(omakes)
valid_makes = {'all', 'inputs', 'outputs', 'subtasks'}
if not (makes <= valid_makes):
raise CommandError(f"Unknown make param(s): {ctext(*sorted(makes - valid_makes))}")
if 'all' in makes:
makes |= valid_makes
validation = checks = True
if 'inputs' in makes:
decor_print()
decor_print('~~ '*14)
beginfo_print('MAKING INPUTS...' + ("WITH VALIDATION..." if validation else 'WITHOUT VALIDATION'))
if not details.testscript:
raise CommandError("Missing testscript")
with open(details.testscript) as scrf:
script = scrf.read()
fmt = get_format_from_type(format_, loc, write='i', clear='i')
if validation:
validator = details.validator
validator.do_compile()
for filename in run_testscript(fmt.thru_expected_inputs(), script, details.generators, relpath=loc):
if validation:
info_print('Validating', filename)
with open(filename) as file:
validator.do_run(stdin=file, check=True)
succ_print('DONE MAKING INPUTS.')
if 'outputs' in makes:
decor_print()
decor_print('~~ '*14)
beginfo_print('MAKING OUTPUTS...' + ("WITH CHECKS..." if checks else 'WITHOUT CHECKS'))
fmt = get_format_from_type(format_, loc, read='i', write='o', clear='o')
generate_outputs(
fmt, details.judge_data_maker,
model_solution=details.model_solution,
judge=details.checker if checks else None,
interactor=details.interactor)
succ_print('DONE MAKING OUTPUTS.')
if 'subtasks' in makes:
decor_print()
decor_print('~~ '*14)
beginfo_print('MAKING SUBTASKS...')
if not details.valid_subtasks:
if 'subtasks' in omakes:
raise CommandError("valid_subtasks list required if you wish to make subtasks")
else:
info_print("no valid_subtasks found, so actually, subtasks will not be made. move along.")
else:
if not details.subtasks_files:
raise CommandError(f"A 'subtasks_files' entry in {details.source} is required at this step.")
detector = details.subtask_detector
if not detector: raise CommandError("Missing detector/validator")
# find subtask list
subtasks = list(map(str, details.valid_subtasks))
if details.validator and not subtasks: # subtask list required for detectors from validator
raise CommandError("Missing subtask list")
# iterate through inputs, run our detector against them
subtasks_of, all_subtasks = compute_subtasks(
subtasks, detector,
format=get_format_from_type(format_, loc, read='i'), relpath=loc, include_test_groups=True)
info_print(f'WRITING TO {details.subtasks_files}')
details.dump_subtasks_files(construct_subs_files(subtasks_of))
succ_print('DONE MAKING SUBTASKS.')
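# Usage sketch (editor note): kg_make() above can also be driven programmatically; this mirrors
# "kg make inputs outputs --checks" and assumes 'details' is an already-parsed Details object.
#   kg_make(['inputs', 'outputs'], '.', 'kg', details, checks=True)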
def construct_subs_files(subtasks_of):
prev, lf, rg = None, 0, -1
for idx, file in enumerate(subtasks_of):
assert rg == idx - 1
subs = subtasks_of[file]
assert subs
if prev != subs:
if prev: yield lf, rg, sorted(map(int, prev))
prev, lf = subs, idx
rg = idx
if prev: yield lf, rg, sorted(map(int, prev))
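# Worked example (editor note): construct_subs_files() above run-length-encodes consecutive files
# that share the same subtask set. For subtasks_of = {'a.in': {1, 2}, 'b.in': {1, 2}, 'c.in': {2}}
# it yields (0, 1, [1, 2]) and then (2, 2, [2]), the same triple format written out via
# details.dump_subtasks_files() in kg_make and read back by extract_subtasks().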
##########################################
q_p = subparsers.add_parser('joke',
formatter_class=argparse.RawDescriptionHelpFormatter,
help='Print a non-funny joke',
description=rand_cformat_text(dedent('''\
Print a non-funny joke.
I'm sorry if they're not as funny as your jokes.
'''))
+ cformat_text('[^[Any]^] [*[help]*] [#[would]#] [.[be].] [%[very]%] [@[much]@] [+[appreciated]+]...'))
qs = [
'10kg > 1kg > 100g > 10g > log > log log > sqrt log log > 1',
'Spacewaker',
# add your jokes here plz
]
@set_handler(q_p)
def kg_q(format_, args):
import random
key_print(random.choice(qs))
##########################################
# make a new problem
init_p = subparsers.add_parser('init',
formatter_class=argparse.RawDescriptionHelpFormatter,
help='Create a new problem, formatted kg-style',
description=cformat_text(dedent('''\
Create a new problem, formatted KompGen-style.
Use this if you're planning to write everything from scratch (with the help of KompGen).
$ [*[kg init [problemname]]*]
This creates a folder [problemname] and prepopulates it with templates. [problemname]
should only have underscores, dashes, letters, and digits.
It also accepts a few options. Examples:
Basic usage. set up a problem with code "my-problem".
$ [*[kg init my-problem]*]
Set the title. Can be changed later (in details.json)
$ [*[kg init my-problem --title "My Cool Problem"]*]
"Minimal" setup, i.e., fewer and shorter prepopulated files.
$ [*[kg init my-problem --minimal]*]
Set up a problem with 5 subtasks.
$ [*[kg init my-problem --subtasks 5]*]
Include a checker in the prepopulated files.
$ [*[kg init my-problem --checker]*]
Set the time limit to 7sec. Can be changed later (in details.json)
$ [*[kg init my-problem --time-limit 7]*]
You can also combine options, e.g.,
$ [*[kg init my-problem --subtasks 5 --minimal --checker -tl 7 -t "My Cool Problem"]*]
''')))
init_p.add_argument('problemcode', help='Problem code. Must not contain special characters.')
init_p.add_argument('-l', '--loc', default='.', help='where to make the problem')
init_p.add_argument('-t', '--title', help='Problem title. (Default is generated from problemcode)')
init_p.add_argument('-s', '--subtasks', type=int, default=0, help='Number of subtasks. (0 if binary)')
init_p.add_argument('-m', '--minimal', action='store_true', help="Only put the essentials.")
init_p.add_argument('-c', '--checker', action='store_true', help="Include a checker")
init_p.add_argument('-tl', '--time-limit', type=int, default=2, help='Time limit.')
valid_problemcode = re.compile(r'^[a-zA-Z0-9][a-zA-Z0-9_-]*[a-zA-Z0-9]$')
@set_handler(init_p)
def kg_init(format_, args):
if not is_same_format(format_, 'kg'):
raise CommandError(f"You can't use '{format_}' format to 'init'.")
prob = args.problemcode
if not valid_problemcode.match(prob):
raise CommandError("No special characters allowed for the problem code, "
"and the first and last characters must be a letter or a digit.")
src = os.path.join(kg_problem_template, 'kg')
dest = os.path.join(args.loc, prob)
print(info_text('The destination folder will be'), key_text(dest))
if os.path.exists(dest):
raise CommandError("The folder already exists!")
if args.subtasks < 0:
raise CommandError("Subtask count must be >= 0")
touch_dir(dest)
subtask_list = [OrderedDict(id=index, score=10) for index in range(1, args.subtasks + 1)]
env = {
'problem_title': args.title or ' '.join(re.split(r'[-_. ]+', prob)).title().strip(),
'minimal': args.minimal,
'checker': args.checker,
'subtasks': args.subtasks,
# Jinja's tojson doesn't seem to honor dict order, so let's just use json.dumps
"subtask_list": [OrderedDict(id=index, score=10) for index in range(1, args.subtasks + 1)],
# TODO find a way to indent only up to a certain level
'subtask_list_json': "[" + ','.join('\n ' + json.dumps(sub) for sub in subtask_list) + "\n]",
'time_limit': args.time_limit,
"version": VERSION,
}
fmt = Format(os.path.join(src, '*'), os.path.join(dest, '*'), read='i', write='o')
for inp, outp in fmt.thru_io():
if not os.path.isfile(inp): continue
if os.path.splitext(inp)[1] == '.j2':
res = kg_render_template(inp, **env)
outp, ext = os.path.splitext(outp)
assert ext == '.j2'
else:
with open(inp) as inpf:
res = inpf.read()
touch_container(outp)
if res.strip('\n'):
info_print(f'Writing {os.path.basename(outp)}')
if not res.endswith('\n'): res += '\n'
with open(outp, 'w') as outpf:
outpf.write(res)
succ_print('DONE!')
##########################################
# compile source codes for upload
compile_p = subparsers.add_parser('kompile',
aliases=['compile'],
formatter_class=argparse.RawDescriptionHelpFormatter,
help='Preprocess python source codes to be ready to upload',
description=cformat_text(dedent('''\
Preprocess python source codes to be ready to upload.
$ [*[kg kompile -f [program_files]]*]
For example,
$ [*[kg kompile -f validator.py]*]
$ [*[kg kompile -f gen_random.py]*]
$ [*[kg kompile -f checker.py]*]
Or simultaneously,
$ [*[kg kompile -f validator.py gen_random.py checker.py]*]
If you wrote your problem using "kg init", then the usage is very simple:
$ [*[kg kompile]*]
It will kompile all relevant files in details.json: model solution, data maker, validator,
generators, and checker (if they exist).
Explanation:
                    Python files written using KompGen usually import from other files (and from the "kg" library
itself), but most contest/judge systems only accept single files. This command "inlines" the
imports automatically, so that the result is a single file.
Any "import star" line ending with the string "### @import" will be replaced inline with the
code from that file. This works recursively.
Only "kg" library commands and files that are explicitly added (in details.json and/or via
--files/--extra-files) will be inlined. So, if you are importing from a separate file, ensure
that it is in "other_programs" (or "generators", "model_solution", etc.) or in --extra-files.
Only Python files will be processed; it is up to you to ensure that the non-python programs you
write will be compatible with the contest system/judge you are using.
The generated files will be in "kgkompiled/".
Other directives aside from "@import" are available; see the KompGen repo docs for more details.
''')))
compile_p.add_argument('formats', nargs='*',
        help='contest formats to compile to (["hr", "pg", "pc2", "dom", "cms", "cms-it"], default ["pg"])')
compile_p.add_argument('-f', '--files', nargs='*',
help='files to compile (only needed if you didn\'t use "kg init")')
compile_p.add_argument('-ef', '--extra-files', nargs='*',
help='extra files imported via "@import" (only needed if you didn\'t use "kg init", '
'otherwise, please use "other_programs")')
compile_p.add_argument('-l', '--loc', default='.', help='location to run commands on')
compile_p.add_argument('-d', '--details', help=argparse.SUPPRESS)
compile_p.add_argument('-S', '--shift-left', action='store_true',
help='compress the program by reducing the indentation size from 4 spaces to 1 tab. '
'Use at your own risk. (4 is hardcoded because it is the indentation level of the '
'"kg" module.)')
compile_p.add_argument('-C', '--compress', action='store_true',
help='compress the program by actually compressing it. Use at your own risk.')
@set_handler(compile_p)
def _kg_compile(format_, args):
if args.main_command == 'compile':
info_print("You spelled 'kompile' incorrectly. I'll let it slide for now.", file=stderr)
kg_compile(
format_,
Details.from_format_loc(format_, args.details),
*(args.formats or ['pg']),
loc=args.loc,
shift_left=args.shift_left,
compress=args.compress,
files=args.files,
extra_files=args.extra_files,
)
def _get_cms_code(details, code_raw):
return details.cms_options.get('name', ''.join(re.split(r'[._-]', code_raw)))
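# Example (editor note): _get_cms_code() prefers an explicit cms_options['name']; otherwise it strips
# '.', '_' and '-' from the raw code, e.g., 'my-problem' becomes 'myproblem'.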
def kg_compile(format_, details, *target_formats, loc='.', shift_left=False, compress=False, python3='python3',
dest_loc=None, files=[], extra_files=[], statement_file=None, global_statement_file=None):
valid_formats = {'hr', 'pg', 'pc2', 'dom', 'cms', 'cms-it'}
if not set(target_formats) <= valid_formats:
raise CommandError(f"Invalid formats: {set(target_formats) - valid_formats}")
if not is_same_format(format_, 'kg'):
raise CommandError(f"You can't use '{format_}' format to 'kompile'.")
# convert files to Programs
files = [Program.from_data(file, relpath=loc) for file in files or []]
extra_files = [Program.from_data(file, relpath=loc) for file in extra_files or []]
@memoize
def get_module(filename):
if filename and os.path.isfile(filename) and filename.endswith('.py'):
module, ext = os.path.splitext(os.path.basename(filename))
assert ext == '.py'
return module
@memoize
@listify
def load_module(module_id):
if module_id not in locations:
raise CommandError(f"Couldn't find module {module_id}! "
f"(Add it to {'other_programs' if problem_code else '--extra-files'}?)")
with open(locations[module_id]) as f:
for line in f:
if not line.endswith('\n'):
warn_print('Warning:', locations[module_id], "doesn't end with a new line.")
yield line.rstrip('\n')
def get_module_id(module, context):
nmodule = module
if nmodule.startswith('.'):
if context['module_id'] in kg_libs:
nmodule = 'kg' + nmodule
if nmodule.startswith('.'):
warn_print(f"Warning: Ignoring relative import for {module}", file=stderr)
nmodule = nmodule.lstrip('.')
return nmodule
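    # Example (editor note): a relative import inside a kg library module is resolved against the
    # 'kg' package, e.g., get_module_id('.utils', {'module_id': 'kg.checkers'}) == 'kg.utils';
    # relative imports anywhere else are flattened with a warning.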
# get the statement file
statement_file = (
statement_file or details.statement_compiled or global_statement_file or
details.statement_base or os.path.join(kg_problem_template, 'statement.pdf')
# the last one is a dummy statement file...because some platforms require a statement file
)
# extract problem code
# not really sure if this is the best way to extract the problem code.
# also, probably should be put elsewhere...
if details.relpath:
problem_code = os.path.basename(os.path.abspath(os.path.join(details.relpath, '.')))
elif details.source:
problem_code = os.path.basename(os.path.dirname(os.path.abspath(details.source)))
elif details.title:
problem_code = '-'.join(''.join(c if c.isalnum() else ' ' for c in details.title).lower().split())
else:
problem_code = None # probably a one-off application
# locate all necessary files
# kg libs
locations = {
'kg.formatters': 'formatters.py',
'kg.generators': 'generators.py',
'kg.validators': 'validators.py',
'kg.checkers': 'checkers.py',
'kg.utils': os.path.join('utils', '__init__.py'),
'kg.utils.hr': os.path.join('utils', 'hr.py'),
'kg.utils.utils': os.path.join('utils', 'utils.py'),
'kg.graphs': os.path.join('graphs', '__init__.py'),
'kg.graphs.utils': os.path.join('graphs', 'utils.py'),
'kg.graphs.generators': os.path.join('graphs', 'generators.py'),
'kg.grids': os.path.join('grids', '__init__.py'),
'kg.grids.utils': os.path.join('grids', 'utils.py'),
'kg.grids.generators': os.path.join('grids', 'generators.py'),
'kg.math': os.path.join('math', '__init__.py'),
'kg.math.geom2d': os.path.join('math', 'geom2d.py'),
'kg.math.primes': os.path.join('math', 'primes.py'),
}
locations = {lib: os.path.join(kg_path, path) for lib, path in locations.items()}
kg_libs = set(locations)
checkers = []
# detect checkers (try to be smart)
for file in files:
base, ext = os.path.splitext(os.path.basename(file.rel_filename))
if 'checker' in base and ext == '.py':
checkers.append(file)
if problem_code:
# current files
checkers.append(details.checker)
all_local = [details.validator, details.interactor, details.model_solution] + (
details.generators + details.other_programs + files + extra_files + checkers)
# files that start with 'grader.' (for cms mainly)
graders = [file for file in details.other_programs if os.path.basename(file.filename).startswith('grader.')]
cms_attachments = [os.path.join(loc, attachment) for attachment in details.cms_options.get('attachments', [])]
# files that need to be either translated (kg python codes) or just copied (everything else)
to_compiles = {
'pg': [details.validator, details.interactor] + checkers + details.generators,
'hr': checkers,
'pc2': [details.validator] + checkers,
'dom': [details.validator] + checkers,
'cms': [(checker, "checker") for checker in checkers] + graders,
'cms-it': [(checker, os.path.join("check", "checker")) for checker in checkers]
+ [
(grader, os.path.join("sol", os.path.basename(grader.rel_filename)))
for grader in graders
],
}
else:
all_local = files + extra_files
to_compiles = {}
if not problem_code and not files:
raise CommandError(f"Missing -f/--files. Run 'kg kompile -h' for more details.")
# keep only python files
all_local = [p for p in all_local if p and get_module(p.rel_filename)]
for p in all_local:
locations[get_module(p.rel_filename)] = p.rel_filename
# get subtasks files
subjson = details.subtasks_files
subtasks_files = []
if details.valid_subtasks:
subtasks_files = details.load_subtasks_files()
def subtask_score(sub):
if details.valid_subtasks[sub].score is not None:
return details.valid_subtasks[sub].score
else:
default = 1 # hardcoded for now
warn_print(f'Warning: no score value found for subtask {sub}... using the default {default} point(s)')
subtask_score.missing = True
return default
subtask_score.missing = False
# convert to various formats
for fmt, name, copy_files in [
('pg', 'Polygon', True),
('hr', 'HackerRank', True),
('pc2', 'PC2', False),
('dom', 'DOMjudge', False),
('cms', 'CMS', True),
('cms-it', 'CMS Italian', False),
]:
if fmt not in target_formats: continue
to_compile = files + to_compiles.get(fmt, [])
problem_template = os.path.join(kg_problem_template, fmt)
decor_print()
decor_print('.. '*14)
beginfo_print(f'Compiling for {fmt} ({name})')
dest_folder = dest_loc(loc, fmt) if dest_loc else os.path.join(loc, 'kgkompiled', fmt)
# clear dest_folder (scary...)
info_print('Clearing folder:', dest_folder, '...')
if os.path.isdir(dest_folder): rmtree(dest_folder)
touch_dir(dest_folder)
to_translate = {}
to_copy = {}
for g in to_compile:
target_name = None
if isinstance(g, tuple):
g, target_name = g
if not g: continue
if target_name is None:
target_name = os.path.basename(g.rel_filename)
if os.path.isfile(g.rel_filename):
(to_translate if get_module(g.rel_filename) else to_copy)[g.rel_filename] = target_name
else:
warn_print(f"Warning: {g.rel_filename} (in details.json) is not a file.", file=stderr)
if fmt == 'cms':
for attachment in cms_attachments:
to_copy[attachment] = os.path.join('attachments', os.path.basename(attachment))
if fmt in {'cms-it', 'cms'} and problem_code:
cms_code = _get_cms_code(details, problem_code)
if cms_code != problem_code:
info_print(f"Using the code name {cms_code!r} instead of {problem_code!r}.")
if 'name' not in details.cms_options:
warn_print(f"Warning: Using {cms_code!r} instead of {problem_code!r}. "
"(CMS problem code names should contain only letters and digits)")
targets = {}
found_targets = {}
for filename, target_name in to_translate.items():
module = get_module(filename)
target = os.path.join(dest_folder, target_name)
targets[module] = target
if target in found_targets:
warn_print(f"Warning: Files have the same destination file ({target}): "
f"{found_targets[target]} and {filename}", file=stderr)
found_targets[target] = filename
copy_targets = {}
for filename, target_name in to_copy.items():
target = os.path.join(dest_folder, target_name)
copy_targets[filename] = target
if target in found_targets:
warn_print(f"Warning: Files have the same destination file ({target}): "
f"{found_targets[target]} and {filename}", file=stderr)
found_targets[target] = filename
# copying
for filename in natsorted(to_copy):
target = copy_targets[filename]
info_print(f'[... non-python ...] converting {filename} to {target} (kopying only)', file=stderr)
touch_container(target)
copy_file(filename, target)
# translating
for filename in natsorted(to_translate):
module = get_module(filename)
info_print(f'[{module}] converting {filename} to {targets[module]} (kompiling)')
touch_container(targets[module])
lines = list(compile_lines(load_module(module),
module_id=module,
module_file=filename,
load_module=load_module,
get_module_id=get_module_id,
format=fmt,
details=details,
subtasks_files=subtasks_files,
snippet=False,
subtasks_only=False,
shift_left=shift_left,
compress=compress,
))
with open(targets[module], 'w') as f:
shebanged = False
for line in lines:
assert not line.endswith('\n')
if not shebanged and not line.startswith('#!'):
shebang_line = f"#!/usr/bin/env {python3}"
info_print(f'adding shebang line {shebang_line!r}')
print(shebang_line, file=f)
shebanged = True
print(line, file=f)
# make it executable
make_executable(targets[module])
# TODO for hackerrank, check that the last file for each subtask is unique to that subtask.
if fmt == 'hr' and details.valid_subtasks:
try:
hr_parse_subtasks(details.valid_subtasks, details.load_subtasks_files())
except HRError:
err_print("Warning: HackerRank parsing of subtasks failed.")
raise
# snippets for hackerrank upload
if fmt == 'hr':
for checker in checkers:
if get_module(checker.rel_filename):
# pastable version of grader
filename = checker.rel_filename
module = get_module(filename)
target = os.path.join(dest_folder, 'hr.pastable.version.' + os.path.basename(filename))
info_print(f'[{module}] writing snippet version of {filename} to {target}')
touch_container(target)
lines = list(compile_lines(load_module(module),
module_id=module,
module_file=filename,
load_module=load_module,
get_module_id=get_module_id,
format=fmt,
details=details,
subtasks_files=subtasks_files,
snippet=True,
subtasks_only=False,
shift_left=shift_left,
compress=compress,
))
with open(target, 'w') as f:
print("# NOTE: THIS SCRIPT IS MEANT TO BE PASTED TO HACKERRANK'S CUSTOM CHECKER, NOT RUN ON ITS OWN.",
file=f)
for line in lines:
assert not line.endswith('\n')
print(line, file=f)
target = os.path.join(dest_folder, 'hr.subtasks.only.' + os.path.basename(filename))
info_print(f'[{module}] writing the subtasks snippet of {filename} to {target}')
touch_container(target)
lines = list(compile_lines(load_module(module),
module_id=module,
module_file=filename,
load_module=load_module,
get_module_id=get_module_id,
format=fmt,
details=details,
subtasks_files=subtasks_files,
snippet=True,
subtasks_only=True,
write=False,
))
with open(target, 'w') as f:
print('# NOTE: THIS SCRIPT IS NOT MEANT TO BE RUN ON ITS OWN.', file=f)
for line in lines:
assert not line.endswith('\n')
print(line, file=f)
# convert testscript
if fmt == 'pg' and details.testscript:
filename = details.testscript
target = os.path.join(dest_folder, os.path.basename(filename))
info_print(f'[... non-python ...] converting testscript {filename} to {target}', file=stderr)
touch_container(target)
with open(details.testscript) as scrf:
script = scrf.read()
lines = list(convert_testscript(script, details.generators, relpath=loc))
with open(target, 'w') as f:
for line in lines:
assert not line.endswith('\n')
print(line, file=f)
# copy over the files
if copy_files and problem_code:
info_print('copying test data from', loc, 'to', dest_folder, '...')
# TODO code this better.
if fmt == 'cms':
input_files, output_files = convert_formats(
(format_, loc),
(fmt, dest_folder),
dest_kwargs=dict(subtasks=subtasks_files)
)
else:
input_files, output_files = convert_formats(
(format_, loc),
(fmt, dest_folder),
)
if fmt == 'dom' and problem_code:
# statement file
info_print('creating statement file...')
source_file = statement_file
target_file = os.path.join(dest_folder, 'statement.pdf')
copy_file(source_file, target_file)
# do special things for cms
if fmt == 'cms-it' and problem_code:
# statement file (required)
info_print('creating statement file...')
source_file = statement_file
target_file = os.path.join(dest_folder, 'statement', 'statement.pdf')
copy_file(source_file, target_file)
# test files
# need to replicate files that appear in multiple subtasks
i_os = get_format(argparse.Namespace(format=format_, loc=loc, input=None, output=None), read='io').thru_io()
if details.valid_subtasks:
i_o_reps = [i_os[index]
for sub in details.valid_subtasks
for low, high, subs in subtasks_files
if sub in subs
for index in range(low, high + 1)]
else:
i_o_reps = i_os
copied = 0
info_print("Copying now...")
for (srci, srco), (dsti, dsto) in zip(i_o_reps, CMSItFormat(dest_folder, write='io').thru_expected_io()):
copy_file(srci, dsti)
copy_file(srco, dsto)
copied += 2
succ_print(f"Copied {copied} files (originally {len(i_os)*2})")
# task.yaml
info_print('writing task.yaml')
if details.valid_subtasks:
input_count = sum((high - low + 1) * len(subs) for low, high, subs in subtasks_files)
else:
input_count = len(CMSItFormat(dest_folder, read='i').inputs)
kg_render_template_to(
os.path.join(problem_template, 'task.yaml.j2'),
os.path.join(dest_folder, 'task.yaml'),
problem_code=cms_code,
details=details,
input_count=input_count,
)
# gen/GEN
if details.valid_subtasks:
info_print('writing gen/GEN (subtasks)')
gen_file = os.path.join(dest_folder, 'gen', 'GEN')
touch_container(gen_file)
with open(gen_file, 'w') as f:
total_score = 0
index = 0
for sub in details.valid_subtasks:
score = subtask_score(sub)
total_score += score
print(f"# ST: {score}", file=f)
for low, high, subs in subtasks_files:
if sub in subs:
for it in range(low, high + 1):
index += 1
print(index, file=f)
if index != input_count:
raise CommandError("Count mismatch. This shouldn't happen :( Maybe subtasks_files is not up-to-date?")
if fmt == 'cms' and problem_code:
# copy statement file
info_print('creating statement file...')
source_file = statement_file
target_file = os.path.join(dest_folder, 'statement.pdf')
copy_file(source_file, target_file)
# create config file
config = {
'name': cms_code,
'title': details.title,
'time_limit': details.time_limit,
'task_type': 'Batch', # only Batch and OutputOnly for now.
# For OutputOnly, just override with cms_options.
# TODO support Communication
'checker': 'checker',
'statement': 'statement.pdf',
}
scoring_overall = details.scoring_overall
if details.binary:
if scoring_overall == '!min':
# this is just like a problem with a single subtask
config['score_type'] = 'GroupMin'
config['score_type_parameters'] = [[details.scoring_default_weight, '.*']]
total_score = sum(score for score, *rest in config['score_type_parameters'])
elif scoring_overall == '!sum':
# this is just like a problem with a separate subtask per file
def input_base_regex(input_file):
base, ext = os.path.splitext(os.path.basename(input_file))
if ext != '.in': raise CommandError(f"Expected input file extension '.in', got {ext}")
return re.escape(base)
config['score_type'] = 'GroupMin'
config['score_type_parameters'] = [[
details.scoring_default_weight, input_base_regex(input_file),
] for input_file in input_files
]
total_score = sum(score for score, *rest in config['score_type_parameters'])
elif scoring_overall == '!ave':
# this is just like !sum, but we can hardcode the score_type_parameters to 100/len(tests).
# The docs say 'score_type_parameters' should be an int, but that's a lie.
config['score_type'] = 'Sum'
config['score_type_parameters'] = 100 / len(input_files)
total_score = config['score_type_parameters'] * len(input_files)
else:
raise CommandError(
f"Unsupported scoring policy {scoring_overall} for binary task")
else:
if scoring_overall != '!sum':
warn_print(
f"WARNING: Unsupported scoring policy {scoring_overall} for "
"task with subtasks, defaulting to !sum")
scoring_overall = '!sum'
if scoring_overall == '!sum':
config['score_type'] = 'GroupMin'
config['score_type_parameters'] = [
[subtask_score(sub), rf".+_subs.*_{sub}_.*"]
for sub in details.valid_subtasks
]
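# Illustrative example (assuming two valid subtasks with ids 1 and 2 scored 40 and 60):
# the parameters become [[40, r".+_subs.*_1_.*"], [60, r".+_subs.*_2_.*"]], i.e. each
# GroupMin bucket collects the test files whose names carry that subtask id.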
total_score = sum(score for score, *rest in config['score_type_parameters'])
else:
raise CommandError(
f"Unsupported scoring policy {scoring_overall} for task with "
"subtasks")
if total_score == 100:
info_print('The total score is', total_score)
else:
warn_print(f'WARNING: The total score is {total_score}, but we want 100')
# override options
config.update(details.cms_options)
# make attachments 'basename'
if 'attachments' in config:
config['attachments'] = [os.path.basename(attachment) for attachment in config['attachments']]
# write config file
config_file = os.path.join(dest_folder, 'kg_cms_task.json')
info_print('writing config file...', config_file)
with open(config_file, 'w') as fl:
json.dump(config, fl, indent=4)
tests_folder = os.path.join(dest_folder, 'tests')
tests_zipname = os.path.join(dest_folder, 'cms_tests.zip')
info_print('making tests zip for CMS...', tests_zipname)
def get_arcname(filename):
assert os.path.samefile(tests_folder, os.path.commonpath([tests_folder, filename]))
return os.path.relpath(filename, start=tests_folder)
with zipfile.ZipFile(tests_zipname, 'w', zipfile.ZIP_DEFLATED) as zipf:
for inp, outp in CMSFormat(dest_folder, read='io').thru_io():
for fl in inp, outp:
zipf.write(fl, arcname=get_arcname(fl))
all_zipname = os.path.join(dest_folder, 'cms_all.zip')
info_print('making whole zip for CMS...', all_zipname)
with zipfile.ZipFile(all_zipname, 'w', zipfile.ZIP_DEFLATED) as zipf:
for fl in ([tests_zipname, config_file] + [
os.path.join(dest_folder, filename)
for filename in ['checker'] + [os.path.basename(grader.filename) for grader in graders]
]):
zipf.write(fl, arcname=os.path.basename(fl))
if fmt == 'pg' and problem_code:
zipname = os.path.join(dest_folder, 'upload_this_to_polygon_but_rarely.zip')
info_print('making zip for Polygon...', zipname)
tests_folder = os.path.join(dest_folder, 'tests')
def get_arcname(filename):
assert os.path.samefile(tests_folder, os.path.commonpath([tests_folder, filename]))
return os.path.relpath(filename, start=tests_folder)
with zipfile.ZipFile(zipname, 'w', zipfile.ZIP_DEFLATED) as zipf:
for inp in PGFormat(dest_folder, read='i').thru_inputs():
zipf.write(inp, arcname=get_arcname(inp))
if fmt == 'hr' and problem_code:
zipname = os.path.join(dest_folder, 'upload_this_to_hackerrank.zip')
info_print('making zip for HackerRank...', zipname)
def get_arcname(filename):
assert os.path.samefile(dest_folder, os.path.commonpath([dest_folder, filename]))
return os.path.relpath(filename, start=dest_folder)
with zipfile.ZipFile(zipname, 'w', zipfile.ZIP_DEFLATED) as zipf:
for inp, outp in HRFormat(dest_folder, read='io').thru_io():
for fl in inp, outp:
zipf.write(fl, arcname=get_arcname(fl))
succ_print(f'Done compiling problem "{problem_code}" for {fmt} ({name})')
decor_print('.. '*14)
if subtask_score.missing:
warn_print('Warning: some subtask scores missing. You may want to turn "valid_subtasks" into a list that '
'looks like [{"id": 1, "score": 20}, {"id": 2, "score": 30}] ...')
if 'cms-it' in target_formats and details.valid_subtasks and total_score != 100:
err_print(f'ERROR: The total score is {total_score} but the Italian format requires a total score of 100.')
raise CommandError(f'The total score is {total_score} but the Italian format requires a total score of 100.')
##########################################
# compile a contest from a configuration file
contest_p = subparsers.add_parser('kontest',
aliases=['contest'],
formatter_class=argparse.RawDescriptionHelpFormatter,
help='Compile a contest from a description JSON file',
description=cformat_text(dedent('''\
Compile a contest from a description JSON file.
This command is intended for contests whose problems are created using "kg init". In that case,
it will parse the relevant information from the corresponding details.json files.
$ [*[kg contest [format] [config_file]]*]
Here, [format] is the contest format, and [config_file] is a path to a json file containing the
contest metadata.
An example [config_file] can be seen in examples/contest.json.
In the case of pc2, this generates a folder in "kgkompiled/" containing the files relevant for the
contest (including seating arrangements [optional] and passwords), which can be read by the PC^2
system. Loading it via PC^2 will automatically set up the whole contest. (Painless!)
This assumes that "kg make all" has been run for every problem. If you wish to run those
automatically as well, use
$ [*[kg contest [format] [config_file] --make-all]*]
Important note about the "--target-loc [target_loc]" option: The [target_loc] must be an absolute
path pointing to a folder and denotes the location where the contest folder is going to be in the
contest system. The output of "kg contest" will still be generated in "kgkompiled/", but the output
itself will be configured as if it will be placed in [target_loc] when it is used. This is useful
since PC^2 requires absolute paths in its configuration.
See the KompGen repo docs for more details.
''')))
contest_p.add_argument('format', help='Contest format to compile to ("pc2", "dom", etc.)')
contest_p.add_argument('config', help='JSON file containing the contest configuration')
contest_p.add_argument('-m', '--make-all', action='store_true', help='Run "kg make all" in all problems')
contest_p.add_argument('-ns', '--no-seating', action='store_true', help='Skip the creation of the seating arrangement')
contest_p.add_argument('-t', '--target-loc', help='Specify the final location of the contest folder in the contest system')
contest_p.add_argument('-s', '--seed', type=int, help='Initial seed to use')
def problem_letters():  # yields 'A'..'Z', then 'AA', 'BB', ..., 'ZZ', then 'AAA', ... (letter repeated l times)
for l in count(1):
for c in ascii_uppercase:
yield l * c
@set_handler(contest_p)
def kg_contest(format_, args):
if args.main_command == 'contest':
info_print("You spelled 'kontest' incorrectly. I'll let it slide for now.", file=stderr)
if not is_same_format(format_, 'kg'):
raise CommandError(f"You can't use '{format_}' format to 'kontest'.")
valid_formats = {'pc2', 'cms', 'cms-it', 'dom'}
if args.format not in valid_formats:
raise CommandError(f"Unsupported contest format: {args.format}")
contest = ContestDetails.from_loc(args.config)
target_loc = args.target_loc or contest.target_loc or os.path.abspath('kgkompiled')
if not os.path.isabs(target_loc):
raise CommandError(f"--target-loc must be an absolute path: got {target_loc!r}")
info_print(f"Using target_loc = {target_loc!r}", file=stderr)
seedval = args.seed
if seedval is None: seedval = contest.seed
if seedval is None: seedval = randrange(10**18)
info_print(f"Using seedval = {seedval!r}", file=stderr)
rand = Random(seedval)
contest_folder = os.path.join('kgkompiled', contest.code)
# clear contest_folder (scary...)
info_print('Clearing folder:', contest_folder, '...')
if os.path.isdir(contest_folder): rmtree(contest_folder)
touch_dir(contest_folder)
decor_print()
decor_print('-'*42)
beginfo_print('Making passwords')
passwords, accounts = write_passwords_format(contest, args.format, seedval=seedval, dest=contest_folder)
succ_print('Done passwords')
contest_template = os.path.join(kg_contest_template, args.format)
if args.format == 'cms-it' or args.format == 'cms':
# identify key folders
contest_data_folder = os.path.join(contest_folder, 'contest')
# construct template environment
env = {
"datetime_created": datetime.now(),
"contest": contest,
"passwords": passwords,
}
# problem envs
found_codes = {}
codes = []
problem_details = []
for letter, problem_loc in zip(problem_letters(), contest.rel_problems):
details = Details.from_format_loc(format_, os.path.join(problem_loc, 'details.json'), relpath=problem_loc)
code_raw = os.path.basename(problem_loc)
code = _get_cms_code(details, code_raw)
if code in found_codes:
found_codes[code] += 1
code += str(found_codes[code])
else:
found_codes[code] = 1
codes.append(code)
problem_details.append(details)
decor_print()
decor_print('-'*42)
print(beginfo_text("Getting problem"), key_text(repr(code_raw)), beginfo_text(f"(from {problem_loc})"))
if code != code_raw:
info_print(f"Using the code name {code!r} instead of {code_raw!r}.")
if 'name' not in details.cms_options:
warn_print(f"Warning: Using {code!r} instead of {code_raw!r}. "
"(CMS problem code names should contain only letters and digits)")
if args.make_all:
info_print('Running "kg make all"...')
kg_make(['all'], problem_loc, format_, details)
info_print('Running "kg kompile"...')
def dest_loc(loc, fmt):
return os.path.join(contest_data_folder, code)
kg_compile(format_, details, args.format,
loc=problem_loc,
dest_loc=dest_loc,
global_statement_file=contest.rel_global_statements,
python3=contest.python3_command)
# cms-it specific stuff
if args.format == 'cms-it':
decor_print()
decor_print('-'*42)
beginfo_print('Writing contest config files')
info_print(f'Writing contest.yaml')
source = os.path.join(contest_template, 'contest.yaml.j2')
target = os.path.join(contest_data_folder, 'contest.yaml')
kg_render_template_to(source, target, **env)
if args.format == 'cms':
decor_print()
decor_print('-'*42)
beginfo_print('Writing contest config files')
# write config
config_file = os.path.join(contest_data_folder, 'kg_cms_contest.json')
warn_print(
"Note: For CMS, we're ignoring compilation and run options of languages. "
"We're only taking the names.")
config = {
"description": contest.title,
"name": contest.code,
"problems": codes,
"start": contest.start_time.timestamp(),
"stop": contest.end_time.timestamp(),
"duration": contest.duration.total_seconds(),
"timezone": contest.display_timezone,
"languages": [lang['lang'] for lang in contest.langs],
# compute score precision based on the individual problems' score precision
"score_precision": max(problem.cms_options.get('score_precision', 0) for problem in problem_details),
}
info_print('writing config file...', config_file)
with open(config_file, 'w') as fl:
json.dump(config, fl, indent=4)
# write users
users_file = os.path.join(contest_data_folder, 'kg_cms_users.json')
touch_container(users_file)
users = [{
"first_name": account.first_name,
"last_name": account.last_name,
"display_name": account.display_name,
"username": account.username,
"type": account.type,
"password": account.password,
} for account in accounts]
if contest.display_timezone:
for user in users:
user['timezone'] = contest.display_timezone
info_print('writing users file...', users_file)
with open(users_file, 'w') as fl:
json.dump(users, fl, indent=4)
if args.format == 'pc2' or args.format == 'dom':
# identify key folders
# folder in the judge computers where the files will eventually go in (needed for PC2)
target_folder = os.path.join(target_loc, contest.code)
if args.format == 'pc2':
problems_folder = os.path.join(contest_folder, 'CDP', 'config')
ext_data = os.path.join(contest_folder, 'ALLDATA')
target_problems_folder = os.path.join(target_folder, 'CDP', 'config')
target_ext_data = os.path.join(target_folder, 'ALLDATA')
else:
problems_folder = os.path.join(contest_folder, 'PROBLEMS')
target_problems_folder = os.path.join(target_folder, 'PROBLEMS')
target_ext_data = None
# construct template environment
if args.format == 'pc2' and not contest.site_password: raise CommandError(f"site_password required for {args.format}")
# TODO pass 'contest' instead of all these
env = {
"datetime_created": datetime.now(),
"contest": contest,
"filename": "{:mainfile}",
"filename_base": "{:basename}",
"alldata": target_ext_data,
"problems": [],
}
# load colors
def css_colors():
with open(os.path.join(kg_data_path, 'css_colors.txt')) as file:
css_colors = [line.strip() for line in file]
rand.shuffle(css_colors)
while True: yield from css_colors
css_colors = css_colors()
# problem envs
found_codes = {}
letters = []
for letter, problem_loc in zip(problem_letters(), contest.rel_problems):
details = Details.from_format_loc(format_, os.path.join(problem_loc, 'details.json'), relpath=problem_loc)
problem_code_raw = os.path.basename(problem_loc)
problem_code = ''.join(problem_code_raw.split('._-')) # TODO check if this is necessary. (Note: str.split takes a literal separator, so this only removes the exact substring '._-'.)
if problem_code in found_codes:
found_codes[problem_code] += 1
problem_code += str(found_codes[problem_code])
else:
found_codes[problem_code] = 1
decor_print()
decor_print('-'*42)
print(beginfo_text("Getting problem"), key_text(repr(problem_code)), beginfo_text(f"(from {problem_loc})"))
if details.valid_subtasks:
warn_print(f"Warning: The problem has subtasks, but '{args.format}' contests only support binary tasks. "
"Ignoring subtasks.")
if args.make_all:
info_print('Running "kg make all"...')
kg_make(['all'], problem_loc, format_, details)
time_limit = int(round(details.time_limit))
if time_limit != details.time_limit:
raise TypeError(f"The time limit must be an integer for {args.format}: {problem_loc} {time_limit}")
letters.append(letter)
problem = {
'problem_loc': problem_loc,
'details': details,
'letter': letter,
'problem_code_raw': problem_code_raw,
'problem_code': problem_code,
'time_limit': time_limit,
'color': next(css_colors),
}
env['problems'].append(problem)
# TODO actually organize the code better so we don't have lots of variables in the same scope...
del letter, details, problem_loc, problem, time_limit, problem_code_raw, problem_code
def yaml_lang(lang):
# TODO fix this?
lenv = {key: (
value.format(**env) if isinstance(value, str) else
str(value).lower() if isinstance(value, bool) else value) for key, value in lang.items()}
run = lenv['run'] + ' '  # trailing space guarantees index(' ') succeeds; it is stripped again by run_rest's [:-1]
spacei = run.index(' ')
lenv.update({
"run_first": run[:spacei],
"run_rest": run[spacei+1:-1]
})
return lenv
env['langs'] = [yaml_lang(lang) for lang in contest.langs]
if args.format == 'pc2':
decor_print()
decor_print('-'*42)
beginfo_print('Writing contest config files')
for file in ['contest.yaml', 'problemset.yaml']:
info_print('Writing', file)
source = os.path.join(contest_template, file + '.j2')
target = os.path.join(problems_folder, file)
kg_render_template_to(source, target, **env)
decor_print()
decor_print('-'*42)
beginfo_print('Writing problem files')
for problem in env['problems']:
letter = problem['letter']
problem_code = problem['problem_code']
details = problem['details']
problem_loc = problem['problem_loc']
kg_compile(format_, details, args.format, loc=problem_loc, python3=contest.python3_command)
# put validator in input_validators/, and checker to output_validators/
for name, targ in [
('validator', 'input_validators'),
('checker', 'output_validators'),
]:
src = getattr(details, name)
# TODO handle the case where src is not Python.
# We need to compile it and "pass the compiled file" somehow.
srcf = os.path.join(problem_loc, 'kgkompiled', args.format, os.path.basename(src.filename))
rel_targf = os.path.join(problem_code, targ, os.path.basename(src.filename))
targf = os.path.join(problems_folder, rel_targf)
info_print('Copying', srcf, 'to', targf)
copy_file(srcf, targf)
make_executable(targf)
problem[name] = os.path.join(target_problems_folder, rel_targf)
# write config files
# TODO fix problem statement integration
problem_files = ['problem.yaml']
if args.format == 'dom':
problem_files.append('domjudge-problem.ini')
else:
problem_files.append(os.path.join('problem_statement', 'problem.tex'))
for file in problem_files:
info_print('Writing', file, 'for', problem_code)
source = os.path.join(contest_template, os.path.basename(file) + '.j2')
target = os.path.join(problems_folder, problem_code, file)
kg_render_template_to(source, target, **{**env, **problem})
# copy statement. find one with compatible ending
statements = [s for s in [details.statement_compiled, contest.rel_global_statements, details.statement_base] if s]
if args.format == 'dom' and statements:
for source in statements:
base, ext = os.path.splitext(source)
if ext in {'.pdf', '.html', '.txt'}: break
else:
source = statements[0] # just take the first one
base, ext = os.path.splitext(source)
target = os.path.join(problems_folder, problem_code, 'problem_statement', 'statement' + ext)
copy_file(source, target)
target = os.path.join(problems_folder, problem_code, 'problem' + ext)
copy_file(source, target)
if args.format == 'dom':
for name, targ in [
('validator', 'input_validators'),
('checker', 'output_validators'),
]:
# TODO assumes python. careful: abs path issues
# TODO make this better
# I think this part is easier to adjust to handle non-python code
# needs to use src.compile and src.run and should use a 'build' file instead of just 'run'
# (but again be careful of abs path issues)
info_print('Creating run file for', name)
source_f = os.path.join(contest_template, 'run.j2')
target_f = os.path.join(problems_folder, problem_code, targ, 'run')
kg_render_template_to(source_f, target_f, **{**env, **problem})
make_executable(target_f)
dest = os.path.join(contest_folder, 'UPLOADS', 'UPLOAD_1ST_executables', f'{name}_{problem_code}')
info_print('Zipping', name, 'to', f'{dest}.zip')
make_archive(dest, 'zip', os.path.join(problems_folder, problem_code, targ))
# copy model solution
info_print("Copying model solution")
source = problem['details'].model_solution.rel_filename
target = os.path.join(problems_folder, problem_code, 'submissions', 'accepted', os.path.basename(source))
copy_file(source, target)
# copy test data
info_print(f"Copying data for {problem_code}...")
try:
src_format = KGFormat(problem['problem_loc'], read='io')
except FormatError as exc:
raise CommandError(f"No tests found for '{problem['problem_loc']}'. Please run 'kg make all' "
"to generate the files, or call 'kg kontest' with the '-m' option.") from exc
data_locs = [
os.path.join(problems_folder, problem_code, 'data', 'secret'),
]
if args.format == 'pc2':
data_locs.append(os.path.join(ext_data, problem_code))
for data_loc in data_locs:
info_print("Copying to", data_loc)
dest_format = KGFormat(write='io', tests_folder=data_loc)
copied = 0
for (srci, srco), (dsti, dsto) in zip(src_format.thru_io(), dest_format.thru_expected_io()):
copy_file(srci, dsti)
copy_file(srco, dsto)
copied += 2
succ_print("Copied", copied, "files")
if args.format == 'dom':
# zip the whole problem folder (for upload)
dest = os.path.join(contest_folder, 'UPLOADS', 'UPLOAD_2ND_problems', letter)
info_print('Zipping the whole thing...')
info_print('target is', dest + '.zip')
make_archive(dest, 'zip', os.path.join(problems_folder, problem_code))
info_print('Done.')
if not args.no_seating and contest.seating:
decor_print()
decor_print('-'*42)
beginfo_print('Writing seating arrangement')
write_seating(contest, seedval=seedval, dest=contest_folder)
if args.format == 'dom':
warn_print("Note: There seems to be no way to import contest configuration to DOMjudge, ")
warn_print("so you'll have to do that manually.")
decor_print()
decor_print('-'*42)
succ_print('See docs/CONTEST.md for the next steps to finish preparing the contest.')
##########################################
# manage seating arrangements
seating_args(subparsers)
##########################################
# Generate passwords
passwords_p = subparsers.add_parser('passwords',
formatter_class=argparse.RawDescriptionHelpFormatter,
help='Assign passwords to a list of teams',
description=cformat_text(dedent('''\
Assign passwords to a list of teams.
$ [*[kg passwords [teams_file]]*]
Here, [teams_file] is a path to a json file containing the team information. Outputs the contest
data in kgkompiled/.
It can simply contain a JSON list of strings denoting team names, like:
[*[[
"Quiwarriors 1",
"Quiwarriors 2",
"Fuchsia Moth",
"Re:Programmers"
]]*]
They can also be grouped by school; see examples/teams.json for an example. The printable html
files generated in kgkompiled/ will include the school name in the output.
''')))
passwords_p.add_argument('teams', help='JSON file containing the team and school details')
passwords_p.add_argument('-s', '--seed', type=int, help='Initial seed to use')
passwords_p.add_argument('-c', '--code', '--contest-code', help='Contest code')
passwords_p.add_argument('-t', '--title', '--contest-title', help='Contest title')
passwords_p.add_argument('-a', '--account-format', default='team{idx}', help='Account name format')
@set_handler(passwords_p)
def kg_passwords(format_, args):
with open(args.teams) as f: team_schools = ContestDetails.get_team_schools(json.load(f))
team_names = [team for ts in team_schools for team in ts['teams']]
passwords, seed = create_passwords(team_names, seedval=args.seed)
def get_team_schools():
for ts in team_schools:
for idx, team in enumerate(ts['teams'], 1):
yield ts, team, idx
def get_accounts():
for idx, (school, team_name, school_idx) in enumerate(get_team_schools(), 1):
account_name = args.account_format.format(
idx=idx,
school_idx=school_idx,
school_name=school['school'],
team_name=team_name,
first1=team_name.split()[0][0],
first=team_name.split()[0],
last1=team_name.split()[-1][0],
last=team_name.split()[-1],
)
yield Account(
username=account_name,
display_name=team_name,
password=passwords[team_name],
type='team',
index=idx,
type_index=idx,
school=school['school'],
school_short=school.get('school_short'),
country_code=school.get('country_code'),
)
beginfo_print(f'Writing passwords for {len(team_names)} teams')
write_passwords(list(get_accounts()), dest='kgkompiled',
seedval=' or '.join({str(x) for x in [args.seed, seed] if x is not None}),
code=args.code, title=args.title)
succ_print(f'Passwords done')
##########################################
autocomplete(parser)
def main(format='kg'):
args = parser.parse_args()
if args.krazy: set_krazy(True)
logf = stderr
try:
logf = args.default_file
decor_print('\n' + '='*42 + '\n', file=logf)
args.handler(format, args)
decor_print('\n' + '='*42 + '\n', file=logf)
succ_print('THE COMMAND FINISHED SUCCESSFULLY.', file=logf)
except Exception:
err_print('THE COMMAND FAILED.', file=logf)
raise
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class CreateSubUserInfo(object):
def __init__(self, name, password, phone, email, description=None, createAk=None, needResetPassword=None, consoleLogin=None, autoGeneratePassword=None):
"""
:param name: Sub-user name; 4-20 characters consisting of letters, digits, '-' and '_', starting with a letter
:param description: (Optional) Description, 0-256 characters
:param password: Set according to the password policy; by default 8-20 characters containing at least one lowercase letter, one uppercase letter and one digit
:param phone: Phone number, in the form "area code-phone number"
:param email: Email address
:param createAk: (Optional) Whether to create an accessKey; default false
:param needResetPassword: (Optional) Whether the sub-user must reset the password on first login; default false
:param consoleLogin: (Optional) Whether the sub-user may log in to the console; default true
:param autoGeneratePassword: (Optional) Whether to generate the password automatically; default false
"""
self.name = name
self.description = description
self.password = password
self.phone = phone
self.email = email
self.createAk = createAk
self.needResetPassword = needResetPassword
self.consoleLogin = consoleLogin
self.autoGeneratePassword = autoGeneratePassword
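# Minimal usage sketch (all values below are illustrative placeholders, not real data):
#
#   info = CreateSubUserInfo(
#       name='dev01',
#       password='********',
#       phone='86-13800000000',
#       email='dev01@example.com',
#       createAk=True,
#       needResetPassword=True,
#   )
#
# The object is then typically passed as part of the request body of the corresponding
# "create sub-user" request class, which is not shown in this file.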
|
#!/usr/bin/python3
import itertools
T=int(input())
for t in range(T):
n=int(input())
if(n<2):
print(0)
continue
a=int(input())
b=int(input())
L=[(0, 0)]
for i in range(1, n):
S=[]
for p in itertools.product(L, [a, b]):
# print(p)
S.append((p[0][0]+p[0][1], p[1]))
L=set(S)
# print(L)
print(*sorted(set([p[0]+p[1] for p in L])))
|
# Pongo by @blogmywiki / Giles Booth
# player B code
import radio
from microbit import *
from music import play, POWER_UP, JUMP_DOWN, NYAN, FUNERAL
a_bat = 2 # starting position of player A bat
b_bat = 2 # starting position of player B bat
bat_map = {0: 4, 1: 3, 2: 2, 3: 1, 4: 0}
ball_x = 2 # starting position of ball
ball_y = 2
a_points = 0
b_points = 0
winning_score = 5
game_over = False
radio.on() # like the roadrunner
def parse_message():
global a_bat, incoming, bat_map, ball_x, ball_y, a_points, b_points
msg_type = incoming[:1] # find out what kind of message we have received
msg = incoming[1:] # strip initial letter from message
if msg_type == 'p':
display.set_pixel(a_bat, 0, 0)
their_bat = int(msg) # mirror their bat position
a_bat = bat_map[their_bat]
if msg_type == 'x':
display.set_pixel(ball_x, ball_y, 0)
ball_x = bat_map[int(msg)]
if msg_type == 'y':
display.set_pixel(ball_x, ball_y, 0)
ball_y = bat_map[int(msg)]
if msg_type == 'a':
a_points = int(msg)
play(JUMP_DOWN, wait=False)
if msg_type == 'b':
b_points = int(msg)
play(POWER_UP, wait=False)
while not game_over:
display.set_pixel(b_bat, 4, 6)
display.set_pixel(a_bat, 0, 6)
display.set_pixel(ball_x, ball_y, 9) # draw ball
if button_a.was_pressed():
display.set_pixel(b_bat, 4, 0)
b_bat = b_bat - 1
if b_bat < 0:
b_bat = 0
radio.send(str(b_bat))
if button_b.was_pressed():
display.set_pixel(b_bat, 4, 0)
b_bat = b_bat + 1
if b_bat > 4:
b_bat = 4
radio.send(str(b_bat))
incoming = radio.receive()
if incoming:
parse_message()
if a_points == winning_score or b_points == winning_score:
game_over = True
if a_points < b_points:
play(NYAN, wait=False)
display.scroll('B wins!')
else:
play(FUNERAL, wait=False)
display.scroll('A wins!')
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class NatureCNN(nn.Module):
def __init__(self, params):
super(NatureCNN, self).__init__()
n_c = params['num_in_channels']
self.cnn_head = nn.Sequential(
nn.Conv2d(n_c, 32, 8, 4),
nn.ReLU(True),
nn.Conv2d(32, 64, 4, 2),
nn.ReLU(True),
nn.Conv2d(64, 32, 3, 1),
nn.ReLU(True)
)
self.fc = nn.Linear(7*7*32, 512)
def forward(self, x):
x = self.cnn_head(x)
x = x.view(x.size(0), -1)
x = F.relu(self.fc(x))
return x
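# Shape-check sketch (assumes the classic 84x84 Atari preprocessing; the 7*7*32 flatten
# size used by self.fc only holds for 84x84 inputs with these kernel sizes and strides):
#
#   net = NatureCNN({'num_in_channels': 4})
#   feats = net(torch.zeros(1, 4, 84, 84))   # -> torch.Size([1, 512])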
|
#!/usr/bin/python3
import tkinter
from tkinter import *
def select():
selection = "Awesome option " + str(var.get())
label.config(text = selection)
window = Tk()
var = IntVar()
radio1 = Radiobutton(window, text = "Option 1", variable = var, value = 1, command = select)
radio1.pack( anchor = W )
radio2 = Radiobutton(window, text = "Option 2", variable = var, value = 2, command = select)
radio2.pack( anchor = W )
radio3 = Radiobutton(window, text = "Option 3", variable = var, value = 3, command = select)
radio3.pack( anchor = W)
radio4 = Radiobutton(window, text = "Option 4", variable = var, value = 4, command = select)
radio4.pack( anchor = W)
label = Label(window)
label.pack()
window.mainloop()
|
import pysam
import os
contig_bins = {}
outlength = {}
samfile = pysam.AlignmentFile(snakemake.input.long_bam, 'rb')
gotten_contigs = set()
maxbin = 0
outreads = {}
outbases = {}
outreads500 = {}
outbases500 = {}
for bins in os.listdir(snakemake.input.metabat_done[:-4]):
if not bins.startswith(
'binned_contigs') or bins == "binned_contigs.unbinned" or bins == "binned_contigs.lowDepth" or bins == "binned_contigs.tooShort":
continue
bin = bins.split('.')[1]
if int(bin) > maxbin:
maxbin = int(bin)
outlength[bin] = 0
outreads[bin] = set()
outbases[bin] = 0
outreads500[bin] = set()
outbases500[bin] = 0
with open(os.path.join(snakemake.input.metabat_done[:-4], bins)) as f:
for line in f:
contig_bins[line.rstrip()] = bin
gotten_contigs.add(line.rstrip())
outlength[bin] += samfile.get_reference_length(line.rstrip())
cutoff = 0.05
for read in samfile.fetch(until_eof=True):
start = True
if read.cigartuples is not None:
clipped_start = 0
clipped_end = 0
for i in read.cigartuples:
if i[0] == 4 or i[0] == 5:
if start:
clipped_start += i[1]
else:
clipped_end += i[1]
else:
start = False
if read.reference_name in contig_bins:
bin = contig_bins[read.reference_name]
length = read.infer_read_length()
if (clipped_start / length <= cutoff and clipped_end / length <= cutoff) or \
(clipped_start / length <= cutoff and read.reference_end > read.reference_length - 100) or \
(clipped_end / length <= cutoff and read.reference_start < 100) or \
(read.reference_start < 100 and read.reference_end > read.reference_length - 100):
outreads[bin].add(read.query_name)
outbases[bin] += length
try:
os.makedirs("data/binned_reads")
except FileExistsError:
pass
out_dict = {}
for i in outreads:
with open("data/binned_reads/r" + i + '.long.list', 'w') as read_list:
reads = outreads[i]
bases = outbases[i]
for j in reads:
read_list.write(j + '\n')
out_dict[i] = [i, "data/binned_reads/r" + i + '.long.list', str(outlength[i]), str(bases)]
samfile = pysam.AlignmentFile(snakemake.input.short_bam, 'rb')
outreads = {}
outbases = {}
for read in samfile.fetch(until_eof=True):
start = True
if read.is_proper_pair:
if read.reference_name in contig_bins:
bin = contig_bins[read.reference_name]
length = read.infer_read_length()
if bin not in outreads:
outreads[bin] = set()
outbases[bin] = 0
outreads[bin].add(read.query_name)
outbases[bin] += length
for i in outreads:
with open("data/binned_reads/r" + i + '.short.list', 'w') as read_list:
for j in outreads[i]:
read_list.write(j + '\n')
out_dict[i] += ["data/binned_reads/r" + i + '.short.list', str(outbases[i])]
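# Rows written to snakemake.output.list are tab-separated:
#   bin id, long-read list path, total contig length of the bin (bp),
#   long-read bases, short-read list path, short-read bases.
# Bins with no properly paired short reads keep only the first four columns.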
with open(snakemake.output.list, 'w') as o:
for i in sorted(list(out_dict), key=int):
o.write('\t'.join(out_dict[i]) + '\n')
|
from ..libs import Gtk
from ..window import GtkViewport
from .base import Widget
class OptionContainer(Widget):
def create(self):
# We want a single unified widget; the Notebook is the representation of that widget.
self.native = Gtk.Notebook()
self.native.interface = self.interface
self.native.connect("switch-page", self.gtk_on_switch_page)
def gtk_on_switch_page(self, widget, page, page_num):
if self.interface.on_select:
self.interface.on_select(
self.interface,
option=self.interface.content[page_num]
)
def add_content(self, label, widget):
widget.viewport = GtkViewport(widget.native)
# Add all children to the content widget.
for child in widget.interface.children:
child._impl.container = widget
self.native.append_page(widget.native, Gtk.Label(label=label))
# Tabs aren't visible by default;
# tell the notebook to show all content.
self.native.show_all()
def set_on_select(self, handler):
# No special handling required
pass
def remove_content(self, index):
if index == self.native.get_current_page():
# Don't allow removal of a selected tab
raise self.interface.OptionException(
'Currently selected option cannot be removed'
)
self.native.remove_page(index)
def set_option_enabled(self, index, enabled):
self.interface.factory.not_implemented('OptionContainer.set_option_enabled()')
def is_option_enabled(self, index):
self.interface.factory.not_implemented('OptionContainer.is_option_enabled()')
def set_option_label(self, index, value):
tab = self.native.get_nth_page(index)
self.native.set_tab_label(tab, Gtk.Label(label=value))
def get_option_label(self, index):
tab = self.native.get_nth_page(index)
return self.native.get_tab_label(tab).get_label()
def get_current_tab_index(self):
return self.native.get_current_page()
def set_current_tab_index(self, current_tab_index):
self.native.set_current_page(current_tab_index)
|
# -*- coding: utf-8 -*-
import pytest
from sourcelocation import FileLine
def test_equals():
x = FileLine('foo.c', 1)
y = FileLine('foo.c', 2)
assert x != y
assert x == x
assert y == y
a = FileLine('bar.c', 1)
b = FileLine('foo.c', 1)
assert a != b
|
import unittest
from calc import parse, evaluate, Stack, postfix_eval, ParseError, EvaluationError
# testcase = namedtuple("testcase", ["expression", "representation", "sum"])
testcases = [
# basic testing
("5+4", ['5', '4', '+'], 9),
("1-5", ['1', '5', '-'], -4),
("(4/2)", ['4', '2', '/'], 2),
("4*2", ['4', '2', '*'], 8),
("3+4+5+8", ['3', '4', '5', '8', '+', '+', '+'], 20),
("(4 + 8) / 4)", ['4', '8', '+', '4', '/'], 3),
("((4+8)/4)^2", ['4', '8', '+', '4', '/', '2', '^'], 9),
('((4+8)/4)^2^3', ['4', '8', '+', '4', '/', '2', '3', '^', '^'], 6561),
("314*2", ['314', '2', '*'], 628),
("0+5", ['0', '5', '+'], 5),
("2/3", ['2', '3', '/'], 0),
("2^(1/3)", ['2', '1', '3', '/', '^'], 1),
("4^(1/2)", ['4', '1', '2', '/', '^'], 1),
("(2-2)^4", ['2', '2', '-', '4', '^'], 0),
# mentioned testing
("5+5+32", ['5', '5', '32', '+', '+'], 42),
("3 * 4", ['3', '4', '*'], 12),
# edgecase testing
("0001 + 1002", ['0001', '1002', '+'], 1003),
("0/5", ['0', '5', '/'], 0),
("0*5", ['0', '5', '*'], 0)
]
class TestParse(unittest.TestCase):
"""tests the calc.parse method"""
test_raise_data = [
("c4lculat0r", ParseError),
("(|)", ParseError),
]
def test_func(self):
for test in testcases:
# clean up the parsed Stack dump by dropping parentheses
parsed = list(i for i in parse(test[0]).dump() if i not in "()")
if parsed:
self.assertEqual(parsed, test[1])
def test_raise(self):
for test in self.test_raise_data:
with self.assertRaises(test[1]):
parse(test[0])
class TestEvaluate(unittest.TestCase):
"""tests the evaluate method"""
def test_func(self):
for test in testcases:
init_stack = Stack(test[1])
self.assertEqual(postfix_eval(init_stack), test[2])
if __name__ == '__main__':
unittest.main()
|
import pygame
import math
import colors # custom file that contains constants for colors
import helpers # custom helper functions
from algorithms import a_star, dijkstra, generate_walls, draw_borders
from button import Button
from tkinter import messagebox, Tk
WIDTH = 800
# set up all buttons with my custom class
a_star_button = Button(WIDTH * 0.05, WIDTH + 10,
colors.TEAL, "A* Algorithm", "A*")
dijkstra_button = Button(WIDTH * 0.2, WIDTH + 10,
colors.TEAL, "Dijkstra's Algorithm", "Dijkstra")
clear_board_button = Button(WIDTH * 0.85, WIDTH + 10,
colors.TEAL, "Clear board", "Clear")
generate_walls_button = Button(WIDTH * 0.45, WIDTH + 10,
colors.TEAL, "Generate walls", "Walls")
clear_visited_button = Button(WIDTH * 0.70, WIDTH + 10,
colors.TEAL, "Clear visited", "Visited")
buttons = []
# append the buttons to a list which is used later to check for clicks
buttons.append(a_star_button)
buttons.append(dijkstra_button)
buttons.append(clear_board_button)
buttons.append(generate_walls_button)
buttons.append(clear_visited_button)
def main(display, width, rows):
grid = helpers.make_grid(rows, WIDTH)
start_pos = None
end_pos = None
running = True
already_executed = False
generated_walls = False
# game loop
while running:
helpers.draw(display, grid, rows, width, buttons)
# event loop
for event in pygame.event.get():
pos = pygame.mouse.get_pos()
if event.type == pygame.QUIT:
running = False
# process mouse input
if pygame.mouse.get_pressed()[0]: # left mouseclick
# if click is in the bottom of the game screen (buttons)
if pos[1] > WIDTH:
# check each button for a click (needs refactoring)
for button in buttons:
if button.is_clicked(pos):
print("Clicked the button " + button.label)
if button.label == "A*":
if start_pos and end_pos and already_executed is False:
for row in grid:
for cell in row:
cell.update_neighbors(grid)
a_star(lambda: helpers.draw(display, grid,
rows, width), grid, start_pos, end_pos)
already_executed = True
elif already_executed is True:
Tk().wm_withdraw()
messagebox.showwarning(
"Invalid state", "You must clear the board first")
else:
Tk().wm_withdraw()
messagebox.showwarning(
"Invalid state", "You must set start and end positions")
if button.label == "Clear":
start_pos = None
end_pos = None
already_executed = False
generated_walls = False
grid = helpers.make_grid(rows, WIDTH)
if button.label == "Visited":
for row in grid:
for cell in row:
if cell.is_visited() or cell.is_available() or cell.is_path():
cell.reset()
already_executed = False
if button.label == "Dijkstra":
if start_pos and end_pos and already_executed is False:
for row in grid:
for cell in row:
cell.update_neighbors(grid)
# dijkstra(lambda: helpers.draw(display, grid,
# rows, width), grid, start_pos, end_pos)
dijkstra(lambda: helpers.draw(display, grid,
rows, width), grid, start_pos, end_pos)
already_executed = True
elif already_executed is True:
Tk().wm_withdraw()
messagebox.showwarning(
"Invalid state", "You must clear the board first")
else:
Tk().wm_withdraw()
messagebox.showwarning(
"Invalid state", "You must set start and end positions")
if button.label == "Walls" and generated_walls is False:
draw_borders(lambda: helpers.draw(display, grid,
rows, width), grid, start_pos, end_pos)
generate_walls(lambda: helpers.draw(display, grid,
rows, width), grid, start_pos, end_pos)
generated_walls = True
button.clicked = False
else:
row, col = helpers.get_mouse_pos(pos, rows, WIDTH)
cell = grid[row][col]
if not start_pos and cell != end_pos:
start_pos = cell
start_pos.set_start()
elif not end_pos and cell != start_pos:
end_pos = cell
end_pos.set_end()
elif cell != end_pos and cell != start_pos:
cell.set_barrier()
elif pygame.mouse.get_pressed()[2]: # right mouseclick
row, col = helpers.get_mouse_pos(pos, rows, WIDTH)
cell = grid[row][col]
cell.reset()
if cell == start_pos:
start_pos = None
if cell == end_pos:
end_pos = None
pygame.quit()
if __name__ == "__main__":
rows = 25
WINDOW = pygame.display.set_mode((WIDTH, WIDTH + 50))
pygame.display.set_caption("A* Pathfinding Visualization")
main(WINDOW, WIDTH, rows)
|
# ##### BEGIN MIT LICENSE BLOCK #####
#
# Copyright (c) 2020 Lukas Toenne
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# ##### END MIT LICENSE BLOCK #####
# <pep8 compliant>
import bpy
import bmesh
import math
from mathutils import Matrix, Vector
from sys import float_info, int_info
import collections
from . import props
"""
Input point for the triangulator.
"""
class InputPoint:
"""
Unique ID for the point.
"""
id = 0
"""
Index of the point.
"""
index = 0
"""
Location of the point in object space.
"""
co = Vector((0, 0, 0))
"""
One of:
- ORIGINAL: Main vertex from the original point set
- REPEAT_POS: Point copy shifted in positive direction
- REPEAT_NEG: Point copy shifted in negative direction
Only edges among original points, and between original and positive-repeat points,
are kept; all others are removed.
"""
type = 'ORIGINAL'
def __init__(self, id, co, type):
self.id = id
self.co = co
self.type = type
degenerate_epsilon = 1.0e-6
"""
True if vertices are colinear or coinciding within the epsilon value.
"""
def is_degenerate_triangle(co0, co1, co2):
return abs((co1 - co0).cross(co2 - co0).z) < degenerate_epsilon
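# Note: (co1 - co0).cross(co2 - co0).z is twice the signed area of the triangle in the XY
# plane, so a magnitude below degenerate_epsilon means the points are (nearly) colinear or
# coinciding. The same quantity is used by the CCW/CW orientation tests below.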
"""
True if the direction of edges between the 3 points is turning counter-clockwise.
"""
def is_ccw_safe(co0, co1, co2):
return (co1 - co0).cross(co2 - co0).z > degenerate_epsilon
"""
True if the direction of edges between the 3 points is turning clockwise.
"""
def is_cw_safe(co0, co1, co2):
return (co1 - co0).cross(co2 - co0).z < -degenerate_epsilon
"""
Get the adjacent and opposing vertices for an edge diagonal.
Returns vertices (a, b, c, d), where (a, c) is the edge and b, d are opposing vertices,
as well as the associated edges (in CCW order).
"""
def get_quad_verts(edge):
# Only valid for internal edges with two adjacent faces
assert(len(edge.link_loops) == 2)
# Edge verts order does not follow winding order!
# Loops have to be used for getting vertices in the correct order.
# The start of the vertex list is arbitrary though, so can just pick the first loop as starting point.
loop = edge.link_loops[0]
assert(loop.link_loop_next.vert == edge.link_loops[1].vert)
va = loop.vert
vd = loop.link_loop_prev.vert
ed = loop.link_loop_prev.edge
ec = loop.link_loop_prev.link_loop_prev.edge
loop = edge.link_loops[1]
assert(loop.link_loop_next.vert == edge.link_loops[0].vert)
vc = loop.vert
vb = loop.link_loop_prev.vert
eb = loop.link_loop_prev.edge
ea = loop.link_loop_prev.link_loop_prev.edge
return (va, vb, vc, vd), (ea, eb, ec, ed)
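# Geometry returned by get_quad_verts, with the diagonal edge (a, c) and CCW winding:
#
#           d
#         /   \
#       a ----- c      verts = (a, b, c, d); edges = (a-b, b-c, c-d, d-a)
#         \   /
#           b
#
# Flipping the diagonal (a, c) therefore replaces it with the edge (b, d).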
"""
Returns True if the Delaunay condition is satisfied:
Each vertex is outside the circumcircle for the other three.
Vertices must form a non-overlapping polygon (can be concave).
"""
def is_delaunay(verts):
a = verts[0].co
b = verts[1].co
c = verts[2].co
d = verts[3].co
M = Matrix((
(a.x, a.y, a.length_squared, 1),
(b.x, b.y, b.length_squared, 1),
(c.x, c.y, c.length_squared, 1),
(d.x, d.y, d.length_squared, 1),
))
return M.determinant() <= 0
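# The 4x4 determinant above is the classic in-circumcircle test: with (a, b, c) in CCW
# order, it is positive exactly when d lies strictly inside the circumcircle of (a, b, c).
# A non-positive value therefore means d is on or outside that circle, which is the local
# Delaunay condition for keeping the diagonal (a, c).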
"""
Pseudo-random deterministic hash from int.
"""
def random_hash_from_int(x):
x = ((x >> 16) ^ x) * 0x45d9f3b
x = ((x >> 16) ^ x) * 0x45d9f3b
x = (x >> 16) ^ x
return x & 0xffffffff
"""
Pseudo-random deterministic uniform 0..1 value from int.
"""
def random_uniform_from_int(x):
return float(random_hash_from_int(x)) / 0xffffffff
"""
Returns origin and scale matrix based on bounds of the polygon
"""
def _get_polygon_transform(face):
xmin = float_info.max
ymin = float_info.max
xmax = float_info.min
ymax = float_info.min
for loop in face.loops:
xmin = min(xmin, loop.vert.co.x)
xmax = max(xmax, loop.vert.co.x)
ymin = min(ymin, loop.vert.co.y)
ymax = max(ymax, loop.vert.co.y)
scale_x = 1.0/(xmax - xmin) if (xmax - xmin) > degenerate_epsilon else 1.0
scale_y = 1.0/(ymax - ymin) if (ymax - ymin) > degenerate_epsilon else 1.0
return Vector((xmin, ymin)), Matrix(((scale_x, 0), (0, scale_y)))
"""
Utility class for generating a triangle mesh that satisfies the Delaunay condition.
Voronoi diagrams can be constructed from the Delaunay mesh.
"""
class Triangulator:
"""
UV Layers to generate in the output mesh.
"""
uv_layers = set()
"""
If True then Voronoi cells will be created with a triangle fan instead of ngons.
"""
triangulate_cells = False
"""
Lower bounds of the point set.
Must be a tuple of two floats, or None if unbounded.
"""
bounds_min = None
"""
Upper bounds of the point set.
Must be a tuple of two floats, or None if unbounded.
"""
bounds_max = None
"""
Input point set, must be a list of InputPoint.
"""
points = None
"""
BMesh containing the Delaunay triangulation of input points.
"""
triangulation_bm = None
"""
BMesh containing the Voronoi graph for the triangulation.
"""
voronoi_bm = None
"""
List of circumcircles of the Delaunay triangulation.
Each entry is a tuple (center vector, radius).
"""
circumcircles = None
"""
Vertex group index for original points.
"""
vg_original = None
"""
Vertex group index for repeated points.
"""
vg_repeated = None
def __init__(self, uv_layers=set(), triangulate_cells=False, bounds_min=None, bounds_max=None):
self.uv_layers = uv_layers
self.triangulate_cells = triangulate_cells
self.bounds_min = bounds_min
self.bounds_max = bounds_max
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.points = None
self.circumcircles = None
self.vg_original = None
self.vg_repeated = None
if self.triangulation_bm:
self.triangulation_bm.free()
self.triangulation_bm = None
if self.voronoi_bm:
self.voronoi_bm.free()
self.voronoi_bm = None
"""
Set up object ID block data, such as vertex groups.
This must be done so internal bmesh data layers are preserved in the Mesh data block.
"""
def prepare_object(self, obj):
def ensure_vgroup(name):
vg = obj.vertex_groups.get(name)
if vg is None:
vg = obj.vertex_groups.new(name=name)
return vg
# Create vertex groups for point type
self.vg_original = ensure_vgroup("OriginalPoints").index
self.vg_repeated = ensure_vgroup("RepeatedPoints").index
"""
Sort the points list by distance from the center in preparation for the sweephull algorithm.
"""
def _radial_sort_points(self):
points = self.points
center = Vector((0, 0, 0))
for pt in points:
center += pt.co
center /= len(points)
points.sort(key = lambda pt: (pt.co - center).length_squared)
"""
Add vertices to the Delaunay triangulation mesh based on input points.
"""
def _add_point_vertices(self):
bm = self.triangulation_bm
points = self.points
if len(points) < 3:
return
self._radial_sort_points()
dvert_lay = bm.verts.layers.deform.verify()
# Create vertices for all points
for pt in points:
vert = bm.verts.new(pt.co)
dvert = vert[dvert_lay]
if self.vg_original is not None and pt.type == 'ORIGINAL':
dvert[self.vg_original] = 1.0
if self.vg_repeated is not None and pt.type in {'REPEAT_POS', 'REPEAT_NEG'}:
dvert[self.vg_repeated] = 1.0
"""
Construct a triangle mesh using the sweephull method.
The resulting mesh is non-overlapping, but does not satisfy the Delaunay condition yet.
"""
def _do_sweephull(self):
points = self.points
bm = self.triangulation_bm
if len(points) < 3:
return
bm.verts.ensure_lookup_table()
def is_ccw_safe_points(i, j, k):
return is_ccw_safe(points[i].co, points[j].co, points[k].co)
def is_cw_safe_points(i, j, k):
return is_cw_safe(points[i].co, points[j].co, points[k].co)
# Add first 3 verts as the first triangle
start_index = None
for i in range(len(points) - 2):
j = i + 1
k = i + 2
if is_ccw_safe_points(i, j, k):
bm.faces.new((bm.verts[i], bm.verts[j], bm.verts[k]))
convex_hull = [i, j, k]
start_index = i + 3
break
elif is_ccw_safe_points(k, j, i):
bm.faces.new((bm.verts[k], bm.verts[j], bm.verts[i]))
convex_hull = [k, j, i]
start_index = i + 3
break
for point_index in range(start_index, len(points)):
new_convex_hull = []
# Returns true if the point is on the outside of the convex hull edge (ci, cj).
# Indices are in the convex_hull array!
def is_edge_visible(ci, cj):
return is_cw_safe_points(point_index, convex_hull[ci], convex_hull[cj])
was_visible = is_edge_visible(-1, 0) # visibility of last edge
for c in range(len(convex_hull)):
next_c = (c + 1) % len(convex_hull)
# Is the convex hull edge visible from the new point?
is_visible = is_edge_visible(c, next_c)
hull_index = convex_hull[c]
hull_index_next = convex_hull[next_c]
# Connect to visible edges
if is_visible:
bm.faces.new((bm.verts[point_index], bm.verts[hull_index_next], bm.verts[hull_index]))
# Update the convex hull
# Discard vertex if both edge are visible from the new point
if not (was_visible and is_visible):
new_convex_hull.append(hull_index)
# Insert new point at start of visible section
if (not was_visible) and is_visible:
new_convex_hull.append(point_index)
was_visible = is_visible
convex_hull[:] = new_convex_hull
self.add_debug_mesh(bm, "SweepHull")
"""
Iteratively apply the edge flipping method to ensure the Delaunay condition is satisfied for each edge.
The input mesh must be non-overlapping.
"""
def _do_edgeflip(self):
bm = self.triangulation_bm
flipstack = collections.deque(maxlen=len(bm.edges))
for edge in bm.edges:
if not edge.is_boundary:
flipstack.appendleft(edge)
edge.tag = True
while flipstack:
diag = flipstack.pop()
diag.tag = False
verts, edges = get_quad_verts(diag)
# Flipping degenerate triangles can cause overlap
if is_degenerate_triangle(verts[0].co, verts[2].co, verts[3].co) or is_degenerate_triangle(verts[2].co, verts[0].co, verts[1].co):
continue
if not is_delaunay(verts):
bm.edges.remove(diag)
bm.faces.new((verts[0], verts[1], verts[3]))
bm.faces.new((verts[2], verts[3], verts[1]))
for edge in edges:
if not edge.tag and not edge.is_boundary:
flipstack.append(edge)
edge.tag = True
self.add_debug_mesh(bm, "EdgeFlip")
"""
Find and delete duplicate faces in the Delaunay triangulation.
Faces are duplicates if any vertex is in the negative repetition or all vertices are in the positive repetition.
"""
def _prune_duplicate_faces(self):
points = self.points
bm = self.triangulation_bm
bm.verts.index_update()
duplicate_faces = []
for face in bm.faces:
has_original = False
has_repeat_neg = False
for vert in face.verts:
point = points[vert.index]
if point.type == 'ORIGINAL':
has_original = True
if point.type == 'REPEAT_NEG':
has_repeat_neg = True
if (not has_original) or has_repeat_neg:
duplicate_faces.append(face)
bmesh.ops.delete(bm, geom=duplicate_faces, context='FACES')
"""
Create base triangulation mesh, used for both final Delaunay output
as well as input mesh for Voronoi graph construction.
"""
def _create_triangulation(self, points):
self.points = points
# Assign point indices before sorting
for i, p in enumerate(points):
p.index = i
self.triangulation_bm = bmesh.new()
self._add_point_vertices()
self._do_sweephull()
self._do_edgeflip()
"""
Compute the circumcircles for all triangles.
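Each circumcenter is obtained by solving |p - center|^2 = radius^2 for the three triangle
vertices; the determinants below are the Cramer's-rule expressions for that linear system.
The shared denominator ("norm") equals twice the signed triangle area, so norm == 0 means
the vertices are colinear and None is stored instead of a circle.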
"""
def _ensure_circumcircles(self):
if self.circumcircles is not None:
return
del_bm = self.triangulation_bm
self.circumcircles = collections.deque(maxlen=len(del_bm.faces))
for face in del_bm.faces:
a = face.verts[0].co
b = face.verts[1].co
c = face.verts[2].co
La = a.length_squared
Lb = b.length_squared
Lc = c.length_squared
Sx = 0.5 * Matrix(((La, a.y, 1), (Lb, b.y, 1), (Lc, c.y, 1))).determinant()
Sy = 0.5 * Matrix(((a.x, La, 1), (b.x, Lb, 1), (c.x, Lc, 1))).determinant()
norm = Matrix(((a.x, a.y, 1), (b.x, b.y, 1), (c.x, c.y, 1))).determinant()
r0 = Matrix(((a.x, a.y, La), (b.x, b.y, Lb), (c.x, c.y, Lc))).determinant()
if norm != 0:
co = Vector((Sx, Sy, 0)) / norm
r = math.sqrt(abs(r0/norm) + co.length_squared)
self.circumcircles.append((co, r))
else:
self.circumcircles.append(None)
"""
Constructs a triangle mesh that satisfies the Delaunay condition based on the input points.
"""
def construct_delaunay(self, points):
self._create_triangulation(points)
# Remove redundant mesh elements resulting from repetition
self._prune_duplicate_faces()
self._finalize_faces(self.triangulation_bm, 'DELAUNAY')
return self.triangulation_bm
"""
Creates a face for each cell of the Voronoi graph.
"""
def _create_cell_faces(self):
points = self.points
del_bm = self.triangulation_bm
voro_bm = self.voronoi_bm
# Create a vertex for each triangle circumcircle
center_verts = collections.deque(maxlen=len(self.circumcircles))
for circle in self.circumcircles:
if circle:
center_verts.append(voro_bm.verts.new(circle[0]))
else:
center_verts.append(None)
voro_loop = collections.deque()
for vert in del_bm.verts:
# Avoid duplicate faces
point = points[vert.index]
if point.type != 'ORIGINAL':
continue
if len(vert.link_loops) < 3:
continue
# Gather circumcenter vertices of adjacent faces
voro_loop.clear()
# Find the beginning of the loop fan, in case there is a boundary edge
loop_start = vert.link_loops[0] # arbitrary start if there is no boundary edge
loop_end = loop_start
for loop in vert.link_loops:
# Boundary edge means the loop points at itself in the radial list
if loop.link_loop_radial_next == loop:
loop_start = loop
# Loop on the next edge of the fan in ccw direction
next_edge_loop = loop.link_loop_prev
if next_edge_loop.link_loop_radial_next == next_edge_loop:
loop_end = next_edge_loop
loop = loop_start
while True:
center_vert = center_verts[loop.face.index]
if center_vert:
voro_loop.append(center_vert)
# Loop on the next edge of the fan in ccw direction
next_edge_loop = loop.link_loop_prev
loop = next_edge_loop.link_loop_radial_next
if loop == loop_end:
break
# Can still get a loop with <3 verts in corner cases (colinear vertices)
if len(voro_loop) >= 3:
voro_bm.faces.new(voro_loop)
self.add_debug_mesh(voro_bm, "VoronoiMesh")
"""
Constructs a Voronoi mesh based on the input points and a triangulated mesh.
The del_bm mesh must be a valid Delaunay triangulation.
"""
def construct_voronoi(self, points):
self._create_triangulation(points)
del_bm = self.triangulation_bm
del_bm.verts.index_update()
del_bm.faces.index_update()
self._ensure_circumcircles()
self.voronoi_bm = bmesh.new()
self._create_cell_faces()
self._finalize_faces(self.voronoi_bm, 'VORONOI')
return self.voronoi_bm
"""
Create final face geometry and add data layers.
"""
def _finalize_faces(self, bm, graph_type):
if graph_type not in {'DELAUNAY', 'VORONOI'}:
raise Exception("Invalid graph type {}".format(graph_type))
uv_layer_map = dict()
for uv_layer_id in self.uv_layers:
uv_layer_name = props.find_enum_name(props.output_uv_layers_items, uv_layer_id)
uv_layer_map[uv_layer_id] = bm.loops.layers.uv.new(uv_layer_name)
if 'BOUNDS' in uv_layer_map:
if self.bounds_min is None or self.bounds_max is None:
bounds_loc = Vector((0, 0))
bounds_mat = Matrix(((1, 0), (0, 1)))
else:
extent = Vector(self.bounds_max[:]) - Vector(self.bounds_min[:])
bounds_loc = Vector(self.bounds_min[:])
bounds_mat = Matrix(((1.0/extent.x if extent.x > degenerate_epsilon else 1, 0), (0, 1.0/extent.y if extent.y > degenerate_epsilon else 1)))
if 'CELL_CENTERED' in uv_layer_map:
self._ensure_circumcircles()
bm.verts.index_update()
bm.faces.index_update()
def assign_data_layers(face, orig_face_index, fan_loop, poly_loc, poly_mat):
for layer_id, layer in uv_layer_map.items():
if layer_id == 'POLYGON':
for loop in face.loops:
loop[layer].uv = poly_mat @ (loop.vert.co.xy - poly_loc)
elif layer_id == 'BOUNDS':
for loop in face.loops:
loop[layer].uv = bounds_mat @ (loop.vert.co.xy - bounds_loc)
elif layer_id == 'CELL_CENTERED':
if graph_type == 'DELAUNAY':
# Cell center is the center of the circum-circle
circle = self.circumcircles[orig_face_index]
center = Vector((0, 0)) if circle is None else circle[0].xy
elif graph_type == 'VORONOI':
# Cell center is the input point
center = self.points[orig_face_index].co.xy
for loop in face.loops:
loop[layer].uv = loop.vert.co.xy - center
elif layer_id == 'EDGE_CENTERED':
if fan_loop is not None:
c0 = fan_loop.vert.co.xy
c1 = fan_loop.link_loop_next.vert.co.xy
c2 = fan_loop.link_loop_next.link_loop_next.vert.co.xy
base = c2 - c1
base_length = base.length
span = c1 - c0
if base_length > degenerate_epsilon:
u1 = 0
v1 = span.dot(base) / base_length
u0_squared = span.length_squared - v1*v1
u0 = math.sqrt(u0_squared) if u0_squared > degenerate_epsilon else 0.0
v0 = 0
u2 = 0
v2 = base_length + v1
fan_loop[layer].uv = Vector((u0, v0))
fan_loop.link_loop_next[layer].uv = Vector((u1, v1))
fan_loop.link_loop_next.link_loop_next[layer].uv = Vector((u2, v2))
elif layer_id == 'POINT_INDEX':
if graph_type == 'VORONOI':
point = self.points[orig_face_index]
for loop in face.loops:
loop[layer].uv = Vector((point.index, 0))
elif layer_id == 'POINT_ID':
if graph_type == 'VORONOI':
point = self.points[orig_face_index]
for loop in face.loops:
loop[layer].uv = Vector((point.id, 0))
elif layer_id == 'RANDOM':
if graph_type == 'VORONOI':
point = self.points[orig_face_index]
r = random_uniform_from_int(point.id)
for loop in face.loops:
loop[layer].uv = Vector((r, 0))
if self.triangulate_cells:
# Cache the face list: triangulating cells will add more faces that must not be triangulated further!
orig_faces = bm.faces[:]
for face in orig_faces:
poly_loc, poly_mat = _get_polygon_transform(face) if 'POLYGON' in uv_layer_map else (None, None)
if graph_type == 'DELAUNAY':
# Fan around incenter point (intersection of half-angles).
# This ensures there is no discontinuity of the edge distance at the subdivisions.
c0 = face.verts[0].co
c1 = face.verts[1].co
c2 = face.verts[2].co
w0 = (c2 - c1).length
w1 = (c0 - c2).length
w2 = (c1 - c0).length
center = (c0 * w0 + c1 * w1 + c2 * w2) / (w0 + w1 + w2)
elif graph_type == 'VORONOI':
# Fan around cell point
center = self.points[face.index].co
center_vert = bm.verts.new(center)
for loop in face.loops:
tface = bm.faces.new((loop.vert, loop.link_loop_next.vert, center_vert))
assign_data_layers(tface, face.index, tface.loops[2], poly_loc, poly_mat)
bm.faces.remove(face)
else:
for face in bm.faces:
poly_loc, poly_mat = _get_polygon_transform(face) if 'POLYGON' in uv_layer_map else (None, None)
assign_data_layers(face, face.index, None, poly_loc, poly_mat)
"""
Debug function for recording intermediate mesh results.
    Default implementation is a no-op and must be replaced externally.
"""
def add_debug_mesh(self, bm, name):
pass
|
"""hw2.py
by Tianqi Fang
11/06/2019
This module has a function that creates a DataFrame from a URL pointing to a CSV file.
It also has a function called test_create_dataframe that takes two inputs,
a pandas DataFrame and a list of column names, and performs three tests.
The function returns True only if all three of the following tests hold:
1. The DataFrame contains only the columns specified in the second argument.
2. The values in each column have the same Python type.
3. There are at least 10 rows in the DataFrame.
"""
import pandas as pd
def read_url(url):
"""Read a url to return a pandas data frame."""
data_frame = pd.read_csv(url)
return data_frame
def test_create_dataframe(data_frame, list_col_name):
"""Read a pandas data frame and a list of column names
to do some tests and return true or false.
"""
if data_frame.columns.tolist() != list_col_name: # test 1: same column names as provided.
return False
for i in range(len(list(data_frame.dtypes))): # test 2: same type in each column.
for j in range(list(data_frame.count())[i]):
if not isinstance(data_frame.iloc[j, i], type(data_frame.iloc[0, i])):
return False
if data_frame.shape[0] < 10: # test 3: at least 10 rows
return False
return True
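# Minimal self-check (illustrative sketch, not part of the original assignment):
if __name__ == "__main__":
    DEMO_DF = pd.DataFrame({
        "a": list(range(12)),                # 12 rows satisfies the >= 10 row test
        "b": [float(i) for i in range(12)],  # homogeneous float column
    })
    print(test_create_dataframe(DEMO_DF, ["a", "b"]))  # expected: True
    print(test_create_dataframe(DEMO_DF, ["a"]))       # expected: False (column mismatch)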
|
"""
Example ackermann_kinematic.py
Author: Joshua A. Marshall <joshua.marshall@queensu.ca>
GitHub: https://github.com/botprof/agv-examples
"""
# %%
# SIMULATION SETUP
import numpy as np
import matplotlib.pyplot as plt
from mobotpy.integration import rk_four
from mobotpy.models import Ackermann
# Set the simulation time [s] and the sample period [s]
SIM_TIME = 15.0
T = 0.1
# Create an array of time values [s]
t = np.arange(0.0, SIM_TIME, T)
N = np.size(t)
# %%
# RUN SIMULATION
# Initialize arrays that will be populated with our inputs and states
x = np.zeros((4, N))
u = np.zeros((2, N))
phi_L = np.zeros(N)
phi_R = np.zeros(N)
# Set the wheelbase and track of the vehicle [m]
ELL_W = 2.50
ELL_T = 1.75
# Set the initial pose [m, m, rad, rad], velocities [m/s, rad/s]
x[0, 0] = 0.0
x[1, 0] = 0.0
x[2, 0] = np.pi / 2.0
x[3, 0] = 0.0
u[0, 0] = 5.0
u[1, 0] = 0
# Let's now use the class Ackermann for plotting
vehicle = Ackermann(ELL_W, ELL_T)
# Run the simulation
for k in range(1, N):
x[:, k] = rk_four(vehicle.f, x[:, k - 1], u[:, k - 1], T)
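    # Ackermann steering geometry: convert the bicycle-model steering angle
    # x[3] into individual left/right front-wheel angles using the wheelbase
    # ELL_W and track ELL_T (the inner wheel follows a tighter radius, so the
    # two angles differ).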
phi_L[k] = np.arctan(
2 * ELL_W * np.tan(x[3, k]) / (2 * ELL_W - ELL_T * np.tan(x[3, k]))
)
phi_R[k] = np.arctan(
2 * ELL_W * np.tan(x[3, k]) / (2 * ELL_W + ELL_T * np.tan(x[3, k]))
)
u[0, k] = 5.0
u[1, k] = -0.25 * np.sin(2.0 * t[k])
# %%
# MAKE SOME PLOTS
# Change some plot settings (optional)
plt.rc("text", usetex=True)
plt.rc("text.latex", preamble=r"\usepackage{cmbright,amsmath,bm}")
plt.rc("savefig", format="pdf")
plt.rc("savefig", bbox="tight")
# Plot the states as a function of time
fig1 = plt.figure(1)
fig1.set_figheight(6.4)
ax1a = plt.subplot(411)
plt.plot(t, x[0, :])
plt.grid(color="0.95")
plt.ylabel(r"$x$ [m]")
plt.setp(ax1a, xticklabels=[])
ax1b = plt.subplot(412)
plt.plot(t, x[1, :])
plt.grid(color="0.95")
plt.ylabel(r"$y$ [m]")
plt.setp(ax1b, xticklabels=[])
ax1c = plt.subplot(413)
plt.plot(t, x[2, :] * 180.0 / np.pi)
plt.grid(color="0.95")
plt.ylabel(r"$\theta$ [deg]")
plt.setp(ax1c, xticklabels=[])
ax1c = plt.subplot(414)
plt.plot(t, phi_L * 180.0 / np.pi, "C1", label=r"$\phi_L$")
plt.plot(t, phi_R * 180.0 / np.pi, "C2", label=r"$\phi_R$")
plt.grid(color="0.95")
plt.ylabel(r"$\phi_L,\phi_R$ [deg]")
plt.xlabel(r"$t$ [s]")
plt.legend()
# Save the plot
plt.savefig("../agv-book/figs/ch3/ackermann_kinematic_fig1.pdf")
# Plot the position of the vehicle in the plane
fig2 = plt.figure(2)
plt.plot(x[0, :], x[1, :])
plt.axis("equal")
X_BL, Y_BL, X_BR, Y_BR, X_FL, Y_FL, X_FR, Y_FR, X_BD, Y_BD = vehicle.draw(
x[0, 0], x[1, 0], x[2, 0], phi_L[0], phi_R[0]
)
plt.fill(X_BL, Y_BL, "k")
plt.fill(X_BR, Y_BR, "k")
plt.fill(X_FR, Y_FR, "k")
plt.fill(X_FL, Y_FL, "k")
plt.fill(X_BD, Y_BD, "C2", alpha=0.5, label="Start")
X_BL, Y_BL, X_BR, Y_BR, X_FL, Y_FL, X_FR, Y_FR, X_BD, Y_BD = vehicle.draw(
x[0, N - 1], x[1, N - 1], x[2, N - 1], phi_L[N - 1], phi_R[N - 1]
)
plt.fill(X_BL, Y_BL, "k")
plt.fill(X_BR, Y_BR, "k")
plt.fill(X_FR, Y_FR, "k")
plt.fill(X_FL, Y_FL, "k")
plt.fill(X_BD, Y_BD, "C3", alpha=0.5, label="End")
plt.xlabel(r"$x$ [m]")
plt.ylabel(r"$y$ [m]")
plt.legend()
# Save the plot
plt.savefig("../agv-book/figs/ch3/ackermann_kinematic_fig2.pdf")
# Show all the plots to the screen
plt.show()
# %%
# MAKE AN ANIMATION
# Create and save the animation
ani = vehicle.animate(
x,
T,
phi_L,
phi_R,
True,
"../agv-book/gifs/ch3/ackermann_kinematic.gif",
)
# Show the movie to the screen
plt.show()
# # Show animation in HTML output if you are using IPython or Jupyter notebooks
# plt.rc('animation', html='jshtml')
# display(ani)
# plt.close()
|
"""
Augmenters that apply to a group of augmentations, like selecting
an augmentation from a list, or applying all the augmentations in
a list sequentially
To use the augmenters, clone the complete repo and use
`from vidaug import augmenters as va`
and then e.g. :
seq = va.Sequential([ va.HorizontalFlip(),
va.VerticalFlip() ])
List of augmenters:
* Sequential
* OneOf
* SomeOf
* Sometimes
* AllOf
"""
import numpy as np
import PIL
import random
class Sequential(object):
"""
Composes several augmentations together.
Args:
transforms (list of "Augmentor" objects): The list of augmentations to compose.
random_order (bool): Whether to apply the augmentations in random order.
"""
def __init__(self, transforms, random_order=False):
self.transforms = transforms
self.rand = random_order
def __call__(self, clip):
if self.rand:
rand_transforms = self.transforms[:]
random.shuffle(rand_transforms)
for t in rand_transforms:
clip = t(clip)
else:
for t in self.transforms:
clip = t(clip)
return clip
class AllOf(object):
"""
    Applies all the augmentations in the list, one at a time, and returns the list of augmented clips.
Args:
transforms (list of "Augmentor" objects): The list of augmentations to compose.
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, clip):
output_clips = []
for t in self.transforms:
output_clip_of_t = t(clip)
output_clips.append(output_clip_of_t)
return output_clips
class OneOf(object):
"""
Selects one augmentation from a list.
Args:
transforms (list of "Augmentor" objects): The list of augmentations to compose.
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, clip):
select = random.choice(self.transforms)
clip = select(clip)
return clip
class SomeOf(object):
"""
    Selects a given number of augmentations from a list.
Args:
transforms (list of "Augmentor" objects): The list of augmentations.
N (int): The number of augmentations to select from the list.
random_order (bool): Whether to apply the augmentations in random order.
"""
def __init__(self, transforms, N, random_order=True):
self.transforms = transforms
self.rand = random_order
if N > len(transforms):
            raise TypeError('The number of augmentations to apply (N) must not exceed the number of available augmentations')
else:
self.N = N
def __call__(self, clip):
if self.rand:
tmp = self.transforms[:]
selected_trans = [tmp.pop(random.randrange(len(tmp))) for _ in range(self.N)]
for t in selected_trans:
clip = t(clip)
return clip
else:
indices = [i for i in range(len(self.transforms))]
selected_indices = [indices.pop(random.randrange(len(indices)))
for _ in range(self.N)]
selected_indices.sort()
selected_trans = [self.transforms[i] for i in selected_indices]
for t in selected_trans:
clip = t(clip)
return clip
class Sometimes(object):
"""
Applies an augmentation with a given probability.
Args:
p (float): The probability to apply the augmentation.
transform (an "Augmentor" object): The augmentation to apply.
    Example: Use this transform as follows:
sometimes = lambda aug: va.Sometimes(0.5, aug)
sometimes(va.HorizontalFlip)
"""
def __init__(self, p, transform):
self.transform = transform
        if (p > 1.0) or (p < 0.0):
            raise TypeError('Expected p to be in the interval [0.0, 1.0], ' +
                            'but got p = {0}'.format(p))
else:
self.p = p
def __call__(self, clip):
if random.random() < self.p:
clip = self.transform(clip)
return clip
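# Minimal usage sketch (illustrative; assumes the repo layout described in the
# module docstring, i.e. `from vidaug import augmenters as va`, and a `clip`
# given as a list of PIL images or a numpy array of frames):
#
#   sometimes = lambda aug: va.Sometimes(0.5, aug)
#   seq = va.Sequential([
#       sometimes(va.HorizontalFlip()),
#       va.SomeOf([va.HorizontalFlip(), va.VerticalFlip()], N=1),
#   ])
#   augmented_clip = seq(clip)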
|
import ntpath
import os
import torch
from torch import nn
from tqdm import tqdm
from configs import decode_config
from configs.munit_configs import get_configs
from configs.single_configs import SingleConfigs
from distillers.base_munit_distiller import BaseMunitDistiller
from metric import get_fid
from utils import util
from argparse import ArgumentParser
class MunitSupernet(BaseMunitDistiller):
@staticmethod
def modify_commandline_options(parser, is_train):
assert is_train
parser = super(MunitSupernet, MunitSupernet).modify_commandline_options(parser, is_train)
assert isinstance(parser, ArgumentParser)
parser.set_defaults(student_netG='super_munit', student_ngf=64)
return parser
def __init__(self, opt):
assert 'super' in opt.student_netG
super(MunitSupernet, self).__init__(opt)
self.best_fid_largest = 1e9
self.best_fid_smallest = 1e9
self.fids_largest, self.fids_smallest = [], []
if opt.config_set is not None:
assert opt.config_str is None
self.configs = get_configs(opt.config_set)
self.opt.eval_mode = 'both'
else:
assert opt.config_str is not None
self.configs = SingleConfigs(decode_config(opt.config_str))
self.opt.eval_mode = 'largest'
def forward(self, config, need_style_encoder=False):
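        # Descriptive summary of the supernet forward pass:
        #   1) push the sampled channel config into the student generator,
        #   2) either distill the style encoder (teacher vs. student style codes
        #      from real_B) or share one random style code between both,
        #   3) encode content from real_A with teacher and student and decode
        #      the fake images used by the distillation losses.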
opt = self.opt
batch_size = self.real_A.size(0)
style_dim = opt.style_dim
if isinstance(self.netG_student, nn.DataParallel):
self.netG_student.module.configs = config
else:
self.netG_student.configs = config
self.netG_student(self.real_B)
if need_style_encoder:
with torch.no_grad():
_, self.s_teacher = self.netG_teacher.encode(self.real_B, need_content=False)
_, self.s_student = self.netG_student.encode(self.real_B, need_content=False)
else:
self.s_teacher = torch.randn(batch_size, style_dim, 1, 1, device=self.device)
self.s_student = self.s_teacher
with torch.no_grad():
self.c_teacher, _ = self.netG_teacher.encode(self.real_A, need_style=False)
self.Tfake_B = self.netG_teacher.decode(self.c_teacher, self.s_teacher)
self.c_student, _ = self.netG_student.encode(self.real_A, need_style=False)
self.Sfake_B = self.netG_student.decode(self.c_student, self.s_student)
def forward_A(self, c):
return self.netA(c, {'channel': self.netA.out_channels})
def optimize_parameters(self, steps):
need_style_encoder = False if self.opt.student_no_style_encoder \
else steps % self.opt.style_encoder_step != 0
self.optimizer_D.zero_grad()
self.optimizer_G.zero_grad()
config = self.configs.sample()
self.forward(config=config,need_style_encoder=need_style_encoder)
util.set_requires_grad(self.netD, True)
self.backward_D()
util.set_requires_grad(self.netD, False)
self.backward_G(need_style_encoder=need_style_encoder)
self.optimizer_D.step()
self.optimizer_G.step()
def evaluate_model(self, step):
ret = {}
self.is_best = False
save_dir = os.path.join(self.opt.log_dir, 'eval', str(step))
os.makedirs(save_dir, exist_ok=True)
self.netG_student.eval()
if self.opt.eval_mode == 'both':
settings = ('largest', 'smallest')
else:
settings = (self.opt.eval_mode,)
for config_name in settings:
config = self.configs(config_name)
fakes, names = [], []
cnt = 0
for i, data_i in enumerate(tqdm(self.eval_dataloader, desc='Eval ', position=2, leave=False)):
if self.opt.dataset_mode == 'aligned':
self.set_input(data_i)
else:
self.set_single_input(data_i)
self.test(config)
fakes.append(self.Sfake_B.cpu())
for j in range(len(self.image_paths)):
short_path = ntpath.basename(self.image_paths[j])
name = os.path.splitext(short_path)[0]
names.append(name)
if cnt < 10:
input_im = util.tensor2im(self.real_A[j])
Sfake_im = util.tensor2im(self.Sfake_B[j])
Tfake_im = util.tensor2im(self.Tfake_B[j])
util.save_image(input_im, os.path.join(save_dir, 'input', '%s.png') % name, create_dir=True)
util.save_image(Sfake_im, os.path.join(save_dir, 'Sfake', '%s.png' % name), create_dir=True)
util.save_image(Tfake_im, os.path.join(save_dir, 'Tfake', '%s.png' % name), create_dir=True)
if self.opt.dataset_mode == 'aligned':
real_im = util.tensor2im(self.real_B[j])
util.save_image(real_im, os.path.join(save_dir, 'real', '%s.png' % name), create_dir=True)
cnt += 1
fid = get_fid(fakes, self.inception_model, self.npz, device=self.device,
batch_size=self.opt.eval_batch_size, tqdm_position=2)
if fid < getattr(self, 'best_fid_%s' % config_name):
self.is_best = True
setattr(self, 'best_fid_%s' % config_name, fid)
fids = getattr(self, 'fids_%s' % config_name)
fids.append(fid)
if len(fids) > 3:
fids.pop(0)
ret['metric/fid_%s' % config_name] = fid
ret['metric/fid_%s-mean' % config_name] = sum(getattr(self, 'fids_%s' % config_name)) / len(
getattr(self, 'fids_%s' % config_name))
ret['metric/fid_%s-best' % config_name] = getattr(self, 'best_fid_%s' % config_name)
self.netG_student.train()
return ret
def test(self, config):
with torch.no_grad():
self.forward(config)
def load_networks(self, verbose=True):
super(MunitSupernet, self).load_networks()
|
hiragana = "ぁあぃいぅうぇえぉおかがきぎくぐけげこごさざしじすずせぜそぞただちぢっつづてでとどなにぬねのはばぱひびぴふぶぷへべぺほぼぽまみむめもゃやゅゆょよらりるれろゎわゐゑをん"
katakana = "ァアィイゥウェエォオカガキギクグケゲコゴサザシジスズセゼソゾタダチヂッツヅテデトドナニヌネノハバパヒビピフブプヘベペホボポマミムメモャヤュユョヨラリルレロヮワヰヱヲンヴ"
hankana = ""
suuji = "01234567890123456789"
# Sort the characters of a Japanese string
def sort_str(string, reverse=False):
return "".join(sorted(string, reverse=reverse))
# True if the string consists only of hiragana
def ishira(strj):
return all([ch in hiragana for ch in strj])
# True if the string consists only of katakana
def iskata(strj):
return all([ch in katakana for ch in strj])
# True if the string consists only of katakana and hiragana
def iskatahira(strj):
return all([ch in katakana or ch in hiragana for ch in strj])
# True if the string consists only of kanji
def iskanji(strj):
return all(["一" <= ch <= "龥" for ch in strj])
# Convert katakana to hiragana
def kata_to_hira(strj):
return "".join([chr(ord(ch) - 96) if ("ァ" <= ch <= "ン") else ch for ch in strj])
# Convert hiragana to katakana
def hira_to_kata(strj):
return "".join([chr(ord(ch) + 96) if ("ぁ" <= ch <= "ん") else ch for ch in strj])
# Convert full-width digits to half-width digits
def hankaku_suuji(strj):
dic2 = str.maketrans("0123456789", "0123456789")
return strj.translate(dic2)
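# Small usage sketch of the helpers above (illustrative):
if __name__ == "__main__":
    print(hira_to_kata("とうきょう"))  # -> トウキョウ
    print(kata_to_hira("トウキョウ"))  # -> とうきょう
    print(hankaku_suuji("１２３"))     # -> 123
    print(ishira("とうきょう"), iskata("トウキョウ"))  # -> True True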
|
import random
import itertools
NAMES = ['arnold schwarzenegger', 'alec baldwin', 'bob belderbos',
'julian sequeira', 'sandra bullock', 'keanu reeves',
'julbob pybites', 'bob belderbos', 'julian sequeira',
'al pacino', 'brad pitt', 'matt damon', 'brad pitt']
# name_titles = [name.title() for name in name_list]
# title_names = []
#
#
# def titler(name_list=name_list):
# for first, last in name_list:
# fs_list = (first.title(), last.title())
# title_names.append(fs_list)
# return title_names
#
#
# titles = titler()
# print(titles[0])
#
# arnold = titles[0]
# print(arnold[0])
# new_names2 = [name.title() for name in names if name[0] in first_half_alphabet]
name_list = [name.title() for name in NAMES]
print(name_list)
def reverse_first_last_names(name):
first, last = name.split()
return f'{last} {first}'
print([reverse_first_last_names(name) for name in NAMES])
def gen_pairs():
# again a list comprehension is great here to get the first names
# and title case them in just 1 line of code (this comment took 2)
first_names = [name.split()[0].title() for name in NAMES]
while True:
# added this when I saw Julian teaming up with Julian (always test your code!)
first, second = None, None
while first == second:
first, second = random.sample(first_names, 2)
yield f'{first} teams up with {second}'
pairs = gen_pairs()
for _ in range(10):
print(next(pairs))
first_ten = itertools.islice(pairs, 10)
print(list(first_ten))
|
#!/usr/bin/env python
import sys
import chpl_mem, overrides
from utils import error, memoize
@memoize
def get():
jemalloc_val = overrides.get('CHPL_JEMALLOC')
mem_val = chpl_mem.get('target')
if not jemalloc_val:
if mem_val == 'jemalloc':
jemalloc_val = 'jemalloc'
else:
jemalloc_val = 'none'
if mem_val == 'jemalloc' and jemalloc_val == 'none':
error("CHPL_JEMALLOC must not be 'none' when CHPL_MEM is jemalloc")
if mem_val != 'jemalloc' and jemalloc_val != 'none':
error("CHPL_JEMALLOC must not be none when CHPL_MEM is not jemalloc")
return jemalloc_val
def _main():
jemalloc_val = get()
sys.stdout.write("{0}\n".format(jemalloc_val))
if __name__ == '__main__':
_main()
|
"""
Module for entities implemented using the
number platform (https://www.home-assistant.io/integrations/number/).
"""
from __future__ import annotations
import logging
from typing import Any
from hahomematic.const import ATTR_HM_VALUE, HmPlatform
import hahomematic.device as hm_device
from hahomematic.entity import GenericEntity
_LOGGER = logging.getLogger(__name__)
class HmNumber(GenericEntity[float]):
"""
Implementation of a number.
This is a default platform that gets automatically generated.
"""
def __init__(
self,
device: hm_device.HmDevice,
unique_id: str,
address: str,
parameter: str,
parameter_data: dict[str, Any],
):
super().__init__(
device=device,
unique_id=unique_id,
address=address,
parameter=parameter,
parameter_data=parameter_data,
platform=HmPlatform.NUMBER,
)
async def set_state(self, value: float) -> None:
"""Set the state of the entity."""
# pylint: disable=no-else-return
if value is not None and self._min <= value <= self._max:
await self.send_value(value)
return
elif self._special:
if [sv for sv in self._special.values() if value == sv[ATTR_HM_VALUE]]:
await self.send_value(value)
return
_LOGGER.error(
"number: Invalid value: %s (min: %s, max: %s, special: %s)",
value,
self._min,
self._max,
self._special,
)
|
class Solution(object):
def searchMatrix(self, matrix, target):
"""
:type matrix: List[List[int]]
:type target: int
:rtype: bool
"""
if not matrix:
return False
elif not matrix[0]:
return False
elif target <= matrix[0][-1]:
return target in matrix[0]
elif target > matrix[0][-1]:
matrix.pop(0)
return self.searchMatrix(matrix, target)
|
from django.http import HttpRequest
from math import ceil
from nltk.tokenize.regexp import RegexpTokenizer
import os
import re
from search.models import Search, Field
def url_part_escape(orig):
"""
simple encoding for url-parts where all non-alphanumerics are
wrapped in e.g. _xxyyzz_ blocks w/hex UTF-8 xx, yy, zz values
used for safely including arbitrary unicode as part of a url path
all returned characters will be in [a-zA-Z0-9_-]
"""
return '_'.join(
s.hex() if i % 2 else s.decode('ascii')
for i, s in enumerate(
re.split(b'([^-a-zA-Z0-9]+)', orig.encode('utf-8'))
)
)
def url_part_unescape(urlpart):
"""
reverse url_part_escape
"""
return ''.join(
bytes.fromhex(s).decode('utf-8') if i % 2 else s
for i, s in enumerate(urlpart.split('_'))
)
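# Round-trip sketch (illustrative): url_part_escape('données/2021') yields
# 'donn_c3a9_es_2f_2021', and url_part_unescape() restores the original string,
# so arbitrary unicode can be embedded safely in a URL path segment.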
# Credit to https://gist.github.com/kgriffs/c20084db6686fee2b363fdc1a8998792 for this function
def uuid_pattern(version):
return re.compile(
(
'[a-f0-9]{8}-' +
'[a-f0-9]{4}-' +
version + '[a-f0-9]{3}-' +
'[89ab][a-f0-9]{3}-' +
'[a-f0-9]{12}$'
),
re.IGNORECASE
)
def calc_pagination_range(num_found: int, pagesize, current_page, delta=2):
# @TODO This is not very efficient - could be refactored
pages = int(ceil(num_found / pagesize))
if current_page > pages:
current_page = pages
elif current_page < 1:
current_page = 1
left = current_page - delta
right = current_page + delta + 1
pagination = []
spaced_pagination = []
for p in range(1, pages + 1):
if (p == 1) or (p == pages) or (left <= p < right):
pagination.append(p)
last = None
for p in pagination:
if last:
if p - last == 2:
spaced_pagination.append(last + 1)
elif p - last != 1:
spaced_pagination.append(0)
spaced_pagination.append(p)
last = p
return spaced_pagination
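# Worked example (illustrative): num_found=1000, pagesize=10, current_page=50, delta=2
# returns [1, 0, 48, 49, 50, 51, 52, 0, 100], where 0 marks a gap ("...") in the UI.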
def calc_starting_row(page_num, rows_per_page=10):
"""
Calculate a starting row for the Solr search results. We only retrieve one page at a time
:param page_num: Current page number
:param rows_per_page: number of rows per page
    :return: tuple of (starting row, adjusted page number)
"""
page = 1
try:
page = int(page_num)
except ValueError:
pass
if page < 1:
page = 1
elif page > 100000: # @magic_number: arbitrary upper range
page = 100000
return rows_per_page * (page - 1), page
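# Example (illustrative): calc_starting_row(3) returns (20, 3) with the default 10 rows per page.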
def get_search_terms(search_text: str):
# Get any search terms
    tr = RegexpTokenizer(r'[^"\s]\S*|".+?"', gaps=False)
# Respect quoted strings
search_terms = tr.tokenize(search_text)
if len(search_terms) == 0:
solr_search_terms = "*"
else:
solr_search_terms = ' '.join(search_terms)
return solr_search_terms
def get_query_fields(request: HttpRequest, fields: dict):
qf = ['id']
for f in fields:
if fields[f].solr_field_lang in [request.LANGUAGE_CODE, 'bi']:
qf.append(f)
if fields[f].solr_extra_fields:
if fields[f].solr_field_lang == request.LANGUAGE_CODE:
qf.extend(fields[f].solr_extra_fields.split(","))
else:
copy_fields = fields[f].solr_extra_fields.split(",")
for copy_field in copy_fields:
if copy_field.endswith("_" + request.LANGUAGE_CODE):
qf.append(copy_field)
if fields[f].solr_field_is_coded:
code_value_field = "{0}_{1}".format(f, request.LANGUAGE_CODE)
if code_value_field not in qf:
qf.append(code_value_field)
return qf
def get_mlt_fields(request: HttpRequest, fields: dict):
qf = ['id']
for f in fields:
if fields[f].solr_field_lang in [request.LANGUAGE_CODE, 'bi']:
            if fields[f].solr_field_type in ['search_text_en', 'search_text_fr', 'string']:
qf.append(f)
return qf
def create_solr_query(request: HttpRequest, search: Search, fields: dict, Codes: dict, facets: list, start_row: int,
rows: int, record_id: str, export=False, highlighting=False, default_sort='score desc'):
"""
Create a complete query to send to the SolrClient query.
:param request:
:param search:
:param fields:
:param Codes:
:param facets:
:param start_row:
:param rows:
:param record_id:
:param export: Set to true if constructing the query for a /export Solr handler query
:param highlighting: set to true if the query should include search term highlighting
:return:
"""
using_facets = len(facets) > 0
# Look for known fields in the GET request
known_fields = {}
solr_query = {'q': '*', 'defType': 'edismax', 'sow': True}
for request_field in request.GET.keys():
if request_field == 'search_text' and not record_id:
solr_query['q'] = get_search_terms(request.GET.get('search_text'))
elif request_field == 'sort' and not record_id:
if request.LANGUAGE_CODE == 'fr':
solr_query['sort'] = request.GET.get('sort') if request.GET.get('sort') in search.results_sort_order_fr.split(',') else default_sort
else:
solr_query['sort'] = request.GET.get('sort') if request.GET.get('sort') in search.results_sort_order_en.split(',') else default_sort
elif request_field in fields:
known_fields[request_field] = request.GET.get(request_field).split('|')
# If sort not specified in the request, then use the default
if 'sort' not in solr_query:
solr_query['sort'] = default_sort
# This happens for record reviews
if record_id:
#record_id_esc = url_part_escape(record_id)
solr_query['q'] = 'id:"{0}"'.format(record_id)
solr_query['q.op'] = "AND"
# Create a Solr query field list based on the Fields Model. Expand the field list where needed
solr_query['qf'] = get_query_fields(request, fields)
if export:
ef = ['id']
for f in fields:
if fields[f].solr_field_lang in [request.LANGUAGE_CODE, 'bi']:
if fields[f].solr_field_type in ['string', 'pint', 'pfloat', 'pdate']:
ef.append(f)
solr_query['fl'] = ",".join(ef)
else:
solr_query['fl'] = ",".join(solr_query['qf'])
if not export:
solr_query['start'] = start_row
solr_query['rows'] = rows
if not export and highlighting:
hl_fields = []
for field in fields:
if fields[field].solr_field_type in ["search_text_en", "search_text_fr", "string", 'text_general']:
hl_fields.append(field)
if fields[field].solr_extra_fields:
for extra_field in fields[field].solr_extra_fields.split(","):
if extra_field.endswith("_en") or extra_field.endswith("_fr"):
hl_fields.append(extra_field.strip())
solr_query.update({
'hl': 'on',
'hl.method': 'original',
'hl.simple.pre': '<mark>',
'hl.simple.post': '</mark>',
'hl.snippets': 10,
'hl.fl': hl_fields,
'hl.highlightMultiTerm': True,
})
# Set a default sort order
if 'sort' not in solr_query:
solr_query['sort'] = 'score desc'
if using_facets:
solr_query['facet'] = True
solr_query['facet.sort'] = 'index'
fq = []
ff = []
for facet in facets:
solr_query['f.{0}.facet.sort'.format(facet)] = fields[facet].solr_facet_sort
solr_query['f.{0}.facet.limit'.format(facet)] = fields[facet].solr_facet_limit
if facet in known_fields:
# Use this query syntax when facet search values are specified
quoted_terms = ['"{0}"'.format(item) for item in known_fields[facet]]
facet_text = '{{!tag=tag_{0}}}{0}:({1})'.format(facet, ' OR '.join(quoted_terms))
fq.append(facet_text)
ff.append('{{!ex=tag_{0}}}{0}'.format(facet))
else:
# Otherwise just retrieve the entire facet
ff.append(facet)
solr_query['fq'] = fq
solr_query['facet.field'] = ff
if export and solr_query['sort'] == "score desc":
solr_query['sort'] = "id asc"
return solr_query
def create_solr_mlt_query(request: HttpRequest, search: Search, fields: dict, start_row: int, record_id: str):
solr_query = {
'q': 'id:"{0}"'.format(record_id),
'mlt': True,
'mlt.count': search.mlt_items,
'mlt.boost': True,
'start': start_row,
'rows': search.mlt_items,
'fl': get_query_fields(request, fields),
'mlt.fl': get_mlt_fields(request, fields),
'mlt.mintf': 1,
'mlt.minwl': 3,
'mlt.mindf': 2,
'mlt.maxdfpct': 50,
}
return solr_query
|
from subprocess import call
"""
This file contains the class Task, currently not used (Configuration.Task is used instead)
"""
__author__ = 'Sander Krause <sanderkrause@gmail.com>'
__author__ = 'Roel van Nuland <roel@kompjoefriek.nl>'
class Task:
command = None
input = None
output = None
name = None
def __init__(self, config):
if "command" in config:
self.command = config["command"]
if "input" in config:
self.input = config["input"]
if "output" in config:
self.output = config["output"]
if "name" in config:
self.name = config["name"]
def run(self):
# TODO: handle input / output redirects properly
call(self.command)
|
from ...nodes.BASE.node_base import RenderNodeBase
from ...preferences import get_pref
import bpy
from bpy.props import *
def test_email(self, context):
if self.test_send:
self.process()
self.test_send = False
class RenderNodeEmailNode(RenderNodeBase):
"""A simple input node"""
bl_idname = 'RenderNodeEmailNode'
bl_label = 'Email'
def init(self, context):
self.create_input('RenderNodeSocketTask', 'task', 'Task')
self.create_input('RenderNodeSocketBool', 'only_render', 'Send only in render mode')
self.create_input('RenderNodeSocketString', 'subject', 'Subject')
self.create_input('RenderNodeSocketString', 'content', 'Content')
self.create_input('RenderNodeSocketString', 'sender_name', 'Sender Name')
self.create_input('RenderNodeSocketString', 'email', 'Email')
self.create_output('RenderNodeSocketTask', 'task', 'Task')
self.width = 200
def process(self, context, id, path):
if not self.process_task(): return
use = self.inputs['only_render'].get_value()
if not use or (use and context.window_manager.rsn_running_modal):
bpy.ops.rsn.send_email(subject=self.inputs['subject'].get_value(),
content=self.inputs['content'].get_value(),
sender_name=self.inputs['sender_name'].get_value(),
email=self.inputs['email'].get_value())
def register():
bpy.utils.register_class(RenderNodeEmailNode)
def unregister():
bpy.utils.unregister_class(RenderNodeEmailNode)
|
# FPRBRRZA, 10027
#
# https://adventofcode.com/2018/day/10
import sys
import numpy as np
import pprint
def printStars(starsToPrint):
    # need to shift everything over to be positive
minx = min([s[0] for s in starsToPrint])
maxx = max([s[0] for s in starsToPrint])
miny = min([s[1] for s in starsToPrint])
maxy = max([s[1] for s in starsToPrint])
ncols, nrows = (abs(maxy-miny) + 1, abs(maxx - minx) + 1)
if max(nrows, ncols) > 300:
return
array = np.array(list(' ' * ncols * nrows), dtype = np.str).reshape((ncols, nrows))
for s in starsToPrint:
array[s[1] - miny, s[0] - minx] = '*'
for row in array:
print ''.join(row)
if __name__ == "__main__":
with open(sys.argv[1], 'r') as inputFile:
lines = [line.strip() for line in list(inputFile)]
stars = []
# position=<-3, 6> velocity=< 2, -1>
# -3 6 2 -1
# ['-3', '6', '2', '-1']
for line in lines:
line = line.replace('position=', '').replace('velocity=', '').replace('<', '').replace('>', '').replace(',', '').replace(' ', ' ').strip()
tokens = line.split(' ')
stars.append([int(s) for s in tokens])
oldStars = stars
for i in range(10027):
newStars = [[xpos + xvel, ypos + yvel, xvel, yvel] for xpos, ypos, xvel, yvel in oldStars]
oldStars = newStars
printStars(oldStars)
print i + 1
|
D_params ={
"ID_BUFFER_SIZE":[1],
"N_TX_S": [3],
"DUR_S_MS": [8],
"N_TX_T": [2],
"DUR_T_MS": [5],
"N_TX_A": [2],
"DUR_A_MS": [5],
"CRYSTAL_SINK_MAX_EMPTY_TS" : [3],
"CRYSTAL_MAX_SILENT_TAS" : [2],
"CRYSTAL_MAX_MISSING_ACKS" : [4]
}
|
from .horizontal_spinbox import HorizontalSpinbox
__all__ = ["HorizontalSpinbox"]
|
import os
import bb
import oe.path
from swupd.utils import manifest_to_file_list, create_content_manifests
from swupd.path import copyxattrfiles
def create_rootfs(d):
"""
create/replace rootfs with equivalent files from mega image rootfs
Create or replace the do_image rootfs output with the corresponding
subset from the mega rootfs. Done even if there is no actual image
getting produced, because there may be QA tests defined for
do_image which depend on seeing the actual rootfs that would be
used for images.
d -- the bitbake data store
"""
bndl = d.getVar('BUNDLE_NAME', True)
pn = d.getVar('PN', True)
pn_base = d.getVar('PN_BASE', True)
imageext = d.getVar('IMAGE_BUNDLE_NAME', True) or ''
if bndl and bndl != 'os-core':
bb.debug(2, "Skipping swupd_create_rootfs() in bundle image %s for bundle %s." % (pn, bndl))
return
havebundles = (d.getVar('SWUPD_BUNDLES', True) or '') != ''
if not havebundles:
bb.debug(2, 'Skipping swupd_create_rootfs(), original rootfs can be used as no additional bundles are defined')
return
contentsuffix = d.getVar('SWUPD_ROOTFS_MANIFEST_SUFFIX', True)
imagesuffix = d.getVar('SWUPD_IMAGE_MANIFEST_SUFFIX', True)
suffixes = (contentsuffix, imagesuffix)
# Sanity checking was already done in swupdimage.bbclass.
# Here we can simply use the settings.
imagebundles = d.getVarFlag('SWUPD_IMAGES', imageext, True).split() if imageext else []
rootfs = d.getVar('IMAGE_ROOTFS', True)
rootfs_contents = set()
parts = d.getVar('WORKDIR', True).rsplit(pn, 1)
if not pn_base: # the base image
import subprocess
# For the base image only we need to remove all of the files that were
# installed during the base do_rootfs and replace them with the
# equivalent files from the mega image.
#
# The virtual image recipes will already have an empty rootfs.
outfile = d.expand('${WORKDIR}/orig-rootfs-manifest.txt')
rootfs = d.getVar('IMAGE_ROOTFS', True)
# Generate a manifest of the current file contents
create_content_manifests(rootfs, outfile, None, [])
rootfs_contents.update(manifest_to_file_list(outfile))
# clean up
os.unlink(outfile)
else: # non-base image, i.e. swupdimage
manifest = d.expand("${DEPLOY_DIR_SWUPD}/image/${OS_VERSION}/os-core")
for suffix in suffixes:
rootfs_contents.update(manifest_to_file_list(parts[0] + pn_base + parts[1] + '/swupd' + suffix))
bb.debug(3, 'rootfs_contents has %s entries' % (len(rootfs_contents)))
for bundle in imagebundles:
for suffix in suffixes:
rootfs_contents.update(manifest_to_file_list(parts[0] + ('bundle-%s-%s' % (pn_base, bundle)) + parts[1] + '/swupd' + suffix))
mega_archive = d.getVar('MEGA_IMAGE_ARCHIVE', True)
bb.debug(2, 'Re-copying rootfs contents from mega image %s to %s' % (mega_archive, rootfs))
copyxattrfiles(d, rootfs_contents, mega_archive, rootfs)
deploy_dir = d.getVar('IMGDEPLOYDIR', True)
link_name = d.getVar('IMAGE_LINK_NAME', True)
# Create .rootfs.manifest for bundle images as the union of all
# contained bundles. Otherwise the image wouldn't have that file,
# which breaks certain image types ("toflash" in the Edison BSP)
# and utility classes (like isafw.bbclass).
if imageext:
packages = set()
manifest = d.getVar('IMAGE_MANIFEST', True)
for bundle in imagebundles:
bundlemanifest = manifest.replace(pn, 'bundle-%s-%s' % (pn_base, bundle))
if not os.path.exists(bundlemanifest):
bundlemanifest = deploy_dir + '/' + link_name + '.manifest'
bundlemanifest = bundlemanifest.replace(pn, 'bundle-%s-%s' % (pn_base, bundle))
with open(bundlemanifest) as f:
packages.update(f.readlines())
with open(manifest, 'w') as f:
f.writelines(sorted(packages))
# Also write a manifest symlink
if os.path.exists(manifest):
manifest_link = deploy_dir + '/' + link_name + '.manifest'
if os.path.lexists(manifest_link):
if d.getVar('RM_OLD_IMAGE', True) == "1" and \
os.path.exists(os.path.realpath(manifest_link)):
os.remove(os.path.realpath(manifest_link))
os.remove(manifest_link)
bb.debug(3, 'Linking composed rootfs manifest from %s to %s' % (manifest, manifest_link))
os.symlink(os.path.basename(manifest), manifest_link)
|
# -*- coding: utf-8 -*-
'''
Microsoft Updates (KB) Management
This module provides the ability to enforce KB installations
from files (.msu), without WSUS.
.. versionadded:: Neon
'''
# Import python libs
from __future__ import absolute_import, unicode_literals
import logging
# Import salt libs
import salt.utils.platform
import salt.utils.url
import salt.exceptions
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'win_wusa'
def __virtual__():
'''
Load only on Windows
'''
if not salt.utils.platform.is_windows():
return False, 'Only available on Windows systems'
return __virtualname__
def installed(name, source):
'''
Enforce the installed state of a KB
name
Name of the Windows KB ("KB123456")
source
Source of .msu file corresponding to the KB
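
    Example usage (illustrative sketch; the state name assumes this module is
    loaded under its virtual name ``win_wusa``, and the KB/file names are
    placeholders):

    .. code-block:: yaml

        KB4512508:
          win_wusa.installed:
            - source: salt://win/kb4512508.msu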
'''
ret = {
'name': name,
'changes': {},
'result': False,
'comment': '',
}
    # Start with basic error-checking. Do all the passed parameters make sense
    # and agree with each other?
if not name or not source:
raise salt.exceptions.SaltInvocationError(
'Arguments "name" and "source" are mandatory.')
# Check the current state of the system. Does anything need to change?
current_state = __salt__['win_wusa.is_installed'](name)
if current_state:
ret['result'] = True
ret['comment'] = 'KB already installed'
return ret
# The state of the system does need to be changed. Check if we're running
# in ``test=true`` mode.
if __opts__['test'] is True:
ret['comment'] = 'The KB "{0}" will be installed.'.format(name)
ret['changes'] = {
'old': current_state,
'new': True,
}
# Return ``None`` when running with ``test=true``.
ret['result'] = None
return ret
try:
result = __states__['file.cached'](source,
skip_verify=True,
saltenv=__env__)
except Exception as exc:
msg = 'Failed to cache {0}: {1}'.format(
salt.utils.url.redact_http_basic_auth(source),
exc.__str__())
log.exception(msg)
ret['comment'] = msg
return ret
if result['result']:
# Get the path of the file in the minion cache
cached = __salt__['cp.is_cached'](source, saltenv=__env__)
else:
log.debug(
'failed to download %s',
salt.utils.url.redact_http_basic_auth(source)
)
return result
# Finally, make the actual change and return the result.
new_state = __salt__['win_wusa.install'](cached)
ret['comment'] = 'The KB "{0}" was installed!'.format(name)
ret['changes'] = {
'old': current_state,
'new': new_state,
}
ret['result'] = True
return ret
|
import json
import sys
def load_dict(fname):
with open(fname, 'r') as fp:
data = json.load(fp)
return data
def dump_dict(fname, data):
with open(fname, 'w') as fp:
json.dump(data, fp)
def break_data(data, fname, bucket_size=5000):
num_buckets = len(data) / bucket_size
if num_buckets > int(num_buckets):
num_buckets = int(num_buckets) + 1
else:
num_buckets = int(num_buckets)
for i in range(num_buckets):
print("Bucket: ", i)
data_bucket = data[i * bucket_size:min((i + 1) * bucket_size, len(data))]
dump_dict(fname[:-5] + str(i) + ".json", data_bucket)
if __name__ == "__main__":
data = load_dict(sys.argv[1])
break_data(data, sys.argv[1])
|
#!/usr/bin/env python
'''
Use Netmiko to enter into configuration mode on pynet-rtr2. Also use
Netmiko to verify your state (i.e. that you are currently in configuration mode).
'''
from netmiko import ConnectHandler
from getpass import getpass
from routers import pynet_rtr2
def main():
'''
Use Netmiko to enter into configuration mode on pynet-rtr2. Also use
Netmiko to verify your state (i.e. that you are currently in configuration mode).
'''
ip_address = raw_input("Please enter IP: ")
password = getpass()
pynet_rtr2['ip'] = ip_address
pynet_rtr2['password'] = password
ssh_conn = ConnectHandler(**pynet_rtr2)
ssh_conn.config_mode()
if ssh_conn.check_config_mode():
print "In config mode"
if __name__ == '__main__':
main()
|
#
# @lc app=leetcode id=15 lang=python3
#
# [15] 3Sum
#
# Given an array nums of n integers, are there elements a, b, c in nums such that a + b + c = 0? Find all unique triplets in the array which gives the sum of zero.
# Note:
# The solution set must not contain duplicate triplets.
# Example:
# Given array nums = [-1, 0, 1, 2, -1, -4],
# A solution set is:
# [
# [-1, 0, 1],
# [-1, -1, 2]
# ]
class Solution:
def threeSum(self, nums: List[int]) -> List[List[int]]:
# Accepted
# 313/313 cases passed (848 ms)
# Your runtime beats 84.65 % of python3 submissions
        # Your memory usage beats 23.46 % of python3 submissions (17.1 MB)
        nums.sort()
l = len(nums)
if l == 3 and sum(nums) == 0:
return [nums]
res = []
for i in range(l):
last = l-1
j = i+1
if i != 0 and nums[i] == nums[i-1]:
continue
while j < last:
s = nums[i]+nums[j]+nums[last]
if s < 0:
j += 1
elif s > 0:
last -= 1
else:
res += [[nums[i], nums[j], nums[last]]]
j += 1
last -= 1
while j != (i+1) and nums[j] == nums[j-1] and j < last:
j += 1
while last != (l-1) and nums[last] == nums[last+1] and last > j:
last -= 1
return res
|
"""
# Definition for a Node.
class Node(object):
def __init__(self, val, next, random):
self.val = val
self.next = next
self.random = random
"""
# Approach 3: Iterative with O(1) space. Source: https://tinyurl.com/tnwofvs
class Solution(object):
def copyRandomList(self, head):
"""
:type head: Node
:rtype: Node
"""
if not head:
return head
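        # Three passes, O(1) extra space:
        #   1) weave a copy after every original node: A -> A' -> B -> B' -> ...
        #   2) set each copy's random pointer via original.random.next
        #   3) unweave the two lists and return the head of the copied list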
weavedListPtr = head
while weavedListPtr:
newNode = Node(weavedListPtr.val, None, None)
newNode.next = weavedListPtr.next
weavedListPtr.next = newNode
weavedListPtr = newNode.next
weavedListPtr = head
while weavedListPtr:
weavedListPtr.next.random = weavedListPtr.random.next if weavedListPtr.random else None
weavedListPtr = weavedListPtr.next.next
ptrOldList = head
ptrNewList = head.next
headNew = ptrNewList
while ptrOldList:
ptrOldList.next = ptrOldList.next.next
ptrNewList.next = ptrNewList.next.next if ptrNewList.next else None
ptrOldList = ptrOldList.next
ptrNewList = ptrNewList.next
return headNew
|