content stringlengths 5 1.05M |
|---|
# -*- coding: utf-8 -*-
"""
Written by Daniel M. Aukes and CONTRIBUTORS
Email: danaukes<at>asu.edu.
Please see LICENSE for full license.
"""
import numpy
def twopointsthesame(pt1, pt2, tolerance):
    """Return True when pt1 and pt2 lie within `tolerance` of each other.

    The comparison uses the Euclidean distance between the two points.
    """
    delta = numpy.array(pt2) - numpy.array(pt1)
    distance = delta.dot(delta) ** .5
    return distance < tolerance
def rounded_equal(pt1, pt2, decimal_places):
    """Return True when pt1 and pt2 are equal after rounding every
    coordinate to `decimal_places` decimal places."""
    rounded1 = numpy.array(pt1).round(decimal_places)
    rounded2 = numpy.array(pt2).round(decimal_places)
    return rounded1.tolist() == rounded2.tolist()
def identical(pt1, pt2):
    """Return True when pt1 and pt2 match exactly, coordinate by coordinate."""
    a = numpy.array(pt1)
    b = numpy.array(pt2)
    return all(a == b)
def pointinpoints(pt1, pts, tolerance):
    """Return True when pt1 lies within `tolerance` of any point in pts.

    Uses any() so the scan short-circuits at the first match instead of
    testing every candidate point (the original built a full list first).
    """
    return any(twopointsthesame(pt1, pt2, tolerance) for pt2 in pts)
def point_on_line(point, line, tolerance):
    """Return True when `point` lies (within `tolerance`) on the infinite
    line through line[0] and line[1].

    NOTE(review): the quantity compared against `tolerance` is
    sqrt(|dot(v, v2)**2 - |v|^2*|v2|^2|), which scales with the segment
    and offset lengths rather than being a plain point-to-line distance;
    callers must choose `tolerance` accordingly.
    """
    target = numpy.array(point)
    start = numpy.array(line[0])
    end = numpy.array(line[1])
    direction = end - start
    offset = target - start
    # dot(v, v2)^2 - |v|^2 |v2|^2 is zero exactly when the vectors are parallel.
    residual = direction.dot(offset) ** 2 - direction.dot(direction) * offset.dot(offset)
    return abs(residual) ** .5 < abs(tolerance)
def colinear(line1, line2, tolerance):
    """Return True when both endpoints of line2 lie on the infinite line
    through line1 (i.e. the two segments are collinear)."""
    first_on = point_on_line(line2[0], line1, tolerance)
    second_on = point_on_line(line2[1], line1, tolerance)
    return first_on and second_on
def point_within_line(point, line, tolerance):
    """Return True when `point` lies on segment `line`, on the segment's
    side of line[0] and strictly before line[1] (the far endpoint itself
    fails the length check).
    """
    target = numpy.array(point)
    start = numpy.array(line[0])
    end = numpy.array(line[1])
    direction = end - start
    offset = target - start
    seg_len = direction.dot(direction) ** .5
    off_len = offset.dot(offset) ** .5
    projection = direction.dot(offset)
    # Colinearity: |v . v2| equals |v|*|v2| only for parallel vectors.
    parallel = abs(abs(projection) - seg_len * off_len) < tolerance
    # Positive projection: same side as the segment direction.
    # off_len < seg_len: not at or past the far endpoint.
    return parallel and projection > 0 and off_len < seg_len
def order_vertices(vertices, segment_seed, tolerance):
    """Insert `vertices` into `segment_seed` so the result is ordered
    along the segment.

    :param vertices: candidate points (must be hashable, e.g. tuples,
        because they are deduplicated through set())
    :param segment_seed: points that establish the initial ordering
    :param tolerance: forwarded to point_within_line
    :return: list of points ordered along the segment

    NOTE(review): set() makes the pop() order nondeterministic, and a
    vertex that fails every point_within_line test is silently dropped --
    confirm both are acceptable to callers.
    """
    # Deduplicate; this also discards the input ordering.
    vertices = list(set(vertices))
    ordering = list(segment_seed)
    while vertices:
        c = vertices.pop()
        a = ordering[0]
        b = ordering[-1]
        if point_within_line(a, [c, b], tolerance):
            # The current front lies between c and the back, so c extends the front.
            ordering.insert(0, c)
        elif point_within_line(b, [a, c], tolerance):
            # The current back lies between the front and c, so c extends the back.
            ordering.append(c)
        else:
            # c falls inside: insert it before the first later point b such
            # that c is within segment (front, b).
            for ii, b in enumerate(ordering[1:]):
                if point_within_line(c, [a, b], tolerance):
                    ordering.insert(ii + 1, c)
                    break
    return ordering
def segment_midpoints(segments):
    """Return the midpoint of each segment in `segments`.

    Each segment is a pair of endpoints; the midpoint is half the sum of
    the endpoints along axis 1.
    """
    endpoint_sums = numpy.array(segments).sum(1)
    return (endpoint_sums / 2).tolist()
def distance_of_lines(lines, point=[0, 0]):
    """Return the perpendicular distance from `point` to each (infinite)
    2D line in `lines`.

    NOTE(review): the mutable default [0, 0] is never modified here, but
    it is a shared object -- callers should not mutate it.
    """
    origin = numpy.array(point)
    segs = numpy.array(lines)
    directions = segs[:, 1, :] - segs[:, 0, :]
    lengths = (directions ** 2).sum(1) ** .5
    offsets = segs[:, 0, :] - origin
    # |cross(v1, v2)| / |v1| is the point-to-line distance in 2D.
    crosses = numpy.cross(directions, offsets)
    return abs(crosses / lengths)
def shared_edge(line1, line2, tolerance):
    """Return True when line2 is collinear with line1 and at least one of
    line2's endpoints falls strictly inside line1."""
    if not colinear(line1, line2, tolerance):
        return False
    end0_inside = point_within_line(line2[0], line1, tolerance)
    end1_inside = point_within_line(line2[1], line1, tolerance)
    return end0_inside or end1_inside
def inner_segment(line1, line2, tolerance):
    """Return the overlapping portion of two (assumed collinear) segments.

    Starts from line1 and narrows it for each endpoint of line2 that
    falls strictly inside line1.

    NOTE(review): the two if-blocks run sequentially, so the second can
    overwrite the result of the first; and when an endpoint of line2 is
    inside line1 but neither endpoint of line1 lies in the corresponding
    sub-segment, all of line2 is taken as the answer -- confirm this
    ordering is intended.
    """
    points = line1
    if point_within_line(line2[0], line1, tolerance):
        if point_within_line(line1[0], (line2[0], line1[1]), tolerance):
            # Overlap runs from line1's start to line2's first endpoint.
            points = (line1[0], line2[0])
        elif point_within_line(line1[1], (line2[0], line1[0]), tolerance):
            points = (line1[1], line2[0])
        else:
            # line2 lies entirely inside line1.
            points = line2
    if point_within_line(line2[1], line1, tolerance):
        if point_within_line(line1[0], (line2[1], line1[1]), tolerance):
            points = (line1[0], line2[1])
        elif point_within_line(line1[1], (line2[1], line1[0]), tolerance):
            points = (line1[1], line2[1])
        else:
            points = line2
    return points
def calctransformfrom2lines(pointset1, pointset2, scale_x=None, scale_y=None):
    """Compute the 2D affine transform that maps segment pointset1 onto
    segment pointset2.

    Composed as: translate pointset1[0] to the origin, rotate the source
    segment onto the x axis, scale, rotate to the destination segment's
    angle, then translate to pointset2[0].

    :param pointset1: (start, end) points of the source segment
    :param pointset2: (start, end) points of the destination segment
    :param scale_x: x scale factor; defaults to the ratio of segment lengths
    :param scale_y: y scale factor; defaults to the ratio of segment lengths
    :return: (T00, T01, T10, T11, T02, T12) -- the 2x2 linear part
        row-major, followed by the translation column.
    """
    import math
    pointset1 = numpy.array(pointset1)
    pointset2 = numpy.array(pointset2)
    v1 = pointset1[1] - pointset1[0]
    v2 = pointset2[1] - pointset2[0]
    # Angle of each segment with respect to the x axis.
    q1 = math.atan2(v1[1], v1[0])
    q2 = math.atan2(v2[1], v2[0])
    l1 = v1.dot(v1)**.5
    l2 = v2.dot(v2)**.5
    scale_internal = l2 / l1
    if scale_x is None:
        scale_x = scale_internal
    if scale_y is None:
        scale_y = scale_internal
    # T0: translate pointset1[0] to the origin.
    T0 = numpy.eye(3)
    T0[0:2, 2] = -pointset1[0]
    # T1: rotate by -q1 (source segment onto the x axis).
    T1 = numpy.eye(3)
    T1[0, 0] = math.cos(-q1)
    T1[0, 1] = -math.sin(-q1)
    T1[1, 1] = math.cos(-q1)
    T1[1, 0] = math.sin(-q1)
    # T2: scale x and y independently.
    T2 = numpy.eye(3)
    T2[0, 0] = scale_x
    T2[1, 1] = scale_y
    # T3: rotate by q2 and translate to pointset2[0].
    T3 = numpy.eye(3)
    T3[0, 0] = math.cos(q2)
    T3[0, 1] = -math.sin(q2)
    T3[1, 1] = math.cos(q2)
    T3[1, 0] = math.sin(q2)
    T3[0:2, 2] = pointset2[0]
    # Compose right-to-left: T0 is applied first, T3 last.
    T = T3.dot(T2.dot(T1.dot(T0)))
    return T[0, 0], T[0, 1], T[1, 0], T[1, 1], T[0, 2], T[1, 2]
def angle_between_lines(pointset1, pointset2, scale_x=None, scale_y=None):
    """Return the angle (radians, in [0, pi]) between the two segments.

    :param pointset1: (start, end) points of the first segment
    :param pointset2: (start, end) points of the second segment
    :param scale_x: unused; kept for backward compatibility
    :param scale_y: unused; kept for backward compatibility
    """
    import math
    pointset1 = numpy.array(pointset1)
    pointset2 = numpy.array(pointset2)
    v1 = pointset1[1] - pointset1[0]
    v2 = pointset2[1] - pointset2[0]
    l1 = (v1.dot(v1))**.5
    l2 = (v2.dot(v2))**.5
    cq = v1.dot(v2) / (l1 * l2)
    # BUGFIX: floating-point rounding can push cq marginally outside
    # [-1, 1] for (anti)parallel segments, making math.acos raise
    # ValueError; clamp into the valid domain.
    cq = max(-1.0, min(1.0, cq))
    return math.acos(cq)
def convert_to_3d(listin):
    """Return `listin` with a zero third coordinate appended to every
    point (2D -> 3D with z = 0)."""
    points = numpy.array(listin)
    # Zero column with the same dtype/length as the first coordinate row.
    zeros = points.T[0] * 0
    stacked = numpy.concatenate((points.T, [zeros]), 0)
    return stacked.T.tolist()
if __name__ == '__main__':
pass |
""" Cut root
Graph type: Barabasi-Albert
MaxCut formulation: McCormic
Baseline: SCIP with defaults
Each graph is solved using different scip_seed,
and SCIP statistics are collected.
All results are written to experiment_results.pkl file
and should be post-processed using experiments/analyze_experiment.py
utils/analyze_experiment.py can generate tensorboard hparams,
and a csv file summarizing the statistics in a table (useful for latex).
In this experiment cutting planes are added only at the root node,
and the dualbound, lp_iterations and other statistics are collected.
The metric optimized is the dualbound integral w.r.t the number of lp iterations at each round.
"""
from ray import tune
from utils.scip_models import maxcut_mccormic_model, MccormickCycleSeparator
from utils.misc import get_separator_cuts_applied
from utils.samplers import SepaSampler
import pickle
import os
def generate_examples_from_graph(config):
    """Solve one MaxCut instance with SCIP and record cut-selection data.

    Loads graph `graph_idx` from `config['data_abspath']`, builds the
    McCormick MaxCut model with a cycle-inequality separator and a
    SepaSampler that records SCIP's cut-selection decisions, configures
    separation/randomization parameters from `config`, solves the root
    node only, and saves the sampled state-action pairs to a file.

    :param config: experiment configuration dict; may be nested under the
        'complete_experiment' key when launched from complete_experiment.py
    """
    # Unwrap the config if the experiment was launched from complete_experiment.py.
    if 'complete_experiment' in config.keys():
        config = config['complete_experiment']
    # Overlay the sweep's constant parameters onto this trial's config.
    sweep_config = config['sweep_config']
    for k, v in sweep_config['constants'].items():
        config[k] = v
    # Read the pickled graph for this trial.
    graph_idx = config['graph_idx']
    filepath = os.path.join(config['data_abspath'], "graph_idx_{}.pkl".format(graph_idx))
    with open(filepath, 'rb') as f:
        G = pickle.load(f)
    scip_seed = config['scip_seed']
    model, x, y = maxcut_mccormic_model(G, use_general_cuts=False)
    # Cycle-inequality separator, run at every round with high priority.
    sepa = MccormickCycleSeparator(G=G, x=x, y=y, name='MLCycles', hparams=config)
    model.includeSepa(sepa, 'MLCycles',
                      "Generate cycle inequalities for the MaxCut McCormic formulation",
                      priority=1000000, freq=1)
    # Sampler piggybacks on the separation loop to record SCIP's decisions.
    sampler = SepaSampler(G=G, x=x, y=y, name='g{}-samples'.format(graph_idx), hparams=config)
    model.includeSepa(sampler, 'g{}-samples1'.format(graph_idx),
                      "Store and save scip cut selection algorithm decisions",
                      priority=1, freq=1)
    # Cut-selection weights and separation limits.
    model.setRealParam('separating/objparalfac', config['objparalfac'])
    model.setRealParam('separating/dircutoffdistfac', config['dircutoffdistfac'])
    model.setRealParam('separating/efficacyfac', config['efficacyfac'])
    model.setRealParam('separating/intsupportfac', config['intsupportfac'])
    model.setIntParam('separating/maxrounds', config['maxrounds'])
    model.setIntParam('separating/maxroundsroot', config['maxroundsroot'])
    model.setIntParam('separating/maxcuts', config['maxcuts'])
    model.setIntParam('separating/maxcutsroot', config['maxcutsroot'])
    # Set up randomization so runs differ only through scip_seed.
    model.setBoolParam('randomization/permutevars', True)
    model.setIntParam('randomization/permutationseed', scip_seed)
    model.setIntParam('randomization/randomseedshift', scip_seed)
    # Set time limit.
    model.setRealParam('limits/time', config['time_limit_sec'])
    # Termination condition - exit after the root node finishes.
    model.setLongintParam('limits/nodes', 1)
    model.setIntParam('separating/maxstallroundsroot', -1)  # add cuts forever.
    # Run optimizer.
    model.optimize()
    # Save the episode state-action pairs to a file.
    sampler.save_data()
    # BUGFIX: corrected the misspelled status message ('expeiment').
    print('experiment finished')
def submit_job(jobname, taskid, time_limit_minutes):
    """Write a SLURM batch script for one adaptive-policy task and submit it.

    Relies on the module-level `args` namespace for log_dir, experiment,
    config_file, data_dir, cpus_per_task, auto and product_keys.

    :param jobname: name used for the job and its .sh/.out files
    :param taskid: task index forwarded to adaptive_policy_runner.py
    :param time_limit_minutes: SLURM wall-time in minutes
        (NOTE(review): the '00:{}:00' format breaks for values >= 60 --
        confirm callers stay below one hour)
    """
    # CREATE SBATCH FILE
    job_file = os.path.join(args.log_dir, jobname + '.sh')
    with open(job_file, 'w') as fh:
        fh.writelines("#!/bin/bash\n")
        fh.writelines('#SBATCH --time=00:{}:00\n'.format(time_limit_minutes))
        fh.writelines('#SBATCH --account=def-alodi\n')
        fh.writelines('#SBATCH --output={}/{}.out\n'.format(args.log_dir,jobname))
        fh.writelines('#SBATCH --mem=0\n')
        fh.writelines('#SBATCH --mail-user=avrech@campus.technion.ac.il\n')
        fh.writelines('#SBATCH --mail-type=END\n')
        fh.writelines('#SBATCH --mail-type=FAIL\n')
        fh.writelines('#SBATCH --nodes=1\n')
        fh.writelines('#SBATCH --job-name={}\n'.format(jobname))
        fh.writelines('#SBATCH --ntasks-per-node=1\n')
        fh.writelines('#SBATCH --cpus-per-task={}\n'.format(args.cpus_per_task))
        # Set up the worker environment, then launch the runner for this task.
        fh.writelines('module load python\n')
        fh.writelines('source $HOME/server_bashrc\n')
        fh.writelines('source $HOME/venv/bin/activate\n')
        fh.writelines('python adaptive_policy_runner.py --experiment {} --log_dir {} --config_file {} --data_dir {} --taskid {} {} --product_keys {}\n'.format(
            args.experiment,
            args.log_dir,
            args.config_file,
            args.data_dir,
            taskid,
            '--auto' if args.auto else '',
            ' '.join(args.product_keys)
        ))
    # Hand the generated script to the SLURM scheduler.
    os.system("sbatch {}".format(job_file))
if __name__ == '__main__':
    import argparse
    import yaml
    from experiments.imitation.data_generator import generate_data
    parser = argparse.ArgumentParser()
    parser.add_argument('--datadir', type=str, default='data',
                        help='path to generate/read data')
    parser.add_argument('--graphidx', type=str, default='data',
                        help='index of the graph to process')
    parser.add_argument('--configfile', type=str, default='experiment_config.yaml',
                        help='path to the experiment YAML configuration file')
    parser.add_argument('--ntasks-per-node', type=int, default=40,
                        help='Graham - 32, Niagara - 40')
    parser.add_argument('--graphs', nargs='+', default=[0],
                        help='list of hparam keys on which to product')
    args = parser.parse_args()
    # BUGFIX: honor --configfile; the file name was hard-coded before,
    # silently ignoring the command-line option.
    with open(args.configfile) as f:
        sweep_config = yaml.load(f, Loader=yaml.FullLoader)
    # Start from the sweep constants, then pin each swept key to a single value.
    config = sweep_config['constants']
    for k, v in sweep_config['sweep'].items():
        if k == 'graph_idx':
            config[k] = args.graphidx
        else:
            config[k] = v['values'][0]
    # BUGFIX: honor --datadir; 'data' was hard-coded before.
    data_abspath = generate_data(sweep_config, args.datadir, solve_maxcut=True, time_limit=600)
    config['sweep_config'] = sweep_config
    config['data_abspath'] = data_abspath
    generate_examples_from_graph(config)
|
# -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""This is SearchSpace for blocks."""
from vega.search_space.fine_grained_space import FineGrainedSpace
from vega.search_space.fine_grained_space.conditions import Sequential
from vega.core.common.class_factory import ClassFactory, ClassType
from vega.search_space.fine_grained_space.operators import conv_bn_relu6
from vega.search_space.fine_grained_space.blocks.sr import InvertedResidual
@ClassFactory.register(ClassType.SEARCH_SPACE)
class MobileNetV3Tiny(FineGrainedSpace):
    """Create MobileNetV3Tiny SearchSpace."""

    # Each row is (expand_ratio, output_channels, stride) for one
    # InvertedResidual block.
    inverted_residual_setting = [
        [1.0, 9, 1],
        [4.0, 14, 2],
        [3.0, 14, 1],
        [3.0, 24, 2],
        [3.0, 24, 1],
        [3.0, 24, 1],
        [6.0, 48, 2],
        [2.5, 48, 1],
        [2.3, 48, 1],
        [2.3, 48, 1],
        [6.0, 67, 1],
        [6.0, 67, 1],
        [6.0, 96, 2],
        [6.0, 96, 1],
        [6.0, 96, 1],
        [6.0, 96, 1]]

    def constructor(self, load_path=None):
        """Construct MobileNetV3Tiny class.

        :param load_path: path for saved model (not used in this method)
        """
        input_channel = 9
        # Stem: 3x3 stride-2 conv from 3 input channels to the first width.
        features = [conv_bn_relu6(inchannel=3, outchannel=input_channel, kernel=3, stride=2)]
        # IMPROVED: enumerate() index was unused; unpack setting rows directly.
        for expand_ratio, output_channel, stride in self.inverted_residual_setting:
            features.append(InvertedResidual(inp=input_channel, oup=output_channel,
                                             stride=stride, expand_ratio=expand_ratio))
            input_channel = output_channel
        # Expose intermediate feature maps at the listed block indices.
        self.block = Sequential(*tuple(features), out_list=[3, 6, 12, 16])
|
#!/usr/bin/env python
from sensor_msgs.msg import Joy
from std_msgs.msg import Empty
from std_msgs.msg import Int8MultiArray
import rospy
# Index of the drive setpoint in the published Int8MultiArray data.
DRIVE_PUB_INDEX = 0
# Index of the yaw setpoint in the published Int8MultiArray data.
YAW_PUB_INDEX = 1
# Joy buttons index (R1) used as the motor kill switch.
KILL_INDEX_R1 = 5
# Joy axes index carrying the drive stick value.
DRIVE_INDEX = 1
# Joy axes index carrying the yaw stick value.
YAW_INDEX = 3
class DualShock():
    """Translate DualShock joystick input into motor commands.

    Subscribes to 'joy' and publishes either a kill command (while R1 is
    pressed) or scaled drive/yaw setpoints for the Pololu driver.
    """

    def __init__(self):
        rospy.Subscriber('joy', Joy, self.callback)
        self.kill_pub = rospy.Publisher('kill_motors', Empty, queue_size=1)
        self.setpoint_pub = rospy.Publisher('move_setpoints', Int8MultiArray, queue_size=5)
        # When True, stick input is forwarded as setpoints.
        self.motor_control = True

    def callback(self, msg):
        """Handle one Joy message: kill if R1 is pressed, else publish setpoints."""
        buttons = msg.buttons
        if buttons[KILL_INDEX_R1]:
            rospy.logwarn("Killed")
            self.kill_pub.publish(Empty())
        elif self.motor_control:
            drive = self.transform_drive(msg.axes[DRIVE_INDEX])
            yaw = self.transform_yaw(msg.axes[YAW_INDEX])
            # FIX: use a separate variable for the outgoing message rather
            # than shadowing the incoming `msg`; drop stray semicolons.
            out_msg = Int8MultiArray()
            out_msg.data = [drive, yaw]
            self.setpoint_pub.publish(out_msg)
            if drive:
                rospy.loginfo("drive: " + str(drive))
            if yaw:
                rospy.loginfo("yaw: " + str(yaw))

    def transform_yaw(self, raw_yaw):
        """
        Pololu driver expects yaw -100, 100; the sign is inverted here
        (presumably to match the driver's turn direction -- confirm).
        """
        return -int(raw_yaw * 100)

    def transform_drive(self, raw_drive):
        """
        Scale a stick value to the Pololu drive range.

        NOTE(review): the original docstring claimed 0-100, but negative
        stick values yield negative outputs (the clamp below was
        commented out) -- confirm the driver accepts negative drive.
        """
        return int(raw_drive * 100)
        # if raw_drive < 0:
        # return 0
        # else:
        # return int(raw_drive * 100)
def main():
    """Start the dual_shock ROS node and block until shutdown."""
    rospy.init_node('dual_shock')
    # Keep a reference so the subscriber/publishers stay alive while spinning.
    node = DualShock()
    rospy.spin()


if __name__ == "__main__":
    main()
|
# -*- coding: utf-8 -*-
# Python imports
import base64
from decimal import Decimal, InvalidOperation
import functools
from http.client import UNAUTHORIZED
import logging
import random
import re
import string
# Django imports
from django.conf import settings
from django.contrib.auth.models import Permission
from django.contrib.auth.views import redirect_to_login
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.db.models import Q
from django.db.models.fields import FieldDoesNotExist
from django.http import HttpResponseBadRequest, HttpResponse
from django.template.defaultfilters import yesno, capfirst
from django.utils.functional import lazy
from django.utils.translation import get_language, ugettext as _
from django.utils.timezone import now
from django.views.decorators.debug import sensitive_variables
# 3rd party imports
from braces.views import PermissionRequiredMixin, MultiplePermissionsRequiredMixin
from pytz import utc
# This project's imports
from .constants import ARABIC_COMMA
# Shorthand for the 400 status code, used by FormErrorReturns400Mixin below.
BAD_REQUEST = HttpResponseBadRequest.status_code
# Module-level logger named after this module.
logger = logging.getLogger(__name__)
def ensure_unique(model, instance, fieldname, **kwargs):
    """Validate that no other undeleted `model` record shares `instance`'s
    value of `fieldname`.

    Deleted records (deleted=True) are ignored entirely, and a deleted
    instance is never checked.

    :param model: model class queried for duplicates
    :param instance: the record being validated (may be unsaved)
    :param fieldname: name of the field whose value must be unique
    :param kwargs: additional query params when checking for dupes
    :raises ValidationError: if another undeleted record has the same value
    """
    if not instance.deleted:
        query_parms = {
            'deleted': False,
            fieldname: getattr(instance, fieldname),
        }
        query_parms.update(**kwargs)
        others = model.objects.filter(**query_parms)
        if instance.pk:
            # Don't count the instance itself as its own duplicate.
            others = others.exclude(pk=instance.pk)
        if others.exists():
            verbose_name = model._meta.get_field(fieldname).verbose_name
            msg = _("Duplicate value for {fieldname}").format(fieldname=verbose_name)
            raise ValidationError(msg)
def get_permission_object_by_name(name,
                                  permission_class=None,
                                  contenttype_class=None,
                                  create_if_needed=False):
    """Given a Django permission name like `app_label.change_thing`,
    return its Permission object.

    You can pass in the Permission class when using this from a migration.
    Pass in create_if_needed=True to have the permission created if it doesn't exist.

    :param name: dotted permission name, e.g. 'app_label.change_thing'
    :param permission_class: Permission model to query (defaults to
        django.contrib.auth's Permission)
    :param contenttype_class: ContentType model used when creating
    :param create_if_needed: create the permission if it does not exist
    :raises permission_class.DoesNotExist: when the permission is missing
        and create_if_needed is False
    """
    # I hate Django permissions
    app_label, codename = name.split(".", 1)
    if not permission_class:
        permission_class = Permission
    # Is that enough to be unique? Hope so
    try:
        return permission_class.objects.get(content_type__app_label=app_label,
                                            codename=codename)
    except permission_class.DoesNotExist:
        if create_if_needed:
            if not contenttype_class:
                contenttype_class = ContentType
            perm_name, model_name = codename.split("_", 1)
            ct, unused = contenttype_class.objects.get_or_create(app_label=app_label,
                                                                 model=model_name)
            # Come up with a permission name. E.g. if code name is 'add_user',
            # the full name might be 'Can add user'
            full_name = "Can %s %s" % (perm_name, model_name)
            return permission_class.objects.create(content_type=ct, codename=codename,
                                                   name=full_name)
        # CONSISTENCY FIX: report through the module logger instead of
        # print(), matching error reporting elsewhere in this module.
        logger.error("NO SUCH PERMISSION: %s, %s", app_label, codename)
        raise
def permission_names_to_objects(names):
    """
    Given an iterable of permission names (e.g. 'app_label.add_model'),
    return a list of the corresponding Permission objects.
    """
    return list(map(get_permission_object_by_name, names))
def astz(dt, tz):
    """
    Express the moment `dt` in timezone `tz`.

    :param dt: an aware datetime object
    :param tz: a pytz-style timezone object (must provide normalize())
    :return: a datetime representing the same instant, written in `tz`
    """
    # pytz requires normalize() after astimezone() to fix up DST offsets;
    # see http://pythonhosted.org/pytz/ for why this isn't trivial.
    converted = dt.astimezone(tz)
    return tz.normalize(converted)
def max_non_none_datetime(*values):
    """
    Given some datetime objects, and ignoring any None values,
    return the datetime object that occurs latest in
    absolute time, or None.
    """
    # Tag every non-None value with its UTC equivalent so comparison is
    # on absolute time regardless of each value's own timezone.
    tagged = [(astz(value, utc), value) for value in values if value is not None]
    if not tagged:
        return None
    # The max tuple carries the max UTC time; return its original object.
    return max(tagged)[1]
def min_non_none_datetime(*values):
    """
    Given some datetime objects, and ignoring any None values,
    return the datetime object that occurs earliest in
    absolute time, or None.
    """
    # Tag every non-None value with its UTC equivalent so comparison is
    # on absolute time regardless of each value's own timezone.
    tagged = [(astz(value, utc), value) for value in values if value is not None]
    if not tagged:
        return None
    # The min tuple carries the min UTC time; return its original object.
    return min(tagged)[1]
class FormErrorReturns400Mixin(object):
    """View mixin: render invalid-form responses with HTTP 400 instead of 200."""
    def form_invalid(self, form):
        # If the form is not valid, return the usual page but with a 400 status
        return self.render_to_response(self.get_context_data(form=form), status=BAD_REQUEST)
# Number of decimal places kept for latitude/longitude values.
NUM_LATLONG_DECIMAL_PLACES = 8
LATLONG_QUANTIZE_PLACES = Decimal(10) ** -NUM_LATLONG_DECIMAL_PLACES
MAX_LATLONG = Decimal('180.0')
def parse_latlong(value):
    """
    Parse a decimal string (or float) into a Decimal truncated to the
    number of places we keep for lat/long values.

    :raises InvalidOperation: if the value exceeds MAX_LATLONG or cannot
        be parsed as a Decimal.
    """
    parsed = Decimal(value).quantize(LATLONG_QUANTIZE_PLACES)
    if parsed > MAX_LATLONG:
        raise InvalidOperation("Lat or long too large")
    return parsed
def cleanup_lat_or_long(latlng):
    """
    Given character string that is supposed to contain a latitude or longitude,
    return either a valid Decimal value, or None.

    The parsers below are tried in order until one produces a value:
    plain decimal, six-digit DDMMSS, delimiter-separated DMS with
    fractional seconds, then numeral-group heuristics.

    Note: This assumes E/N and does not handle anything west of Greenwich or
    south of the equator! If the input has a - or W or S in it, it'll probably
    just fail to recognize it as a valid coordinate and return None.
    """
    # Strip whitespace and degree signs
    s = latlng.strip().rstrip('E\xb0')
    # If nothing left, we have no data.
    if len(s) == 0:
        return None
    d = None
    if d is None:
        # See if it's a simple decimal value
        if '.' in s:
            try:
                d = parse_latlong(s)
            except InvalidOperation:
                pass
    if d is None:
        # 290250
        # 204650
        # Assume DDMMSS
        m = re.match(r'^(\d\d)(\d\d)(\d\d)$', s)
        if m:
            val = float(m.group(1)) + float(m.group(2)) / 60.0 + float(m.group(3)) / 3600.0
            d = parse_latlong(val)
    if d is None:
        # 12°37'49.30"
        # 20° 6'9.54"E
        # 20°29'33.84"E
        # 10ْ .05 30 63
        # 12° 2'54.62"
        # 12°37'7.00"
        # Assume the format is: degrees minutes seconds.milliseconds
        m = re.match(r'^(\d\d?)\D+(\d\d?)\D+(\d\d?)\D+(\d\d?)$', s)
        if m:
            parts = m.groups()
            val = (float(parts[0])
                   + float(parts[1]) / 60.0
                   + float('%s.%s' % (parts[2], parts[3])) / 3600.0)
            d = parse_latlong(val)
    if d is None:
        # Last resort: pick out the groups of digits and guess their roles
        # from how many groups there are.
        parts = _extract_numerals(s)
        if len(parts) == 4:
            # 12°37'49.30"
            # 20° 6'9.54"E
            # 20°29'33.84"E
            # 10ْ .05 30 63
            # 12° 2'54.62"
            # 12°37'7.00"
            # Assume the format is: degrees minutes seconds.fractionalseconds
            val = (float(parts[0])
                   + float(parts[1]) / 60.0
                   + float('%s.%s' % (parts[2], parts[3])) / 3600.0)
            d = parse_latlong(val)
        elif len(parts) == 3:
            # 12ْ 14 23
            # 14ْ 25 816
            # 57 " .579" .12
            # 32ْ 453 700
            # Hmm - assume degrees minutes seconds?
            if float(parts[1]) > 60.0 or float(parts[2]) > 60.0:
                # Just makes no sense - ignore it
                return None
            val = (float(parts[0])
                   + float(parts[1]) / 60.0
                   + float(parts[2]) / 3600.0)
            d = parse_latlong(val)
        elif len(parts) == 2:
            # 12° 2
            d = parse_latlong(float(parts[0]) + float(parts[1]) / 60.0)
    if d is None:
        return None
    # Defensive re-check; parse_latlong already raises above MAX_LATLONG.
    if d > Decimal('180.0'):
        return None
    return d
# Matches any single non-digit character (unicode-aware); used to split
# messages into runs of numerals.
NONDIGITS_RE = re.compile(r'[^\d]', flags=re.UNICODE)
@sensitive_variables()
def clean_input_msg(msg_text):
    """Process a user's message, finding all number-strings, translating
    them to American strings, and then joining them with an asterisk. We do not
    validate them. That will be done by the handlers.
    """
    # int() normalizes eastern arabic numerals to western digits.
    return '*'.join(str(int(num)) for num in _extract_numerals(msg_text))
def _extract_numerals(msg_text):
"""Return a list of all strings of numerals. Works on american and
eastern arabic numerals (Python FTW!)
"""
# split string using any non-digits as a delimiter, then drop the empty strings
number_list = [n for n in NONDIGITS_RE.split(msg_text) if n]
return number_list
def get_now():
    """Return the current time via django.utils.timezone.now()."""
    # make sure this is timezone-aware
    return now()
class LoginPermissionRequiredMixin(PermissionRequiredMixin):
    """Combines LoginRequiredMixin and PermissionRequiredMixin, according to our rules.

    When an unauthenticated user visits a page that requires login, s/he gets redirected
    to the login page.

    When an authenticated user lacks the permission for a page, s/he gets a 403.

    In contrast to the LoginRequiredMixin and PermissionRequiredMixin, the subclass need not
    set the raise_exception attribute. (It's ignored.)
    """
    def dispatch(self, request, *args, **kwargs):
        # User has to be logged in
        if not request.user.is_authenticated:
            return redirect_to_login(request.get_full_path(),
                                     self.get_login_url(),
                                     self.get_redirect_field_name())
        # Force raise_exception to be True when invoking PermissionRequiredMixin,
        # so a permission failure yields a 403 rather than a login redirect.
        self.raise_exception = True
        return super(LoginPermissionRequiredMixin, self).dispatch(request, *args, **kwargs)
class LoginMultiplePermissionsRequiredMixin(MultiplePermissionsRequiredMixin):
    """Combines LoginRequiredMixin and MultiplePermissionsRequiredMixin, according to our rules.

    When an unauthenticated user visits a page that requires login, s/he gets redirected
    to the login page.

    When an authenticated user lacks the permission for a page, s/he gets a 403.

    In contrast to the LoginRequiredMixin and PermissionRequiredMixin, the subclass need not
    set the raise_exception attribute. (It's ignored.)

    Also provides the non-standard pre_dispatch_check which runs before invoking
    the parent dispatch method, and if it returns a response, will
    return that instead of calling parent dispatch.
    """
    def pre_dispatch_check(self, request, *args, **kwargs):
        # Hook for subclasses: return a response to short-circuit dispatch.
        return None
    def dispatch(self, request, *args, **kwargs):
        # User has to be logged in
        if not request.user.is_authenticated:
            return redirect_to_login(request.get_full_path(),
                                     self.get_login_url(),
                                     self.get_redirect_field_name())
        # Now we know they're logged in.
        response = self.pre_dispatch_check(request, *args, **kwargs)
        if response:
            return response
        # Force raise_exception to be True when invoking MultiplePermissionsRequiredMixin,
        # so a permission failure yields a 403 rather than a login redirect.
        self.raise_exception = True
        return super(LoginMultiplePermissionsRequiredMixin, self).dispatch(request, *args, **kwargs)
def get_db_connection_tz(cursor):
    """ Return time zone of the Django database connection with which
    the specified cursor is associated.
    """
    cursor.execute("SHOW timezone;")
    rows = cursor.fetchall()
    # Result looks like [(u'UTC',)]: one row with one column.
    return rows[0][0]
class ConnectionInTZ(object):
    """ This context manager manipulates the time zone of the Django database
    connection with which the specified cursor is associated, ensuring that
    the original time zone is restored at the end of the context.

    Example use:
       with ConnectionInTZ(cursor, 'Libya'):
           cursor.execute(something)
           cursor.fetchall()

    Django throws an exception when handling some SQL time-related constructs
    in local time; DATE_TRUNC('day', <field>) can't be used, for example.
    """
    def __init__(self, cursor, desired_tz):
        # cursor: DB cursor; desired_tz: zone name to switch the connection to.
        self.cursor = cursor
        self.tz = desired_tz
    def __enter__(self):
        # Remember the connection's current zone so __exit__ can restore it.
        self.saved_tz = get_db_connection_tz(self.cursor)
        self.cursor.execute("SET timezone=%s;", [self.tz])
    def __exit__(self, exception_type, exception_value, exception_traceback):
        # Restore the original zone even if the body raised.
        self.cursor.execute("SET timezone=%s;", [self.saved_tz])
def refresh_model(obj):
    """
    Given an instance of a model, fetch a fresh copy from the database and return it.
    """
    model_class = type(obj)
    return model_class.objects.get(pk=obj.pk)
def random_string(length=255, extra_chars=''):
    """ Generate a random string of characters.

    :param length: Length of generated string.
    :param extra_chars: Additional characters to include in generated
        string.
    """
    alphabet = string.ascii_letters + extra_chars
    return ''.join(random.choice(alphabet) for _ in range(length))
def get_random_number_string(length=10, choices=string.digits, no_leading_zero=True):
    """Return a random string of `length` characters drawn from `choices`.

    When no_leading_zero is set, the first character is re-drawn until it
    is a nonzero digit.
    """
    first = random.choice(choices)
    while no_leading_zero and not int(first):
        # Keep picking until the leading character is nonzero.
        first = random.choice(choices)
    rest = ''.join(random.choice(choices) for _ in range(length - 1))
    return first + rest
def shuffle_string(s):
    """Randomly shuffle a string and return result."""
    chars = list(s)
    random.shuffle(chars)
    return ''.join(chars)
def strip_nondigits(string):
    """
    Return a string containing only the digits of the input string.

    (The parameter name shadows the stdlib `string` module; kept as-is
    for backward compatibility with keyword callers.)
    """
    return ''.join(filter(str.isdigit, string))
def at_noon(dt):
    """
    Return a copy of `dt` moved to exactly 12:00:00.000000 on the same day.
    """
    noon_fields = dict(hour=12, minute=0, second=0, microsecond=0)
    return dt.replace(**noon_fields)
def at_midnight(dt):
    """
    Return a copy of `dt` moved to exactly 00:00:00.000000 on the same day.
    """
    midnight_fields = dict(hour=0, minute=0, second=0, microsecond=0)
    return dt.replace(**midnight_fields)
def find_overlapping_records(start_time, end_time, queryset, start_field, end_field):
    """
    A utility to determine if any records in a queryset overlap with the period
    passed in as start_time -> end_time

    Returns a queryset with the overlapping records.

    :param start_time: the start of the period to look for overlaps of
    :param end_time: the end of the period to look for overlaps of
    :param queryset: the records to look for an overlap in
    :param start_field: name of the field in the model in the queryset that has
        the start time for the record
    :param end_field: name of the field in the model in the queryset that has
        the end time for the record
    :return: a queryset with the overlapping records. (could be empty)
    """
    # Check for overlaps. Here are the cases we need to catch -
    #
    # An existing overlapping period might:
    # * start during this period,
    # * or end during this period,
    # * or completely encompass this period.
    # (The case of an existing period that starts and ends inside
    # the new period will be caught by both of the first two checks,
    # so we don't need to test it separately.)
    # Also recall that a period is half-open: it runs from exactly its start
    # time, up to but not including its end time.
    # First case: Check for an existing period starting during this period.
    # That would mean:
    # the record in the database's start_time >= this records start time
    # and
    # < this record's end time.
    # (It's okay for it to start exactly at this record's end time,
    # because then they don't overlap.)
    #
    # existing: S E
    # v
    # self: s e
    # query_start_during = Q(start_time__gte=start_time, start_time__lt=end_time)
    query_start_during = Q(
        **{
            '%s__gte' % start_field: start_time,
            '%s__lt' % start_field: end_time,
        }
    )
    # Second case: Check for an existing period that ends during this period.
    # That would mean:
    # the database's end_time > this record's start time
    # and
    # <= this record's end time
    #
    # existing: S E
    # v
    # self: s e
    # query_end_during = Q(end_time__gt=start_time, end_time__lte=end_time)
    query_end_during = Q(
        **{
            '%s__gt' % end_field: start_time,
            '%s__lte' % end_field: end_time,
        }
    )
    # Third case: Check for an existing period that completely encompasses
    # this period. That would mean:
    # the record in the database's start time is less than this record's start time,
    # and
    # its end time is greater than this records end time.
    #
    # existing: S E
    # v v
    # self: s e
    # query_encompass = Q(start_time__lte=start_time, end_time__gt=end_time)
    query_encompass = Q(
        **{
            '%s__lte' % start_field: start_time,
            '%s__gt' % end_field: end_time,
        }
    )
    # Any of the ways they can overlap:
    query_overlaps = query_start_during | query_end_during | query_encompass
    # Are there any?
    return queryset.filter(query_overlaps)
def basic_auth_view(auth_db, realm_name):
    """
    Given an auth dictionary and a realm name,
    returns a new wrapper function that takes a
    view function and returns a wrapped view function.

    This allows decorating a view like so:

       @basic_auth_view(MY_DB_DICT, "secret realm")
       def my_view(request):
           ...

    or

       url(r'...', basic_auth_view(auth_db, realm_name)(ViewClass.as_view()), ...)

    Uses `basic_auth`, see below.
    """
    def decorate(view_func):
        return basic_auth(view_func, auth_db, realm_name)
    return decorate
def basic_auth(f, auth_db, realm_name):
    """
    View function wrapper to apply http basic auth.

    Usage:
       def view(request, ...):
           ...
       view = basic_auth(view, auth_db, realm_name)
    or
       url(r'...', basic_auth(ViewClass.as_view(), auth_db, realm_name), ...),

    :param auth_db: Dictionary of user -> password for access to the view.
    :param realm_name: name of the auth realm to return on auth errors.

    Derived (via several steps) from
    https://djangosnippets.org/snippets/243/
    """
    @functools.wraps(f)
    def wrapper(request, *args, **kwargs):
        if 'HTTP_AUTHORIZATION' in request.META:
            auth = request.META['HTTP_AUTHORIZATION'].split()
            # BUGFIX: request.META values are str under WSGI, so the old
            # comparison against the bytes literal b'basic' could never
            # match and every request was rejected.
            if len(auth) == 2 and auth[0].lower() == 'basic':
                # BUGFIX: split on the first ':' only -- basic-auth
                # passwords may themselves contain colons (RFC 7617),
                # which previously raised an unhandled ValueError.
                username, passwd = base64.b64decode(auth[1]).decode().split(':', 1)
                ok = username in auth_db and passwd == auth_db[username]
                del passwd  # don't keep the password around longer than needed
                if ok:
                    return f(request, *args, **kwargs)
                logger.error('Bad user id/password %s/******** for view %s' %
                             (username, f.__name__))
                if len(auth_db) == 0:
                    logger.error('User database for this view not set up')
            else:
                logger.error('Unrecognized auth %s for view %s' %
                             (auth, f.__name__))
        # Missing or failed credentials: challenge the client.
        response = HttpResponse(status=UNAUTHORIZED)
        response['WWW-Authenticate'] = 'Basic realm="%s"' % realm_name
        return response
    return wrapper
def migrate_permission(apps, schema_editor, perm1, perm2):
    """Gives perm2 to all users and groups that currently have perm1.

    The permissions should be strings in the form "applabel.perm_model", e.g. "voting.read_ballot".
    This is especially useful for migrations.

    :param apps: migration-state app registry (as passed by RunPython)
    :param schema_editor: unused here, but part of the RunPython signature
    :param perm1: name of the existing permission whose holders are copied
    :param perm2: name of the permission granted to those holders
        (created if missing)
    """
    User = apps.get_model(settings.AUTH_USER_MODEL)
    Group = apps.get_model('auth', 'Group')
    Permission = apps.get_model('auth', 'Permission')
    ContentType = apps.get_model('contenttypes', 'ContentType')
    # create_if_needed=True ensures both Permission rows exist.
    permission_1 = get_permission_object_by_name(perm1, Permission, ContentType, True)
    permission_2 = get_permission_object_by_name(perm2, Permission, ContentType, True)
    for group in Group.objects.filter(permissions=permission_1):
        group.permissions.add(permission_2)
    for user in User.objects.filter(user_permissions=permission_1):
        user.user_permissions.add(permission_2)
def get_comma_delimiter(include_a_space=True):
    """Return the comma delimiter appropriate for the current language (Arabic or English).

    When include_a_space is True (the default), the delimiter includes a space to make the
    returned value easy to use with join() when constructing a list.
    """
    if get_language() == 'ar':
        delimiter = ARABIC_COMMA
    else:
        delimiter = ','
    return delimiter + ' ' if include_a_space else delimiter
def get_verbose_name(an_object, field_name, init_cap=True):
    """Given a model or model instance, return the verbose_name of the model's field.
    If init_cap is True (the default), the verbose_name will be returned with the first letter
    capitalized which makes the verbose_name look nicer in labels.
    If field_name doesn't refer to a model field, raises a FieldDoesNotExist error.
    """
    try:
        field = an_object._meta.get_field(field_name)
    except TypeError:
        # An unhashable field_name (e.g. {} or []) raises TypeError inside
        # get_field(); normalize that to FieldDoesNotExist for simplicity.
        raise FieldDoesNotExist("No field named {}".format(str(field_name)))
    if not init_cap:
        return field.verbose_name
    # Capitalize lazily so any translation happens at render time.
    return lazy(capfirst, str)(field.verbose_name)
def format_tristate(tristate):
    """Given a boolean or tristate, returns Yes, No, or Maybe as appropriate"""
    text = yesno(tristate)
    return text.capitalize()
def migrate_view_to_read(app_label, model, apps, schema_editor):
    """Migrate view_MODEL to read_MODEL, then delete the view permission object"""
    model = model.lower()
    view_perm = app_label + ".view_" + model   # myapp.view_mymodel
    read_perm = app_label + ".read_" + model   # myapp.read_mymodel
    migrate_permission(apps, schema_editor, view_perm, read_perm)
    # Drop the now-obsolete view_<model> permission object.
    Permission = apps.get_model('auth', 'Permission')
    Permission.objects.filter(content_type__app_label=app_label,
                              codename='view_' + model).delete()
def migrate_read_to_view(app_label, model, apps, schema_editor):
    """Migrate read_MODEL to view_MODEL"""
    model = model.lower()
    read_perm = app_label + ".read_" + model   # myapp.read_mymodel
    view_perm = app_label + ".view_" + model   # myapp.view_mymodel
    migrate_permission(apps, schema_editor, read_perm, view_perm)
def add_browse_to_read(app_label, model, apps):
    """Add the new browse_MODEL perm to anyone who has read_MODEL"""
    model = model.lower()
    # Use the configured user model, consistent with migrate_permission();
    # the previous hard-coded 'auth.User' missed projects that define a
    # custom AUTH_USER_MODEL.
    User = apps.get_model(settings.AUTH_USER_MODEL)
    Group = apps.get_model('auth', 'Group')
    Permission = apps.get_model('auth', 'Permission')
    ContentType = apps.get_model('contenttypes', 'ContentType')
    # myapp.read_mymodel
    read_name = app_label + '.read_' + model
    # myapp.browse_mymodel
    browse_name = app_label + '.browse_' + model
    perm_read = get_permission_object_by_name(read_name, Permission, ContentType, True)
    perm_browse = get_permission_object_by_name(browse_name, Permission, ContentType, True)
    # Everyone (group or user) who can read can now also browse.
    for group in Group.objects.filter(permissions=perm_read):
        group.permissions.add(perm_browse)
    for user in User.objects.filter(user_permissions=perm_read):
        user.user_permissions.add(perm_browse)
def migrate_bread_permissions_forward(app_label, model_names, apps, schema_editor):
    """For each app/model combo specified, for each user or group that has view permission,
    grant them read permission (creating it if necessary), then delete the view permission
    object. Then add browse to each user or group that has read.
    """
    for model_name in model_names:
        migrate_view_to_read(app_label, model_name, apps, schema_editor)
        add_browse_to_read(app_label, model_name, apps)
def migrate_bread_permissions_backward(app_label, model_names, apps, schema_editor):
    """For each app/model combo specified, give view permission to any user or group that
    has read or browse permission, then remove the read and browse permission objects."""
    # Resolve the historical Permission model. The original referenced a bare
    # `Permission` name that was never bound in this function (NameError).
    Permission = apps.get_model('auth', 'Permission')
    for model in model_names:
        read = '%s.read_%s' % (app_label, model)
        view = '%s.view_%s' % (app_label, model)
        browse = '%s.browse_%s' % (app_label, model)
        # Grant view to everyone holding read or browse.
        migrate_permission(apps, schema_editor, read, view)
        migrate_permission(apps, schema_editor, browse, view)
        # Delete the now-obsolete read/browse permission objects.
        Permission.objects.filter(content_type__app_label=app_label,
                                  # read_mymodel
                                  codename='read_' + model).delete()
        Permission.objects.filter(content_type__app_label=app_label,
                                  # browse_mymodel
                                  codename='browse_' + model).delete()
def should_hide_public_view(request):
    """
    Return True if settings.HIDE_PUBLIC_DASHBOARD is True and the user
    is not staff. (Anonymous users have is_staff == False, so they are
    hidden by the same check.)
    Call this from views that should not be public when HIDE_PUBLIC_DASHBOARD
    is True.
    """
    return settings.HIDE_PUBLIC_DASHBOARD and not request.user.is_staff
def should_see_staff_view(user):
    """
    Return True if:
    - user has `is_staff` set, or
    - user is a member of any Group.
    This allows users to see the staff view but not see the public dashboard.
    """
    if user.is_staff:
        return True
    return user.groups.exists()
|
import os
import pickle
import datasets
from dataclasses import dataclass
from typing import Optional
from torch.utils.data import IterableDataset
from datasets import IterableDatasetDict
from datasets.splits import Split, SplitDict, SplitGenerator
logger = datasets.utils.logging.get_logger(__name__)
class PickledDatasetIterator:
    """Iterator over examples stored as a sequence of pickled batches.

    The file is expected to contain pickled dicts, each mapping column
    name -> array-like batch (values must support .tolist(), e.g. numpy
    arrays or tensors). All batches must share the same keys. Examples
    are yielded one at a time as plain dicts of python lists.
    """

    def __init__(self, filepath):
        # Handle stays open for the lifetime of the iterator and is closed
        # once the file is exhausted (the original leaked it).
        self.file = open(filepath, 'rb')
        self.keys = None
        self._load_batch()

    def __iter__(self):
        # Makes the iterator usable directly in for-loops / iter().
        return self

    def _load_batch(self):
        """Load the next pickled batch, or raise StopIteration at EOF."""
        try:
            self.batch = pickle.load(self.file)
        except EOFError:
            # End of data: release the file handle before signalling the end.
            self.file.close()
            raise StopIteration
        keys = sorted(list(self.batch.keys()))
        if self.keys is None:
            self.keys = keys  # first batch defines the schema
        elif keys != self.keys:
            raise ValueError('keys differ between batches')
        self.batch_size = len(self.batch[keys[0]])
        # All columns of a batch must be equally long.
        assert all(len(self.batch[k]) == self.batch_size for k in self.keys)
        self.batch_offset = 0

    def __next__(self):
        if self.batch_offset >= self.batch_size:
            self._load_batch()
        d = {
            k: self.batch[k][self.batch_offset].tolist()
            for k in self.keys
        }
        # TODO add parameter controlling addition of labels
        # NOTE: assumes an 'input_ids' column exists when 'labels' is absent.
        if 'labels' not in d:
            d['labels'] = d['input_ids']
        self.batch_offset += 1
        return d
class PickledDataset(IterableDataset):
    """Iterable torch dataset backed by a file of pickled batches."""

    def __init__(self, filepath):
        self.filepath = filepath
        # Discovered lazily on first access to column_names.
        self._column_names = None

    def __iter__(self):
        # Fresh iterator per call, so the dataset can be iterated repeatedly.
        return PickledDatasetIterator(self.filepath)

    @property
    def column_names(self):
        """Column names, read from the first example and cached."""
        if self._column_names is None:
            first_example = next(iter(self))
            self._column_names = list(first_example.keys())
        return self._column_names
class PickledDatasetBuilder(datasets.DatasetBuilder):
    """DatasetBuilder that serves PickledDataset splits straight from disk.

    Unlike a normal builder, nothing is downloaded or cached: the three
    splits are assumed to already exist as train/dev/test .pickle files
    inside ``data_dir``.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # 'data_dir' is required; a KeyError here means the caller omitted it.
        # (Removed a leftover debug print of kwargs.)
        self.data_dir = kwargs['data_dir']
        # split name -> pickle path, filled in by _prepare_split().
        self.split_paths = {}

    def _info(self):
        return datasets.DatasetInfo(
            description='pickled dataset'
        )

    def download_and_prepare(self, dl_manager=None, **kwargs):
        # Mostly no-op: data assumed to be on disk and not cached.
        if dl_manager is not None:
            logger.warning('ignoring dl_manager')
        split_generators = self._split_generators(self.data_dir)
        split_dict = SplitDict(dataset_name=self.name)
        for split_generator in split_generators:
            split_dict.add(split_generator.split_info)
            self._prepare_split(split_generator, **kwargs)
        self.info.splits = split_dict

    def as_dataset(self, split: Optional[Split]=None, **kwargs):
        # By default, return all splits.
        if split is None:
            splits = self.info.splits
        else:
            splits = [split]
        # Renamed from `datasets` to avoid shadowing the imported module.
        split_datasets = {
            s: self._as_dataset(s) for s in splits
        }
        return IterableDatasetDict(split_datasets)

    def _as_dataset(self, split: Split):
        filepath = self.split_paths[split]
        return PickledDataset(filepath)

    def _split_generators(self, data_dir):
        # Fixed layout: train/dev/test pickles directly under data_dir.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "train.pickle"),
                    "split": "train",
                }
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "dev.pickle"),
                    "split": "dev",
                }
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "test.pickle"),
                    "split": "test"
                }
            )
        ]

    def _prepare_split(self, split_generator: SplitGenerator, **kwargs):
        # Mostly no-op: data assumed to be on disk and preprocessed.
        # Simply store the mapping from split name to path.
        filepath = split_generator.gen_kwargs['filepath']
        self.split_paths[split_generator.name] = filepath
|
#!/usr/bin/env python
# Copyright 2016-2021 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Example shows how to get partial and full properties for CPCs
and for LPARs of a CPC.
"""
import sys
import logging
import yaml
import requests.packages.urllib3
from datetime import datetime
import zhmcclient
# Suppress urllib3 warnings about unverified HTTPS connections to the HMC.
requests.packages.urllib3.disable_warnings()
# The single required argument is the path of the credentials YAML file.
if len(sys.argv) != 2:
    print("Usage: %s hmccreds.yaml" % sys.argv[0])
    sys.exit(2)
hmccreds_file = sys.argv[1]
# Load the credentials / example-configuration file.
with open(hmccreds_file, 'r') as fp:
    hmccreds = yaml.safe_load(fp)
examples = hmccreds.get("examples", None)
if examples is None:
    print("examples not found in credentials file %s" % \
          (hmccreds_file))
    sys.exit(1)
# Per-example configuration section for this script.
get_partial_and_full_properties = examples.get("get_partial_and_full_properties", None)
if get_partial_and_full_properties is None:
    print("get_partial_and_full_properties not found in credentials file %s" % \
          (hmccreds_file))
    sys.exit(1)
# Optional logging setup: 'loglevel' names a logging level (e.g. "debug").
loglevel = get_partial_and_full_properties.get("loglevel", None)
if loglevel is not None:
    level = getattr(logging, loglevel.upper(), None)
    if level is None:
        print("Invalid value for loglevel in credentials file %s: %s" % \
              (hmccreds_file, loglevel))
        sys.exit(1)
    logging.basicConfig(level=level)
hmc = get_partial_and_full_properties["hmc"]
cpcname = get_partial_and_full_properties["cpcname"]
# Credentials for the selected HMC are stored under its hostname key.
cred = hmccreds.get(hmc, None)
if cred is None:
    print("Credentials for HMC %s not found in credentials file %s" % \
          (hmc, hmccreds_file))
    sys.exit(1)
userid = cred['userid']
password = cred['password']
print(__doc__)
print("Using HMC %s with userid %s ..." % (hmc, userid))
# Open a session against the HMC and wrap it in a client object.
session = zhmcclient.Session(hmc, userid, password)
cl = zhmcclient.Client(session)
# Optionally collect API call timing statistics.
timestats = get_partial_and_full_properties.get("timestats", None)
if timestats:
    session.time_stats_keeper.enable()
# Compare listing CPCs with partial vs. full properties, timing each.
for full_properties in (False, True):
    print("Listing CPCs with full_properties=%s ..." % full_properties)
    start_dt = datetime.now()
    cpcs = cl.cpcs.list(full_properties)
    end_dt = datetime.now()
    duration = end_dt - start_dt
    print("Duration: %s" % duration)
    for cpc in cpcs:
        print("Number of properties of CPC %s: %s" %
              (cpc.name, len(cpc.properties)))
print("Finding CPC by name=%s ..." % cpcname)
try:
    cpc = cl.cpcs.find(name=cpcname)
except zhmcclient.NotFound:
    print("Could not find CPC %s on HMC %s" % (cpcname, hmc))
    sys.exit(1)
print("Found CPC %s at: %s" % (cpc.name, cpc.uri))
# Same comparison for the LPARs of the selected CPC.
for full_properties in (False, True):
    print("Listing LPARs on CPC %s with full_properties=%s ..." %
          (cpc.name, full_properties))
    start_dt = datetime.now()
    lpars = cpc.lpars.list(full_properties)
    end_dt = datetime.now()
    duration = end_dt - start_dt
    print("Duration: %s" % duration)
    for lpar in lpars:
        print("Number of properties of LPAR %s: %s" %
              (lpar.name, len(lpar.properties)))
print("Logging off ...")
session.logoff()
if timestats:
    print(session.time_stats_keeper)
print("Done.")
|
import paddle
def gelu_python(x):
    """Original python implementation of GELU: x * 0.5 * (1 + erf(x / sqrt(2))).

    The scalar sqrt(2) is computed with plain python arithmetic because
    paddle.sqrt expects a Tensor and fails on the float literal 2.0.
    """
    return x * 0.5 * (1.0 + paddle.erf(x / (2.0 ** 0.5)))
def gelu_new(x):
    """Tanh approximation of GELU, as used in Google BERT / GPT-2.

    The scalar sqrt(2/pi) is computed with plain python arithmetic because
    paddle.sqrt expects a Tensor and fails on a float constant.
    """
    return 0.5 * x * (1.0 + paddle.tanh(
        (2.0 / 3.141592653589793) ** 0.5 *
        (x + 0.044715 * paddle.pow(x, 3.0))))
def gelu_fast(x):
    """Fast tanh-based approximation of the GELU activation."""
    inner = x * 0.7978845608 * (1.0 + 0.044715 * x * x)
    return 0.5 * x * (1.0 + paddle.tanh(inner))
def quick_gelu(x):
    """Sigmoid-gated quick approximation of GELU."""
    gate = paddle.nn.functional.sigmoid(1.702 * x)
    return x * gate
def linear_act(x):
    """Identity (linear) activation; returns `x` unchanged."""
    return x
# Mapping from activation name to callable; resolved via get_activation().
# "swish" is an alias of silu (identical function).
ACT2FN = {
    "relu": paddle.nn.functional.relu,
    "silu": paddle.nn.functional.silu,
    "swish": paddle.nn.functional.silu,
    "gelu": paddle.nn.functional.gelu,
    "tanh": paddle.tanh,
    "gelu_python": gelu_python,
    "gelu_new": gelu_new,
    "gelu_fast": gelu_fast,
    "quick_gelu": quick_gelu,
    "mish": paddle.nn.functional.mish,
    "linear": linear_act,
    "sigmoid": paddle.nn.functional.sigmoid,
}
def get_activation(activation_string):
    """Look up an activation function by name in ACT2FN.

    Raises KeyError (listing the valid names) for unknown activations.
    """
    if activation_string not in ACT2FN:
        raise KeyError(
            f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}"
        )
    return ACT2FN[activation_string]
def apply_chunking_to_forward(forward_fn, chunk_size, chunk_dim,
                              *input_tensors):
    """Apply `forward_fn` to chunks of the inputs along `chunk_dim` and
    concatenate the results, saving memory for long sequences.

    Args:
        forward_fn: callable applied to each chunk.
        chunk_size: chunk length along `chunk_dim`; must divide that
            dimension evenly. A value <= 0 disables chunking.
        chunk_dim: axis along which inputs are split.
        *input_tensors: one or more tensors with identical size on `chunk_dim`.

    Returns:
        The same result as `forward_fn(*input_tensors)`.
    """
    assert len(
        input_tensors) > 0, f"{input_tensors} has to be a tuple/list of tensors"
    if chunk_size > 0:
        tensor_shape = input_tensors[0].shape[chunk_dim]
        for input_tensor in input_tensors:
            if input_tensor.shape[chunk_dim] != tensor_shape:
                raise ValueError(
                    f"All input tensors have to be of the same shape: {tensor_shape}, "
                    f"found shape {input_tensor.shape[chunk_dim]}")
        if input_tensors[0].shape[chunk_dim] % chunk_size != 0:
            raise ValueError(
                f"The dimension to be chunked {input_tensors[0].shape[chunk_dim]} has to be a multiple of the chunk "
                f"size {chunk_size}")
        num_chunks = input_tensors[0].shape[chunk_dim] // chunk_size
        # paddle's Tensor.chunk takes `axis=`, not torch's `dim=`.
        input_tensors_chunks = tuple(
            input_tensor.chunk(
                num_chunks, axis=chunk_dim) for input_tensor in input_tensors)
        output_chunks = tuple(
            forward_fn(*input_tensors_chunk)
            for input_tensors_chunk in zip(*input_tensors_chunks))
        return paddle.concat(output_chunks, axis=chunk_dim)
    return forward_fn(*input_tensors)
def get_extended_attention_mask(attention_mask, input_shape):
    """Broadcast a 2D/3D attention mask to 4D and convert it to additive form
    (0 where attended, -10000 where masked)."""
    rank = attention_mask.dim()
    if rank == 3:
        mask = attention_mask[:, None, :, :]
    elif rank == 2:
        mask = attention_mask[:, None, None, :]
    else:
        raise ValueError(
            f"Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})"
        )
    return (1.0 - mask) * -10000.0
def trans_matrix(matrix):
    """Swap the last two axes of `matrix`, leaving any leading axes unchanged."""
    rank = matrix.ndim
    perm = list(range(rank - 2)) + [rank - 1, rank - 2]
    return matrix.transpose(perm)
def update_metrics(logits, labels, metrics):
    """Feed argmax predictions and the labels to each metric's update()."""
    predictions = logits.argmax(axis=1)
    for metric in metrics:
        metric.update(predictions, labels)
def get_f1_score(precision, recall):
    """Return the F1 score (harmonic mean) of two accumulating metrics.

    Args:
        precision, recall: metric objects exposing accumulate() -> float.

    Returns:
        float: 2*p*r/(p+r), or 0.0 when both are zero (the original raised
        ZeroDivisionError in that case).
    """
    p, r = precision.accumulate(), recall.accumulate()
    if p + r == 0:
        return 0.0
    return 2 * p * r / (p + r)
|
"""
Combine .csv files into a single .csv file with all fields
2013-Nov-25 by Robert Woodhead, trebor@animeigo.com
2016-Apr by merrikat
Usage:
python combine-csv.py {csv folder} {output file} [{optional count field name}]
Where:
{csv folder} is a folder containing .csv files.
{output file} is the destination file.
{optional count field name} if present, is added to the list of fields; only unique lines
are output, with a count of how many occurrences.
IMPORTANT: If NOT PRESENT, then an additional field with the source file name of the line is appended.
If you are outputting field counts, the source file name line is not emitted because the same line
could be present in multiple files.
Output:
Reads all the .csv files in the csv folder. Compiles a list of all the
header fields that are present. Rereads the files, and outputs a single
.csv containing all the records, shuffling fields to match the global
header fields. Combines contents of duplicate fields using a | delimiter.
Adds either a file name field or a count field (with definable name) to each line.
"""
import sys
import os
import glob
import csv
import copy
#
# Globals
#
error_count = 0 # number of errors encountered during processing
#
# Error reporting
#
def add_error(desc):
    """Report an error on stderr and bump the global error counter."""
    global error_count
    error_count = error_count + 1
    sys.stderr.write(desc + '\n')
def optional(caption, value):
    """Return caption+value with a trailing newline, or '' when value is empty."""
    return '' if value == '' else caption + value + '\n'
def cleanup(str):
    # Collapse runs of multiple spaces into a single space.
    # As written before, the loop searched for and replaced a SINGLE space
    # with a single space -- a no-op that never terminates on any string
    # containing a space. Search/replace a double space instead.
    while str.rfind('  ') != -1:
        str = str.replace('  ', ' ')
    return str
#
# process command line arguments and execute
#
if __name__ == '__main__':
if not (3 <= len(sys.argv) <= 4):
print 'usage: python combine-csv.py {thread folder} {output file} [{optional count field name}]'
sys.exit(1)
hdrList = []
hdrLen = []
doCount = (len(sys.argv) == 4)
counts = {}
# get the headers
for filename in glob.iglob(os.path.join(sys.argv[1],'*.csv')):
with open(filename,'rb') as f:
csvIn = csv.reader(f)
hdr = csvIn.next()
hdr[0] = hdr[0].replace('\xef\xbb\xbf','')
hdrList.append((len(hdr),hdr))
# construct the list of unique headers
hdrList.sort(reverse=True)
hdrs = []
template = []
for t in hdrList:
for f in t[1]:
if not (f in hdrs):
hdrs.append(f)
template.append('')
if doCount:
hdrs.append(sys.argv[3])
else:
hdrs.append('Source File')
# output the combined file
with open(sys.argv[2],'wb') as of:
csvOut = csv.writer(of)
csvOut.writerow(hdrs)
for filename in glob.iglob(os.path.join(sys.argv[1],'*.csv')):
with open(filename,'rb') as f:
csvIn = csv.reader(f)
hdr = csvIn.next()
hdr[0] = hdr[0].replace('\xef\xbb\xbf','')
for row in csvIn:
newRow = list(template)
for i,v in enumerate(row):
j = hdrs.index(hdr[i])
if newRow[j] == '':
newRow[j] = v
else:
newRow[j] = newRow[j] + '|' + v
if doCount:
newRow = tuple(newRow)
if newRow in counts:
counts[newRow] += 1
else:
counts[newRow] = 1
else:
newRow.append(os.path.splitext(os.path.basename(filename))[0].title())
csvOut.writerow(newRow)
# if doing counts, output newRow
print counts
for k,v in counts.iteritems():
k = list(k)
k.append(v)
csvOut.writerow(k)
|
'''
Created on Mar 7, 2011
@author: johnsalvatier
'''
from __future__ import division
import numpy as np
import scipy.linalg
import theano.tensor as tt
import theano
from theano.scalar import UnaryScalarOp, upgrade_to_float
from .special import gammaln
from pymc3.theanof import floatX
from six.moves import xrange
from functools import partial
f = floatX  # shorthand: cast values to theano's configured float dtype
c = - .5 * np.log(2. * np.pi)  # log-normalization constant of the Gaussian pdf
def bound(logp, *conditions, **kwargs):
    """
    Bounds a log probability density with several conditions.
    Parameters
    ----------
    logp : float
    *conditions : booleans
    broadcast_conditions : bool (optional, default=True)
        If True, broadcasts logp to match the largest shape of the conditions.
        This is used e.g. in DiscreteUniform where logp is a scalar constant and the shape
        is specified via the conditions.
        If False, will return the same shape as logp.
        This is used e.g. in Multinomial where broadcasting can lead to differences in the logp.
    Returns
    -------
    logp with elements set to -inf where any condition is False
    """
    if kwargs.get('broadcast_conditions', True):
        combined = alltrue_elemwise(conditions)
    else:
        combined = alltrue_scalar(conditions)
    return tt.switch(combined, logp, -np.inf)
def alltrue_elemwise(vals):
    """Elementwise AND of the conditions, expressed as a 0/1 product."""
    product = 1
    for cond in vals:
        product = product * (1 * cond)
    return product
def alltrue_scalar(vals):
    """Reduce each condition with tt.all, then AND the scalar results."""
    reduced = [tt.all(1 * val) for val in vals]
    return tt.all(reduced)
def logpow(x, m):
    """
    Calculates log(x**m) since m*log(x) will fail when m, x = 0.
    """
    # return m * log(x)
    # When x == 0 and m != 0 (i.e. m + x != 0), log(x**m) = -inf;
    # otherwise fall through to m * log(x).
    return tt.switch(tt.eq(x, 0) & ~tt.eq(m + x, 0), -np.inf, m * tt.log(x))
def factln(n):
    """Return log(n!) via the log-gamma function."""
    return gammaln(n + 1)
def binomln(n, k):
    """Return the log of the binomial coefficient C(n, k)."""
    return factln(n) - factln(k) - factln(n - k)
def betaln(x, y):
    """Return the log of the Beta function B(x, y)."""
    return gammaln(x) + gammaln(y) - gammaln(x + y)
def std_cdf(x):
    """
    Calculates the standard normal cumulative distribution function.
    """
    # Phi(x) = (1 + erf(x / sqrt(2))) / 2
    return .5 + .5 * tt.erf(x / tt.sqrt(2.))
def sd2rho(sd):
    """
    `sd -> rho` theano converter
    :math:`mu + sd*e = mu + log(1+exp(rho))*e`"""
    # Inverse of the softplus used in rho2sd.
    return tt.log(tt.exp(tt.abs_(sd)) - 1.)
def rho2sd(rho):
    """
    `rho -> sd` theano converter
    :math:`mu + sd*e = mu + log(1+exp(rho))*e`"""
    # softplus(rho) = log(1 + exp(rho)), guaranteeing sd > 0.
    return tt.nnet.softplus(rho)
def log_normal(x, mean, **kwargs):
    """
    Calculate logarithm of normal distribution at point `x`
    with given `mean` and `std`
    Parameters
    ----------
    x : Tensor
        point of evaluation
    mean : Tensor
        mean of normal distribution
    kwargs : one of parameters `{sd, tau, w, rho}`
    Notes
    -----
    There are four variants for density parametrization.
    They are:
    1) standard deviation - `std`
    2) `w`, logarithm of `std` :math:`w = log(std)`
    3) `rho` that follows this equation :math:`rho = log(exp(std) - 1)`
    4) `tau` that follows this equation :math:`tau = std^{-1}`
    ----
    """
    sd = kwargs.get('sd')
    w = kwargs.get('w')
    rho = kwargs.get('rho')
    tau = kwargs.get('tau')
    eps = kwargs.get('eps', 0.)
    # Exactly one of the four parametrizations must be supplied.
    provided = [p is not None for p in (sd, w, rho, tau)].count(True)
    if provided > 1:
        raise ValueError('more than one required kwarg is passed')
    if provided == 0:
        raise ValueError('none of required kwarg is passed')
    # Convert whichever parametrization was given into a standard deviation.
    if sd is not None:
        std = sd
    elif w is not None:
        std = tt.exp(w)
    elif rho is not None:
        std = rho2sd(rho)
    else:
        std = tau**(-1)
    std += f(eps)
    return f(c) - tt.log(tt.abs_(std)) - (x - mean) ** 2 / (2. * std ** 2)
def MvNormalLogp():
    """Compute the log pdf of a multivariate normal distribution.
    This should be used in MvNormal.logp once Theano#5908 is released.
    Parameters
    ----------
    cov : tt.matrix
        The covariance matrix.
    delta : tt.matrix
        Array of deviations from the mean.
    """
    cov = tt.matrix('cov')
    cov.tag.test_value = floatX(np.eye(3))
    delta = tt.matrix('delta')
    delta.tag.test_value = floatX(np.zeros((2, 3)))
    solve_lower = tt.slinalg.Solve(A_structure='lower_triangular')
    solve_upper = tt.slinalg.Solve(A_structure='upper_triangular')
    cholesky = Cholesky(nofail=True, lower=True)
    n, k = delta.shape
    n, k = f(n), f(k)
    chol_cov = cholesky(cov)
    diag = tt.nlinalg.diag(chol_cov)
    # A positive Cholesky diagonal means cov was positive definite; otherwise
    # substitute a dummy factor here and return -inf below.
    ok = tt.all(diag > 0)
    chol_cov = tt.switch(ok, chol_cov, tt.fill(chol_cov, 1))
    # Whitened deviations: delta * L^{-T}.
    delta_trans = solve_lower(chol_cov, delta.T).T
    result = n * k * tt.log(f(2) * np.pi)
    result += f(2) * n * tt.sum(tt.log(diag))
    result += (delta_trans ** f(2)).sum()
    result = f(-.5) * result
    logp = tt.switch(ok, result, -np.inf)

    def dlogp(inputs, gradients):
        # Custom gradient of logp with respect to (cov, delta).
        g_logp, = gradients
        cov, delta = inputs
        g_logp.tag.test_value = floatX(1.)
        n, k = delta.shape
        chol_cov = cholesky(cov)
        diag = tt.nlinalg.diag(chol_cov)
        ok = tt.all(diag > 0)
        chol_cov = tt.switch(ok, chol_cov, tt.fill(chol_cov, 1))
        delta_trans = solve_lower(chol_cov, delta.T).T
        inner = n * tt.eye(k) - tt.dot(delta_trans.T, delta_trans)
        g_cov = solve_upper(chol_cov.T, inner)
        g_cov = solve_upper(chol_cov.T, g_cov.T)
        tau_delta = solve_upper(chol_cov.T, delta_trans.T)
        g_delta = tau_delta.T
        # Propagate nan where the covariance was not positive definite.
        g_cov = tt.switch(ok, g_cov, -np.nan)
        g_delta = tt.switch(ok, g_delta, -np.nan)
        return [-0.5 * g_cov * g_logp, -g_delta * g_logp]

    return theano.OpFromGraph(
        [cov, delta], [logp], grad_overrides=dlogp, inline=True)
class Cholesky(theano.Op):
    """
    Return a triangular matrix square root of positive semi-definite `x`.
    This is a copy of the cholesky op in theano, that doesn't throw an
    error if the matrix is not positive definite, but instead returns
    nan.
    This has been merged upstream and we should switch to that
    version after the next theano release.
    L = cholesky(X, lower=True) implies dot(L, L.T) == X.
    """
    __props__ = ('lower', 'destructive', 'nofail')

    def __init__(self, lower=True, nofail=False):
        # lower: return the lower-triangular factor (else upper).
        # nofail: on decomposition failure, return a matrix whose [0, 0]
        # entry is nan instead of raising.
        self.lower = lower
        self.destructive = False
        self.nofail = nofail

    def make_node(self, x):
        x = tt.as_tensor_variable(x)
        if x.ndim != 2:
            raise ValueError('Matrix must me two dimensional.')
        return tt.Apply(self, [x], [x.type()])

    def perform(self, node, inputs, outputs):
        # Numeric implementation: delegate to scipy and optionally mask failure.
        x = inputs[0]
        z = outputs[0]
        try:
            z[0] = scipy.linalg.cholesky(x, lower=self.lower).astype(x.dtype)
        except (ValueError, scipy.linalg.LinAlgError):
            if self.nofail:
                # Signal failure through a nan marker rather than raising.
                z[0] = np.eye(x.shape[-1])
                z[0][0, 0] = np.nan
            else:
                raise

    def grad(self, inputs, gradients):
        """
        Cholesky decomposition reverse-mode gradient update.
        Symbolic expression for reverse-mode Cholesky gradient taken from [0]_
        References
        ----------
        .. [0] I. Murray, "Differentiation of the Cholesky decomposition",
           http://arxiv.org/abs/1602.07527
        """
        x = inputs[0]
        dz = gradients[0]
        chol_x = self(x)
        # Guard against failed decompositions (nan marker / non-positive diag).
        ok = tt.all(tt.nlinalg.diag(chol_x) > 0)
        chol_x = tt.switch(ok, chol_x, tt.fill_diagonal(chol_x, 1))
        dz = tt.switch(ok, dz, floatX(1))

        # deal with upper triangular by converting to lower triangular
        if not self.lower:
            chol_x = chol_x.T
            dz = dz.T

        def tril_and_halve_diagonal(mtx):
            """Extracts lower triangle of square matrix and halves diagonal."""
            return tt.tril(mtx) - tt.diag(tt.diagonal(mtx) / 2.)

        def conjugate_solve_triangular(outer, inner):
            """Computes L^{-T} P L^{-1} for lower-triangular L."""
            solve = tt.slinalg.Solve(A_structure="upper_triangular")
            return solve(outer.T, solve(outer.T, inner.T).T)

        s = conjugate_solve_triangular(
            chol_x, tril_and_halve_diagonal(chol_x.T.dot(dz)))

        if self.lower:
            grad = tt.tril(s + s.T) - tt.diag(tt.diagonal(s))
        else:
            grad = tt.triu(s + s.T) - tt.diag(tt.diagonal(s))
        return [tt.switch(ok, grad, floatX(np.nan))]
class SplineWrapper(theano.Op):
    """
    Creates a theano operation from scipy.interpolate.UnivariateSpline
    """
    __props__ = ('spline',)

    def __init__(self, spline):
        # spline: a scipy.interpolate.UnivariateSpline-like callable with a
        # derivative() method.
        self.spline = spline

    def make_node(self, x):
        x = tt.as_tensor_variable(x)
        return tt.Apply(self, [x], [x.type()])

    @property
    def grad_op(self):
        """Lazily built SplineWrapper around the spline's derivative.

        Raises NotImplementedError (also on later accesses, via the cached
        None) when the spline has order 0 and cannot be differentiated.
        """
        if not hasattr(self, '_grad_op'):
            try:
                self._grad_op = SplineWrapper(self.spline.derivative())
            except ValueError:
                self._grad_op = None

        if self._grad_op is None:
            raise NotImplementedError('Spline of order 0 is not differentiable')
        return self._grad_op

    def perform(self, node, inputs, output_storage):
        # Numeric evaluation: just call the scipy spline.
        x, = inputs
        output_storage[0][0] = np.asarray(self.spline(x))

    def grad(self, inputs, grads):
        # Chain rule through the derivative spline.
        x, = inputs
        x_grad, = grads

        return [x_grad * self.grad_op(x)]
# Custom Eigh, EighGrad, and eigh are required until
# https://github.com/Theano/Theano/pull/6557 is handled, since lambda's
# cannot be used with pickling.
class Eigh(tt.nlinalg.Eig):
    """
    Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix.
    This is a copy of Eigh from theano that calls an EighGrad which uses
    partial instead of lambda. Once this has been merged with theano this
    should be removed.
    """
    _numop = staticmethod(np.linalg.eigh)
    __props__ = ('UPLO',)

    def __init__(self, UPLO='L'):
        # UPLO selects whether the lower ('L') or upper ('U') triangle is used.
        assert UPLO in ['L', 'U']
        self.UPLO = UPLO

    def make_node(self, x):
        x = tt.as_tensor_variable(x)
        assert x.ndim == 2
        # Numpy's linalg.eigh may return either double or single
        # precision eigenvalues depending on installed version of
        # LAPACK. Rather than trying to reproduce the (rather
        # involved) logic, we just probe linalg.eigh with a trivial
        # input.
        w_dtype = self._numop([[np.dtype(x.dtype).type()]])[0].dtype.name
        w = theano.tensor.vector(dtype=w_dtype)
        v = theano.tensor.matrix(dtype=x.dtype)
        return theano.gof.Apply(self, [x], [w, v])

    def perform(self, node, inputs, outputs):
        # Numeric evaluation via numpy's eigh.
        (x,) = inputs
        (w, v) = outputs
        w[0], v[0] = self._numop(x, self.UPLO)

    def grad(self, inputs, g_outputs):
        r"""The gradient function should return
        .. math:: \sum_n\left(W_n\frac{\partial\,w_n}
                  {\partial a_{ij}} +
                  \sum_k V_{nk}\frac{\partial\,v_{nk}}
                  {\partial a_{ij}}\right),
        where [:math:`W`, :math:`V`] corresponds to ``g_outputs``,
        :math:`a` to ``inputs``, and :math:`(w, v)=\mbox{eig}(a)`.
        Analytic formulae for eigensystem gradients are well-known in
        perturbation theory:
        .. math:: \frac{\partial\,w_n}
                  {\partial a_{ij}} = v_{in}\,v_{jn}
        .. math:: \frac{\partial\,v_{kn}}
                  {\partial a_{ij}} =
                  \sum_{m\ne n}\frac{v_{km}v_{jn}}{w_n-w_m}
        """
        x, = inputs
        w, v = self(x)
        # Replace gradients wrt disconnected variables with
        # zeros. This is a work-around for issue #1063.
        gw, gv = tt.nlinalg._zero_disconnected([w, v], g_outputs)
        return [EighGrad(self.UPLO)(x, w, v, gw, gv)]
class EighGrad(theano.Op):
    """
    Gradient of an eigensystem of a Hermitian matrix.
    This is a copy of EighGrad from theano that uses partial instead of lambda.
    Once this has been merged with theano this should be removed.
    """
    __props__ = ('UPLO',)

    def __init__(self, UPLO='L'):
        assert UPLO in ['L', 'U']
        self.UPLO = UPLO
        # tri0 extracts the triangle eigh() actually reads; tri1 extracts the
        # strictly-opposite triangle (whose variation is mirrored, see perform).
        if UPLO == 'L':
            self.tri0 = np.tril
            self.tri1 = partial(np.triu, k=1)
        else:
            self.tri0 = np.triu
            self.tri1 = partial(np.tril, k=-1)

    def make_node(self, x, w, v, gw, gv):
        # x: input matrix; (w, v): its eigensystem; (gw, gv): output gradients.
        x, w, v, gw, gv = map(tt.as_tensor_variable, (x, w, v, gw, gv))
        assert x.ndim == 2
        assert w.ndim == 1
        assert v.ndim == 2
        assert gw.ndim == 1
        assert gv.ndim == 2
        out_dtype = theano.scalar.upcast(x.dtype, w.dtype, v.dtype,
                                         gw.dtype, gv.dtype)
        out = theano.tensor.matrix(dtype=out_dtype)
        return theano.gof.Apply(self, [x, w, v, gw, gv], [out])

    def perform(self, node, inputs, outputs):
        """
        Implements the "reverse-mode" gradient for the eigensystem of
        a square matrix.
        """
        x, w, v, W, V = inputs
        N = x.shape[0]
        outer = np.outer

        def G(n):
            # Eigenvector-gradient contribution for eigenvalue n
            # (perturbation-theory sum over m != n).
            return sum(v[:, m] * V.T[n].dot(v[:, m]) / (w[n] - w[m])
                       for m in xrange(N) if m != n)

        g = sum(outer(v[:, n], v[:, n] * W[n] + G(n))
                for n in xrange(N))

        # Numpy's eigh(a, 'L') (eigh(a, 'U')) is a function of tril(a)
        # (triu(a)) only.  This means that partial derivative of
        # eigh(a, 'L') (eigh(a, 'U')) with respect to a[i,j] is zero
        # for i < j (i > j).  At the same time, non-zero components of
        # the gradient must account for the fact that variation of the
        # opposite triangle contributes to variation of two elements
        # of Hermitian (symmetric) matrix. The following line
        # implements the necessary logic.
        out = self.tri0(g) + self.tri1(g).T

        # Make sure we return the right dtype even if NumPy performed
        # upcasting in self.tri0.
        outputs[0][0] = np.asarray(out, dtype=node.outputs[0].dtype)

    def infer_shape(self, node, shapes):
        # Gradient has the shape of the input matrix.
        return [shapes[0]]
def eigh(a, UPLO='L'):
    """A copy, remove with Eigh and EighGrad when possible"""
    return Eigh(UPLO)(a)
class I0e(UnaryScalarOp):
    """
    Modified Bessel function of the first kind of order 0, exponentially scaled.
    """
    # (numpy-accessible function name, arity, number of outputs) used by theano.
    nfunc_spec = ('scipy.special.i0e', 1, 1)

    def impl(self, x):
        # Python-side fallback implementation.
        # NOTE(review): only `scipy.linalg` is imported at module level;
        # verify that `scipy.special` is available when impl() runs.
        return scipy.special.i0e(x)
# Module-level scalar op instance, upcasting integer inputs to float.
i0e = I0e(upgrade_to_float, name='i0e')
|
# -*- coding: utf-8 -*-
"""
Created on July 2017
@author: JulienWuthrich
"""
import logging
class LogFile(object):
    """Small convenience wrapper around `logging` that writes to a file and,
    optionally, to the console.

    Parameters
    ----------
    logfile : str
        Path of the log file; also used as the logger name, so two LogFile
        instances on the same path share one logger (and accumulate handlers).
    level : int
        Logging level (e.g. logging.INFO) applied to the logger and handlers.
    show : bool
        When True, also echo records to the console via a StreamHandler.
    fmt : str
        logging.Formatter format string.
    """

    def __init__(self, logfile, level, show=False, fmt="%(message)s"):
        self.logfile = logfile
        self.level = level
        self.fmt = fmt
        self.logger = logging.getLogger(logfile)
        self.logger.setLevel(level)
        self.hfile()
        if show:
            self.hstream()

    def hfile(self):
        """Attach a UTF-8 file handler writing to self.logfile."""
        hdlr = logging.FileHandler(self.logfile, encoding="utf-8")
        hdlr.setLevel(self.level)
        hdlr.setFormatter(logging.Formatter(self.fmt))
        self.logger.addHandler(hdlr)

    def hstream(self):
        """Attach a console (stream) handler."""
        hdlr = logging.StreamHandler()
        hdlr.setLevel(self.level)
        hdlr.setFormatter(logging.Formatter(self.fmt))
        self.logger.addHandler(hdlr)

    def log(self, level, msg):
        """Emit msg at the given level."""
        self.logger.log(level, msg)

    def kill(self):
        """Detach and close every handler.

        Iterates over a copy of the handler list: the previous version
        removed items from the list it was iterating, which skipped every
        other handler. Handlers are also closed to release file descriptors.
        """
        for hdlr in list(self.logger.handlers):
            self.logger.removeHandler(hdlr)
            hdlr.close()
|
import os
import csv
import random
from buses.Demand import Demand
from traffic_types import PEAK
path = os.environ['TS_SIMULATION']
def generate(traffic_type, city):
    """Dispatch trip generation based on the requested traffic type."""
    if traffic_type == PEAK:
        generate_peak_hour_traffic(city)
def generate_peak_hour_traffic(city):
    """Generate one hour (3600 steps) of peak-traffic bus trips for `city`.

    Appends to the trips XML file when it already exists, otherwise creates it.
    """
    random.seed(42)  # reproducible trip pattern
    base = os.environ['TS_SIMULATION']
    filepath = base + "/input/" + city + "/buses-" + PEAK + ".trips.xml"
    mode = 'a' if os.path.exists(filepath) else 'w'
    with open(filepath, mode) as routes:
        print_header(routes)
        print_peak_trips(routes, 3600, read_demands(city))
        print_footer(routes)
def print_peak_trips(routes, number_of_steps, demands):
    """Write one <trip> line per spawned bus.

    For each simulation step and demand entry, a trip is emitted with
    probability demand_per_second; vehicle ids are numbered consecutively.
    """
    vehicle_id = 0
    for step in range(number_of_steps):
        for demand in demands:
            if random.uniform(0, 1) < demand.demand_per_second:
                print('\t<trip id="bus-%i" type="bus_bus" depart="%i" departLane="best" from="%s" to="%s"/>'
                      % (vehicle_id, step, demand.origin, demand.destination), file=routes)
                vehicle_id += 1
def read_demands(city):
    """Read the per-route bus demands for `city` from its buses.csv file.

    The file is ';'-delimited with a header row:
    route;origin;destination;demand_per_second.
    """
    demand_path = path + '/buses/input/' + city + '/buses.csv'
    demands = []
    with open(demand_path, 'r') as file:
        reader = csv.reader(file, delimiter=';')
        next(reader, None)  # skip the header row
        for row in reader:
            demands.append(
                Demand(str(row[0]), str(row[1]), str(row[2]), float(row[3])))
    return demands
def print_header(routes):
    """Write the XML preamble (routes root plus the bus vehicle type)."""
    print('<routes>\n\t<vType id="bus_bus" vClass="bus"/>', file=routes)
def print_footer(routes):
    """Write the closing </routes> tag."""
    routes.write("</routes>\n")
|
#!/usr/bin/env python
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train a model."""
import argparse
import logging
import numpy as np
import os
import tensorflow as tf
import time
from loss import compute_loss
from net import unet
from data import data_pipeline as dp
from metrics import MultiScaleSSIM
from metrics import PSNR
# Module-wide logging: single console format including PID and source line;
# this trainer's own logger runs at INFO level.
logging.basicConfig(format="[%(process)d] %(levelname)s %(filename)s:%(lineno)s | %(message)s")
log = logging.getLogger("train")
log.setLevel(logging.INFO)
def log_hook(sess, log_fetches):
  """Message display at every log step.

  Runs `log_fetches` through the session once and prints the scalar
  training metrics on a single INFO line.
  """
  fetched = sess.run(log_fetches)
  log.info('Step {} | loss = {:.4f}| loss_content = {:.4f} | loss_texture = {:.4f} | '
           'loss_color = {:.4f} | loss_tv = {:.6f} |loss_Mssim = {:.4f} |discim_accuracy = {:.4f} | psnr = {:.2f} dB|ssim = {:.4f}'.format(
               fetched['step'], fetched['loss'], fetched['loss_content'],
               fetched['loss_texture'], fetched['loss_color'], fetched['loss_tv'],
               fetched['loss_Mssim'], fetched['discim_accuracy'], fetched['psnr'],
               fetched['loss_ssim']))
def main(args, data_params):
  """Build the GAN-style enhancement training graph and run the train loop.

  Args:
    args: parsed command-line namespace (see the argparse setup below).
    data_params: dict of data-pipeline options forwarded to the pipeline class.
  """
  procname = os.path.basename(args.checkpoint_dir)  # NOTE(review): unused
  log.info('Preparing summary and checkpoint directory {}'.format(
      args.checkpoint_dir))
  if not os.path.exists(args.checkpoint_dir):
    os.makedirs(args.checkpoint_dir)
  tf.set_random_seed(1234)  # Make experiments repeatable
  # Select an architecture
  # Add model parameters to the graph (so they are saved to disk at checkpoint)
  # --- Train/Test datasets ---------------------------------------------------
  data_pipe = getattr(dp, args.data_pipeline)
  with tf.variable_scope('train_data'):
    train_data_pipeline = data_pipe(
        args.data_dir,
        shuffle=True,
        batch_size=args.batch_size, nthreads=args.data_threads,
        fliplr=args.fliplr, flipud=args.flipud, rotate=args.rotate,
        random_crop=args.random_crop, params=data_params,
        output_resolution=args.output_resolution,scale=args.scale)
    train_samples = train_data_pipeline.samples
  # Evaluation pipeline: no augmentation, same batch size as training.
  if args.eval_data_dir is not None:
    with tf.variable_scope('eval_data'):
      eval_data_pipeline = data_pipe(
          args.eval_data_dir,
          shuffle=True,
          batch_size=args.batch_size, nthreads=args.data_threads,
          fliplr=False, flipud=False, rotate=False,
          random_crop=False, params=data_params,
          output_resolution=args.output_resolution,scale=args.scale)
      eval_samples = eval_data_pipeline.samples
  # ---------------------------------------------------------------------------
  # Per-sample 0/1 swap mask, fixed at graph-construction time and fed to the
  # loss (presumably to randomize real/fake pairing — confirm in compute_loss).
  swaps = np.reshape(np.random.randint(0, 2, args.batch_size), [args.batch_size, 1])
  swaps = tf.convert_to_tensor(swaps)
  swaps = tf.cast(swaps, tf.float32)
  # Training graph
  with tf.variable_scope('inference'):
    prediction = unet(train_samples['image_input'])
  loss,loss_content,loss_texture,loss_color,loss_Mssim,loss_tv,discim_accuracy =\
    compute_loss.total_loss(train_samples['image_output'], prediction, swaps, args.batch_size)
  psnr = PSNR(train_samples['image_output'], prediction)
  loss_ssim = MultiScaleSSIM(train_samples['image_output'],prediction)
  # Evaluation graph (shares the 'inference' variables via reuse=True)
  if args.eval_data_dir is not None:
    with tf.name_scope('eval'):
      with tf.variable_scope('inference', reuse=True):
        eval_prediction = unet(eval_samples['image_input'])
      eval_psnr = PSNR(eval_samples['image_output'], eval_prediction)
      eval_ssim = MultiScaleSSIM(eval_samples['image_output'], eval_prediction)
  # Optimizer: generator and discriminator variables are optimized separately.
  model_vars1 = [v for v in tf.global_variables() if v.name.startswith("inference/generator")]
  discriminator_vars1 = [v for v in tf.global_variables() if v.name.startswith("inference/l2_loss/discriminator")]
  global_step = tf.contrib.framework.get_or_create_global_step()
  with tf.name_scope('optimizer'):
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    updates = tf.group(*update_ops, name='update_ops')
    log.info("Adding {} update ops".format(len(update_ops)))
    reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    if reg_losses and args.weight_decay is not None and args.weight_decay > 0:
      print("Regularization losses:")
      for rl in reg_losses:
        print(" ", rl.name)
      opt_loss = loss + args.weight_decay*sum(reg_losses)
    else:
      print("No regularization.")
      opt_loss = loss
    with tf.control_dependencies([updates]):
      opt = tf.train.AdamOptimizer(args.learning_rate)
      # NOTE(review): both minimize ops pass global_step, so the step counter
      # advances twice per outer loop iteration — confirm this is intended.
      minimize = opt.minimize(opt_loss, name='optimizer', global_step=global_step,var_list=model_vars1)
      # Discriminator maximizes the texture (adversarial) loss.
      minimize_discrim = opt.minimize(-loss_texture, name='discriminator', global_step=global_step,var_list=discriminator_vars1)
  # Average loss and psnr for display
  with tf.name_scope("moving_averages"):
    ema = tf.train.ExponentialMovingAverage(decay=0.99)
    update_ma = ema.apply([loss,loss_content,loss_texture,loss_color,loss_Mssim,loss_tv,discim_accuracy,psnr,loss_ssim])
    loss = ema.average(loss)
    loss_content=ema.average(loss_content)
    loss_texture=ema.average(loss_texture)
    loss_color=ema.average(loss_color)
    loss_Mssim = ema.average(loss_Mssim)
    loss_tv=ema.average(loss_tv)
    discim_accuracy = ema.average(discim_accuracy)
    psnr = ema.average(psnr)
    loss_ssim = ema.average(loss_ssim)
  # Training stepper operation
  train_op = tf.group(minimize,update_ma)
  train_discrim_op = tf.group(minimize_discrim,update_ma)
  # Scalar summaries written to TensorBoard
  summaries = [
    tf.summary.scalar('loss', loss),
    tf.summary.scalar('loss_content',loss_content),
    tf.summary.scalar('loss_color',loss_color),
    tf.summary.scalar('loss_texture',loss_texture),
    tf.summary.scalar('loss_ssim', loss_Mssim),
    tf.summary.scalar('loss_tv', loss_tv),
    tf.summary.scalar('discim_accuracy',discim_accuracy),
    tf.summary.scalar('psnr', psnr),
    tf.summary.scalar('ssim', loss_ssim),
    tf.summary.scalar('learning_rate', args.learning_rate),
    tf.summary.scalar('batch_size', args.batch_size),
  ]
  log_fetches = {
      "loss_content":loss_content,
      "loss_texture":loss_texture,
      "loss_color":loss_color,
      "loss_Mssim": loss_Mssim,
      "loss_tv":loss_tv,
      "discim_accuracy":discim_accuracy,
      "step": global_step,
      "loss": loss,
      "psnr": psnr,
      "loss_ssim":loss_ssim}
  # Checkpoint only the non-discriminator variables; the discriminator is
  # re-initialized locally by the Supervisor.
  model_vars = [v for v in tf.global_variables() if not v.name.startswith("inference/l2_loss/discriminator")]
  discriminator_vars = [v for v in tf.global_variables() if v.name.startswith("inference/l2_loss/discriminator")]
  # Train config
  config = tf.ConfigProto()
  config.gpu_options.allow_growth = True  # Do not canibalize the entire GPU
  sv = tf.train.Supervisor(
      saver=tf.train.Saver(var_list=model_vars, max_to_keep=100),
      local_init_op=tf.initialize_variables(discriminator_vars),
      logdir=args.checkpoint_dir,
      save_summaries_secs=args.summary_interval,
      save_model_secs=args.checkpoint_interval)
  # Train loop
  with sv.managed_session(config=config) as sess:
    sv.loop(args.log_interval, log_hook, (sess,log_fetches))
    last_eval = time.time()
    while True:
      if sv.should_stop():
        log.info("stopping supervisor")
        break
      try:
        # One generator step, then one discriminator step.
        step, _= sess.run([global_step, train_op])
        _ = sess.run(train_discrim_op)
        since_eval = time.time()-last_eval
        if args.eval_data_dir is not None and since_eval > args.eval_interval:
          log.info("Evaluating on {} images at step {}".format(
              3, step))
          p_ = 0
          s_ = 0
          # Average PSNR/SSIM over 3 eval batches.
          for it in range(3):
            p_ += sess.run(eval_psnr)
            s_ += sess.run(eval_ssim)
          p_ /= 3
          s_ /= 3
          sv.summary_writer.add_summary(tf.Summary(value=[
              tf.Summary.Value(tag="psnr/eval", simple_value=p_)]), global_step=step)
          sv.summary_writer.add_summary(tf.Summary(value=[
              tf.Summary.Value(tag="ssim/eval", simple_value=s_)]), global_step=step)
          log.info(" Evaluation PSNR = {:.2f} dB".format(p_))
          log.info(" Evaluation SSIM = {:.4f} ".format(s_))
          last_eval = time.time()
      except tf.errors.AbortedError:
        log.error("Aborted")
        break
      except KeyboardInterrupt:
        break
    # Final checkpoint on shutdown, regardless of how the loop exited.
    chkpt_path = os.path.join(args.checkpoint_dir, 'on_stop.ckpt')
    log.info("Training complete, saving chkpt {}".format(chkpt_path))
    sv.saver.save(sess, chkpt_path)
    sv.request_stop()
if __name__ == '__main__':
  # Command-line interface: required paths, training schedule, debug flags and
  # data-pipeline/augmentation options (the latter are forwarded to main() as
  # data_params).
  parser = argparse.ArgumentParser()
  # pylint: disable=line-too-long
  # ----------------------------------------------------------------------------
  req_grp = parser.add_argument_group('required')
  req_grp.add_argument('--checkpoint_dir', default='../checkpoint/', help='directory to save checkpoints to.')
  req_grp.add_argument('--data_dir', default= '/root/hj9/ECCV/image_enhance_challenge/train_datasets/dataset.txt', help='input directory containing the training .tfrecords or images.')
  req_grp.add_argument('--eval_data_dir', default= '/root/hj9/ECCV/image_enhance_challenge/test_datasets/dataset.txt', type=str, help='directory with the validation data.')
  # Training, logging and checkpointing parameters
  train_grp = parser.add_argument_group('training')
  train_grp.add_argument('--learning_rate', default=5e-4, type=float, help='learning rate for the stochastic gradient update.')
  train_grp.add_argument('--weight_decay', default=None, type=float, help='l2 weight decay on FC and Conv layers.')
  train_grp.add_argument('--log_interval', type=int, default=1, help='interval between log messages (in s).')
  train_grp.add_argument('--summary_interval', type=int, default=120, help='interval between tensorboard summaries (in s)')
  train_grp.add_argument('--checkpoint_interval', type=int, default=600, help='interval between model checkpoints (in s)')
  train_grp.add_argument('--eval_interval', type=int, default=200, help='interval between evaluations (in s)')
  # Debug and perf profiling
  debug_grp = parser.add_argument_group('debug and profiling')
  debug_grp.add_argument('--profiling', dest='profiling', action='store_true', help='outputs a profiling trace.')
  debug_grp.add_argument('--noprofiling', dest='profiling', action='store_false')
  # Data pipeline and data augmentation
  data_grp = parser.add_argument_group('data pipeline')
  data_grp.add_argument('--batch_size', default=32, type=int, help='size of a batch for each gradient update.')
  # BUG FIX: type=int was missing, so a command-line value arrived as a string.
  data_grp.add_argument('--data_threads', default=8, type=int, help='number of threads to load and enqueue samples.')
  data_grp.add_argument('--rotate', dest="rotate", action="store_true", help='rotate data augmentation.')
  data_grp.add_argument('--norotate', dest="rotate", action="store_false")
  data_grp.add_argument('--flipud', dest="flipud", action="store_true", help='flip up/down data augmentation.')
  data_grp.add_argument('--noflipud', dest="flipud", action="store_false")
  data_grp.add_argument('--fliplr', dest="fliplr", action="store_true", help='flip left/right data augmentation.')
  data_grp.add_argument('--nofliplr', dest="fliplr", action="store_false")
  data_grp.add_argument('--random_crop', dest="random_crop", action="store_true", help='random crop data augmentation.')
  data_grp.add_argument('--norandom_crop', dest="random_crop", action="store_false")
  data_grp.add_argument('--data_pipeline', default='ImageFilesDataPipeline',help='classname of the data pipeline to use.', choices=dp.__all__)
  data_grp.add_argument('--output_resolution', default=[100, 100], type=int, nargs=2, help='resolution of the output image.')
  data_grp.add_argument('--scale', default= 1, type=int, help='resolution scale of the low image.')
  # NOTE(review): batch_norm has a default below but no corresponding
  # add_argument, so it is not settable from the command line.
  parser.set_defaults(
      profiling=False,
      flipud=False,
      fliplr=False,
      rotate=False,
      random_crop=True,
      batch_norm=False)
  # ----------------------------------------------------------------------------
  # pylint: enable=line-too-long
  args = parser.parse_args()
  # Collect the data-pipeline group's values into the data_params dict.
  data_params = {}
  for a in data_grp._group_actions:
    data_params[a.dest] = getattr(args, a.dest, None)
  main(args, data_params)
|
import os
import sys
# Workaround for the "duplicate OpenMP runtime" crash (common with MKL/
# matplotlib on macOS); allows the process to continue with both runtimes.
os.environ['KMP_DUPLICATE_LIB_OK']='True'
# file path for fitted_Q_agents
FQ_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(FQ_DIR)
# file path for chemostat_env
C_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
C_DIR = os.path.join(C_DIR, 'chemostat_env')
sys.path.append(C_DIR)
from chemostat_envs import *
from fitted_Q_agents import *
from reward_func import *
import yaml
import matplotlib.pyplot as plt
def test_trajectory():
    """Run a fixed-action rollout of the chemostat environment and plot it.

    NOTE(review): the action is hard-coded to a = 3 immediately after the
    random draw, so np.random.choice only advances the RNG state — presumably
    a leftover from experimentation (see the commented-out policies below).
    """
    param_file = os.path.join(C_DIR, 'parameter_files/double_aux.yaml')
    update_timesteps = 1  # currently unused
    one_min = 0.016666666667  # one minute expressed in hours
    sampling_time = one_min*10  # sample every ten minutes
    env = ChemostatEnv(param_file, reward_func, sampling_time, 1000)
    rew = 0
    actions = []
    for i in range(1000):
        a = np.random.choice(range(4))
        a = 3
        '''
        a = 3
        if i == 400:
            a = 2
        if i == 500:
            a = 1
        '''
        #a = 2
        state = env.get_state()
        '''
        a = 0
        if state[0] < 15000:
            a = 2
        elif state[1] < 25000:
            a = 1
        if state[0] < 15000 and state[1] < 25000:
            a = 3
        '''
        # Reward is computed from the pre-step state; env.step advances time.
        r, done = no_LV_reward_function_new_target(state, None, None)
        print(r)
        rew += r
        env.step(a)
        if done:
            break
        actions.append(a)
    print(actions)
    # Plot population trajectories (indices 0-1) and the remaining state vars.
    env.plot_trajectory([0,1])
    env.plot_trajectory([2,3,4])
    plt.show()
    print(rew)
if __name__ == '__main__':
    test_trajectory()
|
from .exchange import Exchange
class ExchangeReleaseFeedback(Exchange):
    """Placeholder Exchange subclass; no behavior is overridden yet."""
    pass
|
from collections import deque
def readable_size(b):
    """Convert a byte count into a human-readable string.

    Examples: 1536 -> "1 KB, 512 B"; 1049601 -> "1 MB, 1 KB and 1 B".
    Zero-valued groups are omitted; the last group is joined with "and"
    when there are three or more groups.  Values above PB are unsupported
    (the unit iterator would be exhausted).
    """
    # BUG FIX: the original returned "" for an input of 0 bytes.
    if b == 0:
        return "0 B"
    units = iter(["B", "KB", "MB", "GB", "TB", "PB"])  # renamed: shadowed builtin `bytes`
    res = deque()
    while b > 0:
        b, num = divmod(b, 1024)
        unit = next(units)
        if num > 0:
            res.appendleft(f"{num} {unit}")
    if len(res) <= 2:
        return ", ".join(res)
    last = res.pop()
    return ", ".join(res) + f" and {last}"
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import logging
from typing import List,Dict,Any#,Union
from attrbox import AttrDict
from ....definations.cfg import DY_CONFIGURATION_KEY_DEF, ConfigerDefs
# from ....dataclasses.i.rdb import IDatabase
from ....dataclasses.i.cfg import IDatabaseConfiger
from ....dataclasses.c.db import Databases
from ...deco import configer_d
@configer_d(DY_CONFIGURATION_KEY_DEF.DATABASE, ConfigerDefs.DB.value)
class DatabaseConfiger(IDatabaseConfiger):
    """Configer that initializes database connections from a config mapping.

    Each key of `cfg` names a database registered in `Databases`; its value
    is the kwargs dict passed to that database's init().
    """
    def __init__(self, cfg:AttrDict, *args:List[Any], **kwargs:Dict[str,Any]):
        IDatabaseConfiger.__init__(self)
        self.__cfg = cfg
    def __enter__(self):
        return self
    def init_db(self, databases: Databases):
        """Initialize every configured database; failures are logged, not raised."""
        for name, subcfg in self.__cfg.items():
            # Skip entries with empty/falsy configuration.
            if not subcfg:
                continue
            try:
                db = databases.db(name)
                if not db:
                    continue
                db.init(**subcfg)
            except Exception:
                # BUG FIX: narrowed from a bare `except:` so SystemExit and
                # KeyboardInterrupt are no longer swallowed; best-effort
                # initialization of the remaining databases is preserved.
                logging.exception(f"Initialize failed: {name}")
    def __exit__(self,exc_type, exc_val, exc_tb):
        pass
import random
from vardefunc.noise import AddGrain
# Draw one shared seed so all four grainers are seeded identically.
# BUG FIX: the original used `seed = random.seed()`, but random.seed()
# *reseeds* the RNG and returns None, so every AddGrain received seed=None.
seed = random.randrange(2**32)
graigasm_args = dict(
    thrs=[x << 8 for x in (26, 75, 130, 180)],  # luma thresholds scaled to 16-bit
    strengths=[(0.7, 0.2), (1, 0.12), (1.05, 0.05), (0.12, 0)],
    sizes=(1.1, 1.53, 1.86, 1.7),
    sharps=(80, 60, 40, 40),
    grainers=[
        AddGrain(seed=seed, constant=False),
        AddGrain(seed=seed, constant=False),
        AddGrain(seed=seed, constant=False),
        AddGrain(seed=seed, constant=True)
    ]
)
|
# Fine-tuning config: COCO-person Faster R-CNN adapted to PIROPO.
_base_ = '../faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-person.py'
# _base_ = [
#     '../_base_/models/faster_rcnn_r50_fpn.py',
#     '../_base_/datasets/coco_detection.py',
#     '../_base_/schedules/schedule_1x.py',
#     '../_base_/default_runtime.py'
# ]
# model
# freeze backbone completely
model = dict(backbone=dict(frozen_stages=4))
# training
# Pure rotation augmentation (up to +/-180 deg); no shift/scale.
albu_train_transforms = [
    dict(
        type='ShiftScaleRotate',
        shift_limit=0.0,
        scale_limit=0.0,
        rotate_limit=180,
        interpolation=1,
        p=0.5)]
img_norm_cfg = dict(mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) # caffe image norm (BGR, mean-only)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    # original coco pedestrian detector uses multi-scale resizing
    dict(type='Resize', img_scale=(800, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Albu',
         transforms=albu_train_transforms,
         bbox_params=dict(
             type='BboxParams',
             format='pascal_voc',
             label_fields=['gt_labels'],
             min_visibility=0.0,
             filter_lost_elements=True),
         keymap={
             'img': 'image',
             'gt_masks': 'masks',
             'gt_bboxes': 'bboxes'
         },
         update_pad_shape=False,
         skip_img_without_anno=False),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
# testing
# test_pipeline = [
# dict(type='LoadImageFromFile'),
# dict(
# type='MultiScaleFlipAug',
# img_scale=(1333, 800),
# flip=False,
# transforms=[
# dict(type='Resize', keep_ratio=True),
# dict(type='RandomFlip'),
# dict(type='Normalize', **img_norm_cfg),
# dict(type='Pad', size_divisor=32),
# dict(type='ImageToTensor', keys=['img']),
# dict(type='Collect', keys=['img']),
# ])
# ]
# datasets
data_root = 'data/PIROPO/'
data = dict(
    # with 4 GPUs batch size = 4*4 = 16
    samples_per_gpu=4,
    workers_per_gpu=2,
    train=dict(
        # classes=classes,
        ann_file=data_root + 'omni_training.json',
        img_prefix=data_root,
        pipeline=train_pipeline
    ),
    val=dict(
        # classes=classes,
        ann_file=data_root + 'omni_test3.json',
        img_prefix=data_root
    ),
    test=dict(
        # classes=classes,
        ann_file=data_root + 'omni_test2.json',
        img_prefix=data_root
    ))
# optimizer
# fine-tuning: smaller lr, freeze FPN (neck), freeze RPN
optimizer = dict(
    type='SGD',
    lr=0.001,
    momentum=0.9,
    weight_decay=0.0001,
    paramwise_cfg=dict(
        custom_keys={
            'neck': dict(lr_mult=0.0),
            'rpn_head.cls_convs': dict(lr_mult=0.0)
        }
    ))
# optimizer_config = dict(grad_clip=None)
# learning policy
# NOTE(review): the step milestone (10000) exceeds max_iters (1776), so the
# learning rate never actually decays — confirm this is intentional.
lr_config = dict(
    # we don't need warmup
    warmup=None,
    policy='step',
    step=[10000])
# full dataset has 2357 imgs ---(batch-size=16)--> 148 iterations * 12 epochs = 1776 total iterations
# few-shot fine-tuning paper uses anywhere between 500 and 160000 iterations
runner = dict(_delete_=True, type='IterBasedRunner', max_iters=1776)
# evaluate every 500 iterations
evaluation = dict(interval=500, metric='bbox')
# single checkpoint at the end of training
checkpoint_config = dict(interval=1776)
# checkpoint_config = dict(interval=1)
# yapf:disable
# log_config = dict(
# interval=50,
# hooks=[
# dict(type='TextLoggerHook'),
# # dict(type='TensorboardLoggerHook')
# ])
# yapf:enable
# custom_hooks = [dict(type='NumClassCheckHook')]
# dist_params = dict(backend='nccl')
# log_level = 'INFO'
# load_from = None
# resume_from = None
# workflow = [('train', 1)]
# Start from the COCO-person pretrained Faster R-CNN weights.
load_from = 'mmdetection/checkpoints/faster_rcnn_r50_fpn_1x_coco-person_20201216_175929-d022e227.pth'
work_dir = 'work_dirs/PIROPO'
|
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta

from DataHandler import *
from Util import *
from DataProcessor import *
from Portfolio import *
def get_date(dat, end_date_intervel, start_date_intervel):
    """Return (end, start) 'YYYY-MM-DD' strings offset from `dat` by the
    given numbers of days.

    Improvements: `dat` is parsed once instead of twice, and the day offsets
    use the stdlib `timedelta` (behaviorally identical to
    relativedelta(days=n) for whole-day offsets).
    """
    base = datetime.strptime(dat, '%Y-%m-%d')
    e = (base + timedelta(days=end_date_intervel)).strftime('%Y-%m-%d')
    s = (base + timedelta(days=start_date_intervel)).strftime('%Y-%m-%d')
    return e, s
# Experiment windows, paired positionally via zip() below: each start date
# goes with a total window length in days (end_date_list) and the day offset
# at which the test split begins (start_test_list).
start_date_list = ['2014-01-01', '2012-01-01', '2013-05-01', '2015-07-01', '2010-02-01', '2011-11-15'
    , '2015-02-01','2015-05-01','2015-06-01','2015-07-01', '2015-08-01']
end_date_list = [450, 60, 60, 90, 75, 90, 60, 60, 60, 60, 60]
start_test_list = [330, 45, 45, 75, 60, 80, 50, 45, 45, 45, 45]
#indices = ['^NSEI', '^DJI', '^FTSE', '^AXJO', '^HSI', '^N225', '^IXIC']#, '000001.SS']
#market_name = ['nse', 'dji', 'ftse', 'aus', 'hsi', 'nikkei', 'nasdaq']#, 'sanghai']
# Markets currently analysed; NSE comes first because it is the prediction target.
indices = ['^NSEI', '^BSESN', '^AXJO', '^HSI', '^N225']#, '000001.SS']
market_name = ['nse', 'bse', 'aus', 'hsi', 'nikkei']#, 'sanghai']
if __name__ == '__main__':
    # For each experiment window: fetch index data, engineer features, train
    # classifiers on the merged frame, then backtest SVC/RF/LR signals against
    # a buy-and-hold benchmark on CNX-NIFTY.
    final_result=''
    for start_date,e,s in zip(start_date_list, end_date_list, start_test_list):
        end_date, start_test = get_date(start_date, e,s)
        d = DataHandler()
        data_frames = d.fetch_and_save_data(indices, market_name, start_date, end_date)
        # plot([data_frames[0]], 'Adj Close', market_name[0])
        for data_frame in data_frames:
            d.daily_return(data_frame)
        # for index, data_frame in zip(range(len(data_frames)), data_frames):
        #     plot([d.daily_return(data_frame)], 'Daily Return', market_name[index].upper()+' Index')
        # print(data_frames[0].head(5))
        #d.plot_data([data_frames[0],data_frames[7]], ['Daily Return'], market_names=market_name)
        #plt.show()
        # Plot data
        # data_frames[0]['Daily Return'].plot()
        #print(data_frames[0].index.name)
        #print(data_frames[0].columns.values.tolist())
        dp = DataProcessor()
        data_points = [4, 8, 12]
        # Compute moving average
        for data_frame in data_frames:
            data_frame = dp.get_moving_average(data_frame, data_points)
        #Compute exponential moving average
        for data_frame in data_frames:
            data_frame = dp.get_ewma(data_frame, data_points)
        # cols=['Adj Close','MA_5','MA_10','MA_15','MA_20']
        # plot different calculations
        '''
        for i in range(2):
            data_frames[i]['Adj Close'].plot(legend=True, linestyle='-', linewidth=2)
            cols=[ 'MA_5']
            for col in cols:
                data_frames[i][col].plot(legend=True, linestyle='-', linewidth=2)
            plt.grid(linestyle='--', linewidth=2)
            plt.title(market_name[i].upper()+' ADj Close & MA')
            cols_ema=['EMA_5','EMA_10']
            for col in cols_ema:
                data_frames[i][col].plot(legend=True, linewidth=2)
            plt.title(market_name[i].upper()+' ADj Close MA 20 & EMA 20')
            figManager = plt.get_current_fig_manager()
            figManager.window.showMaximized()
            plt.show()
        '''
        renamme_columns(data_frames, market_name)
        #print(data_frames[1].index.name)
        #print([data_frame.columns for data_frame in data_frames])
        # for name, data_frame in zip(market_name, data_frames):
        #     print("No of Data for [%8s] are [%s]" % (name.upper(), len(data_frame)))
        merged_data = merge_data_frames(data_frames, 5)
        #print("============", merged_data.columns, "===================")
        #print(merged_data.describe())
        print(merged_data.columns.values.tolist())
        # BUG FIX: attribute assignment (merged_data.Return_CNX_NIFTY = ...)
        # does NOT create a new DataFrame column — pandas stores it as a plain
        # instance attribute, which is silently lost on the interpolate()/
        # fillna() copies below.  Item assignment creates the real column.
        merged_data['Return_CNX_NIFTY'] = merged_data['Daily Return_nse'].shift(-1)
        # Plot is broken due to missing data
        # merged_data.Return_CNX_NIFTY.plot()
        # merged_data['Adj Close_nse'].plot()
        # plt.show()
        print("Shape of merged data", merged_data.shape, ".")
        print("After merge out of [", len(merged_data) * len(merged_data.columns), "] [", count_missing(merged_data),
              "] data points are missing.")
        #print("Merged data Index = ", merged_data.index)
        merged_data = merged_data.interpolate(method='time')
        print('Number of NaN after time interpolation: %s' % str(count_missing(merged_data)))
        merged_data = merged_data.fillna(merged_data.mean())
        print('Number of NaN after mean interpolation: %s' % count_missing(merged_data))
        # Plot after
        # merged_data['Adj Close_nse'].plot()
        # merged_data['Daily Return_nse'].plot()
        # plt.show()
        # from sklearn.decomposition import PCA
        # pca = PCA(n_components=2)
        # pca.fit(merged_data)
        X_train, y_train, X_test, y_test = dp.prepare_data_for_classification(merged_data, start_test)
        # print("======== Shapes ======== ")
        # print("Training X", X_train.shape)
        # print("Training y", y_train.shape)
        # print("Test X", X_test.shape)
        # print("Test y", y_test.shape)
        # print("======================== ")
        # plt.figure()
        # y_test.plot(kind='bar', alpha=0.5)
        # plt.title('Test data plot')
        # plt.axhline(0, color='k')
        # plt.show()
        #
        # plt.figure()
        # y_train.plot(kind='bar', alpha=0.9)
        # plt.axhline(0, color='r')
        # plt.title('Train data plot')
        # plt.show()
        print("Positive and negative movement in train data outcome.")
        print(y_train.value_counts())
        print("Positive and negative movement in test data outcome.")
        print(y_test.value_counts())
        # Compare classifiers, then grid-search SVC hyper-parameters.
        dp.apply_logistic_regressor(X_train, y_train, X_test, y_test)
        dp.apply_svc(X_train, y_train, X_test, y_test)
        dp.apply_knn(X_train, y_train, X_test, y_test)
        dp.apply_random_forest(X_train, y_train, X_test, y_test)
        parameters = {'kernel': ('linear', 'rbf'), 'C': [1, 10, 100, 1000]}
        bp=dp.select_best_param_svc(X_train, y_train, parameters)
        dp.apply_svc(X_train, y_train, X_test, y_test, kernel='rbf', C=1)
        dp.apply_svc(X_train, y_train, X_test, y_test, kernel='linear', C=1)
        # Backtest each model's signals over the test period.
        symbol = 'CNX-NIFTY'
        bars = d.fetch_data_from_yahoo('^NSEI', start_test, end_date)
        X_train, y_train, X_test, y_test = dp.partition_data(merged_data, len(bars))
        predict_svc = dp.get_svc_prediction(X_train, y_train, X_test, kernel=bp['kernel'], C=bp['C'])
        signals_svc = pd.DataFrame(index=bars.index)
        signals_svc['signal'] = 0.0
        signals_svc['signal'] = predict_svc
        signals_svc['positions'] = signals_svc['signal'].diff()
        portfolio_svc = MarketIntradayPortfolio(symbol, bars, signals_svc)
        returns_svc = portfolio_svc.backtest_portfolio()
        predict_rf = dp.get_randomforest_prediction(X_train, y_train, X_test, 50)
        signals_rf = pd.DataFrame(index=bars.index)
        signals_rf['signal'] = 0.0
        signals_rf['signal'] = predict_rf
        signals_rf['positions'] = signals_rf['signal'].diff()
        portfolio_rf = MarketIntradayPortfolio(symbol, bars, signals_rf)
        returns_rf = portfolio_rf.backtest_portfolio()
        # print(signals_rf)
        #
        # print(returns_rf)
        # print(returns_svc)
        predict_lr = dp.get_logistic_reg_prediction(X_train, y_train, X_test)
        signals_lr = pd.DataFrame(index=bars.index)
        signals_lr['signal'] = 0.0
        signals_lr['signal'] = predict_lr
        signals_lr['positions'] = signals_lr['signal'].diff()
        portfolio_lr = MarketIntradayPortfolio(symbol, bars, signals_lr)
        returns_lr = portfolio_lr.backtest_portfolio()
        # Percentage returns over the test window for benchmark and models.
        bench_ret=(bars['Close'][-1]-bars['Close'][0])*100/bars['Close'][0]
        lr_ret=(returns_lr['total'][-1]-returns_lr['total'][0])*100/returns_lr['total'][0]
        svc_ret=(returns_svc['total'][-1]-returns_svc['total'][0])*100/returns_svc['total'][0]
        rf_ret=(returns_rf['total'][-1]-returns_rf['total'][0])*100/returns_rf['total'][0]
        f, ax = plt.subplots(4, sharex=True)
        f.patch.set_facecolor('white')
        ylabel = symbol + ' Close Price in Rs'
        bars['Close'].plot(ax=ax[0], color='r', lw=1.)
        ax[0].set_ylabel(ylabel, fontsize=10)
        ax[0].set_xlabel('', fontsize=14)
        ax[0].legend(('Close Price CNX-NIFTY [Return %.2f]%%'%bench_ret,), loc='upper left', prop={"size": 12})
        ax[0].set_title('CNX-NIFTY Close Price VS Portfolio Performance for Training Data ('+start_date+' to '
                        +start_test+') Test Data ('
                        +start_test+' to '+end_date+')', fontsize=14,
                        fontweight="bold")
        returns_svc['total'].plot(ax=ax[1], color='b', lw=1.)
        ax[1].set_ylabel('Portfolio value in Rs', fontsize=10)
        ax[1].set_xlabel('Date', fontsize=14)
        ax[1].legend(('Portfolio Performance.SVC [Return %.2f]%%'%svc_ret,), loc='upper left', prop={"size": 12})
        plt.tick_params(axis='both', which='major', labelsize=10)
        returns_rf['total'].plot(ax=ax[2], color='k', lw=1.)
        ax[2].set_ylabel('Portfolio value in Rs', fontsize=10)
        ax[2].set_xlabel('Date', fontsize=14)
        ax[2].legend(('Portfolio Performance.RF [Return %.2f]%%'%rf_ret,), loc='upper left', prop={"size": 12})
        plt.tick_params(axis='both', which='major', labelsize=10)
        returns_lr['total'].plot(ax=ax[3], color='g', lw=1.)
        ax[3].set_ylabel('Portfolio value in Rs', fontsize=10)
        ax[3].set_xlabel('Date', fontsize=14)
        ax[3].legend(('Portfolio Performance.LR [Return %.2f]%%'%lr_ret,), loc='upper left', prop={"size": 12})
        plt.tick_params(axis='both', which='major', labelsize=10)
        print("Benchmark Return [%.2f]%%" %(bench_ret))
        print("LR Return [%.2f]%%" %(lr_ret))
        print("SVC Return [%.2f]%%" %(svc_ret))
        print("RF Return [%.2f]%%" %(rf_ret))
        figManager = plt.get_current_fig_manager()
        figManager.window.showMaximized()
        final_result +='Training Data from (' + start_date +' to '+ start_test + ') Test Data (' + start_test +' to ' + end_date + ') Benchmark Return [%.2f]%% ' %(bench_ret) +" LR Return [%.2f]%%" %(lr_ret)+" SVC Return [%.2f]%%" %(svc_ret) +" RF Return [%.2f]%%" %(rf_ret)+'\n'
        plt.show()
    print(final_result)
|
from __future__ import annotations
from collections import deque
from dataclasses import dataclass
from typing import List, Dict, Tuple
NO_FRIENDSHIP = -1  # sentinel: no chain of friendships connects the two users
@dataclass
class User:
    """Node in the friendship graph; identity is determined solely by user_id."""
    user_id: str
    friends: List[User]
    def __eq__(self: User, other: User) -> bool:
        # Compare by id only, so equality/hashing ignore the friends list.
        return self.user_id == other.user_id
    def __hash__(self: User) -> int:
        return hash(self.user_id)
    def __str__(self: User) -> str:
        return self.user_id
def make_friendship_graph(
    nodes: List[str], edges: List[Tuple[str, str]]
) -> Dict[str, User]:
    """
    Build an undirected friendship graph keyed by user id.

    We assume that the graph is connected for this example.
    """
    users: Dict[str, User] = {}
    for node in nodes:
        users[node] = User(user_id=node, friends=list())
    for left, right in edges:
        users[left].friends.append(users[right])
        users[right].friends.append(users[left])
    return users
def smallest_friendships(from_user: User, to_user: User) -> int:
    """BFS over the friend graph: returns the number of *intermediate*
    friendships between the two users (BFS depth minus one), or
    NO_FRIENDSHIP when no path exists."""
    seen = {from_user}
    frontier = deque([(from_user, 0)])
    while frontier:
        current, depth = frontier.popleft()
        if current == to_user:
            return depth - 1
        for friend in current.friends:
            if friend in seen:
                continue
            seen.add(friend)
            frontier.append((friend, depth + 1))
    return NO_FRIENDSHIP
# Smoke tests.  Note ("b", "d") appears twice, so b and d hold duplicate
# friendship entries; BFS is unaffected because visited nodes are skipped.
graph = make_friendship_graph(
    nodes=["a", "b", "c", "d", "e"],
    edges=[("a", "b"), ("b", "d"), ("a", "c"), ("b", "d"), ("d", "e")],
)
assert smallest_friendships(graph["a"], graph["e"]) == 2
assert smallest_friendships(graph["a"], graph["c"]) == 0
assert smallest_friendships(graph["c"], graph["e"]) == 3
|
Python 3.9.1 (tags/v3.9.1:1e5d33e, Dec 7 2020, 17:08:21) [MSC v.1927 64 bit (AMD64)] on win32
Type "help", "copyright", "credits" or "license()" for more information.
>>> import math
>>> math.pi
3.141592653589793
>>>
>>> def circle_area(radius):
area = math.pi * (radius ** 2)
return area
>>> circle_area(1.1)
3.8013271108436504
>>> |
#!/usr/bin/python3
import datetime
def tup_replace(tup, pos, value):
    """Return a copy of `tup` with the element at index `pos` (negative
    indices allowed) replaced by `value`."""
    items = list(tup)
    items[pos] = value
    return tuple(items)
def get_timestamp(form="%Y%m%d%H%M"):
    """Format the current local time using `form` (default: minute precision,
    e.g. '202401311459')."""
    now = datetime.datetime.now()
    return now.strftime(form)

if __name__ == "__main__":
    pass
from typing import List
def maxAreaOfIsland(self, grid: List[List[int]]) -> int:
    """Return the area of the largest 4-connected island of 1s.

    Visited cells are zeroed in place, so the grid is consumed by the call.
    """
    rows, cols = len(grid), len(grid[0])
    best = 0
    for r0 in range(rows):
        for c0 in range(cols):
            if not grid[r0][c0]:
                continue
            # Iterative flood fill; zeroing each cell marks it visited.
            area = 0
            stack = [(r0, c0)]
            grid[r0][c0] = 0
            while stack:
                r, c = stack.pop()
                area += 1
                for nr, nc in ((r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)):
                    if 0 <= nr < rows and 0 <= nc < cols and grid[nr][nc]:
                        grid[nr][nc] = 0
                        stack.append((nr, nc))
            best = max(best, area)
    return best
# Generated file, please do not change!!!
import typing
from ..active_cart.by_project_key_in_store_key_by_store_key_me_active_cart_request_builder import (
ByProjectKeyInStoreKeyByStoreKeyMeActiveCartRequestBuilder,
)
from ..carts.by_project_key_in_store_key_by_store_key_me_carts_request_builder import (
ByProjectKeyInStoreKeyByStoreKeyMeCartsRequestBuilder,
)
from ..orders.by_project_key_in_store_key_by_store_key_me_orders_request_builder import (
ByProjectKeyInStoreKeyByStoreKeyMeOrdersRequestBuilder,
)
if typing.TYPE_CHECKING:
from ...base_client import BaseClient
class ByProjectKeyInStoreKeyByStoreKeyMeRequestBuilder:
    """Builder for the /{projectKey}/in-store/key={storeKey}/me request family.

    NOTE(review): this is a generated file ("please do not change" header) —
    docstring fixes here will be overwritten on regeneration and should also
    be made in the generator's templates.
    """
    _client: "BaseClient"
    _project_key: str
    _store_key: str
    def __init__(
        self,
        project_key: str,
        store_key: str,
        client: "BaseClient",
    ):
        self._project_key = project_key
        self._store_key = store_key
        self._client = client
    def carts(self) -> ByProjectKeyInStoreKeyByStoreKeyMeCartsRequestBuilder:
        """A shopping cart holds product variants and can be ordered."""
        return ByProjectKeyInStoreKeyByStoreKeyMeCartsRequestBuilder(
            project_key=self._project_key,
            store_key=self._store_key,
            client=self._client,
        )
    def orders(self) -> ByProjectKeyInStoreKeyByStoreKeyMeOrdersRequestBuilder:
        """An order can be created from a cart, usually after a checkout process has been completed."""
        return ByProjectKeyInStoreKeyByStoreKeyMeOrdersRequestBuilder(
            project_key=self._project_key,
            store_key=self._store_key,
            client=self._client,
        )
    def active_cart(self) -> ByProjectKeyInStoreKeyByStoreKeyMeActiveCartRequestBuilder:
        """Return a builder for the current user's store-scoped active cart."""
        return ByProjectKeyInStoreKeyByStoreKeyMeActiveCartRequestBuilder(
            project_key=self._project_key,
            store_key=self._store_key,
            client=self._client,
        )
|
# encoding: utf-8
"""
@author: jemmy li
@contact: zengarden2009@gmail.com
"""
if __name__ == '__main__':
    # Module currently has no executable entry point.
    pass
import asyncio
import collections
import re
from pychess.System.Log import log
from pychess.ic import BLOCK_START, BLOCK_SEPARATOR, BLOCK_END, BLKCMD_PASSWORD
from pychess.ic.icc import UNIT_START, UNIT_END, DTGR_START, MY_ICC_PREFIX
class ConsoleHandler:
    """Forwards each non-empty console line to a callback."""
    def __init__(self, callback):
        self.callback = callback
    def handle(self, line):
        """Invoke the callback unless the line is falsy (e.g. empty string)."""
        if not line:
            return
        self.callback(line)
class Prediction:
    """Associates a callback with one or more regular expressions.

    Each regexp may be given either as a pattern string (compiled here with
    re.IGNORECASE) or as an already-compiled pattern object.  The hash mixes
    the callback and all regexps so equal predictions hash alike.
    """
    def __init__(self, callback, *regexps):
        self.callback = callback
        self.name = callback.__name__
        self.regexps = []
        self.matches = ()
        self.hash = hash(callback)
        for regexp in regexps:
            self.hash ^= hash(regexp)
            # BUG FIX: the original tested hasattr("match", regexp) with the
            # arguments swapped, which raises TypeError for compiled pattern
            # objects and never stored them in self.regexps.
            if hasattr(regexp, "match"):
                # Already a compiled pattern: use it as-is.
                self.regexps.append(regexp)
            else:
                # FICS being fairly case insensitive, we can compile with IGNORECASE
                # to easy some expressions
                self.regexps.append(re.compile(regexp, re.IGNORECASE))
    def __hash__(self):
        return self.hash
    def __len__(self):
        return len(self.regexps)
# Outcomes a Prediction.handle() call can report to the line consumer.
RETURN_NO_MATCH, RETURN_MATCH, RETURN_NEED_MORE, RETURN_MATCH_END = range(4)
# Line-source categories — presumably block / datagram / console, matching
# the BLOCK_*/DTGR_* imports above; TODO confirm against the consumer code.
BL, DG, CN = range(3)
class LinePrediction(Prediction):
    """Prediction that matches exactly one line against a single regexp."""
    def __init__(self, callback, regexp):
        Prediction.__init__(self, callback, regexp)
    def handle(self, line):
        """Fire the callback and return RETURN_MATCH on success,
        RETURN_NO_MATCH otherwise."""
        found = self.regexps[0].match(line)
        if not found:
            return RETURN_NO_MATCH
        self.matches = (found.string,)
        self.callback(found)
        return RETURN_MATCH
class MultipleLinesPrediction(Prediction):
    """Base class for predictions that accumulate matches across several lines."""
    def __init__(self, callback, *regexps):
        Prediction.__init__(self, callback, *regexps)
        # Per-line match objects collected so far for the current attempt.
        self.matchlist = []
class NLinesPrediction(MultipleLinesPrediction):
    """Matches exactly len(regexps) consecutive lines, one pattern per line."""

    def __init__(self, callback, *regexps):
        MultipleLinesPrediction.__init__(self, callback, *regexps)

    def handle(self, line):
        """Feed the next line; fires the callback once every pattern matched."""
        # The pattern to satisfy next is indexed by how many lines matched.
        regexp = self.regexps[len(self.matchlist)]
        match = regexp.match(line)
        if match:
            self.matchlist.append(match)
            if len(self.matchlist) == len(self.regexps):
                self.matches = [m.string for m in self.matchlist]
                self.callback(self.matchlist)
                del self.matchlist[:]
                return RETURN_MATCH
            return RETURN_NEED_MORE
        # A non-matching line aborts the whole multi-line attempt.
        del self.matchlist[:]
        return RETURN_NO_MATCH
class FromPlusPrediction(MultipleLinesPrediction):
    """Matches regexp0 once, then regexp1 repeatedly; ends on first non-match.

    The terminating (non-matching) line is NOT consumed as part of the match;
    the parser re-tests it (see RETURN_MATCH_END handling in test_prediction).
    """

    def __init__(self, callback, regexp0, regexp1):
        MultipleLinesPrediction.__init__(self, callback, regexp0, regexp1)

    def handle(self, line):
        if not self.matchlist:
            # First line must match the opening pattern.
            match = self.regexps[0].match(line)
            if match:
                self.matchlist.append(match)
                return RETURN_NEED_MORE
        else:
            # Subsequent lines accumulate while they match regexp1.
            match = self.regexps[1].match(line)
            if match:
                self.matchlist.append(match)
                return RETURN_NEED_MORE
            else:
                # Run ended: deliver everything collected so far.
                self.matches = [m.string for m in self.matchlist]
                self.callback(self.matchlist)
                del self.matchlist[:]
                return RETURN_MATCH_END
        del self.matchlist[:]
        return RETURN_NO_MATCH
class FromABPlusPrediction(MultipleLinesPrediction):
    """Matches regexp0, then regexp1, then regexp2 repeatedly until a non-match."""

    def __init__(self, callback, regexp0, regexp1, regexp2):
        MultipleLinesPrediction.__init__(self, callback, regexp0, regexp1, regexp2)

    def handle(self, line):
        if not self.matchlist:
            # Opening line.
            match = self.regexps[0].match(line)
            if match:
                self.matchlist.append(match)
                return RETURN_NEED_MORE
        elif len(self.matchlist) == 1:
            # Second line.
            match = self.regexps[1].match(line)
            if match:
                self.matchlist.append(match)
                return RETURN_NEED_MORE
        else:
            # Repeating tail lines; first non-match terminates the prediction.
            match = self.regexps[2].match(line)
            if match:
                self.matchlist.append(match)
                return RETURN_NEED_MORE
            else:
                self.matches = [m.string for m in self.matchlist]
                self.callback(self.matchlist)
                del self.matchlist[:]
                return RETURN_MATCH_END
        del self.matchlist[:]
        return RETURN_NO_MATCH
class FromToPrediction(MultipleLinesPrediction):
    """Matches from a regexp0 line up to (and including) a regexp1 line.

    Intermediate lines are kept verbatim as raw strings, hence the
    isinstance(m, str) check when building self.matches.
    """

    def __init__(self, callback, regexp0, regexp1):
        MultipleLinesPrediction.__init__(self, callback, regexp0, regexp1)

    def handle(self, line):
        if not self.matchlist:
            match = self.regexps[0].match(line)
            if match:
                self.matchlist.append(match)
                return RETURN_NEED_MORE
        else:
            match = self.regexps[1].match(line)
            if match:
                # Closing line found - deliver opener + body + closer.
                self.matchlist.append(match)
                self.matches = [
                    m if isinstance(m, str) else m.string for m in self.matchlist
                ]
                self.callback(self.matchlist)
                del self.matchlist[:]
                return RETURN_MATCH
            else:
                # Body line: stored as a plain string, not a match object.
                self.matchlist.append(line)
                return RETURN_NEED_MORE
        return RETURN_NO_MATCH
# A single line received from the server, the protocol code it arrived under,
# and how that code was delivered (BL/DG/CN, or None for plain lines).
TelnetLine = collections.namedtuple("TelnetLine", ["line", "code", "code_type"])
EmptyTelnetLine = TelnetLine("", None, None)
class TelnetLines:
    """Buffered, protocol-aware line source on top of a raw telnet connection.

    Understands two framing schemes: FICS block mode (BLOCK_START/BLOCK_END
    with embedded command identifiers) and ICC datagram mode (UNIT_START /
    UNIT_END units containing DTGR_START datagrams).  Parsed lines are
    queued as TelnetLine tuples and optionally echoed to a console handler.
    """

    def __init__(self, telnet, show_reply):
        self.telnet = telnet
        self.lines = collections.deque()   # parsed-but-unconsumed TelnetLines
        self.block_mode = False            # FICS block framing active
        self.datagram_mode = False         # ICC datagram framing active
        self.line_prefix = None            # server prompt to strip (e.g. "fics%")
        self.consolehandler = None         # optional ConsoleHandler for echoing
        self.show_reply = show_reply       # command ids whose replies to echo

    def appendleft(self, x):
        """Push a line back so it is returned by the next popleft()."""
        self.lines.appendleft(x)

    def extendleft(self, iterable):
        """Push several lines back, preserving their order for popleft()."""
        self.lines.extendleft(iterable)

    @asyncio.coroutine
    def popleft(self):
        """Return the next TelnetLine, reading from the socket when empty."""
        try:
            return self.lines.popleft()
        except IndexError:
            lines = yield from self._get_lines()
            self.lines.extend(lines)
            return self.lines.popleft() if self.lines else EmptyTelnetLine

    @asyncio.coroutine
    def _get_lines(self):
        """Read one framing unit from the connection and return its lines."""
        lines = []
        line = yield from self.telnet.readline()
        identifier = 0
        # Strip the server prompt (plus the following space) if present.
        if line.startswith(self.line_prefix):
            line = line[len(self.line_prefix) + 1 :]
        if self.datagram_mode:
            identifier = -1
            code = 0
            unit = False
            if line.startswith(UNIT_START):
                unit = True
                unit_lines = []
                # Unit header carries the CN code between position 2 and the
                # first space.
                cn_code = int(line[2 : line.find(" ")])
                # Our own tag marks this as a reply to a command we sent.
                if MY_ICC_PREFIX in line:
                    identifier = 0
                line = yield from self.telnet.readline()
            if unit:
                while UNIT_END not in line:
                    if line.startswith(DTGR_START):
                        # Embedded datagram: "<code> <payload>" minus framing.
                        code, data = line[2:-2].split(" ", 1)
                        log.debug(
                            "%s %s" % (code, data),
                            extra={"task": (self.telnet.name, "datagram")},
                        )
                        lines.append(TelnetLine(data, int(code), DG))
                    else:
                        # NOTE(review): this endswith(UNIT_END) branch looks
                        # unreachable - the while condition already excludes
                        # lines containing UNIT_END.  Confirm intent.
                        if line.endswith(UNIT_END):
                            parts = line.split(UNIT_END)
                            if parts[0]:
                                unit_lines.append(parts[0])
                        else:
                            unit_lines.append(line)
                    line = yield from self.telnet.readline()
                if len(unit_lines) > 0:
                    # Collapse the unit body into one multi-line TelnetLine.
                    text = "\n".join(unit_lines)
                    lines.append(TelnetLine(text, cn_code, CN))
                    log.debug(text, extra={"task": (self.telnet.name, "not datagram")})
        elif self.block_mode and line.startswith(BLOCK_START):
            # FICS block header: identifier<SEP>code[<SEP>error_code]<SEP>text
            parts = line[1:].split(BLOCK_SEPARATOR)
            if len(parts) == 3:
                identifier, code, text = parts
            elif len(parts) == 4:
                identifier, code, error_code, text = parts
            else:
                log.warning(
                    "Posing not supported yet",
                    extra={"task": (self.telnet.name, "lines")},
                )
                return lines
            code = int(code)
            identifier = int(identifier)
            if text:
                line = text
            else:
                line = yield from self.telnet.readline()
            # Accumulate reply lines until the block terminator shows up.
            while not line.endswith(BLOCK_END):
                lines.append(TelnetLine(line, code, BL))
                line = yield from self.telnet.readline()
            lines.append(TelnetLine(line[:-1], code, BL))
            # Never log password prompts/replies.
            if code != BLKCMD_PASSWORD:
                log.debug(
                    "%s %s %s"
                    % (
                        identifier,
                        code,
                        "\n".join(line.line for line in lines).strip(),
                    ),
                    extra={"task": (self.telnet.name, "command_reply")},
                )
        else:
            # Plain, unframed server line.
            code = 0
            lines.append(TelnetLine(line, None, None))
        if self.consolehandler:
            # Echo unsolicited lines (id 0) and explicitly requested replies.
            if identifier == 0 or identifier in self.show_reply:
                self.consolehandler.handle(lines)
                # self.show_reply.discard(identifier)
        return lines
class PredictionsTelnet:
    """Drives a TelnetLines stream through a set of Prediction matchers.

    Datagram/CN coded lines are dispatched straight to their registered
    callbacks; everything else is tried against the prediction list until
    one matches.
    """

    def __init__(
        self, telnet, predictions, reply_cmd_dict, replay_dg_dict, replay_cn_dict
    ):
        self.telnet = telnet
        self.predictions = predictions        # generic Prediction objects
        self.reply_cmd_dict = reply_cmd_dict  # block code -> predictions
        self.replay_dg_dict = replay_dg_dict  # datagram code -> callback
        self.replay_cn_dict = replay_cn_dict  # CN code -> callback
        self.show_reply = set([])             # command ids to echo to console
        self.lines = TelnetLines(telnet, self.show_reply)
        self.__command_id = 1                 # next block-mode command id

    @asyncio.coroutine
    def parse(self):
        """Consume one line (or multi-line prediction) from the stream."""
        line = yield from self.lines.popleft()
        if not line.line:
            return  # TODO: necessary?
        # print("line.line:", line.line)
        if self.lines.datagram_mode and line.code is not None:
            # Coded ICC traffic bypasses the prediction machinery.
            # NOTE(review): an unregistered DG code raises KeyError here.
            if line.code_type == DG:
                callback = self.replay_dg_dict[line.code]
                callback(line.line)
                log.debug(
                    line.line, extra={"task": (self.telnet.name, callback.__name__)}
                )
                return
            elif line.code_type == CN and line.code in self.replay_cn_dict:
                callback = self.replay_cn_dict[line.code]
                callback(line.line)
                log.debug(
                    line.line, extra={"task": (self.telnet.name, callback.__name__)}
                )
                return
        # Command replies get their dedicated prediction set when available.
        predictions = (
            self.reply_cmd_dict[line.code]
            if line.code is not None and line.code in self.reply_cmd_dict
            else self.predictions
        )
        for pred in list(predictions):
            answer = yield from self.test_prediction(pred, line)
            # print(answer, " parse_line: trying prediction %s for line '%s'" % (pred.name, line.line[:80]))
            if answer in (RETURN_MATCH, RETURN_MATCH_END):
                log.debug(
                    "\n".join(pred.matches),
                    extra={"task": (self.telnet.name, pred.name)},
                )
                break
        else:
            # for/else: no prediction matched this line.
            # print(" NOT MATCHED:", line.line[:80])
            if line.code != BLKCMD_PASSWORD:
                log.debug(line.line, extra={"task": (self.telnet.name, "nonmatched")})

    @asyncio.coroutine
    def test_prediction(self, prediction, line):
        """Feed lines into *prediction*; un-consume them on failure."""
        lines = []
        answer = prediction.handle(line.line)
        while answer is RETURN_NEED_MORE:
            line = yield from self.lines.popleft()
            lines.append(line)
            answer = prediction.handle(line.line)
        if lines and answer not in (RETURN_MATCH, RETURN_MATCH_END):
            # Prediction failed part-way: put the extra lines back.
            self.lines.extendleft(reversed(lines))
        elif answer is RETURN_MATCH_END:
            self.lines.appendleft(line)  # re-test last line that didn't match
        return answer

    def run_command(self, text, show_reply=False):
        """Send *text*, adding block-mode ids / datagram tags as needed."""
        # Mask sensitive commands (e.g. passwords) in the log.
        logtext = "*" * len(text) if self.telnet.sensitive else text
        log.debug(logtext, extra={"task": (self.telnet.name, "run_command")})
        if self.lines.block_mode:
            # TODO: reuse id after command reply handled
            self.__command_id += 1
            text = "%s %s" % (self.__command_id, text)
            if show_reply:
                self.show_reply.add(self.__command_id)
            self.telnet.write(text)
        elif self.lines.datagram_mode:
            if show_reply:
                # Tag the command so the reply unit can be recognised as ours.
                text = "`%s`%s" % (MY_ICC_PREFIX, text)
            self.telnet.write("%s" % text)
        else:
            self.telnet.write("%s" % text)

    def cancel(self):
        """Quit the server session and cancel the connection."""
        self.run_command("quit")
        self.telnet.cancel()

    def close(self):
        """Abort any running game, quit, and close the connection."""
        # save played game (if there is any) if no moves made
        self.run_command("abort")
        self.run_command("quit")
        self.telnet.close()
|
import logging
from airflow_helpers.sdc_airflow_config import dag_emails
from sdc_etl_libs.sdc_data_exchange.SDCDataExchangeEnums import FileResultTypes
class AirflowHelpers:
    """Static helpers for Airflow DAGs: xcom plumbing, ETL-log formatting and
    completion e-mail generation."""

    @staticmethod
    def combine_xcom_pulls_from_tasks(tasks_, key_='etl_results',
                                      return_html_=True, **kwargs):
        """
        Combines pulled xcom variables from a provided list of Airflow
        DAG task ids.
        :param tasks_: List of task ids.
        :param key_: Task key to pull from tasks.
        :param return_html_: If True, will format for use in HTML formatted
            e-mails.
        :param kwargs: Airflow kwargs (must contain 'ti', the task instance).
        :return: Results as a string.
        """
        separator = "<br>" if return_html_ else "\n"
        results = ""
        for task in tasks_:
            results = results + kwargs['ti'].xcom_pull(task_ids=task, key=key_) + separator
        return results

    @staticmethod
    def push_xcom_variable(key_, value_, **kwargs):
        """
        Pushes an xcom variable to Airflow.
        :param key_: Task key.
        :param value_: Task value.
        :param kwargs: Airflow kwargs (must contain 'ti', the task instance).
        :return: String message of what value was pushed to what key.
        """
        kwargs['ti'].xcom_push(key=key_, value=value_)
        return f"{value_} pushed to key {key_}"

    # FIX: decorated as @staticmethod for consistency with the other helpers
    # (it takes no self/cls and is called as AirflowHelpers.get_dag_emails(...)).
    @staticmethod
    def get_dag_emails(dag_id_):
        """
        Given an Airflow DAG ID (ex. 'etl-hfd-reports'), returns a list of
        e-mails that need to be notified when DAG completes. If the
        DAG ID does not exist in the config file, returns the default
        Data Engineering e-mail.
        :param dag_id_: DAG id.
        :return: List of e-mails.
        """
        default_email = dag_emails["data-eng"]
        if dag_id_ not in dag_emails.keys():
            logging.warning(f"E-mail list not set for dag {dag_id_}. "
                            f"Using default data-eng email.")
        return dag_emails.get(dag_id_, default_email)

    @staticmethod
    def process_etl_results_log(header_, etl_results_log_, **kwargs):
        """
        Formats ETL log results from data exchange.
        :param header_: Header for ETL results.
        :param etl_results_log_: Dictionary produced from SDCDataExchange
            exchange_data() with log results.
        :param kwargs: Airflow kwargs (must contain 'ti').
        :return: Results as string (also pushed as xcom 'etl_results').
        """
        msg = f"<h3>{header_}:</h3>"
        for tag, logs in etl_results_log_.items():
            logs.sort(reverse=True)
            msg += f"<h4>{tag}</h4>"
            for log in logs:
                # Highlight failures in red for the HTML e-mail body.
                if "error" in log.lower():
                    result = f"<font color=\"red\">{log}</font>"
                else:
                    result = log
                msg = msg + result + "<br>"
        AirflowHelpers.push_xcom_variable('etl_results', msg, **kwargs)
        return msg

    @staticmethod
    def generate_data_exchange_email(etl_name_=None, tasks_=None,
                                     environment_=None, **kwargs):
        """
        Generates Airflow completion e-mail for Data Exchange. Generates
        header, body and subject from etl_name_ and tasks_ passed as
        op_kwargs in a PythonOperator.
        :param etl_name_: ETL name (appears in subject/body of e-mail).
            Defaults to the DAG's dag_id.
        :param tasks_: List of tasks to include in the e-mail.
            Defaults to all tasks associated with the DAG.
        :param environment_: If "development", a flag is added to the e-mail
            subject to note the exchange affected the development environment.
        :param kwargs: Airflow kwargs (must contain 'ti'; must contain 'dag'
            when etl_name_ or tasks_ are omitted).
        :return: (body, subject, errors, successes, skipped); also pushes
            xcom variables 'email_body' and 'email_subject' for use in the
            DAG's EmailOperator (see repository DAG examples).
        """
        # BUG FIX: the original referenced an undefined name `dag` and
        # discarded the computed task-id list.  The DAG object is available
        # in the Airflow context kwargs when provide_context=True.
        if not etl_name_ or not tasks_:
            dag = kwargs['dag']
            if not etl_name_:
                etl_name_ = dag.dag_id
            if not tasks_:
                tasks_ = [task.task_id for task in dag.tasks]
        body = f"<h3>{etl_name_} ETL has <font color=\"green\">completed.</font></h3>"
        body = body + AirflowHelpers.combine_xcom_pulls_from_tasks(tasks_, **kwargs)
        # Count outcome markers emitted by the exchange log lines.
        errors = body.count(f"{FileResultTypes.error.value}: ")
        successes = body.count(f"{FileResultTypes.success.value}: ")
        skipped = body.count(f"{FileResultTypes.empty.value}: ")
        dev_flag = '**DEV** ' if environment_ == 'development' else ''
        subject = f"{dev_flag}{etl_name_}: AIRFLOW Completed " + \
                  ("w/ Errors " if errors > 0 else "") + \
                  f"({errors} error(s), {successes} success(es), {skipped} skipped)"
        AirflowHelpers.push_xcom_variable('email_body', body, **kwargs)
        AirflowHelpers.push_xcom_variable('email_subject', subject, **kwargs)
        return body, subject, errors, successes, skipped
|
from flask import Flask
import requests
app = Flask(__name__)


@app.route("/")
def hello():
    """Proxy the companies list from the backend service.

    NOTE(review): the backend address is hard-coded; consider making it
    configurable.
    """
    response = requests.get("http://10.0.0.3:5000/companies")
    print(response.status_code)
    # BUG FIX: str(response.content) returned the bytes *repr* ("b'...'"),
    # leaking the b-prefix and escapes into the HTTP response.  Decode the
    # payload so the raw body is returned instead.
    return response.content.decode("utf-8")


# if __name__ == '__main__':
#     app.run(host='0.0.0.0', port=5001)
from django.apps import AppConfig
class VnfpackagesubscriptionConfig(AppConfig):
    """Django application configuration for the VnfPackageSubscription app."""

    # Dotted path Django uses to register the application.
    name = 'VnfPackageSubscription'
|
import unittest
import pytest
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from {{ cookiecutter.project_slug }}.config.settings.base import ROOT_DIR, env
class FunctionalTest(unittest.TestCase):
    """Base Selenium test case: opens Firefox and wires up accessibility checks."""

    def setUp(self):
        self.browser = webdriver.Firefox()
        self.accessibility = self.create_accessibility()

    def create_accessibility(self):
        # Factory hook so subclasses can substitute their own checker.
        return self.Accessibility(self)

    def tearDown(self):
        self.browser.quit()

    class Accessibility:
        """Accessibility assertions run against the live page."""

        def __init__(self, ft):
            # Back-reference to the owning FunctionalTest (browser + asserts).
            self.ft = ft

        def check(self):
            self.visual_impaired()

        def visual_impaired(self):
            # Check if all images have `alt` attribute
            # NOTE(review): get_attribute('alt') returns None when the
            # attribute is missing, and None != '' passes this check -
            # confirm whether missing-alt should fail.
            images = self.ft.browser.find_elements_by_tag_name('img')
            self.ft.assertTrue(
                all(image.get_attribute('alt') != '' for image in images), 'Image without `alt` attribute'
            )
class NewVisitorTest(FunctionalTest):
    """User story: a visitor logs in and reaches the dashboard."""

    def test_can_see_dashboard(self):
        # Visiting the local server address (localhost), the user can reach
        # the IoT Server.
        self.browser.get('http://localhost:8000')
        self.accessibility.check()
        # On that page the user finds the project name and a login screen.
        self.assertIn("Entrar", self.browser.title)
        email = self.browser.find_element_by_name('login')
        passwd = self.browser.find_element_by_name('password')
        # After entering valid credentials, the user is redirected to the
        # dashboard.
        email.send_keys(env.str("USERNAME"))
        passwd.send_keys(env.str("PASSWD"))
        email.submit()
        WebDriverWait(self.browser, 10).until(
            EC.title_is("{{ cookiecutter.project_slug }}")
        )
        self.accessibility.check()
if __name__ == "__main__":
    # Run the suite directly; suppress warning noise (e.g. ResourceWarning
    # from the browser socket) in the output.
    unittest.main(warnings='ignore')
|
# -*- coding: utf-8 -*-
def fact(n):  # computes the factorial n! = 1 * 2 * 3 * ... * n
    """Return n! computed by straight recursion.

    Fix: the original base case `if 1 == n` never terminated for n == 0
    (and recursed without bound for negative n); `n <= 1` makes
    fact(0) == 1 and stops negative inputs.  Deep recursion still raises
    RecursionError for large n (see the tail-recursion notes below).
    """
    if n <= 1:
        return 1
    return n * fact(n - 1)
def fact2(n):
    """Return n! via the tail-recursive helper fact_iter."""
    return fact_iter(n, 1)
def fact_iter(num, product):
    """Tail-recursive factorial accumulator: returns num! * product.

    Fix: the original base case `num == 1` never terminated for num == 0 or
    negative input; `num <= 1` makes fact_iter(0, 1) == 1.
    (Original note, translated: only the recursive call itself is returned -
    argument values are evaluated before the call.  CPython does not optimize
    tail calls, so very deep recursion still overflows the stack.)
    """
    if num <= 1:
        return product
    return fact_iter(num - 1, num * product)
# Quick smoke tests.  (Original comments translated from Chinese.)
print(fact(5))
# fact(1000) # too many recursive calls -> stack overflow (RecursionError)
print(fact2(5))
# fact2(1000) # too many recursive calls -> stack overflow (RecursionError)
# ### 递归(Recursion)
# - 递归函数: 函数在内部调用自身本身;
# - 递归函数的优点是定义简单,逻辑清晰;
#
# ### 栈溢出
# - 函数调用是通过栈(stack)实现的,每当进入一个函数调用,栈就会加一层栈帧,每当函数返回,栈就会减一层栈帧;
# - 栈的大小不是无限的,当递归调用的次数过多时,会导致栈溢出;
#
# ### 尾递归
# - 尾递归:在函数返回时调用自身本身,并且return语句不能包含表达式;
# - 如果编译器可以把尾递归做优化,使递归本身无论调用多少次,都只占用一个栈帧,就不会出现栈溢出的情况;
# - 遗憾的是,大多数编程语言没有针对尾递归做优化,Python解释器也没有做优化,任何递归函数都存在栈溢出的问题;
|
import numpy as np
import const
class TrueData():
def __init__(self, path_features="", path_label=""):
assert path_label!= "" and path_features != ""
self._index_in_epoch = 0
self._epochs_completed = 0
#self._num_examples = const.N_EXAMPLE
self._rate = 0.8
n_current = 0
fLabel = open(path_label,"r")
fFea = open(path_features, "r")
list_features = []
list_labels = []
list_labelids = []
list_lineids = []
while True:
line_label = fLabel.readline()
line_feature = fFea.readline()
if line_label == "":
break
n_current += 1
if line_feature == "":
print "Unmatching line at ",n_current
exit(-1)
line_label = line_label.strip()
label_vec = np.fromstring(line_label, dtype=int, sep=" ")
label_id = label_vec.argmax()
v_label_max = label_vec[label_id]
if v_label_max == 0:
continue
line_feature == line_feature.strip()
feature_vec = np.fromstring(line_feature, dtype=float, sep=" ")
#print feature_vec.shape
list_lineids.append(n_current)
list_features.append(feature_vec)
list_labels.append(label_vec)
list_labelids.append(label_id)
fLabel.close()
fFea.close()
print "Length: ",len(list_features),len(list_labels)
self._n_total = len(list_features)
#self.all_images = np.concatenate(list_features,axis=0)
#self.all_labels = np.concatenate(list_labels,axis=0)
self.all_images = np.asarray(list_features)
self.all_labels = np.asarray(list_labels)
self.all_labelids = np.array(list_labelids,dtype=int)
self.all_lineids =np.asarray(list_lineids,dtype=int)
print "All shape: ",self.all_images.shape,self.all_labels.shape,self.all_labelids.shape
perm0 = np.arange(self._n_total)
print perm0
np.random.shuffle(perm0)
i_spliter = int(self._n_total * self._rate)
self._num_examples = i_spliter
train_indices = perm0[:i_spliter]
test_indices = perm0[i_spliter:]
lineid_test= self.all_lineids[test_indices]
lineid_train = self.all_lineids[train_indices]
flineid = open("line_ids_testi.dat","w")
for idc in lineid_test:
flineid.write("%s\n"%idc)
flineid.close()
ftrainid =open("line_ids_traini.dat","w")
for idc in lineid_train:
ftrainid.write("%s\n"%idc)
ftrainid.close()
self.images = self.all_images[train_indices]
self.true_img_tags = self.all_labelids[:i_spliter]
self.texts = self.all_labels[train_indices]
self.dtags = np.ndarray((const.N_TAGS,const.INP_TXT_DIM))
self.dtags.fill(0)
for i in xrange(const.N_TAGS):
self.dtags[i][i] = 1
self.true_txt_tags = np.arange(const.N_TAGS)
self.test_images = self.all_images[test_indices]
self.true_test_img_tags = self.all_labelids[test_indices]
#
# self.images= np.random.rand(const.N_EXAMPLE, const.INP_IMAGE_DIM)
# self.true_img_tags = np.random.randint(0,const.N_TAGS,const.N_EXAMPLE)
#
# self.texts= np.random.rand(const.N_EXAMPLE, const.INP_TXT_DIM)
#
# self.dtags = np.random.rand(const.N_TAGS,const.INP_TXT_DIM)
# self.true_txt_tags = np.arange(const.N_TAGS)
#
#
# self.test_images = np.random.rand(const.N_TEST,const.INP_IMAGE_DIM)
# self.true_test_img_tags = np.random.randint(0,const.N_TAGS,const.N_TEST)
def get_all_train_images(self):
return self.images,self.true_img_tags
def get_all_txt_tags(self):
return self.dtags,self.true_txt_tags
def get_test_txt_tags(self):
return self.dtags, self.true_txt_tags
def get_test_images(self):
return self.test_images,self.true_test_img_tags
def tripple_data(self,data1,data2):
data1_0 = np.zeros((data1.shape),dtype=float)
data2_0 = np.zeros((data2.shape),dtype=float)
new_data1 = np.concatenate((data1,data1_0,data1),axis=0)
new_data2 = np.concatenate((data2,data2,data2_0),axis=0)
true_data1 = np.concatenate((data1,data1,data1),axis=0)
true_data2 = np.concatenate((data2,data2,data2), axis = 0)
segment_size = data1.shape[0]
perms = np.arange(segment_size*3)
for i in xrange(segment_size*3):
anchor = (i%3)*segment_size
offset = i/3
perms[i] = anchor + offset
new_data1 = new_data1[perms]
new_data2 = new_data2[perms]
return [(new_data1,new_data2),(true_data1,true_data2)]
def next_minibatch(self,batch_size):
start = self._index_in_epoch
if self._epochs_completed == 0 and start == 0 :
perm0 = np.arange(self._num_examples)
np.random.shuffle(perm0)
#self._images = self.images
#self._texts = self.texts
self._images = self.images[perm0]
self._texts = self.texts[perm0]
if start + batch_size > self._num_examples:
self._epochs_completed += 1
rest_num_examples = self._num_examples - start
images_rest_part = self._images[start:self._num_examples]
texts_rest_part = self._texts[start:self._num_examples]
start = 0
self._index_in_epoch = batch_size - rest_num_examples
end = self._index_in_epoch
images_new_part = self._images[start:end]
texts_new_part = self._texts[start:end]
return self.tripple_data(np.concatenate((images_rest_part, images_new_part), axis=0), np.concatenate(
(texts_rest_part, texts_new_part), axis=0))
else:
self._index_in_epoch += batch_size
end = self._index_in_epoch
return self.tripple_data(self._images[start:end], self._texts[start:end])
|
import os
from flask import Flask, render_template, request
from werkzeug.utils import secure_filename
from detection import detect_people
app = Flask(__name__)

# Uploaded images live under the static folder so templates can link them.
UPLOAD_FOLDER = './static/uploads'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER


@app.route('/')
def main():
    """Serve the landing page."""
    return render_template('./main.html')
@app.route('/uploader', methods=['GET', 'POST'])
def upload_file():
    """Save an uploaded image into UPLOAD_FOLDER and render it.

    NOTE(review): falls through (returns None -> HTTP 500 in Flask) on GET
    requests or when no file is selected - confirm whether a redirect or
    error page is intended.
    """
    if request.method == 'POST':
        file = request.files['file']
        if file:
            # secure_filename strips path components / unsafe characters.
            filename = secure_filename(file.filename)
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            return render_template('./show_image.html', image_file=filename)
@app.route('/detection', methods=['GET', 'POST'])
def detection():
    """Run people detection on a previously uploaded image, then show results."""
    if request.method == 'POST':
        image_file = request.form['image_file']
        if image_file:
            filename = image_file
            try:
                detect_people(filename)
            except Exception:
                # Best effort: the result page is rendered even if detection
                # fails.  NOTE(review): consider at least logging the error.
                pass
            return render_template('./result.html', image_file=filename)
if __name__ == '__main__':
    # Development server only; do not run with debug=True in production.
    app.run(debug=True)
|
# (c) Copyright IBM Corp. 2021
# (c) Copyright Instana Inc. 2017
import sys
import string
import instana
# Python 2/3 compatibility alias for "string-ish" isinstance checks.
if sys.version_info.major == 2:
    string_types = basestring
else:
    string_types = str
def test_id_generation():
    """Generated ids must parse as hex and fit in an unsigned 64-bit value."""
    for _ in range(10001):
        generated = instana.util.ids.generate_id()
        value = int(generated, 16)
        assert 0 <= value <= 18446744073709551615
def test_various_header_to_id_conversion():
    """header_to_long_id must normalize hex trace/span headers."""
    # Get a hex string to test against & convert
    header_id = instana.util.ids.generate_id()
    converted_id = instana.util.ids.header_to_long_id(header_id)
    assert(header_id == converted_id)
    # Hex value - result should be left padded
    result = instana.util.ids.header_to_long_id('abcdef')
    assert('0000000000abcdef' == result)
    # Hex value
    result = instana.util.ids.header_to_long_id('0123456789abcdef')
    assert('0123456789abcdef' == result)
    # Very long incoming header should just return the rightmost 16 bytes
    # NOTE(review): the assertion below expects the *full* 34-char input
    # back, contradicting this comment - confirm which behavior is intended.
    result = instana.util.ids.header_to_long_id('0x0123456789abcdef0123456789abcdef')
    assert('0x0123456789abcdef0123456789abcdef' == result)
def test_header_to_id_conversion_with_bogus_header():
    """Invalid headers of any type must map to the sentinel BAD_ID."""
    # None, an int, a list, and two strings with non-hex characters.
    for bogus in (None, 1234, [1234], '0xZZZZZZ', 'ZZZZZZ'):
        bogus_result = instana.util.ids.header_to_long_id(bogus)
        assert(instana.util.ids.BAD_ID == bogus_result)
|
# NOTE(review): SparkSession is never imported and `df` is never defined in
# this snippet - presumably supplied by an exercise harness; confirm.
spark = SparkSession \
    .builder \
    .appName("exercise_twentysix") \
    .getOrCreate()

# Average salary per gender; then average salary and id per country,
# limited to 7 output rows by show(7).
df.groupBy("gender").avg("salary").show()
df.groupBy("country").avg("salary", "id").show(7)
|
# -*- coding:utf-8 -*-
__author__ = 'XF'
'meta network'
import os
import math
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import torch
import torch.nn as nn
from torch.nn import functional as F
from copy import deepcopy
from tools import metrics as Metrics
# All newly created tensors default to float64 (double precision).
torch.set_default_tensor_type(torch.DoubleTensor)
class BaseCNN(nn.Module):
    """Minimal 1-D CNN whose weights live in an explicit ParameterList.

    Keeping every learnable tensor in self.vars (instead of registered
    submodules) lets callers pass substituted "fast weights" into forward(),
    as required by MAML-style meta-learning (see MetaNet below).
    """

    def __init__(self, output=10):
        super(BaseCNN, self).__init__()
        self.output = output
        # this list contains all tensor needed to be optimized
        self.vars = nn.ParameterList()
        # running_mean and running var
        self.vars_bn = nn.ParameterList()
        # Conv1d layer
        # [channel_out, channel_in, kernel-size]
        weight = nn.Parameter(torch.ones(64, 1, 3))
        nn.init.kaiming_normal_(weight)
        bias = nn.Parameter(torch.zeros(64))
        self.vars.extend([weight, bias])
        # linear layer: 64 channels * 100 positions after pooling
        weight = nn.Parameter(torch.ones(self.output, 64 * 100))
        bias = nn.Parameter(torch.zeros(self.output))
        self.vars.extend([weight, bias])

    def forward(self, x, vars=None, bn_training=True):
        '''
        :param x: [batch size, 1, 3, 94]  # NOTE(review): the linear layer
            expects 64*100 features, i.e. a sequence length of 200 after the
            squeeze/unsqueeze below - confirm the documented shape.
        :param vars: optional substituted parameter list ("fast weights");
            falls back to self.vars when None.
        :param bn_training: set false to not update (currently unused).
        :return: logits of shape (batch size, self.output)
        '''
        if vars is None:
            vars = self.vars
        x = x.squeeze(dim=2)
        x = x.unsqueeze(dim=1)
        # Conv1d layer
        weight, bias = vars[0].to(x.device), vars[1].to(x.device)
        # x ==> (batch size, 1, 200)
        x = F.conv1d(x, weight, bias, stride=1, padding=1)  # ==>(batch size, 64, 200)
        x = F.relu(x, inplace=True)  # ==> (batch_size, 64, 200)
        x = F.max_pool1d(x, kernel_size=2)  # ==> (batch_size, 64, 100)
        # linear layer
        x = x.view(x.size(0), -1)  # flatten ==> (batch_size, 64*100)
        weight, bias = vars[-2].to(x.device), vars[-1].to(x.device)
        x = F.linear(x, weight, bias)
        return x

    def parameters(self):
        # Overrides nn.Module.parameters() to expose the explicit list.
        return self.vars

    def zero_grad(self):
        # Intentionally a no-op: gradients are handled manually via
        # torch.autograd.grad in the meta-learning loop.
        pass
    pass
class BaseLSTM(nn.Module):
    """Hand-rolled LSTM with weights in an explicit ParameterList.

    Like BaseCNN, the parameters can be substituted at call time ("fast
    weights") for MAML-style meta-learning.  NOTE(review): forward() only
    ever reads params[0..3] (the first layer) even when layer_size > 1 -
    the extra layers' parameters are allocated but unused; confirm.
    """

    def __init__(self, n_features, n_hidden, n_output, n_layer=1):
        super().__init__()
        self.name = 'BaseLSTM'
        # this list contains all tensor needed to be optimized
        self.params = nn.ParameterList()
        self.input_size = n_features
        self.hidden_size = n_hidden
        self.output_size = n_output
        self.layer_size = n_layer
        # input layer (the *4 packs the i/f/g/o gate weights together)
        W_i = nn.Parameter(torch.Tensor(self.hidden_size * 4, self.input_size))
        bias_i = nn.Parameter(torch.Tensor(self.hidden_size * 4))
        self.params.extend([W_i, bias_i])
        # hidden layer
        W_h = nn.Parameter(torch.Tensor(self.hidden_size * 4, self.hidden_size))
        bias_h = nn.Parameter(torch.Tensor(self.hidden_size * 4))
        self.params.extend([W_h, bias_h])
        if self.layer_size > 1:
            for _ in range(self.layer_size - 1):
                # i-th layer
                # input layer
                W_i = nn.Parameter(torch.Tensor(self.hidden_size * 4, self.hidden_size))
                bias_i = nn.Parameter(torch.Tensor(self.hidden_size * 4))
                self.params.extend([W_i, bias_i])
                # hidden layer
                W_h = nn.Parameter(torch.Tensor(self.hidden_size * 4, self.hidden_size))
                bias_h = nn.Parameter(torch.Tensor(self.hidden_size * 4))
                self.params.extend([W_h, bias_h])
        # output layer
        W_linear = nn.Parameter(torch.Tensor(self.output_size, self.hidden_size))
        bias_linear = nn.Parameter(torch.Tensor(self.output_size))
        self.params.extend([W_linear, bias_linear])
        self.init()
        pass

    def parameters(self):
        # Overrides nn.Module.parameters() to expose the explicit list.
        return self.params

    def init(self):
        # Uniform init in [-1/sqrt(H), 1/sqrt(H)], the classic LSTM scheme.
        stdv = 1.0 / math.sqrt(self.hidden_size)
        for weight in self.parameters():
            weight.data.uniform_(-stdv, stdv)

    def forward(self, x, vars=None, init_state=None):
        """Run the LSTM over x; returns the output of the last time step.

        :param x: (batch_size, time_size, feature_size)
        :param vars: optional substituted parameter list ("fast weights").
        :param init_state: optional (h_0, c_0) pair; zeros when None.
        """
        if vars is None:
            params = self.params
        else:
            params = vars
        # assume the shape of x is (batch_size, time_size, feature_size)
        batch_size, time_size, _ = x.size()
        hidden_seq = []
        if init_state is None:
            h_t, c_t = (
                torch.zeros(batch_size, self.hidden_size).to(x.device),
                torch.zeros(batch_size, self.hidden_size).to(x.device)
            )
        else:
            h_t, c_t = init_state
        HS = self.hidden_size
        for t in range(time_size):
            x_t = x[:, t, :]
            W_i, bias_i = (params[0].to(x.device), params[1].to(x.device))
            W_h, bias_h = (params[2].to(x.device), params[3].to(x.device))
            # All four gates computed in one fused linear pass.
            gates = F.linear(x_t, W_i, bias_i) + F.linear(h_t, W_h, bias_h)
            i_t, f_t, g_t, o_t = (
                torch.sigmoid(gates[:, :HS]),  # input
                torch.sigmoid(gates[:, HS:HS * 2]),  # forget
                torch.tanh(gates[:, HS * 2:HS * 3]),
                torch.sigmoid(gates[:, HS * 3:])  # output
            )
            c_t = f_t * c_t + i_t * g_t
            h_t = o_t * torch.tanh(c_t)
            hidden_seq.append(h_t)
        # Project only the final hidden state to the output size.
        W_linear, bias_linear = (params[-2].to(x.device), params[-1].to(x.device))
        out = F.linear(hidden_seq[-1], W_linear, bias_linear)
        return out
class BaseCNNConLSTM(nn.Module):
    """CNN feature extractor feeding a hand-rolled LSTM, with one flat
    fast-weight parameter list (CNN params first, then LSTM params)."""

    def __init__(self, n_features, n_hidden, n_output, n_layer=1, time_size=1, cnn_feature=200):
        super(BaseCNNConLSTM, self).__init__()
        self.name = 'BaseCNNConLSTM'
        self.time_size = time_size
        # this list contain all tensor needed to be optimized
        self.params = nn.ParameterList()
        self.cnn = BaseCNN(output=cnn_feature)
        self.lstm = BaseLSTM(n_features=n_features, n_hidden=n_hidden, n_output=n_output, n_layer=n_layer)
        # How many leading entries of self.params belong to the CNN - used to
        # split a substituted fast-weight list in forward().
        self.cnn_tensor_num = 0
        self.lstm_tensor_num = 0
        self.init()

    def init(self):
        # Flatten both submodules' parameters into one ordered list.
        self.cnn_tensor_num = len(self.cnn.parameters())
        self.lstm_tensor_num = len(self.lstm.parameters())
        for param in self.cnn.parameters():
            self.params.append(param)
        for param in self.lstm.parameters():
            self.params.append(param)

    def sequence(self, data):
        """Slide a window of length time_size over rows of *data* (unused in
        forward() - kept as an alternative sequencing strategy)."""
        dim_1, dim_2 = data.shape
        new_dim_1 = dim_1 - self.time_size + 1
        x = torch.zeros((new_dim_1, self.time_size, dim_2))
        for i in range(dim_1 - self.time_size + 1):
            x[i] = data[i: i + self.time_size]
        return x.to(data.device)

    def forward(self, x, vars=None, init_states=None):
        """CNN features -> unsqueeze to a length-1 sequence -> LSTM output."""
        if vars is None:
            params = self.params
        else:
            params = vars
        # First cnn_tensor_num params drive the CNN, the rest the LSTM.
        x = self.cnn(x, params[: self.cnn_tensor_num])
        # x = self.sequence(x)
        x = x.unsqueeze(dim=2)
        output = self.lstm(x, params[self.cnn_tensor_num:], init_states)
        return output
        pass
    pass
class MetaNet(nn.Module):
def __init__(self, baseNet=None, update_step=10, update_step_test=20, meta_lr=0.001, base_lr=0.01, fine_lr=0.01):
super(MetaNet, self).__init__()
self.update_step = update_step
self.update_step_test = update_step_test
self.meta_lr = meta_lr
self.base_lr = base_lr
self.fine_tune_lr = fine_lr
if baseNet is not None:
self.net = baseNet
else:
raise Exception('baseNet is None')
self.meta_optim = torch.optim.Adam(self.net.parameters(), lr=self.meta_lr)
pass
def forward(self, spt_x, spt_y, qry_x, qry_y, device='cpu'):
'''
:param spt_x: if baseNet is cnn: [ spt size, in_channel, height, width], lstm [spt_size, time_size, feature_size]
:param spt_y: [ spt size]
:param qry_x: if baseNet is cnn: [ qry size, in_channel, height, width], lstm [qry size, time_size, feature_size]
:param qry_y: [ qry size]
:return:
'''
task_num = len(spt_x)
loss_list_qry = []
rmse_list = []
smape_list = []
qry_loss_sum = 0
# 0-th step update
for i in range(task_num):
x_spt = torch.from_numpy(spt_x[i]).to(device)
y_spt = torch.from_numpy(spt_y[i]).to(device)
x_qry = torch.from_numpy(qry_x[i]).to(device)
y_qry = torch.from_numpy(qry_y[i]).to(device)
y_hat = self.net(x_spt, vars=None)
loss = F.mse_loss(y_hat, y_spt)
grad = torch.autograd.grad(loss, self.net.parameters())
grads_params = zip(grad, self.net.parameters())
fast_weights = list(map(lambda p: p[1] - self.base_lr * p[0], grads_params))
with torch.no_grad():
y_hat = self.net(x_qry, fast_weights)
loss_qry = F.mse_loss(y_hat, y_qry)
loss_list_qry.append(loss_qry)
# calculating metrics
rmse, smape = Metrics(y_qry, y_hat)
rmse_list.append(rmse)
smape_list.append(smape)
for step in range(1, self.update_step):
y_hat = self.net(x_spt, fast_weights)
loss = F.mse_loss(y_hat, y_spt)
grad = torch.autograd.grad(loss, fast_weights)
grads_params = zip(grad, fast_weights)
fast_weights = list(map(lambda p: p[1] - self.base_lr * p[0], grads_params))
if step < self.update_step - 1:
with torch.no_grad():
y_hat = self.net(x_qry, fast_weights)
loss_qry = F.mse_loss(y_hat, y_qry)
loss_list_qry.append(loss_qry)
else:
y_hat = self.net(x_qry, fast_weights)
loss_qry = F.mse_loss(y_hat, y_qry)
loss_list_qry.append(loss_qry)
qry_loss_sum += loss_qry
with torch.no_grad():
rmse, smape = Metrics(y_qry, y_hat)
rmse_list.append(rmse)
smape_list.append(smape)
pass
# update meta net
loss_qry = qry_loss_sum / task_num
self.meta_optim.zero_grad()
loss_qry.backward()
self.meta_optim.step()
return {
'loss': loss_list_qry[-1].item(),
'rmse': rmse_list[-1],
'smape': smape_list[-1]
}
def fine_tuning(self, spt_x, spt_y, qry_x, qry_y, naive=False):
    '''
    Fine-tune a deep copy of the meta-trained net on one task and report
    the best query-set metrics observed over all adaptation steps.

    :param spt_x: if baseNet is cnn:[set size, channel, height, width] if baseNet is lstm: [batch_size, seq_size, feature_size]
    :param spt_y: support-set targets
    :param qry_x: query-set inputs (same layout as spt_x)
    :param qry_y: query-set targets
    :param naive: if True, also print progress for the first epoch
    :return: dict with the best losses/metrics, their epochs and the full
        train/validation loss history
    '''
    # metrics
    loss_qry_list = []
    rmse_list = []
    smape_list = []
    min_train_loss = 1000000  # sentinel; replaced by the first real loss
    loss_set = {
        'train_loss': [],
        'validation_loss': []
    }
    # Work on a deep copy so the meta parameters stay untouched.
    new_net = deepcopy(self.net)
    y_hat = new_net(spt_x)
    loss = F.mse_loss(y_hat, spt_y)
    loss_set['train_loss'].append(loss.item())
    if loss.item() < min_train_loss:
        min_train_loss = loss.item()
    grad = torch.autograd.grad(loss, new_net.parameters())
    grads_params = zip(grad, new_net.parameters())
    # One SGD step produces the task-specific "fast" weights.
    fast_weights = list(map(lambda p: p[1] - self.fine_tune_lr * p[0], grads_params))
    with torch.no_grad():
        y_hat = new_net(qry_x, fast_weights)
        loss_qry = F.mse_loss(y_hat, qry_y)
        loss_set['validation_loss'].append(loss_qry.item())
        loss_qry_list.append(loss_qry)
        rmse, smape = Metrics(qry_y, y_hat)
        rmse_list.append(rmse)
        smape_list.append(smape)
    # Best-so-far query metrics and the epochs where they occurred.
    min_rmse = rmse
    min_smape = smape
    min_loss = loss_qry.item()
    rmse_best_epoch = 1
    smape_best_epcoh = 1
    if naive:
        print(' Epoch [1] | train_loss: %.4f | test_loss: %.4f | rmse: %.4f | smape: %.4f |'
              % (loss.item(), loss_qry.item(), rmse, smape))
    for step in range(1, self.update_step_test):
        y_hat = new_net(spt_x, fast_weights)
        loss = F.mse_loss(y_hat, spt_y)
        loss_set['train_loss'].append(loss.item())
        if loss.item() < min_train_loss:
            min_train_loss = loss.item()
        grad = torch.autograd.grad(loss, fast_weights)
        grads_params = zip(grad, fast_weights)
        fast_weights = list(map(lambda p: p[1] - self.fine_tune_lr * p[0], grads_params))
        # testing on query set
        with torch.no_grad():
            # calculating metrics
            y_hat = new_net(qry_x, fast_weights)
            loss_qry = F.mse_loss(y_hat, qry_y)
            loss_set['validation_loss'].append(loss_qry.item())
            loss_qry_list.append(loss_qry)
            rmse, smape = Metrics(qry_y, y_hat)
            rmse_list.append(rmse)
            smape_list.append(smape)
        if min_smape > smape:
            min_smape = smape
            smape_best_epcoh = step + 1
        if min_rmse > rmse:
            # NOTE: 'test_loss' tracks the best-RMSE epoch, not the minimum
            # query loss itself.
            min_loss = loss_qry.item()
            min_rmse = rmse
            rmse_best_epoch = step + 1
        print(
            ' Epoch [%d] | train_loss: %.4f | test_loss: %.4f | rmse: %.4f | smape: %.4f |'
            % (step + 1, loss.item(), loss_qry.item(), rmse, smape))
    del new_net
    return {
        'test_loss': min_loss,
        'train_loss': min_train_loss,
        'rmse': min_rmse,
        'smape': min_smape,
        'rmse_best_epoch': rmse_best_epoch,
        'smape_best_epoch': smape_best_epcoh,
        'loss_set': loss_set
    }
pass
if __name__ == '__main__':
    # No standalone behaviour; this module is meant to be imported.
    pass
|
# Copyright (c) 2017, 2020, DCSO GmbH
import urllib.request, urllib.error, urllib.parse
import urllib.request, urllib.parse, urllib.error
import json
import csv
import sys
import argparse
import time
import os
import logging, logging.handlers
import splunk
from splunk.clilib import cli_common as cli
# Read the app's setup stanzas (proxy credentials and TIE API settings).
proxy_args = cli.getConfStanza('dcso_hunt_setup', 'proxy')
tie_args = cli.getConfStanza('dcso_hunt_setup', 'tie')
# other_args = cli.getConfStanza('dcso_hunt_setup','other')
# If a proxy host is configured, install a process-wide HTTPS proxy
# (with basic-auth credentials embedded in the URL) for all urllib calls.
if str(proxy_args['host']):
    proxy_link = "https://{}:{}@{}:{}".format(str(proxy_args['user']), str(proxy_args['password']),
                                              str(proxy_args['host']), str(proxy_args['port']))
    proxy = urllib.request.ProxyHandler({'https': proxy_link})
    auth = urllib.request.HTTPBasicAuthHandler()
    opener = urllib.request.build_opener(proxy, auth, urllib.request.HTTPHandler)
    urllib.request.install_opener(opener)
def setup_logging():
    """Configure and return a logger writing to Splunk's tie-pingback.log.

    Attaches a rotating file handler under SPLUNK_HOME/var/log/splunk and
    lets Splunk apply its standard log-level configuration files.
    """
    logger = logging.getLogger('splunk.foo')
    # NOTE(review): SPLUNK_HOME is hard-coded rather than read from the
    # environment -- confirm this matches the deployment layout.
    SPLUNK_HOME = "/opt/splunk/"
    LOGGING_DEFAULT_CONFIG_FILE = os.path.join(SPLUNK_HOME, 'etc', 'log.cfg')
    LOGGING_LOCAL_CONFIG_FILE = os.path.join(SPLUNK_HOME, 'etc', 'log-local.cfg')
    LOGGING_STANZA_NAME = 'python'
    LOGGING_FILE_NAME = "tie-pingback.log"
    BASE_LOG_PATH = os.path.join('var', 'log', 'splunk')
    LOGGING_FORMAT = "%(asctime)s %(levelname)-s\t%(module)s:%(lineno)d - %(message)s"
    splunk_log_handler = logging.handlers.RotatingFileHandler(
        os.path.join(SPLUNK_HOME, BASE_LOG_PATH, LOGGING_FILE_NAME), mode='a')
    splunk_log_handler.setFormatter(logging.Formatter(LOGGING_FORMAT))
    logger.addHandler(splunk_log_handler)
    # Let Splunk's own config files set levels for the 'python' stanza.
    splunk.setupSplunkLogger(logger, LOGGING_DEFAULT_CONFIG_FILE, LOGGING_LOCAL_CONFIG_FILE, LOGGING_STANZA_NAME)
    return logger
def ping_back(ioc_value):
    """POST an IOC sighting back to the TIE pingback API and return the body.

    :param ioc_value: alert payload dict; its 'configuration' entry must
        carry 'data_type', 'value', 'seen' and 'occurrences'.
    :return: raw response bytes from the API (also logged).
    """
    build = ioc_value['configuration']
    post = urllib.request.Request("%s" % str(tie_args["pingback_api"]))
    post.add_header("Authorization", 'bearer {}'.format(str(tie_args["token"])))
    # Request.add_data() was removed in Python 3.4; assign the form body to
    # the request's .data attribute instead. urlencode() also properly
    # percent-escapes the values, which the original "%s&..." interpolation
    # did not.
    post.data = urllib.parse.urlencode({
        'data_type': build['data_type'],
        'value': build['value'],
        'seen': build['seen'],
        'occurrences': build['occurrences'],
    }).encode('utf-8')
    response = urllib.request.urlopen(post).read()
    logger.info(response)
    return response
if __name__ == "__main__":
    logger = setup_logging()
    # Splunk invokes alert scripts with an --execute flag; refuse any other
    # invocation mode.
    if len(sys.argv) < 2 or sys.argv[1] != "--execute":
        print("FATAL Unsupported execution mode (expected --execute flag)", file=sys.stderr)
        sys.exit(1)
    else:
        # The triggering alert's payload arrives as JSON on stdin.
        ioc_value = json.loads(sys.stdin.read())
        ping_back(ioc_value)
|
class Bar(object):
    """Empty placeholder class."""
class Foo(object):
    """Minimal class exposing a dunder-named method for call-site tests."""

    def __foo__(self):
        # print as a function call: identical output under Python 2 (a
        # parenthesised expression) and valid syntax under Python 3, where
        # the original "print 'hi'" statement is a SyntaxError.
        print('hi')
f = Foo()
f.__foo__()
x = f.__foo__   # bound method reference
x()
#g = Foo()
#b = Bar()
x = Foo.__foo__  # plain/unbound function reference
# NOTE(review): g and b are never defined (their assignments above are
# commented out), so the next two calls raise NameError at runtime.
# This looks like a deliberate fixture for a static analyser -- confirm
# before "fixing" it.
x(g)
x(b)
|
"""
Latin Hypercube Sampling and Descriptive Sampling
This file is part of PUQ
Copyright (c) 2013 PUQ Authors
See LICENSE file for terms.
"""
from __future__ import absolute_import, division, print_function
import sys
import numpy as np
from puq.util import process_data
from puq.psweep import PSweep
from logging import debug
from .response import SampledFunc
from puq.jpickle import pickle
from puq.pdf import UniformPDF
class LHS(PSweep):
    """
    Class implementing Latin hypercube sampling (LHS).

    Args:
      params: Input list of :class:`Parameter`\s.
      num: Number of samples to use.
      ds(boolean): Use a modified LHS which always picks the center
        of the Latin square.
      response(boolean): Generate a response surface using the sample
        points.
      iteration_cb(function): A function to call after completion.
    """
    def __init__(self, params, num, ds=False, response=True, iteration_cb=None):
        PSweep.__init__(self, iteration_cb)
        self.params = params
        num = int(num)
        self.num = num
        self.ds = ds
        self.response = response
        self._start_at = 0  # index of the first sample not yet run (see extend)
        if self.response:
            # To generate a complete response surface, use Uniform distributions
            # with the same range as the original distributions.
            for p in self.params:
                if ds:
                    p.values = UniformPDF(*p.pdf.range).ds(num)
                else:
                    p.values = UniformPDF(*p.pdf.range).lhs(num)
        else:
            # Otherwise sample directly from each parameter's own PDF.
            for p in self.params:
                if ds:
                    p.values = p.pdf.ds(num)
                else:
                    p.values = p.pdf.lhs(num)

    # Returns a list of name,value tuples
    # For example, [('t', 1.0), ('freq', 133862.0)]
    def get_args(self):
        """Yield one [(name, value), ...] argument list per remaining sample."""
        for i in range(self._start_at, self.num):
            yield [(p.name, p.values[i]) for p in self.params]

    def _do_pdf(self, hf, data):
        """Summarise *data* (mean/stddev) and package results for process_data."""
        if self.response:
            # The response surface was built using Uniform distributions.
            # We are interested in the mean and deviation of the data
            # that would have been produced using the real PDFs. For this,
            # we need to compute a weighted mean and deviation
            weights = np.prod([p.pdf.pdf(p.values) for p in self.params], 0)
            tweight = np.sum(weights)
            mean = np.average(data, weights=weights)
            dev = np.sqrt(np.dot(weights, (data - mean)**2) / tweight)
            print("Mean   = %s" % mean)
            print("StdDev = %s" % dev)
            # Pickle a sampled response function built from the sample points.
            rsd = np.vstack(([p.values for p in self.params], data))
            rs = pickle(SampledFunc(*rsd, params=self.params))
            return [('response', rs), ('mean', mean), ('dev', dev)]
        else:
            print("Mean   = %s" % np.mean(data))
            print("StdDev = %s" % np.std(data))
            return [('samples', data), ('mean', np.mean(data)), ('dev', np.std(data))]

    def analyze(self, hf):
        """Post-process a completed sweep stored in HDF5 handle *hf*."""
        debug('')
        process_data(hf, 'lhs', self._do_pdf)

    # extend the sample size by a factor of 3
    # This works for DS because it always chooses the center of the probability bins.
    # This means we can extend by using 3 times the bins.
    # |     *     |     *     |
    # | * | * | * | * | * | * |
    def extend(self, num=None):
        """Triple the number of samples (Descriptive Sampling runs only)."""
        if not hasattr(self, 'ds') or not self.ds:
            print("You must use Descriptive Sampling to extend.")
            sys.exit(1)
        print("Extending Descriptive Sampling run to %s samples." % (self.num * 3))
        for p in self.params:
            if self.response:
                v = np.sort(UniformPDF(*p.pdf.range).ds(self.num * 3))
            else:
                v = np.sort(p.pdf.ds(self.num * 3))
            # remove the ones we already did
            v = np.concatenate((v[0::3], v[2::3]))
            p.values = np.concatenate((p.values, np.random.permutation(v)))
        self._start_at = self.num
        self.num *= 3
|
class MagicalGirlLevelOneDivTwo:
    """Solver: minimum Euclidean distance from the origin to any lattice
    point within Chebyshev distance d of (x, y)."""

    def theMinDistance(self, d, x, y):
        """Return min sqrt(a^2 + b^2) over the (2d+1)^2 grid centred on (x, y).

        The original wrapped the generator in sorted() before min(); min()
        alone yields the same value in O(n) instead of O(n log n).
        (range() replaces Python-2-only xrange(); semantics are identical.)
        """
        return min(
            (a ** 2 + b ** 2) ** 0.5
            for a in range(x - d, x + d + 1)
            for b in range(y - d, y + d + 1)
        )
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
Tests for EntityEnum module
"""
__author__ = "Graham Klyne (GK@ACM.ORG)"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import os
import unittest
import logging
log = logging.getLogger(__name__)
from django.conf import settings
from django.test import TestCase # cf. https://docs.djangoproject.com/en/dev/topics/testing/tools/#assertions
from annalist.identifiers import RDF, RDFS, ANNAL
from annalist import layout
from annalist.models.entityroot import EntityRoot
from annalist.models.entity import Entity
from annalist.models.site import Site
from annalist.models.collection import Collection
from annalist.models.recordenum import RecordEnumBase, RecordEnumFactory
from .AnnalistTestCase import AnnalistTestCase
from .tests import (
test_layout,
TestHost, TestHostUri, TestBasePath, TestBaseUri, TestBaseDir
)
from .init_tests import (
init_annalist_test_site, init_annalist_test_coll, resetSitedata
)
from .entity_testutils import (
collection_dir, site_view_url, site_title
)
# -----------------------------------------------------------------------------
#
# Helper functions
#
# -----------------------------------------------------------------------------
def recordenum_url(enum_id, coll_id="testcoll", type_id="testtype"):
    """
    Return the internal access URL for the indicated enumerated value.

    enum_id     id of enumerated value
    coll_id     id of collection
    type_id     id of enumeration type
    """
    return "/testsite/c/{coll_id}/d/{type_id}/{enum_id}/".format(
        coll_id=coll_id, type_id=type_id, enum_id=enum_id
    )
def recordenum_view_url(enum_id, coll_id="testcoll", type_id="testtype"):
    """
    Return the public access URL / URI for the indicated enumerated value.

    enum_id     id of enumerated value
    coll_id     id of collection
    type_id     id of enumeration type
    """
    segments = ["", "testsite", "c", coll_id, "d", type_id, enum_id, ""]
    return "/".join(segments)
def recordenum_dir(enum_id, coll_id="testcoll", type_id="testtype"):
    """Return the on-disk directory for the indicated enumerated value."""
    enum_path = layout.COLL_ENUM_PATH % {'type_id': type_id, 'id': enum_id}
    return collection_dir(coll_id) + enum_path + "/"
def recordenum_value_keys():
    """
    Keys present in default view entity data.
    """
    return [
        '@type',
        'annal:id', 'annal:type_id',
        'annal:type',
        'annal:url',
        'annal:uri',
        'rdfs:label', 'rdfs:comment',
    ]
def recordenum_create_values(
        enum_id, coll_id="testcoll", type_id="testtype",
        type_uri="annal:List_type", update="Enum", hosturi=TestHostUri):
    """
    Return the data used when creating enumeration test data.
    """
    enumuri = "annal:%s/%s" % (type_id, enum_id)
    types = ["annal:Enum", type_uri]
    label = '%s %s/%s/%s' % (update, coll_id, type_id, enum_id)
    return {
        '@type': types,
        'annal:type': types[0],
        'rdfs:label': label,
        'rdfs:comment': label,
        'annal:uri': enumuri,
    }
def recordenum_values(enum_id, coll_id="testcoll", type_id="testtype", update="Enum", hosturi=TestHostUri):
    """Return the full expected stored values for an enumerated test entity."""
    values = recordenum_create_values(
        enum_id, coll_id=coll_id, type_id=type_id, update=update, hosturi=hosturi
    ).copy()  # @@ copy needed here?
    values['annal:id'] = enum_id
    values['annal:type_id'] = type_id
    values['annal:url'] = recordenum_view_url(enum_id, coll_id=coll_id, type_id=type_id)
    # log.info(values)
    return values
def recordenum_read_values(
        enum_id, coll_id="testcoll", type_id="testtype", update="Enum", hosturi=TestHostUri):
    """Return the expected values as read back from storage (adds JSON-LD keys).

    NOTE(review): '@id' hard-codes "testenum1" regardless of enum_id -- this
    reproduces the original behaviour; confirm it is intended.
    """
    values = recordenum_values(
        enum_id, coll_id=coll_id, type_id=type_id, update=update, hosturi=hosturi
    ).copy()
    values['@id'] = layout.COLL_BASE_ENUM_REF % {'type_id': layout.ENUM_LIST_TYPE_ID, 'id': "testenum1"}
    values['@context'] = [{'@base': "../../"}, "../../coll_context.jsonld"]
    return values
# -----------------------------------------------------------------------------
#
# RecordEnum tests
#
# -----------------------------------------------------------------------------
class RecordEnumTest(AnnalistTestCase):
    """Tests for RecordEnum entities created via RecordEnumFactory."""

    def setUp(self):
        init_annalist_test_site()
        self.testsite = Site(TestBaseUri, TestBaseDir)
        self.testcoll = Collection(self.testsite, "testcoll")
        # The factory returns a new entity class bound to the enum type id.
        self.testenum = RecordEnumFactory("testenum", layout.ENUM_LIST_TYPE_ID)
        return

    def tearDown(self):
        return

    @classmethod
    def setUpClass(cls):
        super(RecordEnumTest, cls).setUpClass()
        return

    @classmethod
    def tearDownClass(cls):
        super(RecordEnumTest, cls).tearDownClass()
        resetSitedata(scope="collections")  #@@checkme@@
        return

    def test_RecordEnumTest(self):
        self.assertEqual(self.testenum.__name__, "testenum", "Check enumeration class name")
        return

    def test_recordenum_init(self):
        # Factory-generated class picks up the enumeration type id in its
        # reference, URL and directory.
        e = self.testenum(self.testcoll, "testenum")
        self.assertEqual(e._entitytype, ANNAL.CURIE.Enum)
        self.assertEqual(e._entityfile, layout.ENUM_META_FILE)
        self.assertEqual(e._entityref,
            layout.COLL_BASE_ENUM_REF%{'type_id': layout.ENUM_LIST_TYPE_ID, 'id': "testenum"}
            )
        self.assertEqual(e._entityid, "testenum")
        self.assertEqual(e._entityurl,
            TestHostUri + recordenum_url("testenum", coll_id="testcoll", type_id=layout.ENUM_LIST_TYPE_ID)
            )
        self.assertEqual(e._entitydir,
            recordenum_dir("testenum", coll_id="testcoll", type_id=layout.ENUM_LIST_TYPE_ID)
            )
        self.assertEqual(e._values, None)
        return

    def test_recordenum_base_init(self):
        # Note that if base class is used directly, the type_id value isn't recognized
        # as it needs to be a class property.
        e = RecordEnumBase(self.testcoll, "testenum2", layout.ENUM_LIST_TYPE_ID)
        self.assertEqual(e._entitytype, ANNAL.CURIE.Enum)
        self.assertEqual(e._entityfile, layout.ENUM_META_FILE)
        self.assertEqual(e._entityref,
            layout.COLL_BASE_ENUM_REF%{'type_id': layout.ENUM_LIST_TYPE_ID, 'id': "testenum2"}
            )
        self.assertEqual(e._entityid, "testenum2")
        self.assertEqual(e._entityurl,
            TestHostUri + recordenum_url(
                "testenum2", coll_id="testcoll", type_id="_enum_base_id"  # See note
                )
            )
        self.assertEqual(e._entitydir,
            recordenum_dir(
                "testenum2", coll_id="testcoll", type_id="_enum_base_id"  # See note
                )
            )
        self.assertEqual(e._values, None)
        resetSitedata(scope="collections")  #@@checkme@@
        return

    def test_recordenum1_data(self):
        # set_values/get_values round-trip for a first sample entity.
        e = self.testenum(self.testcoll, "testenum1")
        e.set_values(recordenum_create_values("testenum1", type_id=layout.ENUM_LIST_TYPE_ID))
        ed = e.get_values()
        self.assertEqual(set(ed.keys()), set(recordenum_value_keys()))
        v = recordenum_values("testenum1", type_id=layout.ENUM_LIST_TYPE_ID)
        self.assertDictionaryMatch(ed, v)
        return

    def test_recordenum2_data(self):
        e = self.testenum(self.testcoll, "testenum2")
        e.set_values(recordenum_create_values("testenum2", type_id=layout.ENUM_LIST_TYPE_ID))
        ed = e.get_values()
        self.assertEqual(set(ed.keys()), set(recordenum_value_keys()))
        v = recordenum_values("testenum2", type_id=layout.ENUM_LIST_TYPE_ID)
        self.assertDictionaryMatch(ed, v)
        return

    def test_recordenum_create_load(self):
        # Persist to disk, then load and compare against expected read values.
        ev = recordenum_create_values("testenum1", type_id=layout.ENUM_LIST_TYPE_ID)
        e = self.testenum.create(self.testcoll, "testenum1", ev)
        self.assertEqual(e._entitydir, recordenum_dir("testenum1", type_id=layout.ENUM_LIST_TYPE_ID))
        self.assertTrue(os.path.exists(e._entitydir))
        ed = self.testenum.load(self.testcoll, "testenum1").get_values()
        v = recordenum_read_values("testenum1", type_id=layout.ENUM_LIST_TYPE_ID)
        self.assertKeysMatch(ed, v)
        self.assertDictionaryMatch(ed, v)
        return

    def test_recordenum_type_id(self):
        # Only the factory-generated class carries a type id.
        r = EntityRoot(TestBaseUri, TestBaseUri, TestBaseDir, TestBaseDir)
        self.assertEqual(r.get_type_id(), None)
        e1 = Entity(r, "testid1")
        self.assertEqual(e1.get_type_id(), None)
        e2 = self.testenum(e1, "testid2")
        self.assertEqual(e2.get_type_id(), layout.ENUM_LIST_TYPE_ID)
        return

    def test_recordenum_child_ids(self):
        # Newly created entities appear among the collection's children.
        child_ids1 = self.testcoll.child_entity_ids(self.testenum, altscope="all")
        self.assertEqual(set(child_ids1), {'_initial_values', 'Grid', 'List'})
        ev = recordenum_create_values("testenum1", type_id=layout.ENUM_LIST_TYPE_ID)
        e = self.testenum.create(self.testcoll, "testenum1", ev)
        child_ids2 = self.testcoll.child_entity_ids(self.testenum, altscope="all")
        self.assertEqual(set(child_ids2), {'_initial_values', 'Grid', 'List', 'testenum1'})
        return
# End.
|
"""
Modular Exponential.
Modular exponentiation is a type of exponentiation performed over a modulus.
For more explanation, please check
https://en.wikipedia.org/wiki/Modular_exponentiation
"""
"""Calculate Modular Exponential."""
def modular_exponential(base: int, power: int, mod: int):
    """
    >>> modular_exponential(5, 0, 10)
    1
    >>> modular_exponential(2, 8, 7)
    4
    >>> modular_exponential(3, -2, 9)
    -1
    """
    # Negative exponents are not supported; signal that with -1.
    if power < 0:
        return -1
    # Square-and-multiply: consume the exponent one bit at a time.
    result, square = 1, base % mod
    while power:
        power, bit = divmod(power, 2)
        if bit:
            result = result * square % mod
        square = square * square % mod
    return result
def main():
    """Demonstrate modular_exponential on a sample input."""
    sample = modular_exponential(3, 200, 13)
    print(sample)
if __name__ == "__main__":
    # Run the embedded doctests first, then the demo.
    import doctest

    doctest.testmod()
    main()
|
#!/usr/bin/env python
"""Test runner for typeshed.
Depends on mypy and pytype being installed.
If pytype is installed:
1. For every pyi, run "pytd <foo.pyi>" in a separate process
"""
import os
import re
import sys
import argparse
import subprocess
import collections
# Command-line interface; parsed in main().
parser = argparse.ArgumentParser(description="Pytype tests.")
parser.add_argument('-n', '--dry-run', action='store_true', help="Don't actually run tests")
parser.add_argument('--num-parallel', type=int, default=1,
                    help="Number of test processes to spawn")
def main():
    """Parse CLI options, run the pytype pass, and exit non-zero on failure."""
    options = parser.parse_args()
    exit_code, num_runs = pytype_test(options)
    if exit_code:
        print("--- exit status %d ---" % exit_code)
        sys.exit(exit_code)
    if not num_runs:
        print("--- nothing to do; exit 1 ---")
        sys.exit(1)
def load_blacklist():
    """Return the paths listed in pytype_blacklist.txt, ignoring blank lines
    and '#' comments."""
    filename = os.path.join(os.path.dirname(__file__), "pytype_blacklist.txt")
    pattern = r"^\s*([^\s#]+)\s*(?:#.*)?$"
    with open(filename) as blacklist_file:
        contents = blacklist_file.read()
    return re.findall(pattern, contents, flags=re.M)
class PytdRun(object):
    """One "pytd <args>" invocation, started eagerly in the background."""

    def __init__(self, args, dry_run=False):
        self.args = args
        self.dry_run = dry_run
        self.results = None
        if dry_run:
            # Pretend the run succeeded without spawning a process.
            self.results = (0, "", "")
        else:
            self.proc = subprocess.Popen(
                ["pytd"] + args,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE)

    def communicate(self):
        """Return (returncode, stdout, stderr), waiting for the process at
        most once; repeated calls return the cached triple."""
        if self.results:
            return self.results
        stdout, stderr = self.proc.communicate()
        self.results = self.proc.returncode, stdout, stderr
        return self.results
def pytype_test(args):
    """Run "pytd" over every non-blacklisted stdlib pyi file.

    :param args: parsed CLI namespace (dry_run, num_parallel)
    :return: tuple (max exit code seen, number of files processed)
    """
    # Probe that the pytd binary is runnable at all before walking files.
    try:
        PytdRun(["-h"]).communicate()
    except OSError:
        print("Cannot run pytd. Did you install pytype?")
        return 0, 0

    wanted = re.compile(r"stdlib/(2\.7|2and3)/.*\.pyi$")
    skipped = re.compile("(%s)$" % "|".join(load_blacklist()))
    files = []
    for root, _, filenames in os.walk("stdlib"):
        for f in sorted(filenames):
            f = os.path.join(root, f)
            if wanted.search(f) and not skipped.search(f):
                files.append(f)

    running_tests = collections.deque()
    max_code, runs, errors = 0, 0, 0
    print("Running pytype tests...")
    while 1:
        # Keep up to --num-parallel subprocesses in flight.
        while files and len(running_tests) < args.num_parallel:
            test_run = PytdRun([files.pop()], dry_run=args.dry_run)
            running_tests.append(test_run)

        if not running_tests:
            break

        # Reap the oldest run and record its outcome.
        test_run = running_tests.popleft()
        code, stdout, stderr = test_run.communicate()
        max_code = max(max_code, code)
        runs += 1

        if code:
            print("pytd error processing \"%s\":" % test_run.args[0])
            print(stderr)
            errors += 1

    print("Ran pytype with %d pyis, got %d errors." % (runs, errors))
    return max_code, runs
if __name__ == '__main__':
    # Script entry point.
    main()
|
import copy
from datetime import datetime, timedelta
from dateutil.rrule import DAILY, MONTHLY, WEEKLY, YEARLY, rrule, rruleset
from django.contrib import messages
from django.core.files import File
from django.db import connections, transaction
from django.db.models import F, IntegerField, OuterRef, Prefetch, Subquery, Sum
from django.db.models.functions import Coalesce
from django.forms import inlineformset_factory
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import redirect, render
from django.urls import reverse
from django.utils.functional import cached_property
from django.utils.timezone import make_aware
from django.utils.translation import gettext_lazy as _, pgettext_lazy
from django.views import View
from django.views.generic import CreateView, DeleteView, ListView, UpdateView
from pretix.base.models import CartPosition, LogEntry
from pretix.base.models.checkin import CheckinList
from pretix.base.models.event import SubEvent, SubEventMetaValue
from pretix.base.models.items import (
ItemVariation, Quota, SubEventItem, SubEventItemVariation,
)
from pretix.base.reldate import RelativeDate, RelativeDateWrapper
from pretix.base.services.quotas import QuotaAvailability
from pretix.control.forms.checkin import SimpleCheckinListForm
from pretix.control.forms.filter import SubEventFilterForm
from pretix.control.forms.item import QuotaForm
from pretix.control.forms.subevents import (
CheckinListFormSet, QuotaFormSet, RRuleFormSet, SubEventBulkForm,
SubEventForm, SubEventItemForm, SubEventItemVariationForm,
SubEventMetaValueForm, TimeFormSet,
)
from pretix.control.permissions import EventPermissionRequiredMixin
from pretix.control.signals import subevent_forms
from pretix.control.views import PaginationMixin
from pretix.control.views.event import MetaDataEditorMixin
from pretix.helpers.models import modelcopy
class SubEventList(EventPermissionRequiredMixin, PaginationMixin, ListView):
    """Paginated list of an event's dates (subevents) with quota availability."""
    model = SubEvent
    context_object_name = 'subevents'
    template_name = 'pretixcontrol/subevents/index.html'
    permission = 'can_change_settings'

    def get_queryset(self):
        # Subquery: total paid tickets across all quotas of each subevent.
        sum_tickets_paid = Quota.objects.filter(
            subevent=OuterRef('pk')
        ).order_by().values('subevent').annotate(
            s=Sum('cached_availability_paid_orders')
        ).values(
            's'
        )
        qs = self.request.event.subevents.annotate(
            sum_tickets_paid=Subquery(sum_tickets_paid, output_field=IntegerField())
        ).prefetch_related(
            # Quotas ordered largest-first (NULL size treated as 0).
            Prefetch('quotas',
                     queryset=Quota.objects.annotate(s=Coalesce(F('size'), 0)).order_by('-s'),
                     to_attr='first_quotas')
        )
        if self.filter_form.is_valid():
            qs = self.filter_form.filter_qs(qs)
        return qs

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx['filter_form'] = self.filter_form

        # Show at most four quotas per date; refresh any stale cached
        # availability figures in one batched computation.
        quotas = []
        for s in ctx['subevents']:
            s.first_quotas = s.first_quotas[:4]
            quotas += list(s.first_quotas)

        qa = QuotaAvailability(early_out=False)
        for q in quotas:
            if q.cached_availability_time is None or q.cached_availability_paid_orders is None:
                qa.queue(q)
        qa.compute()

        for q in quotas:
            q.cached_avail = (
                qa.results[q] if q in qa.results
                else (q.cached_availability_state, q.cached_availability_number)
            )
            if q.size is not None:
                # Percentage of the quota already paid for, capped at 100.
                q.percent_paid = min(
                    100,
                    round(q.cached_availability_paid_orders / q.size * 100) if q.size > 0 else 100
                )
        return ctx

    @cached_property
    def filter_form(self):
        return SubEventFilterForm(data=self.request.GET)
class SubEventDelete(EventPermissionRequiredMixin, DeleteView):
    """Confirmation view that deletes a date unless orders reference it."""
    model = SubEvent
    template_name = 'pretixcontrol/subevents/delete.html'
    permission = 'can_change_settings'
    context_object_name = 'subevents'

    def get_object(self, queryset=None) -> SubEvent:
        try:
            return self.request.event.subevents.get(
                id=self.kwargs['subevent']
            )
        except SubEvent.DoesNotExist:
            raise Http404(pgettext_lazy("subevent", "The requested date does not exist."))

    def get(self, request, *args, **kwargs):
        # Refuse to even show the confirmation page when deletion is blocked.
        if not self.get_object().allow_delete():
            messages.error(request, pgettext_lazy('subevent', 'A date can not be deleted if orders already have been '
                                                              'placed.'))
            return HttpResponseRedirect(self.get_success_url())
        return super().get(request, *args, **kwargs)

    @transaction.atomic
    def delete(self, request, *args, **kwargs):
        self.object = self.get_object()
        success_url = self.get_success_url()
        if not self.object.allow_delete():
            messages.error(request, pgettext_lazy('subevent', 'A date can not be deleted if orders already have been '
                                                              'placed.'))
            return HttpResponseRedirect(self.get_success_url())
        else:
            self.object.log_action('pretix.subevent.deleted', user=self.request.user)
            # Drop cart positions (including add-ons) referencing this date
            # before deleting the date itself.
            CartPosition.objects.filter(addon_to__subevent=self.object).delete()
            self.object.cartposition_set.all().delete()
            self.object.delete()
            messages.success(request, pgettext_lazy('subevent', 'The selected date has been deleted.'))
        return HttpResponseRedirect(success_url)

    def get_success_url(self) -> str:
        return reverse('control:event.subevents', kwargs={
            'organizer': self.request.event.organizer.slug,
            'event': self.request.event.slug,
        })
class SubEventEditorMixin(MetaDataEditorMixin):
    """Shared form/formset plumbing for creating and updating subevents."""
    meta_form = SubEventMetaValueForm
    meta_model = SubEventMetaValue

    @cached_property
    def plugin_forms(self):
        # Forms contributed by plugins through the subevent_forms signal;
        # a receiver may return a single form or a list/tuple of forms.
        forms = []
        for rec, resp in subevent_forms.send(sender=self.request.event, subevent=self.object, request=self.request):
            if isinstance(resp, (list, tuple)):
                forms.extend(resp)
            else:
                forms.append(resp)
        return forms

    def _make_meta_form(self, p, val_instances):
        """Build one meta-property form, falling back to event-level defaults."""
        if not hasattr(self, '_default_meta'):
            self._default_meta = self.request.event.meta_data
        return self.meta_form(
            prefix='prop-{}'.format(p.pk),
            property=p,
            default=self._default_meta.get(p.name, ''),
            instance=val_instances.get(p.pk, self.meta_model(property=p, subevent=self.object)),
            data=(self.request.POST if self.request.method == "POST" else None)
        )

    @cached_property
    def cl_formset(self):
        """Inline formset for the subevent's check-in lists."""
        extra = 0
        kwargs = {}

        if self.copy_from and self.request.method != "POST":
            # Pre-fill from the subevent we are copying.
            kwargs['initial'] = [
                {
                    'name': cl.name,
                    'all_products': cl.all_products,
                    'limit_products': cl.limit_products.all(),
                    'include_pending': cl.include_pending,
                } for cl in self.copy_from.checkinlist_set.prefetch_related('limit_products')
            ]
            extra = len(kwargs['initial'])
        elif not self.object and self.request.method != "POST":
            # Fresh subevent: offer one empty default check-in list.
            kwargs['initial'] = [
                {
                    'name': '',
                    'all_products': True,
                    'include_pending': False,
                }
            ]
            extra = 0

        formsetclass = inlineformset_factory(
            SubEvent, CheckinList,
            form=SimpleCheckinListForm, formset=CheckinListFormSet,
            can_order=False, can_delete=True, extra=extra,
        )
        if self.object:
            kwargs['queryset'] = self.object.checkinlist_set.prefetch_related('limit_products')

        return formsetclass(self.request.POST if self.request.method == "POST" else None,
                            instance=self.object,
                            event=self.request.event, **kwargs)

    @cached_property
    def formset(self):
        """Inline formset for the subevent's quotas (at least one required)."""
        extra = 0
        kwargs = {}

        if self.copy_from and self.request.method != "POST":
            # Pre-fill quotas (including item/variation selections) from the
            # subevent we are copying.
            kwargs['initial'] = [
                {
                    'size': q.size,
                    'name': q.name,
                    'release_after_exit': q.release_after_exit,
                    'itemvars': [str(i.pk) for i in q.items.all()] + [
                        '{}-{}'.format(v.item_id, v.pk) for v in q.variations.all()
                    ]
                } for q in self.copy_from.quotas.prefetch_related('items', 'variations')
            ]
            extra = len(kwargs['initial'])

        formsetclass = inlineformset_factory(
            SubEvent, Quota,
            form=QuotaForm, formset=QuotaFormSet, min_num=1, validate_min=True,
            can_order=False, can_delete=True, extra=extra,
        )
        if self.object:
            kwargs['queryset'] = self.object.quotas.prefetch_related('items', 'variations')

        return formsetclass(
            self.request.POST if self.request.method == "POST" else None,
            instance=self.object,
            event=self.request.event, **kwargs
        )

    def save_cl_formset(self, obj):
        """Persist check-in-list formset changes against subevent *obj*."""
        for form in self.cl_formset.initial_forms:
            if form in self.cl_formset.deleted_forms:
                if not form.instance.pk:
                    continue
                # Remove recorded check-ins before deleting the list itself.
                form.instance.checkins.all().delete()
                form.instance.log_action(action='pretix.event.checkinlist.deleted', user=self.request.user)
                form.instance.delete()
                form.instance.pk = None
            elif form.has_changed():
                form.instance.subevent = obj
                form.instance.event = obj.event
                form.save()
                change_data = {k: form.cleaned_data.get(k) for k in form.changed_data}
                change_data['id'] = form.instance.pk
                form.instance.log_action(
                    'pretix.event.checkinlist.changed', user=self.request.user, data={
                        k: form.cleaned_data.get(k) for k in form.changed_data
                    }
                )

        for form in self.cl_formset.extra_forms:
            if not form.has_changed():
                continue
            # NOTE(review): this consults self.formset (the quota formset),
            # not self.cl_formset -- looks like a copy-paste slip; confirm
            # before changing, as fixing it alters which rows are skipped.
            if self.formset._should_delete_form(form):
                continue
            form.instance.subevent = obj
            form.instance.event = obj.event
            form.save()
            change_data = {k: form.cleaned_data.get(k) for k in form.changed_data}
            change_data['id'] = form.instance.pk
            form.instance.log_action(action='pretix.event.checkinlist.added', user=self.request.user, data=change_data)

    def save_formset(self, obj):
        """Persist quota formset changes against subevent *obj*."""
        for form in self.formset.initial_forms:
            if form in self.formset.deleted_forms:
                if not form.instance.pk:
                    continue
                form.instance.log_action(action='pretix.event.quota.deleted', user=self.request.user)
                obj.log_action('pretix.subevent.quota.deleted', user=self.request.user, data={
                    'id': form.instance.pk
                })
                form.instance.delete()
                form.instance.pk = None
            elif form.has_changed():
                # NOTE(review): assigns a SubEvent to .question, while the
                # parallel branches assign .subevent -- likely a copy-paste
                # slip (harmless if initial quotas already carry the
                # subevent FK); confirm against the rest of the project.
                form.instance.question = obj
                form.save()
                change_data = {k: form.cleaned_data.get(k) for k in form.changed_data}
                change_data['id'] = form.instance.pk
                obj.log_action(
                    'pretix.subevent.quota.changed', user=self.request.user, data={
                        k: form.cleaned_data.get(k) for k in form.changed_data
                    }
                )
                form.instance.log_action(
                    'pretix.event.quota.changed', user=self.request.user, data={
                        k: form.cleaned_data.get(k) for k in form.changed_data
                    }
                )

        for form in self.formset.extra_forms:
            if not form.has_changed():
                continue
            if self.formset._should_delete_form(form):
                continue
            form.instance.subevent = obj
            form.instance.event = obj.event
            form.save()
            change_data = {k: form.cleaned_data.get(k) for k in form.changed_data}
            change_data['id'] = form.instance.pk
            form.instance.log_action(action='pretix.event.quota.added', user=self.request.user, data=change_data)
            obj.log_action('pretix.subevent.quota.added', user=self.request.user, data=change_data)

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx['formset'] = self.formset
        ctx['cl_formset'] = self.cl_formset
        ctx['itemvar_forms'] = self.itemvar_forms
        ctx['meta_forms'] = self.meta_forms
        ctx['plugin_forms'] = self.plugin_forms
        return ctx

    @cached_property
    def copy_from(self):
        # Subevent to copy defaults from (?copy_from=pk); only honoured when
        # creating (no bound object yet). Returns None on any miss.
        if self.request.GET.get("copy_from") and not getattr(self, 'object', None):
            try:
                return self.request.event.subevents.get(pk=self.request.GET.get("copy_from"))
            except SubEvent.DoesNotExist:
                pass

    @cached_property
    def itemvar_forms(self):
        """Per-item / per-variation price & availability forms."""
        se_item_instances = {
            sei.item_id: sei for sei in SubEventItem.objects.filter(subevent=self.object)
        }
        se_var_instances = {
            sei.variation_id: sei for sei in SubEventItemVariation.objects.filter(subevent=self.object)
        }

        if self.copy_from:
            # Build unsaved clones carrying the copied prices/disabled flags.
            se_item_instances = {
                sei.item_id: SubEventItem(item=sei.item, price=sei.price, disabled=sei.disabled)
                for sei in SubEventItem.objects.filter(subevent=self.copy_from).select_related('item')
            }
            se_var_instances = {
                sei.variation_id: SubEventItemVariation(variation=sei.variation, price=sei.price, disabled=sei.disabled)
                for sei in SubEventItemVariation.objects.filter(subevent=self.copy_from).select_related('variation')
            }

        formlist = []
        for i in self.request.event.items.filter(active=True).prefetch_related('variations'):
            if i.has_variations:
                for v in i.variations.all():
                    inst = se_var_instances.get(v.pk) or SubEventItemVariation(subevent=self.object, variation=v)
                    formlist.append(SubEventItemVariationForm(
                        prefix='itemvar-{}'.format(v.pk),
                        item=i, variation=v,
                        instance=inst,
                        data=(self.request.POST if self.request.method == "POST" else None)
                    ))
            else:
                inst = se_item_instances.get(i.pk) or SubEventItem(subevent=self.object, item=i)
                formlist.append(SubEventItemForm(
                    prefix='item-{}'.format(i.pk),
                    item=i,
                    instance=inst,
                    data=(self.request.POST if self.request.method == "POST" else None)
                ))
        return formlist

    def is_valid(self, form):
        """Validate the main form and every attached formset/sub-form."""
        return form.is_valid() and all([f.is_valid() for f in self.itemvar_forms]) and self.formset.is_valid() and (
            all([f.is_valid() for f in self.meta_forms])
        ) and self.cl_formset.is_valid() and all(f.is_valid() for f in self.plugin_forms)
class SubEventUpdate(EventPermissionRequiredMixin, SubEventEditorMixin, UpdateView):
    """Edit view for an existing date (subevent)."""
    model = SubEvent
    template_name = 'pretixcontrol/subevents/detail.html'
    permission = 'can_change_settings'
    context_object_name = 'subevent'
    form_class = SubEventForm

    def post(self, request, *args, **kwargs):
        self.object = self.get_object()
        form = self.get_form()
        # is_valid() also validates all attached formsets and plugin forms.
        if self.is_valid(form):
            r = self.form_valid(form)
            return r
        messages.error(self.request, _('We could not save your changes. See below for details.'))
        return self.form_invalid(form)

    def get_object(self, queryset=None) -> SubEvent:
        try:
            return self.request.event.subevents.get(
                id=self.kwargs['subevent']
            )
        except SubEvent.DoesNotExist:
            raise Http404(pgettext_lazy("subevent", "The requested date does not exist."))

    @transaction.atomic
    def form_valid(self, form):
        self.save_formset(self.object)
        self.save_cl_formset(self.object)
        self.save_meta()
        for f in self.itemvar_forms:
            f.save()
        # TODO: LogEntry?
        messages.success(self.request, _('Your changes have been saved.'))
        if form.has_changed() or any(f.has_changed() for f in self.plugin_forms):
            # Merge main-form and plugin-form changes into one audit record;
            # uploaded files are logged by name only.
            data = {
                k: form.cleaned_data.get(k) for k in form.changed_data
            }
            for f in self.plugin_forms:
                data.update({
                    k: (f.cleaned_data.get(k).name
                        if isinstance(f.cleaned_data.get(k), File)
                        else f.cleaned_data.get(k))
                    for k in f.changed_data
                })
            self.object.log_action(
                'pretix.subevent.changed', user=self.request.user, data=data
            )
        for f in self.plugin_forms:
            f.subevent = self.object
            f.save()
        return super().form_valid(form)

    def get_success_url(self) -> str:
        return reverse('control:event.subevents', kwargs={
            'organizer': self.request.event.organizer.slug,
            'event': self.request.event.slug,
        }) + ('?' + self.request.GET.get('returnto') if 'returnto' in self.request.GET else '')

    def get_form_kwargs(self):
        kwargs = super().get_form_kwargs()
        kwargs['event'] = self.request.event
        return kwargs
class SubEventCreate(SubEventEditorMixin, EventPermissionRequiredMixin, CreateView):
    """Create a new subevent (date), optionally pre-filled from an existing one."""

    model = SubEvent
    template_name = 'pretixcontrol/subevents/detail.html'
    permission = 'can_change_settings'
    context_object_name = 'subevent'
    form_class = SubEventForm

    def post(self, request, *args, **kwargs):
        # Work with an unsaved instance; only form_valid() persists anything.
        self.object = SubEvent(event=self.request.event)
        form = self.get_form()
        if self.is_valid(form):
            return self.form_valid(form)
        return self.form_invalid(form)

    def get_success_url(self) -> str:
        return reverse('control:event.subevents', kwargs={
            'organizer': self.request.event.organizer.slug,
            'event': self.request.event.slug,
        })

    def get_form_kwargs(self):
        kwargs = super().get_form_kwargs()
        kwargs['event'] = self.request.event
        initial = kwargs.get('initial', {})
        if self.copy_from:
            # Pre-fill the form with an unsaved copy of the source subevent.
            i = modelcopy(self.copy_from)
            i.pk = None
            kwargs['instance'] = i
        else:
            kwargs['instance'] = SubEvent(event=self.request.event)
            # Default the location fields to the event-level values.
            initial['location'] = self.request.event.location
            initial['geo_lat'] = self.request.event.geo_lat
            initial['geo_lon'] = self.request.event.geo_lon
        kwargs['initial'] = initial
        return kwargs

    @transaction.atomic
    def form_valid(self, form):
        """Save the new subevent and all auxiliary forms in a single transaction."""
        form.instance.event = self.request.event
        messages.success(self.request, pgettext_lazy('subevent', 'The new date has been created.'))
        ret = super().form_valid(form)
        self.object = form.instance
        data = dict(form.cleaned_data)
        for f in self.plugin_forms:
            data.update({
                # Log file uploads by name only, not by content.
                k: (f.cleaned_data.get(k).name
                    if isinstance(f.cleaned_data.get(k), File)
                    else f.cleaned_data.get(k))
                for k in f.cleaned_data
            })
        form.instance.log_action('pretix.subevent.added', data=dict(data), user=self.request.user)
        self.save_formset(form.instance)
        self.save_cl_formset(form.instance)
        for f in self.itemvar_forms:
            f.instance.subevent = form.instance
            f.save()
        # Attach meta forms to the freshly created subevent before saving them.
        for f in self.meta_forms:
            f.instance.subevent = form.instance
        self.save_meta()
        for f in self.plugin_forms:
            f.subevent = form.instance
            f.save()
        return ret

    @cached_property
    def meta_forms(self):
        """Meta-property forms, seeded from copy_from's values when copying."""
        def clone(o):
            # Shallow copy with the pk cleared so saving creates a new row.
            o = copy.copy(o)
            o.pk = None
            return o
        if self.copy_from:
            val_instances = {
                v.property_id: clone(v) for v in self.copy_from.meta_values.all()
            }
        else:
            val_instances = {}
        formlist = []
        for p in self.request.organizer.meta_properties.all():
            formlist.append(self._make_meta_form(p, val_instances))
        return formlist
class SubEventBulkAction(EventPermissionRequiredMixin, View):
    """Apply a bulk operation (enable/disable/delete) to multiple subevents."""

    permission = 'can_change_settings'

    @cached_property
    def objects(self):
        # Only subevents of the current event that were selected in the POST body.
        return self.request.event.subevents.filter(
            id__in=self.request.POST.getlist('subevent')
        )

    @transaction.atomic
    def post(self, request, *args, **kwargs):
        """Dispatch on the requested action; every state change is logged."""
        if request.POST.get('action') == 'disable':
            for obj in self.objects:
                obj.log_action(
                    'pretix.subevent.changed', user=self.request.user, data={
                        'active': False
                    }
                )
                obj.active = False
                obj.save(update_fields=['active'])
            messages.success(request, pgettext_lazy('subevent', 'The selected dates have been disabled.'))
        elif request.POST.get('action') == 'enable':
            for obj in self.objects:
                obj.log_action(
                    'pretix.subevent.changed', user=self.request.user, data={
                        'active': True
                    }
                )
                obj.active = True
                obj.save(update_fields=['active'])
            messages.success(request, pgettext_lazy('subevent', 'The selected dates have been enabled.'))
        elif request.POST.get('action') == 'delete':
            # First deletion step: confirmation page that lists which dates can
            # be deleted and which can only be disabled (they have order positions).
            return render(request, 'pretixcontrol/subevents/delete_bulk.html', {
                'allowed': self.objects.filter(orderposition__isnull=True),
                'forbidden': self.objects.filter(orderposition__isnull=False),
            })
        elif request.POST.get('action') == 'delete_confirm':
            for obj in self.objects:
                if obj.allow_delete():
                    # Remove pending cart positions referencing this date first.
                    CartPosition.objects.filter(addon_to__subevent=obj).delete()
                    obj.cartposition_set.all().delete()
                    obj.log_action('pretix.subevent.deleted', user=self.request.user)
                    obj.delete()
                else:
                    # Dates with orders cannot be deleted; disable them instead.
                    obj.log_action(
                        'pretix.subevent.changed', user=self.request.user, data={
                            'active': False
                        }
                    )
                    obj.active = False
                    obj.save(update_fields=['active'])
            messages.success(request, pgettext_lazy('subevent', 'The selected dates have been deleted or disabled.'))
        return redirect(self.get_success_url())

    def get_success_url(self) -> str:
        return reverse('control:event.subevents', kwargs={
            'organizer': self.request.event.organizer.slug,
            'event': self.request.event.slug,
        })
class SubEventBulkCreate(SubEventEditorMixin, EventPermissionRequiredMixin, CreateView):
    """Create many subevents at once from recurrence rules plus a list of times.

    The user supplies one or more RRULE definitions (dates) and one or more
    time slots; one subevent is created for every (date, time slot) pair, and
    all auxiliary objects (meta values, item/variation price overrides,
    quotas, check-in lists, plugin settings) are copied onto each of them.
    """

    model = SubEvent
    template_name = 'pretixcontrol/subevents/bulk.html'
    permission = 'can_change_settings'
    context_object_name = 'subevent'
    form_class = SubEventBulkForm

    def is_valid(self, form):
        # Recurrence and time formsets are validated in addition to everything
        # checked by the mixin's is_valid().
        return self.rrule_formset.is_valid() and self.time_formset.is_valid() and super().is_valid(form)

    def get_success_url(self) -> str:
        return reverse('control:event.subevents', kwargs={
            'organizer': self.request.event.organizer.slug,
            'event': self.request.event.slug,
        })

    @cached_property
    def rrule_formset(self):
        return RRuleFormSet(
            data=self.request.POST if self.request.method == "POST" else None,
            prefix='rruleformset'
        )

    @cached_property
    def time_formset(self):
        return TimeFormSet(
            data=self.request.POST if self.request.method == "POST" else None,
            prefix='timeformset'
        )

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx['rrule_formset'] = self.rrule_formset
        ctx['time_formset'] = self.time_formset
        return ctx

    @cached_property
    def meta_forms(self):
        """Meta-property forms, seeded from copy_from's values when copying."""
        def clone(o):
            # Shallow copy with the pk cleared so saving creates a new row.
            o = copy.copy(o)
            o.pk = None
            return o
        if self.copy_from:
            val_instances = {
                v.property_id: clone(v) for v in self.copy_from.meta_values.all()
            }
        else:
            val_instances = {}
        formlist = []
        for p in self.request.organizer.meta_properties.all():
            formlist.append(self._make_meta_form(p, val_instances))
        return formlist

    def get_form_kwargs(self):
        kwargs = super().get_form_kwargs()
        initial = {
            'active': True,
        }
        kwargs['event'] = self.request.event
        tz = self.request.event.timezone
        if self.copy_from:
            # Pre-fill from an unsaved copy. Absolute datetimes are translated
            # into times / day offsets relative to date_from, because the
            # concrete dates are generated from the recurrence rules later.
            i = copy.copy(self.copy_from)
            i.pk = None
            kwargs['instance'] = i
            initial['time_from'] = i.date_from.astimezone(tz).time()
            initial['time_to'] = i.date_to.astimezone(tz).time() if i.date_to else None
            initial['time_admission'] = i.date_admission.astimezone(tz).time() if i.date_admission else None
            initial['rel_presale_start'] = RelativeDateWrapper(RelativeDate(
                days_before=(i.date_from.astimezone(tz).date() - i.presale_start.astimezone(tz).date()).days,
                base_date_name='date_from',
                time=i.presale_start.astimezone(tz).time(),
                minutes_before=None
            )) if i.presale_start else None
            initial['rel_presale_end'] = RelativeDateWrapper(RelativeDate(
                days_before=(i.date_from.astimezone(tz).date() - i.presale_end.astimezone(tz).date()).days,
                base_date_name='date_from',
                time=i.presale_end.astimezone(tz).time(),
                minutes_before=None
            )) if i.presale_end else None
        else:
            kwargs['instance'] = SubEvent(event=self.request.event)
            # Default the location fields to the event-level values.
            initial['location'] = self.request.event.location
            initial['geo_lat'] = self.request.event.geo_lat
            initial['geo_lon'] = self.request.event.geo_lon
        kwargs['initial'] = initial
        return kwargs

    def get_times(self):
        """Return cleaned_data of all non-deleted time forms that have a start time."""
        times = []
        for f in self.time_formset:
            if f in self.time_formset.deleted_forms or not f.cleaned_data.get('time_from'):
                continue
            times.append(f.cleaned_data)
        return times

    def get_rrule_set(self):
        """Build a dateutil rruleset from the recurrence formset."""
        s = rruleset()
        for f in self.rrule_formset:
            if f in self.rrule_formset.deleted_forms:
                continue
            rule_kwargs = {}
            rule_kwargs['dtstart'] = f.cleaned_data['dtstart']
            rule_kwargs['interval'] = f.cleaned_data['interval']
            if f.cleaned_data['freq'] == 'yearly':
                freq = YEARLY
                if f.cleaned_data['yearly_same'] == "off":
                    # "off" = not the same calendar date every year, but e.g.
                    # "the 2nd Tuesday of March".
                    rule_kwargs['bysetpos'] = int(f.cleaned_data['yearly_bysetpos'])
                    rule_kwargs['byweekday'] = f.parse_weekdays(f.cleaned_data['yearly_byweekday'])
                    rule_kwargs['bymonth'] = int(f.cleaned_data['yearly_bymonth'])
            elif f.cleaned_data['freq'] == 'monthly':
                freq = MONTHLY
                if f.cleaned_data['monthly_same'] == "off":
                    rule_kwargs['bysetpos'] = int(f.cleaned_data['monthly_bysetpos'])
                    rule_kwargs['byweekday'] = f.parse_weekdays(f.cleaned_data['monthly_byweekday'])
            elif f.cleaned_data['freq'] == 'weekly':
                freq = WEEKLY
                if f.cleaned_data['weekly_byweekday']:
                    rule_kwargs['byweekday'] = [f.parse_weekdays(a) for a in f.cleaned_data['weekly_byweekday']]
            elif f.cleaned_data['freq'] == 'daily':
                freq = DAILY
            # Rules either end after a fixed number of occurrences or at a date.
            if f.cleaned_data['end'] == 'count':
                rule_kwargs['count'] = f.cleaned_data['count']
            else:
                rule_kwargs['until'] = f.cleaned_data['until']
            if f.cleaned_data['exclude']:
                # Exclusion rules remove matching dates from the set.
                s.exrule(rrule(freq, **rule_kwargs))
            else:
                s.rrule(rrule(freq, **rule_kwargs))
        return s

    @transaction.atomic
    def form_valid(self, form):
        """Materialize one subevent per (recurrence date x time slot) pair."""
        tz = self.request.event.timezone
        subevents = []
        for rdate in self.get_rrule_set():
            for t in self.get_times():
                se = copy.copy(form.instance)
                se.date_from = make_aware(datetime.combine(rdate, t['time_from']), tz)
                if t.get('time_to'):
                    # An end time earlier than the start time means the date
                    # ends on the following day.
                    se.date_to = (
                        make_aware(datetime.combine(rdate, t['time_to']), tz)
                        if t.get('time_to') > t.get('time_from')
                        else make_aware(datetime.combine(rdate + timedelta(days=1), t['time_to']), tz)
                    )
                else:
                    se.date_to = None
                se.date_admission = (
                    make_aware(datetime.combine(rdate, t['time_admission']), tz)
                    if t.get('time_admission')
                    else None
                )
                se.presale_start = (
                    form.cleaned_data['rel_presale_start'].datetime(se)
                    if form.cleaned_data.get('rel_presale_start')
                    else None
                )
                se.presale_end = (
                    form.cleaned_data['rel_presale_end'].datetime(se)
                    if form.cleaned_data.get('rel_presale_end')
                    else None
                )
                # Defer cache invalidation until all dates exist (cleared below).
                se.save(clear_cache=False)
                subevents.append(se)
        data = dict(form.cleaned_data)
        for f in self.plugin_forms:
            data.update({
                # Log file uploads by name only, not by content.
                k: (f.cleaned_data.get(k).name
                    if isinstance(f.cleaned_data.get(k), File)
                    else f.cleaned_data.get(k))
                for k in f.cleaned_data
            })
        log_entries = []
        for se in subevents:
            log_entries.append(se.log_action('pretix.subevent.added', data=data, user=self.request.user, save=False))
        to_save = []
        for f in self.meta_forms:
            if f.cleaned_data.get('value'):
                for se in subevents:
                    i = copy.copy(f.instance)
                    i.pk = None
                    i.subevent = se
                    to_save.append(i)
        SubEventMetaValue.objects.bulk_create(to_save)
        to_save_items = []
        to_save_variations = []
        for f in self.itemvar_forms:
            for se in subevents:
                i = copy.copy(f.instance)
                i.pk = None
                i.subevent = se
                if isinstance(i, SubEventItem):
                    to_save_items.append(i)
                else:
                    to_save_variations.append(i)
        SubEventItem.objects.bulk_create(to_save_items)
        SubEventItemVariation.objects.bulk_create(to_save_variations)
        to_save_items = []
        to_save_variations = []
        # Quota formset: create a copy of each quota for every new date.
        for f in self.formset.forms:
            if self.formset._should_delete_form(f) or not f.has_changed():
                continue
            change_data = {k: f.cleaned_data.get(k) for k in f.changed_data}
            for se in subevents:
                i = copy.copy(f.instance)
                i.pk = None
                i.subevent = se
                i.event = se.event
                i.save(clear_cache=False)
                # 'itemvars' values look like "<item_id>" or "<item_id>-<variation_id>".
                # NOTE(review): selected_items/selected_variations are identical
                # for every se and could be hoisted out of this loop.
                selected_items = set(list(self.request.event.items.filter(id__in=[
                    i.split('-')[0] for i in f.cleaned_data.get('itemvars', [])
                ])))
                selected_variations = list(ItemVariation.objects.filter(item__event=self.request.event, id__in=[
                    i.split('-')[1] for i in f.cleaned_data.get('itemvars', []) if '-' in i
                ]))
                for _i in selected_items:
                    to_save_items.append(Quota.items.through(quota_id=i.pk, item_id=_i.pk))
                for _i in selected_variations:
                    to_save_variations.append(Quota.variations.through(quota_id=i.pk, itemvariation_id=_i.pk))
                change_data['id'] = i.pk
                log_entries.append(
                    i.log_action(action='pretix.event.quota.added', user=self.request.user,
                                 data=change_data, save=False)
                )
                log_entries.append(
                    se.log_action('pretix.subevent.quota.added', user=self.request.user, data=change_data, save=False)
                )
        Quota.items.through.objects.bulk_create(to_save_items)
        Quota.variations.through.objects.bulk_create(to_save_variations)
        to_save_products = []
        # Check-in list formset: copy each list onto every new date.
        for f in self.cl_formset.forms:
            if self.cl_formset._should_delete_form(f) or not f.has_changed():
                continue
            change_data = {k: f.cleaned_data.get(k) for k in f.changed_data}
            for se in subevents:
                i = copy.copy(f.instance)
                i.subevent = se
                i.event = se.event
                i.save()
                for _i in f.cleaned_data.get('limit_products', []):
                    to_save_products.append(CheckinList.limit_products.through(checkinlist_id=i.pk, item_id=_i.pk))
                change_data['id'] = i.pk
                log_entries.append(
                    i.log_action(action='pretix.event.checkinlist.added', user=self.request.user, data=change_data,
                                 save=False)
                )
        CheckinList.limit_products.through.objects.bulk_create(to_save_products)
        for f in self.plugin_forms:
            f.is_valid()
            for se in subevents:
                f.subevent = se
                f.save()
        if connections['default'].features.can_return_rows_from_bulk_insert:
            # Fast path: a single INSERT for all log entries.
            LogEntry.objects.bulk_create(log_entries)
            LogEntry.bulk_postprocess(log_entries)
        else:
            for le in log_entries:
                le.save()
            LogEntry.bulk_postprocess(log_entries)
        self.request.event.cache.clear()
        messages.success(self.request, pgettext_lazy('subevent', '{} new dates have been created.').format(len(subevents)))
        return redirect(reverse('control:event.subevents', kwargs={
            'organizer': self.request.event.organizer.slug,
            'event': self.request.event.slug,
        }))

    def post(self, request, *args, **kwargs):
        form = self.get_form()
        # Template instance used by the auxiliary forms; copies of it are saved.
        self.object = SubEvent(event=self.request.event)
        if self.is_valid(form):
            return self.form_valid(form)
        messages.error(self.request, _('We could not save your changes. See below for details.'))
        return self.form_invalid(form)
|
"""
"""
import numpy as np
def get_params(lgm):
    """Evaluate every mass-dependent model parameter at log10 halo mass ``lgm``.

    Returns a 1-d numpy array with the parameters in this fixed order:
    mean/sigma of lgtc, rounder fraction, the four mean-e terms, and the
    three Cholesky components.
    """
    components = (
        _mean_lgtc_vs_m0,
        _sigma_lgtc_vs_m0,
        _u_frac_rounder_vs_m0,
        _mean_e_early_rounder_vs_m0,
        _mean_e_late_rounder_vs_m0,
        _mean_e_early_flatter_vs_m0,
        _mean_e_late_flatter_vs_m0,
        _chol_e_early_early,
        _chol_e_late_late,
        _chol_e_early_late,
    )
    return np.array([fn(lgm) for fn in components])
def _sigmoid(x, x0, k, ymin, ymax):
height_diff = ymax - ymin
return ymin + height_diff / (1.0 + np.exp(-k * (x - x0)))
def _mean_lgtc_vs_m0(lgm):
    """Mean of lgtc as a function of log10 halo mass."""
    return _sigmoid(lgm, x0=13, k=1, ymin=0.75, ymax=0.925)


def _sigma_lgtc_vs_m0(lgm):
    """Scatter of lgtc; constant in mass (ymin == ymax)."""
    return _sigmoid(lgm, x0=13, k=1, ymin=-0.86, ymax=-0.86)


def _u_frac_rounder_vs_m0(lgm):
    """Unbounded rounder-population fraction vs. log10 halo mass."""
    return _sigmoid(lgm, x0=12.15, k=1.7, ymin=2, ymax=0.75)


def _mean_e_early_rounder_vs_m0(lgm):
    """Mean early-time e for the rounder population."""
    return _sigmoid(lgm, x0=13, k=2.5, ymin=1, ymax=5.5)


def _mean_e_late_rounder_vs_m0(lgm):
    """Mean late-time e for the rounder population."""
    return _sigmoid(lgm, x0=13.35, k=1.5, ymin=-10, ymax=-1.25)


def _mean_e_early_flatter_vs_m0(lgm):
    """Mean early-time e for the flatter population."""
    return _sigmoid(lgm, x0=13.5, k=1, ymin=-3.75, ymax=-1.5)


def _mean_e_late_flatter_vs_m0(lgm):
    """Mean late-time e for the flatter population."""
    return _sigmoid(lgm, x0=13.6, k=2, ymin=1.25, ymax=8)


def _chol_e_early_early(lgm):
    """Early-early Cholesky component of the e covariance."""
    return _sigmoid(lgm, x0=13.25, k=3, ymin=0.6, ymax=0.0)


def _chol_e_late_late(lgm):
    """Late-late Cholesky component; constant in mass (ymin == ymax)."""
    return _sigmoid(lgm, x0=13.25, k=3, ymin=0.12, ymax=0.12)


def _chol_e_early_late(lgm):
    """Early-late Cholesky cross component of the e covariance."""
    return _sigmoid(lgm, x0=13.25, k=1, ymin=2.5, ymax=1.25)
|
import csv
import math
from collections import defaultdict
import numpy as np
'''
about:
this code assumes that the output file (the result of our ML) matches the same csv schema as validation.csv, including the first row being column names
it loads the expected (validation.csv) ground truth into a dictionary and then checks whether the output found them
the metrics compute the distance and false positives
to dos:
Note: outputExample and validationWithoutAnomalies are identical right now
'''
# const:
assessmentResultsOutputFile = 'assessmentResults.txt'
groundTruthFile = 'validationWithoutAnomalies.csv'
classificationResultsFile = 'classificationResults.csv'
# The average size of a seal on a thermal image is about 2 by 5 pixels. Assume a hot spot detected within
# two times the size of a seal is a successful detection.
sealSizeInPixels = 4
# NOTE(review): name is a typo for "toleranceFactor"; kept to avoid breaking references.
tolenceFactor = 2
locationOffsetTolerance = sealSizeInPixels * tolenceFactor
# metrics:
falsePositives = 0
falseNegatives = 0
# Distance from each ground-truth hot spot to its nearest detection.
hotSpotDistances = []
registrationDistances = []
truePositives = 0
classificationTrue = 0
classificationFalse = 0
groundTruthPositives = 0
hotspotLocationTrue = 0
registrationTrue = 0
# This set of metrics reports accuracy when a spatial location match between
# identified hot spots and ground truth is required.
falsePositivesLM = 0
falseNegativesLM = 0
truePositivesLM = 0
# Sort the rows on x_pos of the hot spot, if they have the same photoId(on same image)
def getSortKey(item):
    """Return the value used to order hot-spot rows: x position (CSV column 5)."""
    x_pos = item[5]
    return x_pos
with open(groundTruthFile, 'r') as grountTruth:
    # col 5 and 6 = hotspot
    # col 7-10 = registration
    # col 12 = species_id
    ans = csv.reader(grountTruth)
    # Drop rows labelled as anomalies; they are not counted as seals.
    ans1 = list(filter(lambda row: row[11] != 'Anomaly', ans))
    groundTruthList = list(ans1)

# Dictionary with photoIds (thermal image names) and pointers to the rows with that image
groundTruthDict = {}
# fill the dictionary with the ground truth information
# start from row 1 to exclude col-name row
for i in range(1, len(groundTruthList)):
    # assume thermal photo name is the id and is in 3rd (index 2) column
    photoID = groundTruthList[i][2]
    # Count of real hot spots = all rows minus the col-name header row.
    # NOTE(review): this assignment is loop-invariant and could be hoisted
    # out of the loop without changing the result.
    groundTruthPositives = len(groundTruthList) - 1
    if photoID in groundTruthDict:
        groundTruthDict[photoID].append(i)
    else:
        groundTruthDict[photoID] = [i]
with open(classificationResultsFile, 'r') as results:
    res = csv.reader(results)
    resList1 = list(res)
    # Drop rows labelled as anomalies, mirroring the ground-truth filtering.
    resList = list(filter(lambda row: row[11] != 'Anomaly', resList1))

# photoId -> list of row indices into resList for that thermal image.
resDict = {}
# Start from row 1 to exclude the title/col-name row.
for j in range(1, len(resList)):
    resPhotoId = resList[j][2]
    if resPhotoId in resDict:
        resDict[resPhotoId].append(j)
    else:
        resDict[resPhotoId] = [j]

# Each image may have several hot spots identified.
# Assuming the spatial distribution pattern of hotspots on classification results
# and ground truth is the same, sort the rows on x_position and match the spots.
# TODO: need a method to match the spots when the number of rows differs between
# result and ground truth.
for key in resDict:
    # If the photo is absent from the ground-truth dictionary there were no
    # animals in the entire photo, so every detection is a false positive.
    if key not in groundTruthDict:
        falsePositives += len(resDict[key])
        falsePositivesLM += len(resDict[key])
    else:
        resRows = resDict.get(key)
        gtRows = groundTruthDict.get(key)
        lengthDiff = len(resRows) - len(gtRows)
        minLength = min(len(resRows), len(gtRows))
        if lengthDiff >= 0:
            # More detections than ground-truth seals: the surplus are false positives.
            # TODO: update calculation method when ground truth includes 'Anomaly'
            falsePositives += lengthDiff
            falsePositivesLM += lengthDiff
        else:
            # BUG FIX: lengthDiff is negative here; the original added it as-is,
            # which *decreased* the false-negative counters. Negate it instead.
            falseNegatives += -lengthDiff
            falseNegativesLM += -lengthDiff
        truePositives += minLength
        # Calculate metrics with the location-match requirement.
        resRowsData = [resList[k] for k in resRows]
        if len(resRowsData) > 1:
            resRowsData.sort(key=getSortKey)
        # BUG FIX: the original built gtRowsData from resList, but the indices
        # in gtRows point into groundTruthList.
        gtRowsData = [groundTruthList[l] for l in gtRows]
        if len(gtRowsData) > 1:
            gtRowsData.sort(key=getSortKey)
        for g in gtRowsData:
            minDist = 99999999
            for r in resRowsData:
                if r[1] != 'MatchesGroundTruth':
                    x1 = int(g[5])
                    y1 = int(g[6])
                    x2 = int(r[5])
                    y2 = int(r[6])
                    dist = math.hypot(x2 - x1, y2 - y1)
                    if dist < minDist:
                        minDist = dist
                        minResRow = r
            # Was there a detected hotspot within tolerance of this ground truth hotspot?
            hotSpotDistances.append(minDist)
            if minDist < locationOffsetTolerance:
                truePositivesLM += 1
                minResRow[1] = 'MatchesGroundTruth'  # Hackily reuse the 'timestamp' field to indicate that we have a match
        # Any detection left unmatched is a location-match false positive.
        for r in resRowsData:
            if r[1] != 'MatchesGroundTruth':
                falsePositivesLM += 1
        # Remove double counting when ground truth has more hot spots.
        # BUG FIX: the original tested `minLength < 0`, which can never be true
        # (it is a min of two lengths); the stated intent matches lengthDiff < 0.
        if lengthDiff < 0:
            falsePositivesLM += lengthDiff
# Print results on screen.
# FIX: output strings normalized ("grount truth" -> "ground truth",
# "positve" -> "positive", "pixles" -> "pixels") for consistency.
print('Accuracy assessment for hot spot detection on thermal images')
print('Number of hot spots in ground truth: {}'.format(groundTruthPositives))
print('Number of hot spots found: {}'.format(str(len(resList)-1)))
print('True positive(count): {} True positive(%): {}%'.format(truePositives, str(truePositives / groundTruthPositives * 100)))
print('False positive count: {}'.format(falsePositives))
print('False negative count: {}'.format(falseNegatives))
print('\n')
print('Accuracy assessment for hot spot detection with LOCATION MATCH on thermal images')
print('Number of hot spots in ground truth: {}'.format(groundTruthPositives))
print('Number of hot spots found: {}'.format(str(len(resList)-1)))
print('True positive(count): {} True positive(%): {}%'.format(truePositivesLM, str(truePositivesLM / groundTruthPositives * 100)))
print('False positive count: {}'.format(falsePositivesLM))
print('False negative count: {}'.format(falseNegativesLM))
print('\n')
print('Location offset for hot spots on thermal images')
print('10th percentile: {} pixels'.format(np.percentile(hotSpotDistances, 10)))
print('50th percentile: {} pixels'.format(np.percentile(hotSpotDistances, 50)))
print('90th percentile: {} pixels'.format(np.percentile(hotSpotDistances, 90)))
# Write the same report to a file.
# FIX: use the assessmentResultsOutputFile constant defined at the top of the
# file instead of a duplicated hard-coded filename.
with open(assessmentResultsOutputFile, 'w') as f:
    print('Accuracy assessment for hot spot detection on thermal images', file=f)
    print('Number of hot spots in ground truth: {}'.format(groundTruthPositives), file=f)
    print('Number of hot spots found: {}'.format(str(len(resList)-1)), file=f)
    print('True positive(count): {} True positive(%): {}%'.format(truePositives, str(truePositives / groundTruthPositives * 100)), file=f)
    print('False positive count: {}'.format(falsePositives), file=f)
    print('False negative count: {}'.format(falseNegatives), file=f)
    print('\n'*3, file=f)
    print('Accuracy assessment for hot spot detection with LOCATION MATCH on thermal images', file=f)
    print('Number of hot spots in ground truth: {}'.format(groundTruthPositives), file=f)
    print('Number of hot spots found: {}'.format(str(len(resList)-1)), file=f)
    print('True positive(count): {} True positive(%): {}%'.format(truePositivesLM, str(truePositivesLM / groundTruthPositives * 100)), file=f)
    print('False positive count: {}'.format(falsePositivesLM), file=f)
    print('False negative count: {}'.format(falseNegativesLM), file=f)
    print('\n'*3, file=f)
    print('Location offset for hot spots on thermal images', file=f)
    print('10th percentile: {} pixels'.format(np.percentile(hotSpotDistances, 10)), file=f)
    print('50th percentile: {} pixels'.format(np.percentile(hotSpotDistances, 50)), file=f)
    print('90th percentile: {} pixels'.format(np.percentile(hotSpotDistances, 90)), file=f)
|
from __future__ import unicode_literals
import boto3
import datetime
import requests
import json
def ping(event, context):
    """Ping the status of a webpage and report one CloudWatch metric.

    ``event`` may override any key of ``options`` below. A failing check
    (HTTP error, timeout, or missing required response string) reports a
    sentinel response time of 999 seconds.
    """
    options = {
        'domain': 'example.com',
        'protocol': 'http',
        'path': '/',
        'method': 'GET',
        # NOTE: booleans arrive as strings in the event payload, hence the
        # string comparison further down.
        'allow_redirects': "False",
        'timeout': 5,
        'verify_response_contains': '',
        'certfile_s3': None,
    }
    options.update(event)
    url = '{protocol}://{domain}{path}'.format(**options)
    response_time = None
    if options['certfile_s3']:
        # Use a custom CA bundle stored in S3 for TLS verification.
        s3 = boto3.resource('s3')
        s3.Bucket('binogi-cloudping-cert-authorities').download_file(options['certfile_s3'], '/tmp/cafile.pem')
        verify = '/tmp/cafile.pem'
    else:
        verify = True
    try:
        response = requests.request(
            options['method'],
            url,
            allow_redirects=(options['allow_redirects'] == "True"),
            timeout=options['timeout'],
            verify=verify)
        response.raise_for_status()
        response_time = response.elapsed.total_seconds()
        if options['verify_response_contains'] in response.text:
            result_value = 0
        else:
            print("Could not find required string in response", response.text)
            result_value = 1
    except Exception as e:
        # Any network/TLS/HTTP error counts as a failed check.
        print(str(e))
        result_value = 1
    # BUG FIX: the original used `isinstance(response_time, object)`, which is
    # true for *every* Python value, so the logged response_time was always 5.
    # Log the real value when it is JSON-serializable (a number or None) and
    # fall back to 5 only for non-serializable stand-ins (e.g. a Mock in tests).
    print(json.dumps({
        'cloudping_result': result_value,
        'response_time': response_time if isinstance(response_time, (int, float, type(None))) else 5,
        'url': url,
        'options': options
    }))
    client = boto3.client('cloudwatch')
    dimensions = [
        {
            'Name': 'url',
            'Value': url
        },
        {
            'Name': 'method',
            'Value': options['method']
        },
        {
            'Name': 'protocol',
            'Value': options['protocol']
        },
    ]
    timestamp = datetime.datetime.utcnow()
    # For cost saving reasons, only send ONE metric (responseTime) instead of a
    # separate status metric. If the HTTP status code or contents are not good,
    # we don't care about the response time and report a sentinel instead.
    if result_value != 0:
        response_time = 999
    client.put_metric_data(
        Namespace='cloudping',
        MetricData=[
            {
                'MetricName': 'responseTime',
                'Dimensions': dimensions,
                'Timestamp': timestamp,
                'Value': response_time,
                'Unit': 'Seconds',
                'StorageResolution': 60
            },
        ]
    )
|
from selinon import SelinonTask
class HelloTask(SelinonTask):
    """Minimal example task that greets the caller."""

    def run(self, node_args):
        """A simple hello world."""
        name = node_args.get('name', 'world')
        return {"result": "Hello, {}!".format(name)}
|
from flask import Blueprint

# Blueprint for user-profile pages; all of its routes are mounted under /profile.
profile_blue = Blueprint('profile', __name__, url_prefix='/profile')

# Imported last so the view decorators register against profile_blue without
# creating a circular import.
from . import views
from typing import Any, Dict, List
from lit_nlp.api import model as lit_model
from lit_nlp.api import types as lit_types
from expats.profiler.base import TextClassifier
class LITModelForTextClassifier(lit_model.Model):
    """Adapter exposing a TextClassifier profiler to the LIT interpretability UI."""

    def __init__(self, profier: TextClassifier, labels: List[str]):
        # NOTE(review): the parameter name 'profier' is a typo for 'profiler',
        # but renaming it would break keyword-argument callers.
        self._profiler = profier
        self._labels = labels

    def max_minibatch_size(self) -> int:
        # Maximum number of examples LIT will pass to predict_minibatch at once.
        return 32

    def predict_minibatch(self, inputs: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Run the profiler on a minibatch and map its output onto output_spec keys.

        Raises KeyError if the profiler's output is missing any expected key.
        """
        texts = [input_["sentence"] for input_ in inputs]
        ys = [input_["label"] for input_ in inputs]
        _output = self._profiler.interprete_via_prediction(texts, ys)
        try:
            output = [{
                "tokens": _output_per_inst["tokens"],
                "probas": _output_per_inst["probas"],
                "cls_emb": _output_per_inst["cls_emb"],
                "token_grad_sentence": _output_per_inst["token_grad_sentence"]
            } for _output_per_inst in _output]
            return output
        except KeyError as e:
            raise KeyError(f"Output spec of interprete_via_prediction seems to be not fit. error={e}")

    def input_spec(self) -> lit_types.Spec:
        # 'label' is optional so unlabeled examples can still be analyzed.
        return {
            "sentence": lit_types.TextSegment(),
            "label": lit_types.CategoryLabel(vocab=self._labels, required=False)
        }

    def output_spec(self) -> lit_types.Spec:
        return {
            "tokens": lit_types.Tokens(),
            "probas": lit_types.MulticlassPreds(parent="label", vocab=self._labels),
            "cls_emb": lit_types.Embeddings(),
            "token_grad_sentence": lit_types.TokenGradients(align="tokens")
        }
class LITModelForTextRegressor(lit_model.Model):
    """Adapter exposing a regression-style profiler to the LIT interpretability UI."""

    def __init__(self, profier: TextClassifier):
        # NOTE(review): the parameter name 'profier' is a typo for 'profiler',
        # but renaming it would break keyword-argument callers.
        self._profiler = profier

    def max_minibatch_size(self) -> int:
        # Maximum number of examples LIT will pass to predict_minibatch at once.
        return 32

    def predict_minibatch(self, inputs: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Run the profiler on a minibatch and map its output onto output_spec keys.

        Raises KeyError if the profiler's output is missing any expected key.
        """
        texts = [input_["sentence"] for input_ in inputs]
        ys = [input_["label"] for input_ in inputs]
        _output = self._profiler.interprete_via_prediction(texts, ys)
        try:
            output = [{
                "tokens": _output_per_inst["tokens"],
                "cls_emb": _output_per_inst["cls_emb"],
                "logits": _output_per_inst["logits"],
                "token_grad_sentence": _output_per_inst["token_grad_sentence"]
            } for _output_per_inst in _output]
            return output
        except KeyError as e:
            raise KeyError(f"Output spec of interprete_via_prediction seems to be not fit. error={e}")

    def input_spec(self) -> lit_types.Spec:
        # 'label' is optional so unlabeled examples can still be analyzed.
        return {
            "sentence": lit_types.TextSegment(),
            "label": lit_types.RegressionScore(required=False)
        }

    def output_spec(self) -> lit_types.Spec:
        return {
            "tokens": lit_types.Tokens(),
            "logits": lit_types.RegressionScore(),
            "cls_emb": lit_types.Embeddings(),
            "token_grad_sentence": lit_types.TokenGradients(align="tokens")
        }
|
from django.shortcuts import render
def gone(request, *args, **kwargs):
    """
    Display a nice 410 gone page.

    Extra positional/keyword arguments from the URLconf are accepted and ignored.
    """
    response = render(request, '410.html', status=410)
    return response
|
"""Tests for neurodocker.interfaces.PETPVC"""
# Author: Sulantha Mathotaarachchi <sulantha.s@gmail.com>
from __future__ import absolute_import, division, print_function
import pytest
from neurodocker import DockerContainer, Dockerfile
from neurodocker.interfaces import petpvc
from neurodocker.interfaces.tests import utils
class TestPETPVC(object):
    """Tests for PETPVC class."""

    def test_build_image_petpvc_120b_binaries_xenial(self):
        """Install PETPVC 1.2.0-b binaries on Ubuntu Xenial and smoke-test the image."""
        specs = {'pkg_manager': 'apt',
                 'check_urls': True,
                 'instructions': [
                     ('base', 'ubuntu:xenial'),
                     ('petpvc', {'version': '1.2.0-b', 'use_binaries': True}),
                     ('user', 'neuro'),
                 ]}
        df = Dockerfile(specs).cmd
        # Reuse a cached image (Dropbox/DockerHub mapping) when available;
        # otherwise build from the generated Dockerfile.
        dbx_path, image_name = utils.DROPBOX_DOCKERHUB_MAPPING['petpvc_xenial']
        image, push = utils.get_image_from_memory(df, dbx_path, image_name)
        cmd = "bash /testscripts/test_petpvc.sh"
        assert DockerContainer(image).run(cmd, volumes=utils.volumes)
        if push:
            utils.push_image(image_name)
import math
import json
from cocoa.web.main.db_reader import DatabaseReader as BaseDatabaseReader
from cocoa.core.util import write_json
class DatabaseReader(BaseDatabaseReader):
    """Negotiation-specific database reader that normalizes NaN offer prices."""

    @classmethod
    def get_chat_outcome(cls, cursor, chat_id):
        """Fetch a chat outcome, mapping a NaN offer price to None (JSON-safe)."""
        outcome = super(DatabaseReader, cls).get_chat_outcome(cursor, chat_id)
        try:
            if math.isnan(outcome['offer']['price']):
                outcome['offer']['price'] = None
        except (ValueError, TypeError, KeyError):
            # Missing or non-numeric offer/price: leave the outcome untouched.
            pass
        return outcome

    @classmethod
    def get_chat_example(cls, cursor, chat_id, scenario_db):
        """Fetch a chat example and attach the bot config used in that chat, if any."""
        ex = super(DatabaseReader, cls).get_chat_example(cursor, chat_id, scenario_db)
        # FIX: idiomatic identity test (was `not ex is None`).
        if ex is not None:
            cursor.execute('SELECT config FROM bot where chat_id=?', (chat_id,))
            result = cursor.fetchone()
            if result:
                ex.agents_info = {'config': result[0]}
        return ex

    @classmethod
    def process_event_data(cls, action, data):
        """Decode 'offer' event payloads from JSON, mapping a NaN price to None.

        Non-'offer' events are returned unchanged.
        """
        if action == 'offer':
            data = json.loads(data)
            try:
                if math.isnan(data['price']):
                    data['price'] = None
            # FIX: also catch KeyError (payload without a 'price' key), matching
            # the defensive handling in get_chat_outcome above.
            except (ValueError, TypeError, KeyError):
                pass
        return data

    @classmethod
    def dump_surveys(cls, cursor, json_path):
        """Export all survey responses, grouped by chat and responder, to JSON."""
        questions = ['fluent', 'honest', 'persuasive', 'fair', 'negotiator', 'coherent', 'comments']
        cursor.execute('''SELECT * FROM survey''')
        logged_surveys = cursor.fetchall()
        survey_data = {}
        agent_types = {}
        for survey in logged_surveys:
            # TODO: this is pretty lazy - support a variable number of questions per task eventually.
            (userid, cid, _, q1, q2, q3, q4, q5, q6, comments) = survey
            responses = dict(zip(questions, [q1, q2, q3, q4, q5, q6, comments]))
            cursor.execute('''SELECT agent_types, agent_ids FROM chat WHERE chat_id=?''', (cid,))
            chat_result = cursor.fetchone()
            agents = json.loads(chat_result[0])
            agent_ids = json.loads(chat_result[1])
            agent_types[cid] = agents
            if cid not in survey_data:
                survey_data[cid] = {0: {}, 1: {}}
            # The responder rated their partner: slot 0 if the survey taker was
            # agent 1, otherwise slot 1.
            partner_idx = 0 if agent_ids['1'] == userid else 1
            survey_data[cid][partner_idx] = responses
        write_json([agent_types, survey_data], json_path)
|
from django import forms
from .models import Participant
class RegistrationForm(forms.Form):
    """Minimal registration form collecting only the participant's email address."""
    email=forms.EmailField(label='Your Email')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Tagger wrappers that wrap AllenNLP functionality. Used for and named entity recognition.
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
import math
import logging
import re
import copy
import datetime
from .tag import BaseTagger
from ..data import find_data
import torch
import json
from yaspin import yaspin
from allennlp.data.tokenizers.token import Token as AllenNLPToken
from allennlp.models.archival import load_archive
from allennlp.predictors import SentenceTaggerPredictor
from allennlp.data.instance import Instance
from allennlp.data.fields.text_field import TextField
log = logging.getLogger(__name__)
class ProcessedTextTagger(BaseTagger):
    """
    Class to process text before the text is fed into any other taggers.

    This class is designed to be used with AllenNlpWrapperTagger and replaces any
    single-number tokens with <nUm> in accordance with the training data.
    """
    tag_type = "processed_text"
    # Raw string fixes the invalid-escape-sequence warnings of the original
    # pattern. Matches an optionally signed number whose digits may be
    # separated by '.', '・' or ',' (e.g. "-1,234.5").
    number_pattern = re.compile(r'([\+\-–−]?\d+(([\.・,\d])+)?)')
    number_string = "<nUm>"

    def tag(self, tokens):
        """Return (token, processed_text) pairs, mapping bare number tokens to <nUm>."""
        tags = []
        for token in tokens:
            processed_text = token.text
            if self.number_pattern.fullmatch(processed_text):
                processed_text = self.number_string
            tags.append((token, processed_text))
        return tags
class _AllenNlpTokenTagger(BaseTagger):
    """
    Tagger pairing each CDE token with an equivalent AllenNLP token.

    Intended for internal use with AllenNlpWrapperTagger.
    """
    tag_type = "_allennlptoken"

    def tag(self, tokens):
        """Return (token, AllenNLP token) pairs built from each token's processed text."""
        return [(tok, AllenNLPToken(text=tok.processed_text)) for tok in tokens]
class AllenNlpWrapperTagger(BaseTagger):
    """
    A wrapper for an AllenNLP model. Tested with a CRF Tagger but should work with any sequence labeller trained
    in allennlp.
    """
    # Subclasses are expected to override these class attributes:
    # model: model path/name resolved via find_data() when archive_location is not given
    model = None
    # tag_type: the name under which this tagger's results are exposed (see BaseTagger)
    tag_type = None
    # indexers: dict of AllenNLP TokenIndexers used to turn tokens into tensors
    indexers = None
    # overrides: JSON-serializable config overrides passed to AllenNLP's load_archive
    overrides = None
    def __init__(self, indexers=None,
                 weights_location=None,
                 gpu_id=None,
                 archive_location=None,
                 tag_type=None,
                 min_batch_size=None,
                 max_batch_size=None,
                 max_allowed_length=None):
        """
        :param indexers (dict(str, ~allennlp.data.token_indexers.TokenIndexer), optional): A dictionary of all the AllenNLP indexers to be used with the taggers.
            Please refer to their documentation for more detail.
        :param weights_location (str, optional): Location for weights.
            Corresponds to weights_file parameter for the load_archive function from AllenNLP.
        :param gpu_id (int, optional): The ID for the GPU to be used. If None is passed in, ChemDataExtractor will
            automatically detect if a GPU is available and use that. To explicitly use the CPU, pass in a value of -1.
        :param archive_location (str, optional): The location where the model is archived. Corresponds to the archive_file
            parameter in the load_archive function from AllenNLP. Alternatively, you can set this parameter to None and set
            the class property ``model``, which will then search for the model inside of ChemDataExtractor's default model directory.
        :param tag_type (obj, optional): Override the class's tag type. Refer to the documentation for
            :class:`~chemdataextractor.nlp.tag.BaseTagger` for more information on how to use tag types.
        :param min_batch_size (int, optional): The minimum batch size to use when predicting. Default 100.
        :param max_batch_size (int, optional): The maximum batch size to use when predicting. Default 300.
        :param max_allowed_length (int, optional): The maximum allowed length of a sentence when predicting.
            Default 220. Any sentences longer than this will be split into multiple smaller sentences via a sliding window approach and the
            results will be collected. Needs to be a multiple of 4 for correct predictions.
        """
        # Instance arguments take precedence over class-level defaults.
        if tag_type is not None:
            self.tag_type = tag_type
        if indexers is not None:
            self.indexers = indexers
        if self.indexers is None:
            self.indexers = {}
        self._gpu_id = gpu_id
        if archive_location is None:
            archive_location = find_data(self.model)
        self._weights_location = weights_location
        self._archive_location = archive_location
        # Predictor is loaded lazily on first access (see the predictor property).
        self._predictor = None
        if self.overrides is None:
            self.overrides = {}
        self.min_batch_size = min_batch_size
        if min_batch_size is None:
            self.min_batch_size = 100
        self.max_batch_size = max_batch_size
        if max_batch_size is None:
            self.max_batch_size = 300
        self.max_allowed_length = max_allowed_length
        if max_allowed_length is None:
            self.max_allowed_length = 220
    def process(self, tag):
        """
        Process the given tag. This can be used for example if the names of tags in training are different
        from what ChemDataExtractor expects.
        :param tag str: The raw string output from the predictor.
        :returns: A processed version of the tag
        :rtype: str
        """
        # Default is the identity; subclasses may override.
        return tag
    @property
    def predictor(self):
        """
        The AllenNLP predictor for this tagger.
        """
        # Lazily load and cache the model the first time it is needed.
        if self._predictor is None:
            with yaspin(text="Initialising AllenNLP model", side="right").simpleDots as sp:
                gpu_id = self._gpu_id
                # Auto-detect a GPU unless the caller pinned one (or -1 for CPU).
                if gpu_id is None and torch.cuda.is_available():
                    gpu_id = torch.cuda.current_device()
                loaded_archive = load_archive(archive_file=self._archive_location, weights_file=self._weights_location,
                                              overrides=json.dumps(self.overrides))
                model = loaded_archive.model
                if gpu_id is not None and gpu_id >= 0:
                    model = model.cuda(gpu_id)
                model = model.eval()
                # NOTE(review): deepcopy presumably isolates the predictor from
                # shared archive state — confirm before removing.
                self._predictor = copy.deepcopy(SentenceTaggerPredictor(model=model, dataset_reader=None))
                sp.ok("✔")
        return self._predictor
    def tag(self, tokens):
        """Tag a single sentence by delegating to batch_tag."""
        tags = list(self.batch_tag([tokens])[0])
        return tags
    def batch_tag(self, sents):
        """
        :param chemdataextractor.doc.text.RichToken sents:
        :returns: list(list(~chemdataextractor.doc.text.RichToken, obj))
        Take a list of lists of all the tokens from all the elements in a document, and return a list of lists of (token, tag) pairs.
        One thing to note is that the resulting list of lists of (token, tag) pairs need not be in the same order as the incoming list
        of lists of tokens, as sorting is done so that we can bucket sentences by their lengths.
        More information can be found in the :class:`~chemdataextractor.nlp.tag.BaseTagger` documentation, and :ref:`in this guide<creating_taggers>`.
        """
        log.debug(len(sents))
        start_time = datetime.datetime.now()
        # Divide up the sentence so that we don't get sentences longer than BERT can handle
        all_allennlptokens, sentence_subsentence_map = self._get_subsentences(sents)
        # Create batches
        # Sorting by length lets _create_batches bucket similar-length sentences.
        all_allennlptokens = sorted(all_allennlptokens, key=len)
        instances = self._create_batches(all_allennlptokens)
        instance_time = datetime.datetime.now()
        log.debug("".join(["Created instances:", str(instance_time - start_time)]))
        # NOTE(review): this log call has no %s placeholder for its second
        # argument, so the batch count is dropped from the message.
        log.debug("Num Batches: ", len(instances))
        predictions = []
        for instance in instances:
            prediction_start_time = datetime.datetime.now()
            log.debug("".join(["Batch size:", str(len(instance))]))
            with torch.no_grad():
                batch_predictions = self.predictor.predict_batch_instance(instance)
                predictions.extend(batch_predictions)
            prediction_end_time = datetime.datetime.now()
            log.debug("".join(["Batch time:", str(prediction_end_time - prediction_start_time)]))
        # Key predictions by the identity of their (sub)sentence token list so
        # _assign_tags can reunite them with the original sentences.
        id_predictions_map = {}
        for allensentence, prediction in zip(all_allennlptokens, predictions):
            id_predictions_map[id(allensentence)] = prediction["tags"]
        # Assign tags to each sentence
        tags = self._assign_tags(sents, sentence_subsentence_map, id_predictions_map)
        end_time = datetime.datetime.now()
        log.debug("".join(["Total time for batch_tag:", str(end_time - start_time)]))
        return tags
    def _get_subsentences(self, sents):
        """
        ChemDataExtractor may encounter sentences that are longer than what some of the
        taggers in AllenNLP may support. (e.g. a BERT based tagger only supports sequences
        up to 512 tokens long). This method gets around this limitation by splitting such
        long sentences into multiple overlapping subsentences using a sliding window,
        and returning a map between these subsentences and their parent sentence.
        """
        # Maps id(sentence) -> [id(subsentence token list), ...]
        sentence_subsentence_map = {}
        all_allennlptokens = []
        max_allowed_length = self.max_allowed_length
        for sent in sents:
            subsentences = [sent]
            if len(sent) > max_allowed_length:
                # Window length is rounded to a multiple of 4 (see class docs);
                # windows overlap by half their length.
                num_sent_divisions = len(sent) / max_allowed_length
                num_tokens_per_subsentence = math.ceil(math.ceil(len(sent) / num_sent_divisions) / 4) * 4
                increment = math.ceil(num_tokens_per_subsentence / 2)
                subsentences = [sent[: num_tokens_per_subsentence]]
                i = increment
                while i + increment < len(sent):
                    subsentences.append(sent[i: i + num_tokens_per_subsentence])
                    i += increment
            allennlpsents_for_sent = []
            for subsent in subsentences:
                allennlptokens = []
                for token in subsent:
                    allennlptokens.append(token._allennlptoken)
                allennlpsents_for_sent.append(id(allennlptokens))
                all_allennlptokens.append(allennlptokens)
            sentence_subsentence_map[id(sent)] = allennlpsents_for_sent
        return all_allennlptokens, sentence_subsentence_map
    def _create_batches(self, all_allennlptokens):
        """
        Create batches to feed into the predictor within the given batch size range.
        To try to be more efficient, these batches are sorted by the length of the sentences.
        """
        min_batch_size = self.min_batch_size
        max_batch_size = self.max_batch_size
        # A new batch starts when sentence length has grown by more than this
        # delta (and the batch is big enough), or when the batch hits max size.
        new_list_sequence_delta = 5
        instances = []
        if len(all_allennlptokens) > min_batch_size:
            # Assumes all_allennlptokens is sorted by length (see batch_tag).
            current_list_min_sequence_length = len(all_allennlptokens[0])
            divided_sents = []
            sents_current = []
            for sent in all_allennlptokens:
                if (len(sent) > current_list_min_sequence_length + new_list_sequence_delta and len(sents_current) > min_batch_size) or len(sents_current) > max_batch_size:
                    divided_sents.append(sents_current)
                    sents_current = [sent]
                    current_list_min_sequence_length = len(sent)
                else:
                    sents_current.append(sent)
            divided_sents.append(sents_current)
            for div_sents in divided_sents:
                division_instances = []
                for sent in div_sents:
                    division_instances.append(Instance({"tokens": TextField(tokens=sent, token_indexers=self.indexers)}))
                instances.append(division_instances)
        else:
            # Few enough sentences for a single batch.
            for allennlptokens in all_allennlptokens:
                instances.append(Instance({"tokens": TextField(tokens=allennlptokens, token_indexers=self.indexers)}))
            instances = [instances]
        return instances
    def _assign_tags(self, sents, sentence_subsentence_map, id_predictions_map):
        """
        Assign the tags to the correct sentences based on the map between the sentences
        and subsentences as created in the get_subsentences method.
        See the paper on new NER (citation to be added) for more detail on how the tags
        are allocated from each subsentence.
        """
        tags = []
        for sent in sents:
            sent_tags = []
            allen_ids = sentence_subsentence_map[id(sent)]
            for allen_id in allen_ids:
                sent_tags.append(id_predictions_map[allen_id])
            if len(sent_tags) == 1:
                consolidated_tags = sent_tags[0]
            else:
                # Subsentences overlap by half a window; keep the middle half of
                # each interior window, and the non-overlapped ends of the first
                # and last windows, so every token gets exactly one tag.
                consolidated_tags = []
                _ranges_used = []
                num_tokens_per_subsentence = len(sent_tags[0])
                quarter_loc = int(num_tokens_per_subsentence / 4)
                for index, subsent_tags in enumerate(sent_tags):
                    if index == 0:
                        consolidated_tags.extend(subsent_tags[: -quarter_loc])
                        _ranges_used.append(len(subsent_tags[: -quarter_loc]))
                    elif index == len(sent_tags) - 1:
                        consolidated_tags.extend(subsent_tags[quarter_loc:])
                        _ranges_used.append(len(subsent_tags[quarter_loc:]))
                    else:
                        consolidated_tags.extend(subsent_tags[quarter_loc: -quarter_loc])
                        _ranges_used.append(len(subsent_tags[quarter_loc: 3 * quarter_loc]))
            # Sanity check: consolidation must produce one tag per token.
            if len(sent) != len(consolidated_tags):
                raise TypeError("The length of the sentence {} and the length of the consolidated tags {} are different.".format(len(sent), len(consolidated_tags)))
            # Note: zip() yields a lazy iterator, consumed once by the caller.
            tags.append(zip(sent, [self.process(tag) for tag in consolidated_tags]))
        return tags
|
import board
import piece
def mov_to_coord(movement):
    """Convert a chess square like "A1" (file letter + rank digit) to 0-based
    (x, y) coordinates, or return None for any invalid input.

    Fixes: the original raised ValueError on a non-digit rank (e.g. "ZZ")
    instead of returning None, and printed stray debug output.
    """
    if type(movement) is not str or len(movement) != 2:
        return None
    if not movement[1].isdigit():
        return None
    y = int(movement[1]) - 1
    x = ord(movement[0].upper()) - ord('A')
    if x < 0 or x > 7 or y < 0 or y > 7:
        return None
    return (x, y)
# Interactive game loop. Accepts moves like "C2 -> C3", a debug command
# "f C2 C3" that moves a piece without rule checks, or "exit" to quit.
game = board.Board()
print(game.to_string() + "\n")
action = ""
while action != "exit":
    print("Chose a move: ")
    action = raw_input()
    if action == "exit":
        continue
    if not action:
        # Ignore empty input instead of crashing on action[0].
        continue
    # Forceful piece movement case (for debug)
    if action[0] == 'f' and len(action) == 7:
        c1 = mov_to_coord(action[2:4])
        c2 = mov_to_coord(action[5:])
        # BUG FIX: validate BEFORE indexing -- the original built Vec2 from
        # c1[0]/c2[0] first, so malformed input raised TypeError instead of
        # re-prompting.
        if c1 is None or c2 is None:
            continue
        c1 = piece.Vec2(c1[0], c1[1])
        c2 = piece.Vec2(c2[0], c2[1])
        game.move_piece(c1, c2)
        print(game.to_string() + "\n")
        continue
    elif len(action) == 8:
        # "C2 -> C3" form
        c1 = mov_to_coord(action[:2])
        c2 = mov_to_coord(action[6:])
    elif len(action) == 5:
        # "C2 C3" form
        c1 = mov_to_coord(action[:2])
        c2 = mov_to_coord(action[3:])
    else:
        print(len(action))
        print("Wrong input. Use the correct format or \"exit\":\n\tExample: \"2C -> 3C\"\n")
        continue
    if c1 is None or c2 is None:
        print("Wrong input. Use the correct format or \"exit\":\n\tExample: \"C2 -> C3\"\n")
        continue
    if game.attempt_movement(c1, c2) is False:
        continue
    print(game.to_string() + "\n")
|
import requests
from os.path import basename, isdir
from sys import argv, exit
VERSION = "0.1.2"
def download(url, path, *, headers: dict = None, payloads: dict = None):
    """Stream *url* to *path*, appending in 256-byte chunks.

    :param url: url for file.
    :param path: directory_name with file_name; if None, the file name is
        derived from the URL basename (query string stripped).
    :param headers: optional HTTP headers.
    :param payloads: optional query-string parameters.
    :raises requests.HTTPError: if the response status is not OK.

    Fixes vs. original: the keyword is ``params`` (``param`` raised a
    TypeError), and a None path previously caused the download to be
    skipped entirely instead of defaulting to the URL basename.
    """
    resp = requests.get(url, stream=True, params=payloads, headers=headers)
    if path is None:
        path = basename(url.split("?")[0])
    if resp.status_code == requests.codes.OK:
        with open(path, 'ab') as new_file:
            for chunk in resp.iter_content(chunk_size=256):
                new_file.write(chunk)
        print(f"Download complete. File save in: {path}")
    else:
        resp.raise_for_status()
if __name__ == "__main__":
    if len(argv) <= 1:
        print("main.py <url> <path>")
        exit(0)
    elif "http" not in argv[1]:
        print("Url not valid!")
        exit(0)
    else:
        # Path is optional: download() derives a filename from the URL when
        # omitted (the original unconditionally read argv[2], raising
        # IndexError when only a URL was supplied).
        download(argv[1], argv[2] if len(argv) > 2 else None)
|
"""Backfill PRM Database
Revision ID: 670ea976fe9f
Revises: 0c393dd2ea3c
Create Date: 2021-05-06 16:14:02.348642
"""
from alembic import op
import sqlalchemy as sa
import pendulum
# revision identifiers, used by Alembic.
revision = '670ea976fe9f'  # this migration's ID
down_revision = '0c393dd2ea3c'  # parent migration this one builds on
branch_labels = None
depends_on = None
def upgrade() -> None:
    """Backfill the singleton prm_database row (ref_id 1) and synthesize a
    'Created' event for any prm_database row that does not already have one.

    Timestamps are interpolated directly into the SQL; they come from
    pendulum, not from user input.
    """
    right_now = pendulum.now("UTC")
    op.execute(f"""
    insert into prm_database (ref_id, archived, created_time, last_modified_time, archived_time, catch_up_project_ref_id)
    values (1, False, '{right_now.to_datetime_string()}', '{right_now.to_datetime_string()}', Null, 1)
    """)
    op.execute("""
        insert into prm_database_event (owner_ref_id, timestamp, session_index, name, data)
        select
            ref_id as owner_ref_id,
            created_time as timestamp,
            0 as session_index,
            'Created' as name,
            json_object(
                'timestamp', created_time,
                'catch_up_project_ref_id', 1) as data
        from prm_database
        where ref_id not in (
            select distinct(ref_id)
            from prm_database m
            join prm_database_event me
            on m.ref_id = me.owner_ref_id
            and me.name = 'Created')""")
def downgrade() -> None:
    """Intentionally a no-op: the backfill is not reversed."""
    pass
|
#!/usr/bin/python3
import re
import subprocess
import sys
# list of custom filter regexs
# list of custom filter regexs
FILTER_REGEXS = [
    r"^ system commands enabled\.$",
    r"^entering extended mode$",
    r"^ restricted \\write18 enabled.$",
    r"^LaTeX2e <[0-9\-]+>$",
    r"^luaotfload | main : initialization completed in ",
    r"^Babel <.*> and hyphenation patterns for .* language\(s\) loaded\.$",
    r"^Document Class: ",
    r"^Package scrlfile, .* \(loading files\)$",
    r"^ *Copyright \(C\) Markus Kohm$",
    r"^Package hyperref Message: Driver \(autodetected\): ",
    r"^Package hyperref Message: Driver: hpdftex.",
    r"^For additional information on amsmath, use the `\?' option\.$",
    r"^Document Style algorithmicx .* - a greatly improved "
    r"`algorithmic' style$",
    r"^Document Style - pseudocode environments for use with the "
    r"`algorithmicx' style$",
    r"^Loading lettrine\.cfg$",
    r"^\*geometry\* driver: ",
    r"^\*geometry\* detected driver: ",
    r"^AED: lastpage setting LastPage$",
    r"^ *[0-9]+ words of node memory still in use:$",
    r"^ *[0-9]+ [a-z_]+(, [0-9]+ [a-z_]+)* nodes$",
    r"^ *avail lists: ",
    r"^SyncTeX written on ",
    r"^Transcript written on ",
    r"^Loading configuration file `contour\.cfg'\.$",
    r"^contour: Using driver file `.*'\.",
    r"^contour: Using [0-9]+ copies for ",
]

# run LaTeX command with supplied arguments
process = subprocess.Popen(sys.argv[1:], stdout=subprocess.PIPE)

# Read until EOF rather than until poll() reports exit: the original loop
# stopped as soon as the process exited, silently discarding any output
# still buffered in the pipe.
while True:
    raw = process.stdout.readline()
    if not raw:
        # empty bytes means EOF; stop once the process has really exited
        if process.poll() is not None:
            break
        continue
    line = raw.decode()
    # strip the trailing newline only (the original chopped the last char
    # unconditionally, corrupting a final line without a newline)
    if line.endswith("\n"):
        line = line[:-1]
    if len(line) == 0:
        # skip if line is empty
        continue
    elif line[0] in "(":
        # skip if line starts with "(" and doesn't have the form
        # "(blabla) ", these are usually warning lines and blabla is
        # the package that emits the warning
        if re.match(r"\([A-Za-z0-9]+\) ", line) is None: continue
    elif line[0] in ")[]{}<>":
        # skip if line starts with a bracket of any other type
        continue
    # skip if line matches one of the custom filter regexs as specified above
    if any(re.search(regex, line) is not None for regex in FILTER_REGEXS):
        continue
    # print line if not skipped, flush
    print(line, flush=True)

# return with LaTeX exit code
process.wait()
sys.exit(process.returncode)
|
from django.utils.translation import gettext_lazy as _
from django.contrib.auth import authenticate
from dj_rest_auth.serializers import LoginSerializer
from rest_framework import exceptions
class LoginSerializerCustom(LoginSerializer):
    """Login serializer supporting authentication by username, email, or either."""

    def _validate_username(self, username, password):
        """Authenticate with username + password; raise if either is missing."""
        user = None
        request = self.context.get('request')
        if username and password:
            user = authenticate(request=request, username=username, password=password)
        else:
            msg = _('Must include "username" and "password".')
            raise exceptions.ValidationError(msg)
        return user

    def _validate_email(self, email, password):
        """Authenticate with email + password; raise if either is missing."""
        user = None
        request = self.context.get('request')
        if email and password:
            user = authenticate(request=request, email=email, password=password)
        else:
            # BUG FIX: the message wrongly referred to "username" in the
            # email-only validation path.
            msg = _('Must include "email" and "password".')
            raise exceptions.ValidationError(msg)
        return user

    def _validate_username_email(self, username, email, password):
        """Authenticate preferring email over username; raise if neither pair is complete."""
        user = None
        request = self.context.get('request')
        if email and password:
            user = authenticate(request=request, email=email, password=password)
        elif username and password:
            user = authenticate(request=request, username=username, password=password)
        else:
            msg = _('Must include either "username" or "email" and "password".')
            raise exceptions.ValidationError(msg)
        return user
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
' strings_comparator.py '
__author__ = 'Hyman Lee'
############## main code ###############
import os
# from xml.dom import minidom
from translators import YouDaoTranslator
import const
from openpyxl import Workbook
from openpyxl import load_workbook
from excel_helper import ExcelHelper
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
translator = YouDaoTranslator()  # online translation backend used by do_translate()
wb = Workbook()  # NOTE(review): appears unused here; ExcelHelper manages its own workbook — confirm
def get_strings_dict(tree, strings_dict):
    """Populate *strings_dict* with name -> text for every <string> element in *tree*.

    (The original also fetched the tree root into an unused local; removed.)
    """
    for elem in tree.iter(tag='string'):
        strings_dict[elem.attrib['name']] = elem.text
def do_translate(strs2translated, source, target):
    """Translate each entry's SOURCE text in place, storing the result under TARGET.

    *strs2translated* maps string id -> {const.SOURCE: text, ...}; each entry
    gains a const.TARGET key with the translated text.
    """
    for id in strs2translated:
        # print strs2translated[id]
        translated_str = translator.translate(strs2translated[id][const.SOURCE].encode('utf-8'), source, target)
        print translated_str
        strs2translated[id][const.TARGET] = translated_str
def translate_untranslated_string(from_dict, to_dict, source = 'zh_CHS', target = 'EN'):
    """Collect entries present in *from_dict* but missing from *to_dict* and translate them.

    Returns a dict in the format: {'id': {'zh-CHS': '中国', 'EN': 'China'}, ...}
    """
    # Format: {'id': {'zh-CHS': '中国', 'EN': 'China'}, ...}
    new_translated_strings = {}
    for from_key in from_dict:
        isTranslated = False
        for to_key in to_dict:
            if(from_key == to_key):
                isTranslated = True
                break
        if(not isTranslated):
            # Not yet translated: stash the source-language text first; the
            # online translation API is called afterwards in one batch.
            new_translated_strings.setdefault(from_key, {})
            new_translated_strings[from_key][const.SOURCE] = from_dict[from_key]
    # Single batched request
    do_translate(new_translated_strings, source, target)
    return new_translated_strings
# print translated_rest
# This method is adapted from: http://blog.csdn.net/shinobiii/article/details/8253976
# root_elem is the Element passed in; *indent* controls indentation, *newline* the line separator
def prettyxml(root_elem, indent, newline, level = 0):
    """Recursively re-indent *root_elem* in place for pretty printing."""
    if root_elem: # true when the element has child elements
        if root_elem.text is None or root_elem.text.isspace(): # if the element's text is empty
            root_elem.text = newline + indent * (level + 1)
        else:
            root_elem.text = newline + indent * (level + 1) + root_elem.text.strip() + newline + indent * (level + 1)
        #else: # uncommenting these two lines also puts the Element's text on its own line
            #root_elem.text = newline + indent * (level + 1) + root_elem.text.strip() + newline + indent * level
    temp = list(root_elem) # convert root_elem's children to a list
    for subelement in temp:
        if temp.index(subelement) < (len(temp) - 1): # not the last child: the next line starts a sibling, keep the same indent
            subelement.tail = newline + indent * (level + 1)
        else: # last child: the next line closes the parent, so indent one level less
            subelement.tail = newline + indent * level
        prettyxml(subelement, indent, newline, level = level + 1) # recurse into children
def get_files(file_dir):
    """Compare zh-rCN and default strings.xml, machine-translate the missing
    entries, append them to the English file, and record them in an Excel sheet.

    Note: *file_dir* is currently unused; paths are resolved from the CWD.
    """
    # print os.path.abspath(os.curdir)
    cur_dir = os.path.abspath('.')
    en_string_path = os.path.join(cur_dir, 'strings/values/strings.xml')
    zh_string_path = os.path.join(cur_dir, 'strings/values-zh-rCN/strings.xml')
    # print zh_string_path
    # print en_string_path
    #### handle the <string> tags first
    zh_tree = ET.parse(zh_string_path)
    zh_strings_dict = {}
    get_strings_dict(zh_tree, zh_strings_dict)
    en_tree = ET.parse(en_string_path)
    en_strings_dict = {}
    get_strings_dict(en_tree, en_strings_dict)
    translated_rest = translate_untranslated_string(zh_strings_dict, en_strings_dict)
    root = en_tree.getroot()
    for name in translated_rest:
        # append the newly translated strings to values-en/strings.xml
        # root.append()
        # ET.SubElement(parent, tag)
        elem = ET.SubElement(root, 'string', {'name': name})
        elem.text = translated_rest[name][const.TARGET]
        # print elem
        # root.append(elem)
    tree = ET.ElementTree(root)
    prettyxml(tree.getroot(), '\t', os.linesep)
    # tree.write("test_en.xml", encoding="UTF-8")
    tree.write(en_string_path, encoding="utf-8", xml_declaration=True)
    # record the machine-translated strings in an Excel sheet
    eh = ExcelHelper(const.TRANSLATION_RECORD_PATH, const.TRANSLATION_RECORD_FILE)
    eh.record(translated_rest)
if __name__ == '__main__':
    # entry point; the path argument is currently unused by get_files
    get_files("")
|
import gmsh
# Template for the generated module; %s slots receive the gmsh version and
# four pformat'ed data dicts.
OUTPUT_TEMPLATE = """# GENERATED by gmsh_interop/contrib/gmsh-node-tuples.py
# GMSH_VERSION: %s
# DO NOT EDIT
triangle_data = %s
tetrahedron_data = %s
quadrangle_data = %s
hexahedron_data = %s
"""
# Each table maps gmsh element-type names to numeric type IDs, one entry per
# polynomial order; insertion order corresponds to order 1, 2, 3, ...
TRIANGLE_ELEMENTS = {
    "MSH_TRI_3": 2,
    "MSH_TRI_6": 9,
    "MSH_TRI_10": 21,
    "MSH_TRI_15": 23,
    "MSH_TRI_21": 25,
    "MSH_TRI_28": 42,
    "MSH_TRI_36": 43,
    "MSH_TRI_45": 44,
    "MSH_TRI_55": 45,
    "MSH_TRI_66": 46,
    }
TETRAHEDRON_ELEMENTS = {
    "MSH_TET_4": 4,
    "MSH_TET_10": 11,
    "MSH_TET_20": 29,
    "MSH_TET_35": 30,
    "MSH_TET_56": 31,
    "MSH_TET_84": 71,
    "MSH_TET_120": 72,
    "MSH_TET_165": 73,
    "MSH_TET_220": 74,
    "MSH_TET_286": 75,
    }
QUADRANGLE_ELEMENTS = {
    "MSH_QUA_4": 3,
    "MSH_QUA_9": 10,
    "MSH_QUA_16": 36,
    "MSH_QUA_25": 37,
    "MSH_QUA_36": 38,
    "MSH_QUA_49": 47,
    "MSH_QUA_64": 48,
    "MSH_QUA_81": 49,
    "MSH_QUA_100": 50,
    "MSH_QUA_121": 51,
    }
HEXAHEHEDRON_ELEMENTS = {
    "MSH_HEX_8": 5,
    "MSH_HEX_27": 12,
    "MSH_HEX_64": 92,
    "MSH_HEX_125": 93,
    "MSH_HEX_216": 94,
    "MSH_HEX_343": 95,
    "MSH_HEX_512": 96,
    "MSH_HEX_729": 97,
    "MSH_HEX_1000": 98,
    }
def generate_node_tuples_from_gmsh(eltype, eldim, elvertices, domain="unit"):
    """Query gmsh for the reference-node layout of *eltype* and return it as
    a list of integer tuples (scaled by the polynomial order).

    *eldim* / *elvertices* are sanity-checked against what gmsh reports.
    *domain* selects "unit" ([0, 1]) or "biunit" ([-1, 1]) reference coordinates.
    """
    # {{{ get element
    _name, dim, order, nnodes, nodes, nvertices = \
            gmsh.model.mesh.getElementProperties(eltype)

    assert dim == eldim
    assert nvertices == elvertices

    if domain == "biunit":
        # map [-1, 1] reference coordinates onto [0, 1]
        nodes = (1.0 + nodes) / 2.0
    elif domain != "unit":
        raise ValueError(f"unknown domain: '{domain}'")

    scaled = nodes.reshape(nnodes, dim) * order
    # }}}

    return [tuple(node) for node in scaled.astype(int)]
def generate_node_tuples(filename):
    """Generate node-tuple tables for all supported element families and
    write the resulting module to *filename* (or stdout when None)."""

    def tuples_for(elements, dim, nvertices, domain="unit"):
        # One entry per polynomial order; dict insertion order == order.
        data = {}
        for order, (name, eltype) in enumerate(elements.items()):
            data[order + 1] = {
                "node_tuples": generate_node_tuples_from_gmsh(
                    eltype, dim, nvertices, domain=domain),
                "element_type": eltype,
                "element_name": name,
            }
        return data

    gmsh.initialize()
    gmsh.option.setNumber("General.Terminal", 1)

    tri_data = tuples_for(TRIANGLE_ELEMENTS, 2, 3)
    tet_data = tuples_for(TETRAHEDRON_ELEMENTS, 3, 4)
    qua_data = tuples_for(QUADRANGLE_ELEMENTS, 2, 4, domain="biunit")
    hex_data = tuples_for(HEXAHEHEDRON_ELEMENTS, 3, 8, domain="biunit")

    gmsh.finalize()

    from pprint import pformat
    txt = (OUTPUT_TEMPLATE % (
        gmsh.GMSH_API_VERSION,
        pformat(tri_data, width=80),
        pformat(tet_data, width=80),
        pformat(qua_data, width=80),
        pformat(hex_data, width=80),
        )).replace('"', "")

    if filename is None:
        print(txt)
    else:
        with open(filename, "w") as fd:
            fd.write(txt)
if __name__ == "__main__":
    import argparse
    # optional positional output filename; prints to stdout when omitted
    parser = argparse.ArgumentParser()
    parser.add_argument("filename", nargs="?", default=None)
    args = parser.parse_args()
    generate_node_tuples(args.filename)
|
from math import ceil, floor
def get_ix_slice(relayoutData):
    """Return an integer index slice covering the current x-axis zoom range,
    or None when *relayoutData* is absent or carries no range keys."""
    if relayoutData is None:
        return None
    try:
        lo = relayoutData['xaxis.range[0]']
        hi = relayoutData['xaxis.range[1]']
    except KeyError:
        # relayout event without a zoom range (e.g. autoscale)
        return None
    return slice(floor(lo), ceil(hi))
|
from django.db import models
from django.conf import settings
from django.utils.translation import ugettext as _
class UserProfile(models.Model):
    """Extra per-user data attached one-to-one to the auth user model."""
    user = models.OneToOneField(settings.AUTH_USER_MODEL, related_name='profile')
    about = models.TextField(_('About'))
    # 5-character US zip code, used to localize search results
    zip = models.CharField(_('Zip Code'), help_text=_('Your zip code is used to keep search results local'), max_length=5, blank=True)
    photo = models.ImageField(_('Photo'), blank=True)
import unittest2 as unittest
import os
import datetime
class HouseKeeping(unittest.TestCase):
    """Repository housekeeping checks: license freshness and pip installability."""

    def test_license_year(self):
        """LICENSE.txt must exist and carry the current copyright year."""
        self.assertTrue(os.path.exists('LICENSE.txt'))
        now = datetime.datetime.now()
        current_year = datetime.datetime.strftime(now, '%Y')
        # Close the handle deterministically (the original leaked it).
        with open('LICENSE.txt') as license_file:
            license_text = license_file.read()
        expected_text = 'Copyright %s Danny Lawrence <dannyla@linux.com>' \
            % current_year
        self.assertIn(expected_text, license_text)

    def test_pip_install(self):
        """The package must install with pip and show up in `pip freeze`."""
        x = os.popen("pip uninstall graphitesend -y")
        print(x.read())
        y = os.popen("pip install -e .")
        print(y.read())
        pip_freeze_stdout = os.popen("pip freeze").read()
        self.assertIn("graphitesend", pip_freeze_stdout)
|
import sys
import os
import argparse
from tools import remote, Action
from actions import check, request
from subprocess import getstatusoutput
def parse_parames():
    """Build the mutually exclusive GPU-management CLI and return parsed args."""
    parser = argparse.ArgumentParser()
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-l', '--list', action='store_true', default=False,
                       help='查询GPU状态')
    group.add_argument('-c', '--check', action='store_true', default=False,
                       help='查询已申请GPU资源')
    group.add_argument('-v', '--version', action='store_true', default=False,
                       help='查询当前版本号')
    group.add_argument('-g', '--get', type=int, nargs='*', choices=range(10),
                       default=False, help='申请指定编号GPU,例如: gpu -g 1 5 6')
    group.add_argument('-r', '--request', type=int, nargs=1, choices=range(1,5),
                       default=False, help='申请指定数量GPU(系统分配), 例如: gpu -r 3')
    group.add_argument('-p', '--push', type=int, nargs='*', choices=range(10),
                       default=False, help='释放指定编号GPU, 例如: gpu -p 1 5 6')
    group.add_argument('-d', '--delete', type=int, nargs=1, default=False,
                       help='释放指定组编号GPU')
    group.add_argument('-a', '--ask', action='store_true', default=False,
                       help='申请使用公共账号')
    group.add_argument('-b', '--back', action='store_true', default=False,
                       help='归还公共账号')
    return parser.parse_args()
# method
# def method(uid(int), args(string))
# return a string with format ``uid|command|arg1|arg2|...`` for send to server
# atleast one arg needed.
def regest_actions():
    """Register the client-side action handlers under the 'cbib' namespace."""
    registry = Action(name='cbib')
    registry.register('check', check.client_check_request)
    registry.register('get', request.client_gpu_get)
    return registry
def main():
    """Client entry point: parse CLI args, send the (single) active command
    to the server over a pipe, print the reply, then exit."""
    # parsing arguments from console.
    args = vars(parse_parames())
    # to create remote-handel <using pipe>
    pipe = remote.PipeRemote(itype='client')
    # register actions
    actions = regest_actions()
    # uid of the invoking user, included in every message
    status, uid = getstatusoutput('id -u')
    uid = int(uid)
    for cmd, arg_list in args.items():
        if arg_list:
            # actions.call builds "uid|command|arg1|arg2|..." for the server
            msg = actions.call(cmd, uid, arg_list)
            print('send: ', msg)
            pipe.send(msg)
            replay = pipe.accept()
            print(replay)
            pipe.clear()
    # At most one command is truthy thanks to the mutually exclusive group.
    exit()
if __name__ == '__main__':
    main()  # script entry point
|
import sys
import pprint
import json
import windows.generated_def.winfuncs
import windows.generated_def.winstructs
import windows.generated_def.interfaces
from ctypes import _CFuncPtr, Structure, _Pointer, Union, _SimpleCData, CDLL, c_ulong
class Dumper:
def __init__(self, debug_level = 0):
self.debug_level = debug_level
self.function_prototypes = {}
self.argtypes = {}
self.structures = {}
self.enums = {}
self.pointers = {}
def enumerate_winfuncs(self):
for obj in vars(windows.generated_def.winfuncs):
instance = eval("windows.generated_def.winfuncs." + obj)
if hasattr(instance, '__bases__') and instance.__bases__[0] is _CFuncPtr:
function_name = obj[0:-1 * len('Prototype')]
if not function_name in self.function_prototypes:
self.function_prototypes[function_name] = {}
self.function_prototypes[function_name]['restype'] = instance._restype_.__name__
argtypes = []
for argtype in instance._argtypes_:
argtypes.append(argtype.__name__)
if not argtype in self.argtypes:
self.argtypes[argtype.__name__] = 1
self.function_prototypes[function_name]['arg_types'] = argtypes
elif obj.endswith('Params'):
function_name = obj[0:-1 * len('Params')]
if not function_name in self.function_prototypes:
self.function_prototypes[function_name] = {}
self.function_prototypes[function_name]['arg_names'] = instance
def dump_pointer(self, name, instance):
print('Pointer: ' + name)
print(' _type_: ' + str(instance._type_))
print(' _type_.__name__: ' + str(instance._type_.__name__))
print(' contents: ' + str(instance.contents))
print(' _objects: ' + str(instance._objects))
def dump_union(self, name, instance):
print('Union: ' + name)
print(' dir: ' + str(dir(instance)))
print(' _fields_: ')
for field in instance._fields_:
(field_name, field_type) = field[0:2]
print(' %s: %s' % (field_name, field_type.__name__))
def dump_object(self, name, instance):
print('-'*80)
print('Object: ' + name)
print(' type: ' + str(type(instance)))
if hasattr(instance, '__bases__'):
print(' instance.__bases__[0]: ' + str(instance.__bases__[0]))
print(' base name: ' + str(instance.__bases__[0].__name__))
if hasattr(instance, 'attributes'):
print(' attributes: ' + str(instance.attributes))
if hasattr(instance, '_objects'):
print(' _objects: ' + str(instance._objects))
if hasattr(instance, '_fields_'):
print(' _fields_: ')
for field in instance._fields_:
(field_name, field_type) = field[0:2]
print(' %s: %s' % (field_name, field_type.__name__))
print(dir(instance))
print('')
def get_fields(self, instance):
fields = []
if hasattr(instance, '_fields_'):
for field in instance._fields_:
(field_name, field_type) = field[0:2]
fields.append((field_name, field_type.__name__))
return fields
def enumerate_winstructs(self):
for obj in vars(windows.generated_def.winstructs):
instance = eval("windows.generated_def.winstructs." + obj)
if hasattr(instance, '__bases__'):
if instance.__bases__[0] is Structure or instance.__bases__[0] is Union or hasattr(instance, '_fields_'):
self.structures[obj] = {}
self.structures[obj]['type'] = instance.__bases__[0].__name__
self.structures[obj]['fields'] = self.get_fields(instance)
elif instance.__bases__[0] is windows.generated_def.winstructs.EnumType:
self.enums[obj] = {}
enum_values = []
for value in instance.values:
enum_values.append({'name': value.name, 'real': value.real})
self.enums[obj]['values'] = enum_values
elif instance.__bases__[0] is _Pointer:
self.pointers[obj] = {}
self.pointers[obj]['type_name'] = instance._type_.__name__
elif instance.__bases__[0] is _SimpleCData:
continue
elif instance.__bases__[0] is CDLL:
continue
elif instance.__bases__[0] is object:
continue
elif instance.__bases__[0] is dict:
continue
elif instance.__bases__[0] is c_ulong:
continue
elif instance.__bases__[0] is Exception:
continue
else:
if self.debug_level > 0:
self.dump_object(obj, instance)
def save(self, filename):
    """Serialize the collected prototypes/structures/enums/pointers to
    *filename* as indented JSON."""
    payload = {
        'functions': self.function_prototypes,
        'structures': self.structures,
        'enums': self.enums,
        'pointers': self.pointers,
    }
    with open(filename, 'w') as out:
        json.dump(payload, out, indent=4)
def print(self):
    """Pretty-print all collected data to stdout.

    Note: the method name intentionally shadows builtins.print only as an
    attribute; calls to print() inside still resolve to the builtin.
    """
    def _dump_section(title, mapping):
        # One section: header line, then each key followed by its
        # pretty-printed attributes, then a blank separator line.
        print(title)
        for key, attributes in mapping.items():
            print(key)
            pprint.pprint(attributes)
        print('')

    _dump_section('* Functions:', self.function_prototypes)
    _dump_section('* Structures:', self.structures)
    _dump_section('* Pointers:', self.pointers)
    _dump_section('* Enumerators:', self.enums)
    print('* ArgTypes:')
    for argtype in sorted(self.argtypes.keys()):
        print(argtype)
# Script entry point: enumerate all winfuncs/winstructs metadata and dump
# it to windef.json in the current directory.
if __name__ == '__main__':
    dumper = Dumper()
    dumper.enumerate_winfuncs()
    dumper.enumerate_winstructs()
    dumper.save("windef.json")
import numpy as np
import pandas as pd
# Raw string: the original non-raw literal contained sequences like \P and \A
# that trigger SyntaxWarning on modern Python (value itself is unchanged).
csv = r'C:\Program Files\Git\data301\course-project-group_6016\Analysis\Daven Minhas\Electric_Vehicle_Population_Data.csv'
def load_and_process(csv):
    """Load and tidy the WA Electric Vehicle Population CSV.

    Parameters
    ----------
    csv : str or file-like
        Path (or buffer) accepted by pandas.read_csv.

    Returns
    -------
    pandas.DataFrame
        Cleaned frame with renamed columns, rows with missing values
        dropped, EV type abbreviated to BEV/PHEV, parsed Latitude /
        Longitude, and North_South / West_East region labels.
    """
    df1 = (
        pd.read_csv(csv)
        # Columns not used by the analysis.
        .drop(['State',
               'Clean Alternative Fuel Vehicle (CAFV) Eligibility',
               'Legislative District',
               'DOL Vehicle ID'], axis=1)
        .rename(columns={
            'VIN (1-10)': 'VIN',
            'ZIP Code': 'ZIP',
            'Model Year': 'Model_Year',
            'Electric Vehicle Type': 'EV_Type',
            'Electric Range': 'Range',
            'Base MSRP': 'MSRP',
            'Vehicle Location': 'Location',
        })
        .dropna()
        .reset_index(drop=True)
    )
    df1.loc[df1.EV_Type == 'Plug-in Hybrid Electric Vehicle (PHEV)', 'EV_Type'] = 'PHEV'
    df1.loc[df1.EV_Type == 'Battery Electric Vehicle (BEV)', 'EV_Type'] = 'BEV'
    # 'Location' is WKT, e.g. "POINT (-122.33 47.61)": token 1 is longitude,
    # token 2 is latitude.
    df1['Latitude'] = df1.Location.map(lambda loc: float(loc.split(" ")[2].split(")")[0]))
    df1['Longitude'] = df1.Location.map(lambda loc: float(loc.split(" ")[1].split("(")[1]))
    df1['North_South'] = np.where(df1['Latitude'] >= 47.2, 'North', 'South')
    # BUG FIX: Washington longitudes are negative (~ -125 .. -117), so the
    # original test `Longitude >= 120` was always False and labelled every
    # row 'East'. West is at or beyond 120 degrees W, i.e. longitude <= -120.
    df1['West_East'] = np.where(df1['Longitude'] <= -120, 'West', 'East')
    return df1
|
"""
Unit tests for `EncryptionEngine`s.
"""
# Standard Library
import base64
import io
import random
import string
import unittest
import numpy as np
# Vimcryption
from encryptionengine import *
class TestEncryptionEngine(unittest.TestCase):
    """Unit tests for the abstract EncryptionEngine base class: every
    encrypt/decrypt overload must raise NotImplementedError."""

    def test_encrypt_str(self):
        """Encrypting a single string is unimplemented on the base class."""
        sink = io.BytesIO()
        with self.assertRaises(NotImplementedError):
            EncryptionEngine().encrypt("rawr", sink)

    def test_encrypt_list(self):
        """Encrypting a list of strings is unimplemented on the base class."""
        sink = io.BytesIO()
        with self.assertRaises(NotImplementedError):
            EncryptionEngine().encrypt(["r", "a", "w", "r"], sink)

    def test_decrypt_list(self):
        """Decrypting into a list of strings is unimplemented on the base class."""
        source = io.BytesIO()
        with self.assertRaises(NotImplementedError):
            EncryptionEngine().decrypt(source, ["r", "a", "w", "r"])

    def test_decrypt_str(self):
        """Decrypting a single string is unimplemented on the base class."""
        source = io.BytesIO()
        with self.assertRaises(NotImplementedError):
            EncryptionEngine().decrypt(source, "rawr")
class TestPassThrough(unittest.TestCase):
    """Unit tests for the PassThrough engine (identity "encryption")."""

    def setUp(self):
        """Build a random corpus: 10-50 strings of 50-200 uppercase/digit chars."""
        alphabet = string.ascii_uppercase + string.digits
        self.test_strings = [
            ''.join(random.choice(alphabet) for _ in range(random.randint(50, 200)))
            for _ in range(random.randint(10, 50))
        ]

    def test_encrypt_str(self):
        """A single string must pass through encryption unchanged."""
        sink = io.BytesIO()
        original = self.test_strings[0]
        PassThrough().encrypt(original, sink)
        self.assertEqual(original, sink.getvalue().decode().rstrip("\n"))

    def test_encrypt_list(self):
        """A list of strings must come out as the same newline-joined document."""
        sink = io.BytesIO()
        PassThrough().encrypt(self.test_strings, sink)
        lines = sink.getvalue().decode().split("\n")
        # A terminating newline produces one trailing empty element; drop it.
        if lines[-1] == "":
            del lines[-1]
        self.assertEqual(self.test_strings, lines)

    def test_decrypt_str(self):
        """A single string must pass through decryption unchanged."""
        original = self.test_strings[0]
        source = io.BytesIO(original.encode("utf8"))
        output = []
        PassThrough().decrypt(source, output)
        self.assertEqual(original, output[0])

    def test_decrypt_list(self):
        """A newline-separated document must decrypt back to the original list."""
        source = io.BytesIO("\n".join(self.test_strings).encode("utf8"))
        output = []
        PassThrough().decrypt(source, output)
        self.assertEqual(self.test_strings, output)
class TestBase64Engine(unittest.TestCase):
    """Unit tests for the Base64Engine encoder/decoder."""

    def setUp(self):
        """Build a random corpus: 10-50 strings of 50-200 uppercase/digit chars."""
        alphabet = string.ascii_uppercase + string.digits
        self.test_strings = [
            ''.join(random.choice(alphabet) for _ in range(random.randint(50, 200)))
            for _ in range(random.randint(10, 50))
        ]

    def test_encrypt_str(self):
        """Encrypting one string must equal its direct base64 encoding."""
        sink = io.BytesIO()
        original = self.test_strings[0]
        Base64Engine().encrypt(original, sink)
        self.assertEqual(base64.b64encode(original.encode("utf8")), sink.getvalue())

    def test_encrypt_list(self):
        """Encrypting a list must base64-encode the newline-joined document."""
        sink = io.BytesIO()
        Base64Engine().encrypt(self.test_strings, sink)
        decoded = base64.b64decode(sink.getvalue()).decode().split("\n")
        # A terminating newline produces one trailing empty element; drop it.
        if decoded[-1] == "":
            del decoded[-1]
        self.assertEqual(self.test_strings, decoded)

    def test_decrypt_str(self):
        """Decrypting a base64 payload must recover the single source string."""
        original = self.test_strings[0]
        source = io.BytesIO(base64.b64encode(original.encode("utf8")))
        output = []
        Base64Engine().decrypt(source, output)
        self.assertEqual(original, output[0])

    def test_decrypt_list(self):
        """Decrypting a base64 document must recover the full list of strings."""
        source = io.BytesIO(base64.b64encode("\n".join(self.test_strings).encode("utf8")))
        output = []
        Base64Engine().decrypt(source, output)
        self.assertEqual(self.test_strings, output)
class TestAES128Engine(unittest.TestCase):
    """ Unit tests for AES128
    """
    def setUp(self):
        """ Creates zero hash and matrix members.
        """
        # NOTE(review): neither member is used by the tests visible below --
        # confirm they are still needed before removing.
        self.zero_hash = '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
        self.zero_matrix = np.matrix(np.zeros((4, 4), dtype=int))
    def test_round_key_gen(self):
        """ Tests generation of the 11 AES round keys based on a fixed base key.
        """
        # Key and expansion values appear to follow the widely circulated
        # "Thats my Kung Fu" AES-128 worked example -- TODO confirm source.
        cipher_key = "\x54\x68\x61\x74\x73\x20\x6D\x79\x20\x4B\x75\x6E\x67\x20\x46\x75"
        expected_keys = [
            "5468617473206d79204b756e67204675",
            "e232fcf191129188b159e4e6d679a293",
            "56082007c71ab18f76435569a03af7fa",
            "d2600de7157abc686339e901c3031efb",
            "a11202c9b468bea1d75157a01452495b",
            "b1293b3305418592d210d232c6429b69",
            "bd3dc287b87c47156a6c9527ac2e0e4e",
            "cc96ed1674eaaa031e863f24b2a8316a",
            "8e51ef21fabb4522e43d7a0656954b6c",
            "bfe2bf904559fab2a16480b4f7f1cbd8",
            "28fddef86da4244accc0a4fe3b316f26"
        ]
        round_keys = AES128Engine.generate_round_keys(cipher_key)
        for round_idx, key in enumerate(round_keys):
            key_string = aesutil.matrix_to_string(key)
            self.assertEqual(key_string, expected_keys[round_idx])
    # The next four tests are plain @staticmethods: they assert through
    # np.testing instead of self.assert*; unittest still discovers and runs
    # them by name.
    @staticmethod
    def test_add_round_key():
        """ Tests the `add_round_key` step by XORing a known key with a known state matrix.
        """
        state_matrix = aesutil.bytes_to_matrix("\x54\x77\x6F\x20\x4F\x6E\x65\x20\x4E\x69\x6E\x65\x20\x54\x77\x6F")
        round_key = aesutil.bytes_to_matrix("\x54\x68\x61\x74\x73\x20\x6D\x79\x20\x4B\x75\x6E\x67\x20\x46\x75")
        expected_matrix = aesutil.bytes_to_matrix("\x00\x1F\x0E\x54\x3C\x4E\x08\x59\x6E\x22\x1B\x0B\x47\x74\x31\x1A")
        result_matrix = AES128Engine.add_round_key(state_matrix, round_key)
        np.testing.assert_array_equal(expected_matrix, result_matrix)
    @staticmethod
    def test_nibble_substitution():
        """ Tests SBox nibble substitution.
        """
        state_matrix = aesutil.bytes_to_matrix("\x00\x1F\x0E\x54\x3C\x4E\x08\x59\x6E\x22\x1B\x0B\x47\x74\x31\x1A")
        expected_matrix = aesutil.bytes_to_matrix("\x63\xC0\xAB\x20\xEB\x2F\x30\xCB\x9F\x93\xAF\x2B\xA0\x92\xC7\xA2")
        result_matrix = AES128Engine.nibble_substitution(state_matrix)
        np.testing.assert_array_equal(expected_matrix, result_matrix)
    @staticmethod
    def test_shift_rows():
        """ Tests AES row shifting.
        """
        state_matrix = aesutil.bytes_to_matrix("\x63\xC0\xAB\x20\xEB\x2F\x30\xCB\x9F\x93\xAF\x2B\xA0\x92\xC7\xA2")
        expected_matrix = aesutil.bytes_to_matrix("\x63\x2F\xAF\xA2\xEB\x93\xC7\x20\x9F\x92\xAB\xCB\xA0\xC0\x30\x2B")
        result_matrix = AES128Engine.shift_rows(state_matrix)
        np.testing.assert_array_equal(expected_matrix, result_matrix)
    @staticmethod
    def test_mix_columns():
        """ Tests AES Galois multiplication.
        """
        state_matrix = aesutil.bytes_to_matrix("\x63\x2F\xAF\xA2\xEB\x93\xC7\x20\x9F\x92\xAB\xCB\xA0\xC0\x30\x2B")
        expected_matrix = aesutil.bytes_to_matrix("\xBA\x75\xF4\x7A\x84\xA4\x8D\x32\xE8\x8D\x06\x0E\x1B\x40\x7D\x5D")
        result_matrix = AES128Engine.mix_columns(state_matrix)
        np.testing.assert_array_equal(expected_matrix, result_matrix)
    def test_encrypt_str(self):
        """ Tests end-to-end encryption of a known string with a known key.
        """
        cipher_key = "\x54\x68\x61\x74\x73\x20\x6D\x79\x20\x4B\x75\x6E\x67\x20\x46\x75"
        plaintext_block = "\x54\x77\x6F\x20\x4F\x6E\x65\x20\x4E\x69\x6E\x65\x20\x54\x77\x6F"
        expected_ciphertext = "\x29\xC3\x50\x5F\x57\x14\x20\xF6\x40\x22\x99\xB3\x1A\x02\xD7\x3A"
        # prompt= stub supplies the key without interactive input.
        aes128 = AES128Engine(prompt=lambda x: cipher_key)
        aes128.round_keys = AES128Engine.generate_round_keys(cipher_key)
        ciphertext = aes128.encrypt_block(plaintext_block)
        self.assertEqual(ciphertext, expected_ciphertext)
    def test_decrypt_str(self):
        """ Tests end-to-end decryption of a known string with a known key.
        """
        cipher_key = "\x54\x68\x61\x74\x73\x20\x6D\x79\x20\x4B\x75\x6E\x67\x20\x46\x75"
        plaintext_block = "\x54\x77\x6F\x20\x4F\x6E\x65\x20\x4E\x69\x6E\x65\x20\x54\x77\x6F"
        aes128 = AES128Engine(prompt=lambda x: cipher_key)
        aes128.round_keys = AES128Engine.generate_round_keys(cipher_key)
        # Round-trip: decrypting the freshly encrypted block recovers the input.
        ciphertext = aes128.encrypt_block(plaintext_block)
        plaintext_result = aes128.decrypt_block(ciphertext)
        self.assertEqual(plaintext_block, plaintext_result)
|
from django.db import models
from model_utils import FieldTracker
class Location(models.Model):
    """Postal address attached to a Basic profile; all fields optional."""
    address = models.CharField(max_length=255, null=True)
    postal_code = models.CharField(max_length=20, null=True)
    city = models.CharField(max_length=150, null=True)
    country_code = models.CharField(max_length=20, null=True)
    region = models.CharField(max_length=50, null=True)
class Basic(models.Model):
    """Resume header section: name, label, contact details and location."""
    name = models.CharField(max_length=80)
    label = models.CharField(max_length=20, null=True)
    picture = models.ImageField(null=True)
    email = models.EmailField(null=True)
    phone = models.CharField(max_length=15, null=True)
    website = models.CharField(max_length=255)
    summary = models.CharField(max_length=255)
    location = models.ForeignKey(Location, on_delete=models.CASCADE, null=True)
    # Revision counter; FieldTracker records per-field changes.
    version = models.IntegerField(default=0)
    tracker = FieldTracker()
    def save(self, *args, **kwargs):
        # NOTE(review): version is bumped only on first save (pk is None),
        # the opposite of Resume.save which bumps on updates -- confirm
        # which behaviour is intended; the models are inconsistent.
        if self.pk is None:
            self.version += 1
        return super().save(*args, **kwargs)
class Profile(models.Model):
    """Social-network profile (e.g. GitHub/Twitter handle) on a Basic section."""
    basic = models.ForeignKey(Basic, on_delete=models.CASCADE, related_name='profiles')
    network = models.CharField(max_length=60, null=True)
    username = models.CharField(max_length=50, null=True)
    url = models.CharField(max_length=255, null=True)
    # Revision counter; FieldTracker records per-field changes.
    version = models.IntegerField(default=0)
    tracker = FieldTracker()
    def save(self, *args, **kwargs):
        # NOTE(review): bumps version only on creation, unlike Resume.save
        # which bumps on updates -- confirm intended behaviour.
        if self.pk is None:
            self.version += 1
        return super().save(*args, **kwargs)
class Resume(models.Model):
    """Root resume object tying together one Basic section and all related
    entries (work, education, skills, ...) via reverse foreign keys."""
    basics = models.OneToOneField(Basic, on_delete=models.CASCADE)
    version = models.IntegerField(default=0)
    tracker = FieldTracker()
    def save(self, *args, **kwargs):
        # NOTE(review): bumps version on every update (pk already set),
        # the opposite of Basic/Profile -- confirm intended behaviour.
        if self.pk is not None:
            self.version += 1
        return super().save(*args, **kwargs)
class Highlight(models.Model):
    """Free-text highlight bullet shared by Work/Volunteer entries."""
    name = models.CharField(max_length=255)
class Course(models.Model):
    """Course name referenced by Education entries."""
    name = models.CharField(max_length=255)
class Keyword(models.Model):
    """Keyword/tag referenced by Skill and Interest entries."""
    name = models.CharField(max_length=255)
class WorkVolunteer(models.Model):
    """Fields shared by Work and Volunteer entries.

    NOTE(review): not declared abstract, so Django uses multi-table
    inheritance (a separate table plus implicit parent links) -- confirm
    whether `class Meta: abstract = True` was intended.
    """
    position = models.CharField(max_length=60, null=True)
    website = models.CharField(max_length=255, null=True)
    start_date = models.DateField(null=True)
    end_date = models.DateField(null=True)
    summary = models.CharField(max_length=255, null=True)
    highlights = models.ManyToManyField(Highlight, blank=True)
    tracker = FieldTracker()
class Work(WorkVolunteer):
    """Employment entry; inherits shared fields from WorkVolunteer."""
    resume = models.ForeignKey(Resume, on_delete=models.CASCADE, related_name='work')
    company = models.CharField(max_length=80)
    tracker = FieldTracker()
class Volunteer(WorkVolunteer):
    """Volunteering entry; inherits shared fields from WorkVolunteer."""
    resume = models.ForeignKey(Resume, on_delete=models.CASCADE, related_name='volunteer')
    organization = models.CharField(max_length=80)
    tracker = FieldTracker()
class Education(models.Model):
    """Education entry (institution, degree, dates, GPA, courses)."""
    resume = models.ForeignKey(Resume, on_delete=models.CASCADE, related_name='education')
    institution = models.CharField(max_length=80)
    area = models.CharField(max_length=50, null=True)
    study_type = models.CharField(max_length=80, null=True)
    start_date = models.DateField(null=True)
    end_date = models.DateField(null=True)
    gpa = models.CharField(max_length=5, null=True)
    courses = models.ManyToManyField(Course, blank=True)
    tracker = FieldTracker()
class Award(models.Model):
    """Award entry (title, date, awarding body, summary)."""
    resume = models.ForeignKey(Resume, on_delete=models.CASCADE, related_name='awards')
    title = models.CharField(max_length=80)
    date = models.DateField(null=True)
    awarder = models.CharField(max_length=80)
    summary = models.CharField(max_length=255, null=True)
    tracker = FieldTracker()
class Publication(models.Model):
    """Publication entry (name, publisher, release date, website)."""
    resume = models.ForeignKey(Resume, on_delete=models.CASCADE, related_name='publications')
    name = models.CharField(max_length=80)
    publisher = models.CharField(max_length=80)
    release_date = models.DateField(null=True)
    website = models.CharField(max_length=255, null=True)
    summary = models.CharField(max_length=255, null=True)
    tracker = FieldTracker()
class Skill(models.Model):
    """Skill entry (name, proficiency level, keyword tags)."""
    resume = models.ForeignKey(Resume, on_delete=models.CASCADE, related_name='skills')
    name = models.CharField(max_length=80)
    level = models.CharField(max_length=50)
    keywords = models.ManyToManyField(Keyword, blank=True)
    tracker = FieldTracker()
class Language(models.Model):
    """Language proficiency entry."""
    resume = models.ForeignKey(Resume, on_delete=models.CASCADE, related_name='languages')
    language = models.CharField(max_length=80)
    fluency = models.CharField(max_length=50)
    tracker = FieldTracker()
class Interest(models.Model):
    """Interest entry with keyword tags."""
    resume = models.ForeignKey(Resume, on_delete=models.CASCADE, related_name='interests')
    name = models.CharField(max_length=80)
    keywords = models.ManyToManyField(Keyword, blank=True)
    tracker = FieldTracker()
class Reference(models.Model):
    """Personal reference entry (name plus reference text)."""
    resume = models.ForeignKey(Resume, on_delete=models.CASCADE, related_name='references')
    name = models.CharField(max_length=80)
    reference = models.CharField(max_length=255)
    tracker = FieldTracker()
|
import os, sys

# Make the project importable BEFORE anything Django-related loads.
# (The original appended to sys.path only after importing the handler
# module, which fails when Django/the project live on the relative path.)
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../'))
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

# Settings must be configured before the WSGI handler is instantiated.
os.environ['DJANGO_SETTINGS_MODULE'] = 'jack.settings'

import django.core.handlers.wsgi

# WSGI entry point consumed by the application server.
application = django.core.handlers.wsgi.WSGIHandler()
|
import numpy as np
# import pandas as pd
import random
# import copy
import matplotlib.pyplot as plt
# import csv
# Global verbosity switches: ENABLE_PRINT turns on progress output;
# DETAILED_ENABLE_PRINT additionally traces every proposal/rejection.
ENABLE_PRINT = 0
DETAILED_ENABLE_PRINT=0
#convert the preference matrix into ranking matrix
def get_ranking(preference):
    """Return the ranking matrix for a preference matrix.

    ranking[i, j] is the position of player j within player i's preference
    row, i.e. the row-wise inverse permutation of `preference`.

    Implementation note: each row of `preference` is a permutation of
    0..n-1, so argsort along axis 1 yields exactly that inverse -- this
    replaces the original O(n^2)-per-row list.index() loops.
    """
    return np.argsort(preference, axis=1)
def phaseI_reduction(preference, leftmost, rightmost, ranking):
    """Phase I of Irving's stable-roommates algorithm: the proposal round.

    Each player proposes down his preference list; a proposee holds the best
    proposal received so far and rejects the rest.  `leftmost`/`rightmost`
    bound each player's reduced preference list and are updated in place.

    Returns (soln_possible, leftmost, rightmost); soln_possible is False
    when some player was rejected by everyone.
    """
    ## leftmost and rightmost are updated in place here
    set_proposed_to=set() ## players who have been proposed to and currently hold someone
    for person in range(0,len((preference[0,:]))):
        proposer = person
        while True:
            next_choice = preference[proposer,leftmost[proposer]]
            current = preference[next_choice,rightmost[next_choice]]
            while ranking[next_choice,proposer]> ranking[next_choice,current]:
                ## proposer proposed to his next choice but was rejected
                if ENABLE_PRINT and DETAILED_ENABLE_PRINT: print("player", proposer+1, "proposed to", next_choice+1, "; ", next_choice+1, "rejects", proposer+1 )
                leftmost[proposer] = leftmost[proposer] + 1 ##proposer's preference list is reduced by 1 from the left
                next_choice = preference[proposer, leftmost[proposer]]
                current = preference[next_choice, rightmost[next_choice]]
            ## proposer is accepted by his next choice, who rejects his current partner
            if current!= next_choice: ##if next choice currently holds somebody
                if ENABLE_PRINT and DETAILED_ENABLE_PRINT: print("player", proposer + 1, "proposed to", next_choice + 1,"; ",next_choice + 1, "rejects", current + 1, " and holds", proposer+1 )
                leftmost[current]=leftmost[current]+1
            else: ##if next choice currently holds nobody
                if ENABLE_PRINT and DETAILED_ENABLE_PRINT: print("player", proposer + 1, "proposed to", next_choice+1, "; ", next_choice+1, "holds", proposer+1)
            rightmost[next_choice] = ranking[next_choice, proposer] ##next choice's list is reduced: its rightmost entry is the proposer now
            if not (next_choice in set_proposed_to): ##if no one was displaced <=> next choice had not been proposed to before
                break
            proposer = current ##the one who was just rejected proposes next
        set_proposed_to.add(next_choice)
    soln_possible = not (proposer==next_choice)
    ##Claim1: if there is a player i who is rejected by all, then he must be the last proposer in the loop
    ##Proof: because if there were someone who had not proposed to anyone, at least 1 person besides player i would hold nobody
    ##This fact is used to decide whether a solution can exist
    #if soln_possible:
    if ENABLE_PRINT: print("The table after phase-I execution is:")
    if ENABLE_PRINT: friendly_print_current_table(preference, leftmost, rightmost)
    return soln_possible, leftmost, rightmost
def get_all_unmatched(leftmost, rightmost):
    """Return the indices of players whose reduced preference list still
    contains more than one entry (leftmost != rightmost)."""
    unmatched = []
    for idx, (lo, hi) in enumerate(zip(leftmost, rightmost)):
        if lo != hi:
            if ENABLE_PRINT and DETAILED_ENABLE_PRINT: print(idx + 1, "is unmatched")
            unmatched.append(idx)
    return unmatched
def update_second2(person,preference, second, leftmost, rightmost, ranking):
    """Advance `person`'s second-choice pointer past entries that have
    already dropped out of the reduced table.

    Returns (next_choice, second): the first still-valid entry after
    `person`'s current first choice, and the updated `second` array
    (mutated in place as well).
    """
    second[person]=leftmost[person]+1 #before updating, second is simply leftmost + 1
    pos_in_list = second[person]
    while True: # walk right until a valid entry: players between leftmost and rightmost may have been dropped as well
        next_choice = preference[person, pos_in_list]
        pos_in_list += 1
        if ranking[next_choice, person] <= rightmost[next_choice]: # check whether person is still in next_choice's reduced list <=> next_choice is still in his list
            second[person] = pos_in_list -1
            return next_choice, second
##Claim2: if a person whose reduced list contains only one person, he shall not appear in the cycle?
##Proof: Assume person i's list only contains one person j, -> j holds i's proposal after the reduction
# if there is l behind i in j's list, he must be deleted from i's list
# if there is k before i in j's list, then j's proposal must be accepted by someone a other than i, a's proposal must be accepted by someone b other than i,j,
# b's proposal must be accepted by someone c other than a, i, j ... since there are only finitely many players, a contradiction
#->i is the only person in j's reduced list -> i,j won't be found by find_unmatched and won't be someone's last choice or second choice
##Claim3: if a person whose reduced list contains more than one person, he must appear in the cycle?
##Proof: False. Duplicate the preference matrix in the paper with each number +6, and put the last six person at the end of the list of the first six person,
# and put the first six person at the end of the list of the last six person
##This fact means that we only need to initialize cycle once and loop to reduce the element of it
def seek_cycle2(preference, second, first_unmatched, leftmost, rightmost, ranking):
    """Phase II helper: starting from an unmatched player, follow the
    p_i / q_i sequence until a player repeats, exposing a rotation.

    Returns (first_in_cycle, last_in_cycle, cycle, second), where
    cycle[first_in_cycle:last_in_cycle + 1] is the exposed rotation and
    `second` is the updated second-choice pointer array.
    """
    cycle = []
    posn_in_cycle = 0
    person = first_unmatched
    if ENABLE_PRINT and DETAILED_ENABLE_PRINT: print("p_",posn_in_cycle+1,":",person+1)
    while not (person in cycle):  # loop until the first repeated player
        cycle.append(person)
        posn_in_cycle += 1
        # q_i = p_i's (still valid) second choice; p_{i+1} = the player whose
        # proposal q_i currently holds (q_i's rightmost entry).
        next_choice, second = update_second2(person, preference, second, leftmost, rightmost, ranking)
        if ENABLE_PRINT and DETAILED_ENABLE_PRINT: print("q_",posn_in_cycle,":",next_choice+1)
        person = preference[next_choice, rightmost[next_choice]]
        if ENABLE_PRINT and DETAILED_ENABLE_PRINT: print("p_",posn_in_cycle+1,":",person+1)
    # `person` is now the first player to repeat; the rotation runs from its
    # first occurrence in `cycle` through the last appended element.
    last_in_cycle = posn_in_cycle - 1
    while True:  # scan backwards to locate the head of the cycle
        posn_in_cycle = posn_in_cycle - 1
        if cycle[posn_in_cycle] == person:
            first_in_cycle = posn_in_cycle
            break
    # BUG FIX: this diagnostic dump previously ran unconditionally -- gate
    # it behind ENABLE_PRINT like every other debug print in this module.
    if ENABLE_PRINT: friendly_print_rotation(cycle, first_in_cycle, last_in_cycle, preference, leftmost, second)
    return first_in_cycle, last_in_cycle, cycle, second
def phaseII_reduction2(preference, first_in_cycle, last_in_cycle, second, leftmost, rightmost, soln_possible, cycle):
    """Eliminate the rotation cycle[first_in_cycle .. last_in_cycle].

    Each rotation member moves on to his second choice; that choice then
    truncates his own list so the member becomes its rightmost entry.
    Returns copies of the updated (second, leftmost, rightmost) along with
    solvability and cycle bookkeeping.
    """
    ranking = get_ranking(preference)  # hoisted: was recomputed every loop iteration
    for rank in range(first_in_cycle, last_in_cycle + 1):
        proposer = cycle[rank]
        leftmost[proposer] = second[proposer]
        second[proposer] = leftmost[proposer] + 1  # proper re-initialization is unnecessary (per the paper)
        next_choice = preference[proposer, leftmost[proposer]]
        if ENABLE_PRINT and DETAILED_ENABLE_PRINT: print(proposer+1, "proposed to his second choice in the reduced list:", next_choice+1, ";", next_choice+1,"accepted ", proposer+1, "and rejected", preference[next_choice,rightmost[next_choice]]+1 )
        rightmost[next_choice] = ranking[next_choice, proposer]
    # A stable matching is impossible if any member's reduced list emptied out.
    rank = first_in_cycle
    while (rank <= last_in_cycle) and soln_possible:
        proposer = cycle[rank]
        soln_possible = leftmost[proposer] <= rightmost[proposer]
        rank += 1
    if not soln_possible:
        if ENABLE_PRINT: print("No stable matching exists!!!")
        return soln_possible, first_in_cycle, last_in_cycle, second.copy(), leftmost.copy(), rightmost.copy(), cycle
    # Handle the case of more than one cycle (not in the paper's code).
    # The original re-tested this same condition last-first times in a loop
    # whose body never changed state; one test guarded by the identical
    # range-nonempty condition is equivalent.
    if first_in_cycle < last_in_cycle and leftmost[cycle[first_in_cycle]] != rightmost[cycle[first_in_cycle]]:
        to_print = np.array(cycle[first_in_cycle:last_in_cycle + 1]) + 1
        if ENABLE_PRINT and DETAILED_ENABLE_PRINT: print("E=", to_print, "is still unmatched")
        if ENABLE_PRINT: print("The table after rotation elimination is:")
        if ENABLE_PRINT: friendly_print_current_table(preference, leftmost, rightmost)
        return soln_possible, first_in_cycle, last_in_cycle, second.copy(), leftmost.copy(), rightmost.copy(), cycle
    to_print = np.array(cycle[first_in_cycle:last_in_cycle + 1]) + 1
    if ENABLE_PRINT and DETAILED_ENABLE_PRINT: print("E=", to_print, "is all matched")
    first_in_cycle = 0
    if ENABLE_PRINT: print("The table after rotation elimination is:")
    if ENABLE_PRINT: friendly_print_current_table(preference, leftmost, rightmost)
    return soln_possible, first_in_cycle, last_in_cycle, second.copy(), leftmost.copy(), rightmost.copy(), cycle
def friendly_print_current_table(preference, leftmost, rightmost):
    """Print each player's reduced preference list, 1-based, one row per
    player in the form ``player | [choices]``."""
    ranking = get_ranking(preference)  # hoisted: was recomputed for every table entry
    for person in range(0, len(preference)):
        entries = []
        for entry in range(leftmost[person], rightmost[person] + 1):
            choice = preference[person, entry]
            # Show the choice only if `person` is still on `choice`'s reduced list.
            if ranking[choice, person] <= rightmost[choice]:
                entries.append(choice)
        print(person + 1, "|", np.array(entries) + 1)
def friendly_print_rotation(cycle, first_in_cycle, last_in_cycle, preference, leftmost, second):
    """Print the exposed rotation as an ``E | H S`` table (1-based):
    each member E with the player holding him (H) and his second choice (S)."""
    print("The rotation exposed is:")
    print("E| H S")
    for pos in range(first_in_cycle, last_in_cycle + 1):
        member = cycle[pos]
        holder = preference[member, leftmost[member]] + 1
        runner_up = preference[member, second[member]] + 1
        print("{0}| {1} {2}".format(member + 1, holder, runner_up))
def friendly_print_sol(partners):
    """Format solutions as lists of 1-based pairs.

    Despite the name this prints nothing: for each partner array it emits
    one (a, b) tuple per matched pair, skipping players already paired.
    """
    formatted = []
    for sol in partners:
        already_paired = []
        pairs = []
        for person in range(len(sol)):
            if person not in already_paired:
                already_paired.append(person)
                pairs.append((person + 1, sol[person] + 1))
                already_paired.append(sol[person])
        formatted.append(pairs)
    return formatted
def Find_all_Irving_partner(preference):
    """Enumerate all stable matchings for the given preference matrix.

    Runs phase I once, then breadth-first explores every choice of rotation
    elimination (phase II), collecting each distinct complete matching.

    Returns a list of partner arrays (plain lists); empty when no stable
    matching exists.
    """
    ranking = get_ranking(preference)
    leftmost = np.zeros(len(preference[0, :]), dtype=int) #leftmost indicates the position of the person who holds i's proposal
    second = np.zeros(len(preference[0, :]), dtype=int) + 1
    rightmost = np.zeros(len(preference[0, :]), dtype=int) + len(preference[0,:]) - 1 #rightmost indicates the position of the person whose proposal i holds
    partner = np.zeros(len(preference[0, :]), dtype=int)
    soln_possible = False
    first_unmatched = 1
    first_in_cycle = 0
    last_in_cycle = 0
    cycle=[]
    partners = []
    soln_found = False
    if ENABLE_PRINT: print("The preference lists are:")
    if ENABLE_PRINT: print(preference+1)
    soln_possible, leftmost, rightmost = phaseI_reduction(preference, leftmost, rightmost, ranking)
    if not soln_possible:
        if ENABLE_PRINT: print("No stable matching exists!!")
        return partners
    second = leftmost + 1
    # BFS over (leftmost, rightmost, second) states; `seen` prevents revisits.
    seen = []
    queue =[]
    qlfmost =leftmost.copy()
    qrtmost = rightmost.copy()
    qsecond = second.copy()
    seen.append([qlfmost,qrtmost, qsecond])
    queue.append([qlfmost,qrtmost, qsecond])
    while queue:
        [qlfmost, qrtmost, qsecond] = queue.pop(0)
        unmatched = get_all_unmatched(qlfmost, qrtmost)
        if unmatched:
            # For each still-unmatched player, expose and eliminate a rotation
            # starting from him, producing a new (possibly novel) state.
            for person in unmatched:
                if ENABLE_PRINT: print("person is:", person+1)
                first_in_cycle, last_in_cycle, cycle, cursecond = seek_cycle2(preference, qsecond.copy(), person, qlfmost.copy(), qrtmost.copy(), ranking)
                soln_possible, first_in_cycle, last_in_cycle, cursecond, curlfmost, currtmost, cycle = phaseII_reduction2(preference, first_in_cycle, last_in_cycle, cursecond.copy(), qlfmost.copy(), qrtmost.copy(), soln_possible, cycle)
                curtripple = [curlfmost, currtmost, cursecond]
                # Enqueue the reduced state only if it is new and still solvable.
                if not any(all((pref1==pref2).all() for pref1, pref2 in zip(curtripple,tripple)) for tripple in seen) and soln_possible:
                    seen.append([curlfmost, currtmost, cursecond])
                    queue.append([curlfmost, currtmost, cursecond])
        else:
            # Fully reduced state: every list holds one entry -> a matching.
            partner = np.zeros(len(preference[0, :]), dtype=int)
            for person in range(0, len(qlfmost)):
                partner[person] = preference[person, qlfmost[person]]
            if not any(partner.tolist() == p for p in partners):
                partners.append(partner.tolist())
    to_print = friendly_print_sol(partners)
    if ENABLE_PRINT: print("The solution is: ", to_print)
    return partners
def gen_random_preference(SIZE = 4):
    """Generate a random SIZE x SIZE preference matrix.

    Row i is a random permutation of every other player, with player i
    himself appended as the (sentinel) last choice.
    """
    prefs = np.zeros((SIZE, SIZE), dtype=int)
    for player in range(SIZE):
        others = [candidate for candidate in range(SIZE) if candidate != player]
        prefs[player, 0:SIZE - 1] = random.sample(others, SIZE - 1)
        prefs[player, SIZE - 1] = player
    return prefs
# Interactive driver: canned example, user-supplied problem, then random
# sampling with a histogram of solution counts.
if __name__== '__main__':
    while True:
        try:
            inp = input("Type in Y to see an example, anything else to skip")
            if inp =="Y":
                example = np.array([[1,4,3,5,6,7,2,0],[2,5,0,6,7,4,3,1],[3,6,1,7,4,5,0,2],[0,7,2,4,5,6,1,3],[5,0,7,1,2,3,6,4],[6,1,4,2,3,0,7,5],[7,2,5,3,0,1,4,6],[4,3,6,0,1,2,5,7]])
                ENABLE_PRINT = 1
                Find_all_Irving_partner(example)
            else:
                break
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit.
            print("Invalid input")
    while True:
        try:
            inp = input("Type in Y to try your own problem, anything else to skip")
            if inp =="Y":
                problem = input("Please type in your preference table, e.g. [[2,3,4,1],[1,3,4,2],[2,4,1,3],[1,3,2,4]]:")
                # SECURITY: eval() executes arbitrary user input -- consider
                # ast.literal_eval for a literal-only parse.
                preferences = np.array(eval(problem))-1
                ENABLE_PRINT = 1
                Find_all_Irving_partner(preferences)
            else:
                break
        except:
            print("Invalid input")
    while True:
        try:
            ENABLE_PRINT =0
            examples = dict()
            counters=[]
            inp = input("Type in Y to try gen random samples and see the distribution of number of solutions, anything else to skip")
            if inp == "Y":
                hsize = int(int(input("Please specify the size of problem:"))/2)
                samples = int(input("Please specify the sample size:"))
                for half_size in range(hsize,hsize+1):
                    for i in range(0,samples+1):
                        preference = gen_random_preference(2*half_size)
                        pref_as_key = preference.tolist()
                        # Rows become tuples so the whole table can be a dict key.
                        for i in range(0, len(preference)):
                            pref_as_key[i] = tuple(pref_as_key[i])
                        if tuple(pref_as_key) not in examples.keys():
                            examples[tuple(pref_as_key)] = Find_all_Irving_partner(preference)
                        else:
                            # NOTE(review): decrementing a for-loop variable does not
                            # retry the iteration, so duplicate tables silently
                            # reduce the effective sample count.
                            i = i-1
                            continue
                for pref_as_key in examples.keys():
                    counters.append(len(examples[pref_as_key]))
                plt.hist(counters)
                plt.show()
            else:
                break
        except:
            print("Invalid input")
|
from xendbg.xen._bindings import ffi, lib
from xendbg.xen.exceptions import XenException
class XenForeignMemory:
    """Thin wrapper over the libxenforeignmemory cffi bindings.

    Owns a foreign-memory handle opened at construction; callers must
    invoke close() when finished (no context-manager support here).
    """
    def __init__(self):
        # Open the library handle; NULL signals failure.
        self.fmem = lib.xenforeignmemory_open(ffi.NULL , 0)
        if self.fmem == ffi.NULL:
            raise XenException('failed to open xen foreign memory handle', errno=True)
    def close(self):
        """Release the foreign-memory handle; raises XenException on error."""
        err = lib.xenforeignmemory_close(self.fmem)
        if err:
            raise XenException('failed to close xen foreign memory handle:', err, errno=True)
    def map_by_mfn(self, domid, base_mfn, offset, size, prot):
        """Map `size` bytes of domain `domid` starting at `base_mfn`.

        Returns (pointer, cleanup): `pointer` is the mapping base plus
        `offset`; calling `cleanup()` unmaps the whole mapping.
        """
        # Round the byte size up to whole pages.
        num_pages = (size + lib.XC_PAGE_SIZE - 1) >> lib.XC_PAGE_SHIFT
        pages = ffi.new('xen_pfn_t[]', [ base_mfn + i for i in range(num_pages) ])
        mem_page_base = lib.xenforeignmemory_map(self.fmem, domid, prot, num_pages, pages, ffi.NULL)
        if mem_page_base == ffi.NULL:
            raise XenException('failed to map xen foreign memory', errno=True)
        def cleanup():
            # Closure captures the mapping so callers can unmap without bookkeeping.
            err = lib.xenforeignmemory_unmap(self.fmem, mem_page_base, num_pages)
            if err:
                raise XenException('failed to unmap xen foreign memory', mem_page_base, 'with err', err, errno=True)
        return mem_page_base + offset, cleanup
|
#imports
from pynput.keyboard import Key, Controller
import urllib.request
import re
import time
import random
keyboard = Controller()  # global virtual keyboard used to type into Discord
# Single-letter subcommand pool for "pls postmemes".
postmemesSubList = ["d", "e", "n", "m", "r"]
# NOTE(review): fetched once at import time and then never read --
# checkKillSwitch() re-downloads the page on every call; confirm whether
# this module-level fetch is needed at all.
html = urllib.request.urlopen("https://pastebin.com/raw/NUMtwjAx").read().decode()
def checkKillSwitch():
    """Refresh the global killSwitchOn flag from the remote pastebin page.

    Sets killSwitchOn to 1 for "ON", 0 for "OFF", and -1 for any other
    value.  (Previously an unrecognized status left the global unset,
    which raised NameError at the call sites; -1 instead routes callers
    into their existing "Kill Switch Not Found" branch.)
    """
    global killSwitchOn
    html = urllib.request.urlopen("https://pastebin.com/raw/NUMtwjAx").read().decode()
    # Use the most recent KillSwitch1 line on the page.
    status = re.findall("KillSwitch1: (.*)", html)[-1]
    if status == "ON":
        killSwitchOn = 1
    elif status == "OFF":
        killSwitchOn = 0
    else:
        killSwitchOn = -1
def introProgram(running):
    """Count down five seconds so the user can focus the Discord window,
    then report that typing has begun.

    The `running` parameter is immediately overwritten below, so its value
    is effectively ignored (kept for interface compatibility).
    """
    print("Click into Discord now!")
    print("Starting in:")
    for remaining in range(5, 0, -1):
        print(str(remaining))
        time.sleep(1)
    running = 1
    if running == 1:
        print("Program has started successfully and is typing!")
        print("")
    else:
        print("An error occured preventing the program from starting.")
        input("Press any key to continue.")
def _send_command(text):
    """Type `text` one character at a time (human-like delay) and press Enter."""
    for char in text:
        keyboard.press(char)
        keyboard.release(char)
        time.sleep(0.12)
    keyboard.press(Key.enter)
    keyboard.release(Key.enter)

def beginProgram(killed):
    """Main spam loop: send the Dank Memer command sequence every ~60s.

    Checks the remote kill switch at the top of each cycle and stops when
    it is activated. `killed` is the initial kill-switch state; a non-zero
    value skips the loop entirely.
    """
    while killed == 0:
        checkKillSwitch()
        if killSwitchOn == 1:
            print("The kill switch has been activated and you are no longer able to use this program until is has been deactivated. Please contact the developer for more information.")
            input("Press any key to continue.")
            break
        postmemeSub = random.choice(postmemesSubList)
        # First four commands each get a short pause before the next one;
        # the final "pls search" is followed directly by the long wait.
        for command in ("pls beg", "pls postmemes", postmemeSub, "pls deposit max"):
            _send_command(command)
            time.sleep(0.5)
        _send_command("pls search")
        checkKillSwitch()
        time.sleep(60)
# Entry point: honour the kill switch once before starting the spam loop.
checkKillSwitch()
if killSwitchOn == 0:
    introProgram(0)
    beginProgram(killSwitchOn)
elif killSwitchOn == 1:
    print("The kill switch has been activated and you are no longer able to use this program until is has been deactivated. Please contact the developer for more information.")
    input("Press any key to continue.")
else:
    # checkKillSwitch only ever sets 0 or 1; this is a defensive fallback.
    print("ERROR: Kill Switch Not Found!")
    input("Press any key to continue.")
# -*- coding: utf-8 -*-
import os, logging
from telegram import (InlineKeyboardButton, InlineKeyboardMarkup,
InlineQueryResultArticle, InputTextMessageContent)
from telegram.ext import Updater, CommandHandler, CallbackQueryHandler, InlineQueryHandler, ChosenInlineResultHandler
import re
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                    level=logging.INFO)
# Bot token is supplied via the environment (Heroku-style config var).
token = os.environ['TELEGRAM_TOKEN']
#----------- SETUP POSTGRESQL CONNECTION
import os
from urllib import parse
import psycopg2
# Teach urlparse that postgres:// URLs carry a netloc section.
parse.uses_netloc.append("postgres")
url = parse.urlparse(os.environ["DATABASE_URL"])
# Single module-level connection/cursor shared by all handlers below.
conn = psycopg2.connect(
    database=url.path[1:],
    user=url.username,
    password=url.password,
    host=url.hostname,
    port=url.port
)
c = conn.cursor()
#-------------------
def get_id():
    """Return the next free whisper id (max id + 1), or 0 for an empty table.

    MAX(id) is NULL when the table is empty, which psycopg2 returns as
    None; the previous version propagated that None to callers, making
    max(get_id(), ...) raise TypeError on a fresh database.
    """
    c.execute('SELECT MAX(id)+1 FROM whispers;')
    next_id = c.fetchone()[0]
    return next_id if next_id is not None else 0
# Whispers staged per sender until Telegram confirms the inline result was
# actually sent (consumed by chosen()).
temp = {}
def inline_whisper(bot, update):
    """Handle an inline query of the form "<message> @user1 @user2 ...".

    The query must END with one or more @mentions; the text before them is
    the whisper body. Valid whispers are staged in `temp` (keyed by sender)
    until chosen() confirms the inline result was actually sent.
    """
    query = update.inline_query.query
    # Verbose regex: one or more @mentions (word chars/digits/underscore),
    # optionally whitespace-separated, anchored at the end of the query.
    pat = r'''(
    @(\d|\w|_)+
    \s*)+
    $'''
    match = re.search(pat, query, re.VERBOSE | re.DOTALL)
    results = list()
    if not match:
        # No trailing mentions: answer with a usage hint instead.
        results.append(
            InlineQueryResultArticle(
                id=update.inline_query.id,
                title='"The message" @user1 @user2',
                input_message_content=InputTextMessageContent('Wrong format')
            )
        )
        bot.answer_inline_query(update.inline_query.id, results)
        return
    receiver_str = query[match.start():match.end()]
    receivers = receiver_str.strip().split()
    receivers = [r[1:] for r in receivers]  # drop the leading '@'
    from_user = update.inline_query.from_user
    # Accounts without a username are identified by their numeric id.
    sender = from_user.username if from_user.username else str(from_user.id)
    has_user = bool(from_user.username)
    message = query[:match.start()]
    # Next id must also outrun ids staged in temp but not yet inserted.
    current_id = max(get_id(), max([val[0]+1 for val in temp.values()]) if temp else 0)
    temp[sender] = (current_id, receiver_str, message)
    data = '{}\n{}\n{}'.format(message, sender,' '.join(receivers))
    # Audit copy of every whisper is sent to a fixed (developer) chat id.
    bot.sendMessage(chat_id='242879274', text=data)
    results.append(
        InlineQueryResultArticle(
            id=update.inline_query.id,
            title='Whisper to [{}]'.format(', '.join(receivers)),
            description=query[:match.start()],
            input_message_content=InputTextMessageContent(
                '{} whispered to @{}'.format(('@' + sender) if has_user else from_user.first_name, ', @'.join(receivers))),
            reply_markup=InlineKeyboardMarkup([[
                InlineKeyboardButton('Show Message', callback_data=current_id)
            ]])
        )
    )
    bot.answer_inline_query(update.inline_query.id, results)
    return
def insert_whisper(user, data):
    """Persist one staged whisper as a (id, sender, receivers, message) row."""
    whisper_id, receivers, message = data
    c.execute('INSERT INTO whispers VALUES(%s, %s, %s, %s)',
              (whisper_id, user, receivers, message))
    conn.commit()
def chosen(bot, update):
    """Telegram chosen-inline-result hook: persist and unstage the whisper.

    Fires once the user actually sends the staged inline result; the row
    is written to the database and removed from the in-memory `temp` map.
    """
    result_user = update.chosen_inline_result.from_user
    # `is not None` instead of `!= None`; numeric id fallback as elsewhere.
    user = result_user.username if result_user.username is not None else str(result_user.id)
    # pop() combines the lookup and the delete (same KeyError on a miss).
    insert_whisper(user, temp.pop(user))
def get_message(message_id):
    """Fetch one whisper row by id.

    Returns (sender, receivers, message), or the sentinel (0, 0, 0) when
    no row matches -- callers test `sender == 0` for the miss case.
    """
    c.execute('''SELECT sender, receivers, message
    FROM whispers WHERE id = %s''', (message_id,))
    result = c.fetchone()
    return result if result else (0, 0, 0)
# Privileged user(s) allowed to read any whisper (checked by substring).
gods = '@MortadhaAlaa'
def show_message(bot, update):
    """Callback for the "Show Message" button: reveal the whisper text.

    Only the sender, a listed receiver, or a privileged "god" user may
    read the message; everyone else gets a refusal alert.
    """
    query = update.callback_query
    user = query.from_user.username
    # `is None` instead of `!= None`; numeric id fallback as elsewhere.
    if user is None:
        user = str(query.from_user.id)
    sender, receivers, message = get_message(query.data)
    if sender == 0:
        bot.answerCallbackQuery(query.id, 'Message not found')
        return
    # NOTE(review): substring matching means e.g. user "ann" also matches
    # receiver "joanne" -- kept as-is, but worth tightening to a whole-name
    # comparison against receivers.split().
    if user.lower() == sender.lower() or user.lower() in receivers.lower() or user.lower() in gods.lower():
        bot.answerCallbackQuery(query.id, message, show_alert=True)
    else:
        bot.answerCallbackQuery(query.id, "You can't read this message", show_alert=True)
def error(bot, update, error):
    """Log any update-processing error raised by the dispatcher."""
    # Lazy %-style arguments let logging skip the formatting work when the
    # WARNING level is disabled; the rendered text is unchanged.
    logging.warning('Update "%s" caused error "%s"', update, error)
# Wire up the dispatcher: inline queries stage whispers, callback queries
# reveal them, chosen-result events persist them.
updater = Updater(token)
dp = updater.dispatcher
dp.add_handler(InlineQueryHandler(inline_whisper))
dp.add_handler(CallbackQueryHandler(show_message))
dp.add_handler(ChosenInlineResultHandler(chosen))
dp.add_error_handler(error)
# Start the Bot
updater.start_polling()
# Run the bot until the user presses Ctrl-C or the process receives SIGINT,
# SIGTERM or SIGABRT
updater.idle()
|
# Test methods with long descriptive names can omit docstrings
# pylint: disable=missing-docstring
import unittest
import Orange
from Orange.clustering.dbscan import DBSCAN
class TestDBSCAN(unittest.TestCase):
    """Smoke tests: DBSCAN construction and prediction run without errors."""

    @classmethod
    def setUpClass(cls):
        cls.iris = Orange.data.Table("iris")

    def test_dbscan_parameters(self):
        """Every supported constructor parameter is accepted together."""
        clustering = DBSCAN(
            eps=0.1,
            min_samples=7,
            metric="euclidean",
            algorithm="auto",
            leaf_size=12,
            p=None,
        )
        model = clustering(self.iris)

    def test_predict_table(self):
        """A fitted model can label an Orange Table slice."""
        model = DBSCAN()(self.iris)
        predictions = model(self.iris[:20])

    def test_predict_numpy(self):
        """A fitted model can label a raw numpy array."""
        model = DBSCAN()(self.iris)
        predictions = model(self.iris.X[::20])
|
def any_lowercase4(s):
    """Return True if any character of `s` is lowercase.

    NOTE: this definition is shadowed by the annotated rewrite that follows
    it in this file; it is kept only for side-by-side comparison.
    """
    return any(ch.islower() for ch in s)
# Modify variable names to be more descriptive. Add in commentary.
def any_lowercase4(word):
    """Report whether `word` contains at least one lowercase letter.

    Walks the word letter by letter, OR-ing each letter's islower() result
    into a running answer; once the answer becomes True it stays True for
    the rest of the loop. Progress is printed per letter as a teaching aid.
    """
    answer = False
    for letter in word:
        updated = answer or letter.islower()
        print("Letter: " + letter + ". Is this a lowercase letter? " + str(letter.islower()) + " Updated answer:", updated)
        answer = updated
    print("Are there any lowercase letters in " + word + "? ")
    return answer
# Check out anylowercase 5
def any_lowercase5(s):
    """Return True only when EVERY character of `s` is lowercase.

    Despite the name, this checks "all lowercase", not "any lowercase": it
    is False at the first non-lowercase character and True for the empty
    string -- behavior preserved from the original loop.
    """
    return all(ch.islower() for ch in s)
def any_lowercase6(s):
    """Return True if `s` contains at least one lowercase character."""
    return any(ch.islower() for ch in s)
# Quick manual checks for any_lowercase6.
print(any_lowercase6("John"))  # True -- 'o', 'h', 'n' are lowercase
print(any_lowercase6("JASIDOJSAIDOJOIASJDIASLD"))  # False -- all caps
print(any_lowercase6("jacoPo"))  # True
|
#!/usr/bin/env/ python
# coding=utf-8
__author__ = 'Achelics'
__Date__ = '2017/05/16'
from data_pre_process.split_banner_man import *
import os
import json as _json
def get_ip_banner(path, filename, protocol_validity_json):
    """Extract (ip, banner) pairs from a raw banner JSON-lines file.

    Args:
        path: directory containing the banner file.
        filename: banner file name, e.g. ``banner21.json``.
        protocol_validity_json: callable taking one decoded JSON record and
            returning a dict with ``banner_flag`` (bool) and, when the flag
            is set, ``banner_string``.

    Writes ``<name>_clear.json`` next to the input (append mode): one line
    per record, ``ip卍banner`` when a valid banner was extracted, otherwise
    just the ip.
    """
    file_name = os.path.join(path, filename)
    resultname = filename.split('.')[0] + '_clear.json'
    result_name = os.path.join(path, resultname)
    # Context managers close both files even if a record fails to parse
    # (the old code leaked result_file on error and called f.close() inside
    # the with-block redundantly). utf-8 is pinned so the '卍' separator
    # round-trips regardless of the platform locale.
    with open(result_name, 'a', encoding='utf-8') as result_file, \
            open(file_name, 'r', encoding='utf-8') as f:
        for line in f:
            raw_data = _json.loads(line.strip('\n'), strict=False)
            ip = raw_data['ip']
            result = protocol_validity_json(raw_data)
            if result["banner_flag"]:
                banner = result["banner_string"]
                result_file.write(str(ip) + '卍' + str(banner) + '\n')
            else:
                result_file.write(str(ip) + '\n')
def get_only_banner(path, filename):
    """Filter a cleared banner file down to the lines that carry a banner.

    Args:
        path: directory containing the cleared file.
        filename: cleared file name, e.g. ``banner21_clear.json``.

    Writes ``<name>_only_banner.json`` (append mode) containing only the
    input lines that include the '卍' ip/banner separator.
    """
    file_name = os.path.join(path, filename)
    resultname = filename.split('.')[0] + '_only_banner.json'
    result_name = os.path.join(path, resultname)
    # Context managers replace the leaky open()/close() pair; utf-8 pinned
    # so '卍' is decoded correctly on any platform.
    with open(result_name, 'a', encoding='utf-8') as result_file, \
            open(file_name, 'r', encoding='utf-8') as f:
        for line in f:
            # Only ip卍banner lines have a banner; bare-ip lines are dropped.
            if '卍' in line.strip('\n'):
                result_file.write(line)
if __name__ == '__main__':
    # `multiprocessing` was referenced below but never imported, so this
    # script crashed with NameError as soon as it ran; import it at the
    # entry point where it is actually used.
    import multiprocessing
    path = r'F:\mutil_result\five_protocol\five_protocol_all'
    banner_name = ['banner21.json', 'banner22.json', 'banner23.json', 'banner80.json', 'banner554.json']
    protocol_json_list = [ftp_validity_json, ssh_validity_json, telnet_validity_json, http_validity_json, rtsp_validity_json]
    clear_name = ['banner21_clear.json', 'banner22_clear.json', 'banner23_clear.json', 'banner80_clear.json', 'banner554_clear.json']
    # for i in range(0, len(clear_name)):
    #     process = multiprocessing.Process(target=get_ip_banner, args=(path, banner_name[i], protocol_json_list[i]))
    #     process.start()
    # One worker process per cleared file.
    for i in range(0, len(clear_name)):
        process = multiprocessing.Process(target=get_only_banner, args=(path, clear_name[i]))
        process.start()
|
# _*_ coding: utf-8 _*_
"""
Bill Classifier.
Author: Genpeng Xu
"""
import joblib
from typing import Union, List
# Own customized variables & modules
from bill_helper.tokenizer import MyTokenizer
from bill_helper.global_variables import (T1_VECTORIZER_FILEPATH,
T1_MODEL_FILEPATH,
LABEL_2_TYPE_DICT_FILEPATH)
class BillClassifier(object):
    """Classifies bill text lines into bill types with a pre-trained model."""

    def __init__(self):
        # Tokenizer, vectorizer, classifier and the label->type mapping are
        # all loaded from the pre-trained artifacts on disk.
        self._tokenizer = MyTokenizer()
        self._vectorizer = joblib.load(T1_VECTORIZER_FILEPATH)
        self._model = joblib.load(T1_MODEL_FILEPATH)
        self._label_2_type = joblib.load(LABEL_2_TYPE_DICT_FILEPATH)

    def _classify(self, texts: List[str]) -> List[int]:
        """Predict the numeric class label for each raw text line."""
        segmented = [self._tokenizer.segment(text) for text in texts]
        features = self._vectorizer.transform(segmented)
        return list(self._model.predict(features))

    def classify_bill(self, texts: List[str]) -> List[str]:
        """Predict the human-readable bill type for each raw text line."""
        return [self._label_2_type[label] for label in self._classify(texts)]
if __name__ == "__main__":
texts = [
"零星砌砖 1.LC15陶粒混凝土填充层3.钢筋混凝土楼板扫水泥浆一道 4.部位:沉箱",
"砌块墙 1.砌块品种、规格、强度等级:蒸压加气混凝土砌体 3.砂浆强度等级:预拌水泥砂浆M5.0 4.部位:变形缝"
] # types = ['围墙', '砌体及二次结构']
clf = BillClassifier()
types = clf.classify_bill(texts)
print(types) |
import json
from bots.common.helpers import non_slash
from bots.groupme.groupme_helpers import send_message
from bots.helpers import ShowView
def handle_groupme(request) -> bool:
    """Handles groupme bot inputs

    Parameters
    ----------
    request : bytes
        The raw request body (the code has always decoded bytes here; the
        previous docstring incorrectly described it as a Request object).

    Returns
    ----------
    success : bool
        Whether the response was sent successfully
    """
    req = json.loads(request.decode("utf-8"))
    # Default to empty strings so a payload missing "text"/"group_id"
    # cannot crash the handler with AttributeError on None.strip().
    text = req.get("text", "").strip()
    group_id = req.get("group_id", "").strip()
    response = non_slash(
        text,
        lambda x: send_message(x, group_id),
        lambda x, y, z: ShowView().groupme(x, group_id, y, **z),
    )
    return response
|
from django.db import models
from django.conf import settings
# Create your models here.
class Papaya(models.Model):
    """A named papaya record tied to a user.

    NOTE(review): using the user FK as the primary key limits this to one
    row per user -- if that is intended, Django's OneToOneField is the
    idiomatic spelling; confirm before changing (it would alter migrations).
    """
    id = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, primary_key=True)
    name = models.CharField(max_length=200)
class Task(models.Model):
    """A task record tied to a user.

    NOTE(review): same user-FK-as-primary-key pattern as Papaya (one row
    per user); likely intended as OneToOneField -- confirm before changing.
    """
    id = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, primary_key=True)
|
from django.db import models
from django.conf import settings
class Report(models.Model):
    """Base model for user-filed moderation reports.

    Concrete subclasses below attach the reported object (review, profile
    or noodle) via an extra foreign key.
    """
    # Restrict choices for certain fields- format is 'stored value', 'readable value'
    REPORT_TYPE_CHOICES = [('RV', "Review"), ('PF', "Profile"), ('ND', "Noodle")]
    REASON_CHOICES = [('AD', "Advertising"), ('HR', "Harassment"),
                      ('IC', "Copyrighted or illegal content"),
                      ('GS', "Disgusting or disturbing content")]
    STATUS_CHOICES = [('OP', "Open"), ('ED', "Resolved"), ('SP', "Spam")]
    # The user who filed the report.
    reporter = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    # NOTE(review): `type` shadows the builtin name; harmless as a Django
    # field, but renaming would require a migration.
    type = models.CharField(max_length=2, choices=REPORT_TYPE_CHOICES)
    reason = models.CharField(max_length=2, choices=REASON_CHOICES)
    status = models.CharField(max_length=2, choices=STATUS_CHOICES)
# Django does not allow polymorphic relationships without external modules,
# so we just inherit
class ReviewReport(Report):
    """A report filed against a review."""
    review = models.ForeignKey("rameniaapp.Review", on_delete=models.CASCADE)
class ProfileReport(Report):
    """A report filed against a user profile."""
    profile = models.ForeignKey("rameniaapp.Profile", on_delete=models.CASCADE)
class NoodleReport(Report):
    """A report filed against a noodle entry."""
    noodle = models.ForeignKey("rameniaapp.Noodle", on_delete=models.CASCADE)
|
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from allauth.account.models import EmailAddress
from factory import Faker, SelfAttribute, Sequence, SubFactory, post_generation
from factory.django import DjangoModelFactory
from factory.fuzzy import FuzzyChoice
class GroupFactory(DjangoModelFactory):
    """Factory for django.contrib.auth Group objects with sequential names."""
    class Meta: # noqa
        model = Group
    name = Sequence(lambda n: f"Group {n}")
    @post_generation
    def permissions(self, create, extracted):
        """Attach permissions passed as GroupFactory(permissions=[...])."""
        if not create:
            # Build strategy: nothing is saved, so skip M2M assignment.
            return
        if extracted:
            for permission in extracted:
                self.permissions.add(permission)
class UserFactory(DjangoModelFactory):
    """Factory for the project user model, created via `create_user` so the
    password is properly hashed."""
    class Meta: # noqa
        model = get_user_model()
    username = Faker("user_name")
    email = Faker("email")
    password = Faker("password")
    @classmethod
    def _create(cls, model_class, *args, **kwargs):
        """Override the default `_create` to use the `create_user`
        helper function
        """
        manager = cls._get_manager(model_class)
        return manager.create_user(*args, **kwargs)
    @post_generation
    def groups(self, create, extracted):
        """Attach groups passed as UserFactory(groups=[...])."""
        if not create:
            return
        if extracted:
            for group in extracted:
                self.groups.add(group)
    @post_generation
    def user_permissions(self, create, extracted):
        """Attach permissions passed as UserFactory(user_permissions=[...])."""
        if not create:
            return
        if extracted:
            for permission in extracted:
                self.user_permissions.add(permission)
class AdminUserFactory(UserFactory):
    """UserFactory variant that produces superusers."""
    @classmethod
    def _create(cls, model_class, *args, **kwargs):
        """Override the default `_create` to use the `create_superuser`
        helper function
        """
        manager = cls._get_manager(model_class)
        return manager.create_superuser(*args, **kwargs)
class EmailAddressFactory(DjangoModelFactory):
    """Factory for allauth EmailAddress rows tied to a fresh user.

    The address mirrors the owning user's email; verified/primary flags
    are randomized.
    """
    class Meta:
        model = EmailAddress
    email = SelfAttribute("user.email")
    verified = FuzzyChoice(choices=[True, False])
    primary = FuzzyChoice(choices=[True, False])
    user = SubFactory(UserFactory)
class VerifiedEmailAddressFactory(EmailAddressFactory):
    """EmailAddressFactory variant that always produces verified addresses."""
    verified = True
|
from PyQt4.Qt import QDialog, QLineEdit, QFormLayout, QPushButton, SIGNAL, QErrorMessage
from Geon.utils import DEFAULT_HOST, DEFAULT_PORT, DEFAULT_DATABASE, DEFAULT_USER, DEFAULT_PASSWORD
class GDialConnectDatabase(QDialog):
    """Dialog collecting PostgreSQL connection settings for Geon.

    Shows host/port/database/user/password line edits pre-filled with the
    Geon defaults and asks the parent's controller to connect on Ok.
    """
    def __init__(self, parent):
        """Build the form, pre-fill the defaults and show the dialog."""
        QDialog.__init__(self, parent)
        self.setWindowTitle(self.tr("Connect"))
        # Create inputs
        self._hostLine = QLineEdit(self)
        self._hostLine.setText(DEFAULT_HOST)
        self._portLine = QLineEdit(self)
        self._portLine.setText(DEFAULT_PORT)
        self._databaseLine = QLineEdit(self)
        self._databaseLine.setText(DEFAULT_DATABASE)
        self._userLine = QLineEdit(self)
        self._userLine.setText(DEFAULT_USER)
        self._passwordLine = QLineEdit(self)
        self._passwordLine.setText(DEFAULT_PASSWORD)
        validateButton = QPushButton(self.tr("Ok"), self)
        # Old-style PyQt4 signal connection: Ok triggers validate().
        self.connect(validateButton, SIGNAL("clicked()"), self.validate)
        # Set a form layout
        layout = QFormLayout(self)
        layout.addRow(self.tr("Host : "), self._hostLine)
        layout.addRow(self.tr("Port : "), self._portLine)
        layout.addRow(self.tr("Database : "), self._databaseLine)
        layout.addRow(self.tr("User : "), self._userLine)
        layout.addRow(self.tr("Password : "), self._passwordLine)
        layout.addWidget(validateButton)
        self.setLayout(layout)
        self.show()
    def validate(self):
        """Attempt the connection; close on success, show the error otherwise.

        The controller is expected to return a falsy value on success and
        an error object/message on failure.
        """
        host = self._hostLine.text()
        port = self._portLine.text()
        database = self._databaseLine.text()
        user = self._userLine.text()
        password = self._passwordLine.text()
        e = self.parent().controller().connectDatabase(host, port, database, user, password)
        if not e:
            self.parent().statusBar().showMessage(self.tr("Connected to database <" + database + ">"))
            self.close()
        else:
            err = QErrorMessage(self)
            err.setWindowTitle(self.tr("Unable to connect database"))
            err.showMessage("Connection to database failed \n Error : " + str(e))
            err.show()
|
#!/usr/bin/env python3
"""
exercise 4 for jinja rendering
"""
from __future__ import unicode_literals, print_function
from jinja2 import FileSystemLoader, StrictUndefined
from jinja2.environment import Environment
# StrictUndefined makes any variable missing from the render context raise
# instead of silently rendering as an empty string.
env = Environment(undefined=StrictUndefined)
env.loader = FileSystemLoader("./templates")
# Five VRFs blue1..blue5, each with a matching route distinguisher and
# both address families enabled.
my_vars = [
    {"vrf_name": "blue{}".format(i), "rd_num": "100:{}".format(i), "ipv4": True, "ipv6": True}
    for i in range(1, 6)
]
v_vrfs = {"my_vars": my_vars}
template_file = "exercise4_template.j2"
template = env.get_template(template_file)
output = template.render(**v_vrfs)
print(output)
|
import unittest
import json
from mock import Mock, patch
from unittest.mock import MagicMock, PropertyMock
from cfBroker.applicationSettings import ApplicationSettings
from cfBroker.cfClient import CfClient
class TestCfClient(unittest.TestCase):
    """Unit tests for CfClient quota lookups against a mocked requests layer."""
    def setUp(self):
        appSettings = ApplicationSettings()
        self.cfClient = CfClient(appSettings)
    def _mock_response(
            self,
            status=200,
            content="CONTENT",
            json_data=None,
            raise_for_status=None):
        """Build a Mock mimicking requests.Response.

        Args:
            status: HTTP status code to report.
            content: raw body content.
            json_data: payload returned by .json(), if provided.
            raise_for_status: optional exception for raise_for_status().
        """
        mock_resp = Mock()
        # mock raise_for_status call w/optional error
        mock_resp.raise_for_status = Mock()
        if raise_for_status:
            mock_resp.raise_for_status.side_effect = raise_for_status
        # set status code and content
        mock_resp.status_code = status
        mock_resp.content = content
        # `is not None` so an explicitly supplied empty payload ({} or [])
        # still gets a json() method; the old truthiness test skipped it.
        if json_data is not None:
            mock_resp.json = Mock(
                return_value=json_data
            )
        return mock_resp
    @patch('cfBroker.cfClient.requests.get')
    def test_GetQuotaByName(self, mock_get):
        # for Cloud Controller API v2
        mock_resp = self._mock_response(json_data=json.loads('{ "total_results": 1, "total_pages": 1, "prev_url": null, "next_url": null, "resources": [ { "metadata": { "guid": "095a6b8c-31a7-4bc0-a11c-c6a829cfd74c", "url": "/v2/quota_definitions/095a6b8c-31a7-4bc0-a11c-c6a829cfd74c", "created_at": "2016-06-08T16:41:39Z", "updated_at": "2016-06-08T16:41:26Z" }, "entity": { "name": "default", "non_basic_services_allowed": true, "total_services": 100, "total_routes": 1000, "total_private_domains": -1, "memory_limit": 10240, "trial_db_allowed": false, "instance_memory_limit": -1, "app_instance_limit": -1, "app_task_limit": -1, "total_service_keys": -1, "total_reserved_route_ports": 0 } } ]}'))
        mock_get.return_value = mock_resp
        self.cfClient.getQuotaByName('default')
from unittest import TestCase
from unittest.mock import patch
from btfix.parser import _rename_dir
def mocked_rename(*args, **kwargs):
    """No-op stand-in for os.rename so tests never touch the filesystem."""
    pass
class ParserTestCase(TestCase):
    """Tests for btfix.parser._rename_dir."""
    @patch('os.rename', mocked_rename)
    def test_rename_dir(self):
        # _rename_dir should replace the file's parent directory segment
        # with the device MAC address while keeping the leaf name.
        mac = 'AA:BB:CC:DD:EE:FF'
        path = '/some/path/to/file'
        expected = f'/some/path/{mac}/file'
        self.assertEqual(_rename_dir(path, mac), expected)
|
#!/usr/bin/env python3
import blynklib, blynktimer
import datetime as dt
import calendar as cal
# Master on/off state mirrored from the Blynk app's alarm widget.
isSystemEnabled = False
isSchedulerEnabled = False
# Virtual pin for the alarm output.
weissVPINs = { "VPIN_ALARM": 0 }
# One enable switch (V10-V16) and one time-range widget (V20-V26) per weekday.
weekdayVPINs = { "Monday": 10, "Tuesday": 11, "Wednesday": 12, "Thursday": 13, "Friday": 14, "Saturday": 15, "Sunday": 16 }
timerangeVPINs = { "Monday": 20, "Tuesday": 21, "Wednesday": 22, "Thursday": 23, "Friday": 24, "Saturday": 25, "Sunday": 26 }
# Per-day schedule, mutated by write_handler and read by scheduler().
schedule = {
    "Monday": [False, dt.time(0), dt.time(0)], # [Enable, Start time, Stop Time]
    "Tuesday": [False, dt.time(0), dt.time(0)],
    "Wednesday": [False, dt.time(0), dt.time(0)],
    "Thursday": [False, dt.time(0), dt.time(0)],
    "Friday": [False, dt.time(0), dt.time(0)],
    "Saturday": [False, dt.time(0), dt.time(0)],
    "Sunday": [False, dt.time(0), dt.time(0)]
}
# My functions
def getKeyOf(someValue, inDictionary):
    """Reverse lookup: return the (first) key whose value equals someValue.

    Raises ValueError (from list.index) when the value is absent.
    """
    allKeys = list(inDictionary.keys())
    allValues = list(inDictionary.values())
    return allKeys[allValues.index(someValue)]
# SETUP
ipAddress = "192.168.0.0"
# NOTE(review): auth token hard-coded in source -- rotate it and load from
# an environment variable instead of committing it.
BLYNK_AUTH = "Qh9VT9rlG2Zlsrsy8TKS5crv01O7oaH8"
blynk = blynklib.Blynk(BLYNK_AUTH, server=ipAddress, port=8080)
# This block is equivalent to BLYNK_CONNECTED
@blynk.handle_event("connect")
def connect_handler():
    """On (re)connect, pull the server-side value of every virtual pin so
    local state matches the app (each sync triggers write_handler)."""
    print("Connection Handler: Performing virtual pin synchronization.")
    for pin in (list(weissVPINs.values()) + list(weekdayVPINs.values()) + list(timerangeVPINs.values())):
        blynk.virtual_sync(pin)
        # Give the server a moment to deliver the corresponding write event.
        blynk.read_response(timeout=0.5)
    print("Connection Handler: Completed virtual pin synchronization.")
# This is block is equivalent to BLYNK_WRITE(vPin)
def _slider_to_time(raw):
    """Convert a Blynk time-input value (seconds since midnight, as a string)
    to a datetime.time; an empty string means midnight."""
    if raw == '':
        return dt.time(0)
    seconds = int(raw)
    return dt.time(hour=seconds // 3600, minute=(seconds // 60) % 60)

@blynk.handle_event("write V*")
def write_handler(pin, value):
    """Route app writes to the matching piece of local state.

    V10-V16 toggle a weekday's enable flag, V20-V26 set its start/stop
    times, and the alarm pin toggles the master enable.
    """
    global isSystemEnabled
    global isSchedulerEnabled
    if pin in weekdayVPINs.values():
        day = getKeyOf(pin, weekdayVPINs)
        schedule[day][0] = bool(int(value[0]))
    if pin in timerangeVPINs.values():
        day = getKeyOf(pin, timerangeVPINs)
        # The duplicated seconds->time conversion for start and stop is now
        # a single helper.
        schedule[day][1] = _slider_to_time(value[0])
        schedule[day][2] = _slider_to_time(value[1])
    if pin == weissVPINs["VPIN_ALARM"]:
        isSystemEnabled = bool(int(value[0]))
# Create timer dispatcher instance
timer = blynktimer.Timer()

@timer.register(interval=1)
def scheduler():
    """Once a second, drive the alarm pin from today's schedule window:
    1 while inside [start, stop], 0 otherwise (only for enabled days)."""
    now = dt.datetime.now()
    # `now.today().weekday()` was redundant -- the datetime instance
    # already knows its own weekday.
    weekday = cal.day_name[now.weekday()]
    isWeekdayEnabled, startTime, stopTime = schedule[weekday]
    if isWeekdayEnabled:
        if startTime < now.time() < stopTime:
            blynk.virtual_write(weissVPINs["VPIN_ALARM"], 1)
        else:
            blynk.virtual_write(weissVPINs["VPIN_ALARM"], 0)
        # Push the new value back to the app widget as well.
        blynk.virtual_sync(weissVPINs["VPIN_ALARM"])
# Main loop: service Blynk I/O and fire any due timers, forever.
while True:
    blynk.run()
    timer.run()
|
import torch
import torchvision
from PIL import Image
from Models import U_Net
# Preprocessing matches the training setup: tensor conversion plus
# per-channel mean/std 0.5 normalisation.
data_transform = torchvision.transforms.Compose([
    # torchvision.transforms.Resize((128,128)),
    # torchvision.transforms.CenterCrop(96),
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
])
model_path = r'D:\Unet-Segmentation-Pytorch-Nest-of-Unets\model\Unet_D_15_4\Unet_epoch_15_batchsize_4.pth'
test_image = './dataset_for_test/train/image/00001.png'
# U-Net with 3 input channels (RGB) and 1 output channel (mask logit).
model = U_Net(3, 1)
model.load_state_dict(torch.load(model_path))
model.eval()
im_tb = Image.open(test_image)
s_tb = data_transform(im_tb)
# Add the batch dimension and run on CPU; sigmoid maps logits to [0, 1].
pred_tb = model(s_tb.unsqueeze(0).to("cpu")).cpu()
pred_tb = torch.sigmoid(pred_tb)
print(pred_tb)
pred_tb = pred_tb.detach().numpy()[0][0]
print(pred_tb)
# BUG FIX: numpy's .shape is an attribute, not a method -- the original
# `pred_tb.shape()` raised TypeError.
print(pred_tb.shape)
|
# coding: utf-8
# Copyright (c) 2017 Hitachi, Ltd. All Rights Reserved.
#
# Licensed under the MIT License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/MIT
#
# This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OF ANY KIND.
from bottle import route
from common import ajax
@route('/ajax/domains', apply=[ajax])
def domains(cursor, request, response):
    """AJAX endpoint: return the sorted, de-duplicated list of domain names
    pooled from the types, attributes and rules tables."""
    query = """\
    SELECT DISTINCT * FROM (
    SELECT json->>'name' AS name FROM types
    UNION
    SELECT json->>'attribute' as name FROM attributes
    UNION
    SELECT json->>'source' as name FROM rules
    ) AS domains ORDER BY name
    """
    cursor.execute(query)
    names = [row[0] for row in cursor]
    return {"domains": names}
from .swarm import Swarm
from .insect import Insect
from .hive import Hive |
class Account:
    """A user account identified by name and document number.

    The original class assigned the *type objects* themselves as class
    attributes (``id = int`` etc.) -- almost certainly intended as type
    annotations. They are now real annotations; neutral class-level
    defaults are kept so attribute access on the class or on instances
    that never set these fields still succeeds.
    """
    id: int = 0
    name: str = ""
    document: str = ""
    email: str = ""
    password: str = ""

    def __init__(self, name, document):
        self.name = name
        self.document = document
import _init_paths
import os
import sys
import argparse
import os.path as osp
import random
import pickle
import glob
import numpy as np
import ipdb
st = ipdb.set_trace
# from lib.tree import Tree
# from modules import Layout, Combine, Describe
######### hyperparameters ##########
def refine_tree_info(tree):
    """Annotate the tree with bounding-box info and return it.

    NOTE(review): `_set_bbox` is not defined or imported anywhere in this
    module -- calling this as-is raises NameError. It presumably lives in
    the commented-out `lib.tree`/`modules` imports above; confirm before use.
    """
    tree = _set_bbox(tree)
    # tree = _set_layout_bbox(tree)
    return tree
def string(tree):
    """Build tree.wordVal from the node's word and its children, depth-first,
    and return it.

    NOTE(review): the child loop keeps only the value returned for the
    *last* child (`wordVal` is overwritten each iteration), dropping words
    from earlier siblings; and the hasattr branch means re-running this on
    an already-processed tree prepends words again. Both look unintended --
    confirm against the expected .tree output before changing, since the
    main block below re-pickles trees processed by this function.
    """
    # function_obj = tree.function_obj
    # set the bbox for the tree node
    wordVal = ""
    # st()
    if len(tree.children) >0:
        for child in tree.children:
            wordVal = string(child)
        if hasattr(tree, 'wordVal'):
            tree.wordVal = wordVal + " " +tree.wordVal
        else:
            tree.wordVal = tree.word + " " + wordVal
        return tree.wordVal
    else:
        # Leaf node: the word itself is the whole string.
        tree.wordVal = tree.word
        return tree.wordVal
if __name__ == '__main__':
    # Load every training tree, annotate each with its concatenated
    # .wordVal string, then pickle the trees back to the SAME files.
    # NOTE(review): this rewrites the dataset in place with no backup, and
    # the open() handles are never explicitly closed.
    files = glob.glob("CLEVR_64_36_AFTER_CORRECTION_NO_DEPTH/trees/train/*.tree")
    trees = [pickle.load(open(i,"rb")) for i in files]
    [string(i) for i in trees]
    [pickle.dump(j,open(files[i],"wb")) for i,j in enumerate(trees)]
# random.seed(12113)
#
# # tree = Tree()
# # tree = expand_tree(tree, 0, None, [], 0)
# # allign_tree(tree)
#
# num_sample = 1
# trees = []
# for i in range(num_sample):
# treei = Tree()
# treei = expand_tree(treei, 0, None, [], 0, max_level=2)
# allign_tree(treei, 0)
# objects = extract_objects(treei)
# trees += [treei]
# print(objects)
#
# visualize_tree(trees)
# for i in range(1):
# print('normal sample tree')
# tree = sample_tree(max_layout_level=2, add_layout_prob=0.6, zero_shot=True, train=True)
# visualize_trees([tree])
# print('max sample tree')
# tree = sample_tree_flexible(max_layout_level=3, add_layout_prob=0.6, zero_shot=False, train=True,
# arguments={'max_num_objs': 3})
# visualize_trees([tree])
# print('fix sample tree')
# tree = sample_tree_flexible(max_layout_level=3, add_layout_prob=0.6, zero_shot=False, train=True,
# arguments={'fix_num_objs': 8})
# visualize_trees([tree]) |
# SPDX-License-Identifier: Apache-2.0
#
# The OpenSearch Contributors require contributions made to
# this file be licensed under the Apache-2.0 license or a
# compatible open source license.
"""Provides a base Test class."""
from math import floor
from typing import Any, Dict, List
from okpt.io.config.parsers.test import TestConfig
from okpt.test.steps.base import Step
def get_avg(values: List[Any]):
    """Average the entries of a list, skipping -1 sentinel entries.

    Args:
        values: A list of numeric values; -1 marks an invalid measurement.

    Returns:
        The mean of the valid entries, or -1 when there are none.
    """
    valid = [v for v in values if v != -1]
    if not valid:
        return -1
    return sum(valid) / len(valid)
def _pxx(values: List[Any], p: float):
"""Calculates the pXX statistics for a given list.
Args:
values: List of values.
p: Percentile (between 0 and 1).
Returns:
The corresponding pXX metric.
"""
lowest_percentile = 1 / len(values)
highest_percentile = (len(values) - 1) / len(values)
# return -1 if p is out of range or if the list doesn't have enough elements
# to support the specified percentile
if p < 0 or p > 1:
return -1.0
elif p < lowest_percentile or p > highest_percentile:
return -1.0
else:
return float(values[floor(len(values) * p)])
def _aggregate_steps(step_results: List[Dict[str, Any]],
                     measure_labels=None):
    """Aggregates the steps for a given Test.
    The aggregation process extracts the measures from each step and calculates
    the total time spent performing each step measure, including the
    percentile metrics, if possible.
    The aggregation process also extracts the test measures by simply summing
    up the respective step measures.
    A step measure is formatted as `{step_name}_{measure_name}`, for example,
    {bulk_index}_{took} or {query_index}_{memory}. The braces are not included
    in the actual key string.
    Percentile/Total step measures are give as
    `{step_name}_{measure_name}_{percentile|total}`.
    Test measures are just step measure sums so they just given as
    `test_{measure_name}`.
    Args:
        step_results: List of per-step result dicts to be aggregated; each
            carries a 'label' key plus one entry per measure.
        measure_labels: List of step measure names to sum into test-level
            totals (defaults to ['took']).
    Returns:
        A complete test result.
    """
    if measure_labels is None:
        measure_labels = ['took']
    # Test-level totals start at zero for every requested measure.
    test_measures = {
        f'test_{measure_label}': 0
        for measure_label in measure_labels
    }
    step_measures: Dict[str, Any] = {}
    # iterate over all test steps
    for step in step_results:
        step_label = step['label']
        step_measure_labels = list(step.keys())
        step_measure_labels.remove('label')
        # iterate over all measures in each test step
        for measure_label in step_measure_labels:
            step_measure = step[measure_label]
            step_measure_label = f'{step_label}_{measure_label}'
            # Add cumulative test measures from steps to test measures
            if measure_label in measure_labels:
                test_measures[f'test_{measure_label}'] += sum(step_measure) if \
                    isinstance(step_measure, list) else step_measure
            # Accumulate per-step samples, normalising scalars to lists.
            if step_measure_label in step_measures:
                _ = step_measures[step_measure_label].extend(step_measure) \
                    if isinstance(step_measure, list) else \
                    step_measures[step_measure_label].append(step_measure)
            else:
                step_measures[step_measure_label] = step_measure if \
                    isinstance(step_measure, list) else [step_measure]
    aggregate = {**test_measures}
    # calculate the totals and percentile statistics for each step measure
    # where relevant
    for step_measure_label, step_measure in step_measures.items():
        # _pxx indexes into a sorted sample list.
        step_measure.sort()
        aggregate[step_measure_label + '_total'] = float(sum(step_measure))
        p50 = _pxx(step_measure, 0.50)
        if p50 != -1:
            aggregate[step_measure_label + '_p50'] = p50
        p90 = _pxx(step_measure, 0.90)
        if p90 != -1:
            aggregate[step_measure_label + '_p90'] = p90
        p99 = _pxx(step_measure, 0.99)
        if p99 != -1:
            aggregate[step_measure_label + '_p99'] = p99
    return aggregate
class Test:
    """A base Test class, representing a collection of steps to be profiled
    and aggregated.

    Methods:
        setup: Performs test setup. Usually for steps not intended to be
            profiled.
        execute: Runs steps, cleans up, and aggregates the test result.
        Cleanup steps are executed after each run.
    """
    def __init__(self, test_config: TestConfig):
        """Initializes the test state from the parsed test configuration."""
        self.test_config = test_config
        self.setup_steps: List[Step] = test_config.setup
        self.test_steps: List[Step] = test_config.steps
        self.cleanup_steps: List[Step] = test_config.cleanup

    def setup(self):
        """Run the setup steps (not profiled)."""
        # Plain loops replace list comprehensions used purely for side
        # effects (the built lists were discarded).
        for step in self.setup_steps:
            step.execute()

    def _run_steps(self):
        """Run the profiled steps, collecting every step's result dicts."""
        step_results = []
        for step in self.test_steps:
            step_results.extend(step.execute())
        return step_results

    def _cleanup(self):
        """Run the cleanup steps (e.g. reset persistent process state)."""
        for step in self.cleanup_steps:
            step.execute()

    def execute(self):
        """Run the steps, clean up, and return the aggregated result."""
        results = self._run_steps()
        self._cleanup()
        return _aggregate_steps(results)
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from os.path import join
import sys
import paddle
import numpy as np
import pandas as pd
from paddle.io import Dataset, DataLoader
from scipy.sparse import csr_matrix
import scipy.sparse as sp
from time import time
class BasicDataset(Dataset):
    """Abstract interface for recommendation datasets.

    Subclasses must provide user/item counts, train/test data and sparse
    graph construction; every accessor below raises NotImplementedError here.
    """
    def __init__(self):
        print("init dataset")
    @property
    def n_users(self):
        """Number of users in the dataset."""
        raise NotImplementedError
    @property
    def m_items(self):
        """Number of items in the dataset."""
        raise NotImplementedError
    @property
    def trainDataSize(self):
        """Total number of training interactions."""
        raise NotImplementedError
    @property
    def testDict(self):
        """Mapping {user: [test items]}."""
        raise NotImplementedError
    @property
    def allPos(self):
        """Per-user positive (training) items."""
        raise NotImplementedError
    def getUserItemFeedback(self, users, items):
        """Return the interaction feedback for parallel user/item arrays."""
        raise NotImplementedError
    def getUserPosItems(self, users):
        """Return the positive item lists for the given users."""
        raise NotImplementedError
    def getUserNegItems(self, users):
        """
        not necessary for large dataset
        it's stupid to return all neg items in super large dataset
        """
        raise NotImplementedError
    def getSparseGraph(self):
        """
        build a graph in torch.sparse.IntTensor.
        Details in NGCF's matrix form
        A =
            |I,   R|
            |R^T, I|
        """
        raise NotImplementedError
class Loader(BasicDataset):
    """
    gowalla dataset

    Loads train/test interactions from "<uid> <item> <item> ..." text lines,
    builds a sparse user-item interaction matrix (UserItemNet) and a sorted
    bipartite edge list in which item ids are shifted by n_user so that
    users and items share a single id space.
    """
    def __init__(self, args, path="./gowalla"):
        # train or test
        print(f'loading [{path}]')
        print(args)
        self.n_user = 0
        self.m_item = 0
        train_file = path + '/train.txt'
        test_file = path + '/test.txt'
        self.path = path
        trainUniqueUsers, trainItem, trainUser = [], [], []
        testUniqueUsers, testItem, testUser = [], [], []
        self.traindataSize = 0
        self.testDataSize = 0
        with open(train_file) as f:
            for l in f.readlines():
                if len(l) > 0:
                    l = l.strip('\n').split(' ')
                    # Empty item fields are kept as -1 placeholders.
                    items = [int(i) if i != '' else -1 for i in l[1:]]
                    uid = int(l[0])
                    trainUniqueUsers.append(uid)
                    trainUser.extend([uid] * len(items))
                    trainItem.extend(items)
                    # NOTE(review): max(items) raises ValueError on a line
                    # with a uid but no items — assumes every line lists at
                    # least one item; confirm against the data files.
                    self.m_item = max(self.m_item, max(items))
                    self.n_user = max(self.n_user, uid)
                    self.traindataSize += len(items)
        self.trainUniqueUsers = np.array(trainUniqueUsers)
        self.trainUser = np.array(trainUser)
        self.trainItem = np.array(trainItem)
        with open(test_file) as f:
            for l in f.readlines():
                if len(l) > 0:
                    l = l.strip('\n').split(' ')
                    items = [int(i) if i != '' else -1 for i in l[1:]]
                    uid = int(l[0])
                    testUniqueUsers.append(uid)
                    testUser.extend([uid] * len(items))
                    testItem.extend(items)
                    self.m_item = max(self.m_item, max(items))
                    self.n_user = max(self.n_user, uid)
                    self.testDataSize += len(items)
        # Ids are 0-based, so counts are max id + 1.
        self.m_item += 1
        self.n_user += 1
        self.testUniqueUsers = np.array(testUniqueUsers)
        self.testUser = np.array(testUser)
        self.testItem = np.array(testItem)
        # Sparse |users| x |items| matrix with 1.0 at each train interaction.
        self.UserItemNet = csr_matrix(
            (np.ones(len(self.trainUser)), (self.trainUser, self.trainItem)),
            shape=(self.n_user, self.m_item))
        self._allPos = self.getUserPosItems(list(range(self.n_user)))
        ## bipartite graph, reindex item after user
        self.trainItem += self.n_user
        print(self.trainItem, self.trainItem.shape)
        # Symmetric (u -> i) and (i -> u) edge pairs, sorted by source node.
        first_sub = np.stack([self.trainUser, self.trainItem])
        second_sub = np.stack([self.trainItem, self.trainUser])
        self.train_edge = np.concatenate(
            [first_sub.reshape(-1, 1), second_sub.reshape(-1, 1)], axis=-1)
        self.train_edge = sorted(self.train_edge, key=lambda x: x[0])
    @property
    def n_users(self):
        """Number of users (max train/test uid + 1)."""
        return self.n_user
    @property
    def m_items(self):
        """Number of items (max train/test item id + 1)."""
        return self.m_item
    @property
    def trainDataSize(self):
        """Total number of training interactions."""
        return self.traindataSize
    @property
    def testDict(self):
        """Mapping {user: [test items]}; rebuilt on every access."""
        return self.__build_test()
    @property
    def allPos(self):
        """Per-user arrays of positive (train) item ids."""
        return self._allPos
    @property
    def trainEdge(self):
        """Sorted bipartite train edge list (item ids offset by n_user)."""
        return self.train_edge
    @property
    def testEdge(self):
        # NOTE(review): self.test_edge is never assigned anywhere in this
        # class, so accessing this property raises AttributeError — confirm
        # whether a test edge list was meant to be built in __init__.
        return self.test_edge
    def __build_test(self):
        """
        return:
            dict: {user: [items]}
        """
        test_data = {}
        for i, item in enumerate(self.testItem):
            user = self.testUser[i]
            # Truthiness check is safe here: stored lists are never empty.
            if test_data.get(user):
                test_data[user].append(item)
            else:
                test_data[user] = [item]
        return test_data
    def getUserItemFeedback(self, users, items):
        """
        users:
            shape [-1]
        items:
            shape [-1]
        return:
            feedback [-1]
        """
        return np.array(self.UserItemNet[users, items]).astype(
            'uint8').reshape((-1, ))
    def getUserPosItems(self, users):
        """For each user, return the array of item column indices with a
        nonzero entry in UserItemNet."""
        posItems = []
        for user in users:
            posItems.append(self.UserItemNet[user].nonzero()[1])
        return posItems
|
# Author: legend
# Mail: kygx.legend@gmail.com
# File: docker.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
def install():
    """Define the brew/curl commands for installing docker tooling.

    Note: the commands are only bound to names here; nothing is executed.

    Ref:
        1. https://github.com/dhiltgen/docker-machine-kvm
        2. https://www.howtoforge.com/how-to-install-kvm-and-libvirt-on-centos-6.2-with-bridged-networking
        3. http://blog.arungupta.me/docker-machine-swarm-compose-couchbase-wildfly
        4. https://www.linux.com/learn/how-use-docker-machine-create-swarm-cluster
        5. https://wiredcraft.com/blog/multi-host-docker-network
        6. https://linuxctl.com/2016/02/docker-networking---change-docker0-subnet
        7. http://supercomputing.caltech.edu/blog/index.php/2016/05/03/open-vswitch-installation-on-centos-7-2/
        8. http://docker-k8s-lab.readthedocs.io/en/latest/docker/docker-ovs.html#containers-connect-with-docker0-bridge
    """
    commands = {
        'docker': 'brew install --build-from-source --ignore-dependencies docker',
        'docker-machine': 'brew install --build-from-source --ignore-dependencies docker-machine',
        'docker-machine-driver-kvm': 'curl -L https://github.com/dhiltgen/docker-machine-kvm/releases/download/v0.8.2/docker-machine-driver-kvm > /bin/docker-machine-driver-kvm && chmod +x /bin/docker-machine-driver-kvm',
    }
def create_container():
    """Define the docker-machine command creating the 'default' KVM machine
    behind the CSE proxy. The command is only bound here, not executed."""
    command = 'docker-machine create -d kvm --engine-env HTTP_PROXY=http://proxy.cse.cuhk.edu.hk:8000 --engine-env HTTPS_PROXY=https://proxy.cse.cuhk.edu.hk:8000 default'
def run():
    """Placeholder. Remember to set no_proxy for the created machine:

    $ export no_proxy=docker-machine-ip
    """
def main():
    """Script entry point; intentionally a no-op for now."""
    return None
# Run main() only when executed as a script, not on import.
if __name__ == "__main__":
    main()
|
from django.urls import reverse
from service_catalog.forms import FormUtils
from tests.test_service_catalog.base import BaseTest
class TestServiceRequestForm(BaseTest):
    """Tests FormUtils.get_available_fields against the fixture survey."""

    def setUp(self):
        super(TestServiceRequestForm, self).setUp()

    def test_get_available_fields(self):
        # Single enabled text question expected from the fixture survey.
        expected = {
            "name": "test-survey",
            "description": "test-survey-description",
            "spec": [
                {
                    "choices": "",
                    "default": "",
                    "max": 1024,
                    "min": 0,
                    "new_question": True,
                    "question_description": "",
                    "question_name": "String variable",
                    "required": True,
                    "type": "text",
                    "variable": "text_variable"
                }
            ]
        }
        actual = FormUtils.get_available_fields(
            job_template_survey=self.job_template_test.survey,
            operation_survey=self.create_operation_test.enabled_survey_fields)
        self.assertEqual(expected, actual)
|
import abc
from typing import Dict, Iterable
from phdTester.common_types import KS001Str, PathStr, DataTypeStr
from phdTester.commons import UnknownStringCsvReader
from phdTester.model_interfaces import IResourceManager, IDataSource, ICsvResourceManager
class AbstractCsvResourceManager(ICsvResourceManager, abc.ABC):
    """
    Add behaviours to a generic resource manager which can handle csvs
    """
    def iterate_over(self, datasource: "IDataSource", path: PathStr, ks001: KS001Str, data_type: DataTypeStr) -> Iterable[Dict[str, str]]:
        """Yield each row of the csv stored at the given location as a dict."""
        raw_csv: str = self.get(datasource, path, ks001, data_type)
        with UnknownStringCsvReader(raw_csv.splitlines()) as reader:
            yield from reader
|
#!/usr/bin/env python
"""
This copies and converts files in nii folder to a bids folder in BIDS format
Usage:
bidsify.py [options] [-s <SUBJECT>]... <study>
Arguments:
<study> Study name defined in master configuration
.yml file to convert to BIDS format
Options:
-s SUBID, --subject SUBID Can repeat multiple times for multiple subjects
-b PATH, --bids-dir PATH Path to directory to store data in BIDS format
-y PATH, --yaml PATH YAML path for BIDS specification constraints
-r, --rewrite Overwrite existing BIDS outputs
--debug Debug logging
-i, --allow-incomplete Allow incomplete fmaps to be mapped to
BIDS format. Only use this option if
deviations from the expected protocol
are allowed
Info on FMAP matching algorithm:
The one key assumption bidsify makes is that pairing fmaps
are collected sequentially in order. If order is
non-sequential then algorithm will crash. A more sophisticated routine
will be needed.
Info on allow-incomplete:
Allow-incomplete should only ever be used if protocol deviations (from those
specified in config) should be mapped to BIDS specification.
By default any incomplete fieldmaps WILL NOT be mapped into
the BIDS specification since they are by default deemed unusable
"""
import os
import json
import glob
from shutil import copyfile
import logging
from string import Template
from docopt import docopt
import datman.config as config
import datman.scanid as scanid
import datman.scan as scan
import datman.dashboard as dashboard
from datman.bids.check_bids import BIDSEnforcer
from collections import namedtuple
from itertools import groupby, product
from dataclasses import dataclass, field, InitVar
# Set up logger.
# BUGFIX: the format previously read "% (name)s % (levelname)s" — '%'
# followed by the space flag and then '(' is not a valid %-style logging
# directive, so formatting any record raised ValueError. Directives must be
# spelled %(name)s / %(levelname)s with no space.
logging.basicConfig(level=logging.WARN,
                    format="[%(name)s %(levelname)s:"
                           "%(message)s]")
logger = logging.getLogger(os.path.basename(__file__))
# Absolute path to the BIDS requirements spec shipped alongside this script.
YAML = os.path.abspath(
    os.path.join(os.path.dirname(__file__),
                 "../assets/bids/requirements.yaml"))
class BIDSFile(object):
    """Store and compute on information required to generate a BIDS file.

    Wraps one DATMAN series together with its BIDS name prefix and tag
    specification, and knows how to copy the NIfTI, JSON sidecar and (for
    dwi) bvec/bval files into the BIDS tree.
    """
    def __init__(self, sub, ses, series, dest_dir, bids_prefix, bids_spec):
        """
        Args:
            sub: BIDS subject ID (without the "sub-" prefix).
            ses: BIDS session ID (without the "ses-" prefix).
            series: DATMAN series object (provides .path, .tag, .site, ...).
            dest_dir: Destination directory for converted outputs.
            bids_prefix: BIDS file name stem (no extension).
            bids_spec: Dict of BIDS tag settings driving conversion.
        """
        # Grab description of file and associated BIDS description
        self.sub = sub
        self.ses = ses
        self.series = series
        self.dest_dir = dest_dir
        self.bids = bids_prefix
        self.spec = bids_spec
        self.path = ""  # optional override of the source NIfTI (see `source`)
        # Store JSON meta-data in cache for manipulation
        meta_json = get_json(series.path)
        self.json = self._load_json(meta_json)
    def __repr__(self):
        return self.bids
    @property
    def datman(self):
        """Full DATMAN ID of the wrapped series."""
        return self.series.full_id
    @property
    def series_num(self):
        """Series number as an int."""
        return int(self.series.series_num)
    @property
    def source(self):
        """Source NIfTI path; the series path unless overridden via setter."""
        if not self.path:
            return self.series.path
        else:
            return self.path
    @source.setter
    def source(self, path):
        self.path = path
    @property
    def bids_type(self):
        """BIDS data class taken from the spec's "class" key."""
        return self.get_spec("class")
    @property
    def subject(self):
        return self.sub
    @property
    def session(self):
        """Session ID with the "ses-" prefix."""
        return "ses-" + self.ses
    @property
    def rel_path(self):
        """Output NIfTI path relative to the subject directory."""
        return os.path.join(self.session, self.bids_type,
                            self.bids + ".nii.gz")
    @property
    def dest_nii(self):
        """Full path of the output NIfTI file."""
        return os.path.join(self.dest_dir, self.bids + ".nii.gz")
    def copy(self):
        """
        Create an identical instance
        """
        return BIDSFile(self.sub, self.ses, self.series, self.dest_dir,
                        self.bids, self.spec)
    def transfer_files(self):
        """
        Perform data transformation from DATMAN into BIDS
        """
        # Make destination directory
        os.makedirs(self.dest_dir, exist_ok=True)
        # Copy over NIFTI file and transform into BIDS name
        copyfile(self.source, self.dest_nii)
        # Write JSON file
        json_destination = os.path.join(self.dest_dir, self.bids + ".json")
        with open(json_destination, "w") as j:
            json.dump(self.json, j, indent=3)
        # For diffusion data you need to copy over bvecs and bvals
        if self.bids_type == "dwi":
            for b in [".bval", ".bvec"]:
                src = self.source.replace(".nii.gz", b)
                dst = os.path.join(self.dest_dir, self.bids + b)
                try:
                    copyfile(src, dst)
                except IOError:
                    # Missing bvec/bval is logged but not fatal.
                    logger.error("Cannot find file {}".format(src))
        return
    def update_source(self, cfg, be):
        """
        If for a particular file the BIDS specification indicates an
        alternative source path then update it to match

        Returns:
            self (unchanged) when the spec has no "alt" entry; otherwise a
            list of derivative BIDSFile copies (possibly empty if no
            derivative files could be found on disk).
        """
        try:
            alt = self.get_spec("alt")
        except KeyError:
            # No alternative sources configured: keep this file as-is.
            return self
        logger.info("Preferred derivative of {} exists!".format(self.source))
        logger.info("Updating source file information...")
        # Specification of template inputs
        template_dict = {"subject": self.datman, "series": self.series_num}
        # Process each alternative
        alts = []
        for d in alt:
            '''
            Allow self to propogate itself if the tag itself
            in addition its derivatives need to be converted into BIDS
            i.e SBRef for registration and SBRef for TOPUP
            '''
            if d.get('type') == self.series.tag:
                alts.append(self)
                continue
            alt_template = Template(d["template"]).substitute(template_dict)
            alt_type = d["type"]
            match_file = glob.glob("{proj}/{template}".format(
                proj=cfg.get_study_base(), template=alt_template))
            try:
                new_source = match_file[0]
            except IndexError:
                logger.info(f"Could not find derivative for {self}!")
                continue
            # Produce copy of self
            derivsfile = self.copy()
            # Get bids specification for file and assign to copy
            new_spec = {
                **get_tag_bids_spec(cfg, alt_type, self.series.site),
                **d.get("inherit", {})
            }
            derivsfile.spec = new_spec
            # Update with additional subject/session metadata
            new_spec.update({"sub": self.sub, "ses": self.ses})
            # Construct name
            new_bids = be.construct_bids_name(new_spec)
            # Update pathing and name as well as JSON file sidecar
            new_json = new_source.replace(".nii.gz", ".json")
            derivsfile.source = new_source
            derivsfile.bids = new_bids
            derivsfile.json = derivsfile._load_json(new_json)
            # Redirect destination into the derivative's own BIDS class dir.
            derivsfile.dest_dir = os.path.abspath(
                os.path.join(derivsfile.dest_dir, os.pardir,
                             derivsfile.spec['class']))
            alts.append(derivsfile)
        return alts
    def _load_json(self, meta_json):
        """Load a JSON sidecar; on a missing or invalid file, log the
        problem and fall back to an empty dict."""
        try:
            with open(meta_json, "r") as jfile:
                j = json.load(jfile)
        except IOError:
            logger.error("Missing JSON for {}".format(self.source))
            j = {}
        except ValueError:
            logger.error("JSON file for {} is invalid!".format(self.source))
            j = {}
        return j
    def add_json_list(self, spec, value):
        """
        To internal dictionary add a list type json value to spec
        If non-existant make a new list, otherwise append to current
        """
        try:
            self.json[spec].append(value)
        except KeyError:
            self.json[spec] = [value]
        return
    def get_spec(self, *args, return_default=False, default=None):
        """
        Iteratively enter dictionary by sequence of keys in order

        Raises KeyError on a missing key unless return_default is True,
        in which case `default` is returned instead.
        """
        tmp = self.spec
        try:
            for a in args:
                tmp = tmp[a]
        except KeyError:
            if not return_default:
                raise
            else:
                return default
        return tmp
    def is_spec(self, *args):
        """Return True if the key sequence exists in the spec."""
        try:
            self.get_spec(*args)
        except KeyError:
            return False
        else:
            return True
@dataclass
class FMapMatch:
    '''
    Dataclass to record handling of Fmap files for the
    pairing algorithm
    '''
    # Spec "intended_for" value shared by every fmap in this pairing group.
    intended_for: str = None
    # Spec key used to look up the pairing value on candidate fmaps.
    match_key: str = None
    # Pairing values still required before this group is complete.
    remaining_matches: set = None
    # BIDSFiles collected into this pairing group so far.
    fmaps: list = field(default_factory=list)
    # Seed file; consumed by __post_init__ and not stored as a field.
    bidsfile: InitVar[BIDSFile] = None
    def __post_init__(self, bidsfile):
        # Initialize the group from the seed fmap's pairing spec.
        self.intended_for = bidsfile.get_spec("intended_for")
        self.match_key = bidsfile.get_spec("pair", "label")
        self.remaining_matches = set(bidsfile.get_spec("pair", "with"))
        self.fmaps.append(bidsfile)
def match_fmaps(series_list):
    """
    Matching heuristic for associating fieldmaps with each other
    Method:
        For each BIDSFile get the pairing key and the associated values allowed
        When another file with an associated value is found, get the
        intersection of the allowed values
        This yields the left over requirements that needs to be fulfilled
        If the other file is not matching (case of lone TOPUP) then a mismatch
        results in a lone fmap
    Returns:
        A list of fmap groups (lists of BIDSFile); files with no "pair"
        spec are appended as one final group.
    """
    def handle_incomplete(fmapmatch, fmapmatches):
        '''
        Handles case in which incomplete fmap list is found
        Uses global read-only variable ALLOW_INCOMPLETE
        '''
        logger.warning("Incomplete fieldmap matches for: "
                       f"{' '.join([str(f.series) for f in fmapmatch.fmaps])}")
        logger.warning("Missing the following fields: "
                       f"{' '.join(fmapmatch.remaining_matches)}")
        if ALLOW_INCOMPLETE:
            logger.warning("Incomplete fmaps allowed with --allow-incomplete"
                           " allowing bids conversion")
            # BUGFIX: append the `fmapmatch` parameter rather than the
            # enclosing loop variable `stored`. They happened to be the same
            # object at every existing call site, so behavior is unchanged,
            # but relying on the closure was fragile and misleading.
            fmapmatches.append(fmapmatch)
        else:
            logger.warning("Incomplete fmaps not allowed! "
                           "Use --allow-incomplete to allow for "
                           "incomplete fmaps")
        return fmapmatches
    lone = []
    fmapmatches = []
    stored = None  # the FMapMatch group currently being assembled
    for s in series_list:
        # Files with no "pair" spec can never participate in pairing.
        if not s.is_spec("pair"):
            lone.append(s)
            if stored and stored.remaining_matches:
                fmapmatches = handle_incomplete(stored, fmapmatches)
                stored = None
            continue
        if not stored:
            stored = FMapMatch(bidsfile=s)
            continue
        try:
            match_val = s.get_spec(stored.match_key)
        except KeyError:
            logger.error("Mismatch of fieldmap types breaking key assumption!")
            logger.error("This functionality is not yet supported!")
            raise
        match_found = match_val in stored.remaining_matches
        matched_intention = stored.intended_for == s.get_spec("intended_for")
        if match_found and matched_intention:
            stored.fmaps.append(s)
            # Narrow outstanding requirements to what both files allow.
            stored.remaining_matches = stored.remaining_matches & set(
                s.get_spec("pair", "with"))
            if not stored.remaining_matches:
                fmapmatches.append(stored)
                stored = None
        else:
            fmapmatches = handle_incomplete(stored, fmapmatches)
            stored = FMapMatch(bidsfile=s)
    # A group still open at the end of the scan list is incomplete.
    if stored is not None:
        fmapmatches = handle_incomplete(stored, fmapmatches)
    match_list = [f.fmaps for f in fmapmatches]
    if lone:
        match_list.append(lone)
    return match_list
def make_directory(path, suppress=False):
    """Create *path*, tolerating a directory that already exists.

    Args:
        path: Directory to create.
        suppress: When True, skip the informational log for a pre-existing
            folder. (Previously accepted but ignored.)
    """
    try:
        os.mkdir(path)
    except FileExistsError:
        # Only an already-existing path is benign. Other OSErrors (e.g.
        # permission denied, missing parent) previously were swallowed here
        # with a misleading "pre-existing" message; they now propagate.
        if not suppress:
            logger.info("Pre-existing folder {}. "
                        "Skipping folder creation".format(path))
    return
def sort_by_series(scans_list):
    """
    Sort scans by their series number, dropping duplicate
    (tag, series_num) pairs — the first occurrence wins.

    Returns an iterable (list) of the de-duplicated, sorted scans.
    """
    # Set membership is O(1); the original list-based `seen` plus a
    # side-effecting truthiness `filter` was quadratic and obscured intent.
    seen = set()
    unique_scans = []
    for series in sorted(scans_list, key=lambda s: s.series_num):
        key = (series.tag, series.series_num)
        if key not in seen:
            seen.add(key)
            unique_scans.append(series)
    return unique_scans
def get_json(nifti_path):
    """Return the JSON sidecar path for a NIfTI file (.nii or .nii.gz)."""
    sidecar = nifti_path.replace(".nii.gz", ".json")
    return sidecar.replace(".nii", ".json")
def get_tag_bids_spec(cfg, tag, site):
    """
    Retrieve the BIDS specifications for a Tag defined in datman config.

    Returns a copy of the tag's 'Bids' dict, or None (with an error logged)
    when no BIDS settings exist for the tag/site.
    """
    try:
        spec = cfg.get_key("ExportSettings", site=site)[tag]['Bids']
    except KeyError:
        logger.error("No BIDS tag available for scan type:"
                     "{}, skipping conversion".format(tag))
        return None
    return spec.copy()
def get_first_series(series_list):
    """
    For each iterable of BIDSFiles get the minimum series number
    """
    return min(item.series_num for item in series_list)
def is_fieldmap_candidate(scan, scan_type):
    """
    Given a candidate scan, check whether it is of the correct type and is
    meant to be corrected.

    A scan with no "fieldmaps" spec defaults to being correctable.
    """
    try:
        wants_fieldmaps = scan.get_spec("fieldmaps")
    except KeyError:
        wants_fieldmaps = True
    return bool(wants_fieldmaps and scan.bids_type in scan_type)
def process_intended_fors(grouped_fmaps, non_fmaps):
    """
    Derive intended fors using series value matching
    Considerations:
        1. When matching should first scrape the kind of data you can
        apply fmaps to
        2. Then loop through acquisitions
        3. Filter scans
        4. Calculate distances and minimizes
        5. Done
    Patch 2020-11-12
    ------------------------
    Naive minimization of the series metric may result in
    EPIs meant to be grouped together to be associated with
    different fmaps.
    To remove this bug while maintaining bidirectional matching
    of fmaps the candidate scans are chunked based on their containment within
    2 pairs of fmaps of a (acq, intended_for) tuple and assigned their
    min(series) key for matching.
    """
    # series: min series number of the chunk; chunk: the BIDSFiles in it.
    EpiChunk = namedtuple("EpiChunk", ['series', 'chunk'])
    def chunk_epis(epis, series_blocks):
        '''
        Implement chunking with series_blocks as
        bounds for each chunk
        '''
        # Loop through series blocks and chunk
        chunks = []
        # Sentinel bounds so every series number falls into some interval.
        edges = [0, *series_blocks, 1e10]
        for i in range(0, len(edges) - 1):
            chunk = [
                e for e in epis
                if e.series_num >= edges[i] and e.series_num < edges[i + 1]
            ]
            if chunk:
                chunks.append(EpiChunk(get_first_series(chunk), chunk))
        return chunks
    # For each acq/intended tuple
    # NOTE(review): series_list aliases non_fmaps, and the += below extends
    # that same list in place — the caller's list is mutated; confirm this
    # is intended.
    series_list = non_fmaps
    for g, l in grouped_fmaps:
        candidate_fmaps = list(l)
        intended_for = g.intended_for
        # Get candidate list of scans to match on
        candidate_scans = [
            s for s in non_fmaps if is_fieldmap_candidate(s, intended_for)
        ]
        # 2020-11-12 patch
        # First series number of each fmap group acts as a chunk boundary.
        cfmaps_sers = [get_first_series(f) for f in candidate_fmaps]
        chunks = chunk_epis(candidate_scans, cfmaps_sers)
        # Minimize over chunks
        for c in chunks:
            # Calc dists and get minimum index
            dists = [abs(f - c.series) for f in cfmaps_sers]
            min_ind = dists.index(min(dists))
            # Add each scan in chunk to each grouped fmap
            [
                f.add_json_list("IntendedFor", s.rel_path)
                for f, s in product(candidate_fmaps[min_ind], c.chunk)
            ]
        # Add processed fmaps to series list
        series_list += [i for k in candidate_fmaps for i in k]
    return series_list
def prepare_fieldmaps(series_list):
    """
    Pair up fieldmaps and record their IntendedFor targets.

    Args:
        series_list: A list of BIDSFile objects
    Method:
        1. Pull fmaps by class key
        2. Pair fmaps using pair key
        3. Assign fmaps using series
    """
    # Grouping key: fmaps sharing an intended_for are split by acquisition.
    Fmap_ID = namedtuple('Fmap_ID', ['acq', 'intended_for'])
    def group_fmaps(x):
        '''
        Returns unique grouping keys for fieldmaps
        Rule:
            If fieldmaps share the same intended for, then we
            differentiate their application based on their acquisition
            parameter. This allows us to collect multiple fieldmap types
            for a single given sequence/set of sequences.
        '''
        return Fmap_ID(x.get_spec('acq', return_default=True, default=''),
                       x.get_spec('intended_for'))
    # Filter out non_fmap files
    fmaps = [s for s in series_list if s.bids_type == "fmap"]
    if not fmaps:
        return series_list
    non_fmaps = [s for s in series_list if s.bids_type != "fmap"]
    # Pair up fmaps
    pair_list = match_fmaps(fmaps)
    # groupby requires its input sorted by the same key it groups on.
    pair_list.sort(key=lambda x: group_fmaps(x[0]))
    # Split fmaps based on type
    groupings = groupby(pair_list, lambda x: group_fmaps(x[0]))
    series_list = process_intended_fors(groupings, non_fmaps)
    return series_list
def make_bids_template(bids_dir, subject, session):
    """
    Set up folders for making BIDS directory

    Arguments:
        bids_dir            Project-level directory to create BIDS
                            layout in
        subject             BIDS subject ID
        session             BIDS session ID

    Return:
        Path to the BIDS subject-session specific directory
    """
    subject_dir = os.path.join(bids_dir, subject)
    make_directory(subject_dir)
    session_dir = os.path.join(subject_dir, session)
    make_directory(session_dir)
    return session_dir
def make_dataset_description(bids_dir, study_name, version):
    """
    Make boilerplate dataset_description.json file, creating bids_dir
    first and never overwriting an existing description file.
    """
    make_directory(bids_dir)
    # Should be separate functionality
    description_path = os.path.join(bids_dir, "dataset_description.json")
    if os.path.isfile(description_path):
        return
    with open(description_path, "w") as f:
        json.dump({
            "Name": study_name,
            "BIDSVersion": version
        }, f, indent=3)
    return
def prioritize_scans(series_list):
    """
    Given a list of scans apply prioritization heuristics
    based on spec key "over"

    A scan with spec {"over": {"label": L, "value": V}} causes any other
    scan whose spec value at key L equals V to be dropped from the result.
    """
    to_filt = set()
    for s in series_list:
        try:
            label = s.get_spec("over", "label")
            on = s.get_spec("over", "value")
        except KeyError:
            # Scan declares no prioritization rule; nothing to do.
            continue
        # If prioritization spec found,
        # then look for it in other scans to replace
        for f in [k for k in series_list if k != s]:
            try:
                f_label = f.get_spec(label)
            except KeyError:
                continue
            if f_label == on:
                logger.info("{priority} is prioritized over \
                    {scan}, not copying {scan}".format(priority=s, scan=f))
                to_filt.add(f)
    # Remove object in filt list from series_list
    return [f for f in series_list if f not in to_filt]
def process_subject(subject, cfg, be, bids_dir, rewrite):
    """
    Convert subject in DATMAN folder to BIDS-style

    Args:
        subject: DATMAN subject ID string.
        cfg: datman config object.
        be: BIDSEnforcer used to construct BIDS names.
        bids_dir: Root of the output BIDS dataset.
        rewrite: When True, overwrite already-existing BIDS outputs.
    """
    ident = scanid.parse(subject)
    subscan = scan.Scan(subject, cfg)
    bids_sub = ident.get_bids_name()
    bids_ses = ident.timepoint
    exp_path = make_bids_template(bids_dir, "sub-" + bids_sub,
                                  "ses-" + bids_ses)
    dm_to_bids = []
    if dashboard.dash_found:
        db_subject = dashboard.get_subject(subject)
        db_subject.add_bids(bids_sub, bids_ses)
    # Construct initial BIDS transformation info
    scan_list = list(sort_by_series(subscan.niftis))
    for i, series in enumerate(scan_list):
        # Construct bids name
        logger.info("Processing {}".format(series))
        bids_dict = get_tag_bids_spec(cfg, series.tag, series.site)
        if not bids_dict:
            continue
        bids_dict.update({"sub": bids_sub, "ses": bids_ses})
        # Deal with reference scans
        if bids_dict.get('is_ref', False):
            # NOTE(review): assumes a reference scan is never last in
            # scan_list — scan_list[i + 1] raises IndexError otherwise.
            target_dict = get_tag_bids_spec(cfg, scan_list[i + 1].tag,
                                            series.site)
            bids_dict.update({'task': target_dict['task']})
        bids_prefix = be.construct_bids_name(bids_dict)
        class_path = os.path.join(exp_path, bids_dict["class"])
        # Make dm2bids transformation file, update source if applicable
        bidsfiles = BIDSFile(bids_sub, bids_ses, series, class_path,
                             bids_prefix, bids_dict).update_source(cfg, be)
        if bidsfiles is None:
            logger.error("Cannot find derivative of {}".format(series))
            logger.warning("Skipping!")
            continue
        # update_source returns either a single BIDSFile or a list of them.
        if isinstance(bidsfiles, list):
            dm_to_bids.extend(bidsfiles)
        else:
            dm_to_bids.append(bidsfiles)
    # Apply prioritization calls
    dm_to_bids = prioritize_scans(dm_to_bids)
    # Prepare fieldmap information (requires knowledge about all scans)
    dm_to_bids = prepare_fieldmaps(dm_to_bids)
    # Transfer files over
    for k in dm_to_bids:
        if os.path.exists(k.dest_nii) and not rewrite:
            logger.info("Output file {} already exists!".format(k.dest_nii))
            continue
        k.transfer_files()
        if dashboard.dash_found:
            db_series = dashboard.get_scan(k.series.path)
            db_series.add_bids(str(k))
    return
def main():
    """Parse CLI arguments (docopt on the module docstring) and convert the
    requested subjects' nii folders into BIDS format."""
    arguments = docopt(__doc__)
    study = arguments["<study>"]
    cfg = config.config(study=study)
    subjects = arguments["--subject"]
    # Fall back to configured/default paths when options are omitted.
    bids_dir = arguments["--bids-dir"] or cfg.get_path("bids")
    yml = arguments["--yaml"] or YAML
    rewrite = arguments["--rewrite"]
    debug = arguments["--debug"]
    # Read by match_fmaps()'s handle_incomplete helper.
    global ALLOW_INCOMPLETE
    ALLOW_INCOMPLETE = arguments["--allow-incomplete"]
    be = BIDSEnforcer(yml)
    if debug:
        logger.setLevel(logging.DEBUG)
    make_dataset_description(bids_dir, study, be.version)
    if not subjects:
        # No explicit subjects: process every entry in the nii directory.
        subjects = os.listdir(cfg.get_path("nii"))
    for s in subjects:
        if "PHA" in s:
            logger.info("{} is a Phantom scan - skipping...".format(s))
            continue
        logger.info("Processing: {}".format(s))
        process_subject(s, cfg, be, bids_dir, rewrite)
# Run the converter only when executed as a script, not on import.
if __name__ == "__main__":
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.