content stringlengths 5 1.05M |
|---|
from typing import List, Tuple, Union
import numpy as np
def beta_posteriors_all(
    totals: List[int],
    positives: List[int],
    sim_count: int,
    a_priors_beta: List[Union[float, int]],
    b_priors_beta: List[Union[float, int]],
    seed: Union[int, np.random.bit_generator.SeedSequence] = None,
) -> np.ndarray:
    """
    Sample every variant's Beta posterior in a single pass.

    Parameters
    ----------
    totals : Number of observations (e.g. sessions) per variant.
    positives : Number of successes (e.g. conversions) per variant.
    sim_count : Number of posterior draws per variant.
    a_priors_beta : Prior alpha of the Beta distribution per variant.
    b_priors_beta : Prior beta of the Beta distribution per variant.
    seed : Random seed.

    Returns
    -------
    beta_samples : Array of shape (n_variants, sim_count) with posterior draws.
    """
    rng = np.random.default_rng(seed)
    # Posterior of Beta(a, b) after observing `pos` successes out of `tot`
    # trials is Beta(pos + a, tot - pos + b).
    all_samples = [
        rng.beta(pos + a, tot - pos + b, sim_count)
        for tot, pos, a, b in zip(totals, positives, a_priors_beta, b_priors_beta)
    ]
    return np.array(all_samples)
def normal_posteriors(
    total: int,
    sums: float,
    sums_2: float,
    sim_count: int = 20000,
    prior_m: Union[float, int] = 1,
    prior_a: Union[float, int] = 0,
    prior_b: Union[float, int] = 0,
    prior_w: Union[float, int] = 0.01,
    seed: Union[int, np.random.bit_generator.SeedSequence] = None,
) -> Tuple[List[Union[float, int]], List[Union[float, int]]]:
    """
    Sample (mu, sigma^2) pairs from the Normal-Inverse-Gamma posterior of
    normally distributed data supplied in aggregated form.

    Parameters
    ----------
    total : Number of observations.
    sums : Sum of the original data.
    sums_2 : Sum of squares of the original data.
    sim_count : Number of posterior draws.
    prior_m : Prior mean.
    prior_a : Prior alpha of the inverse-gamma prior on the variance.
        In theory a > 0, but with at least one observation 0 is usable.
    prior_b : Prior beta of the inverse-gamma prior on the variance.
        In theory b > 0, but with at least one observation 0 is usable.
    prior_w : Prior effective sample size.
    seed : Random seed.

    Returns
    -------
    mu_post : sim_count draws of the posterior mean.
    sig_2_post : sim_count draws of the posterior variance.
    """
    rng = np.random.default_rng(seed)
    sample_mean = sums / total
    posterior_alpha = prior_a + (total / 2)
    posterior_beta = (
        prior_b
        + (1 / 2) * (sums_2 - 2 * sums * sample_mean + total * (sample_mean ** 2))
        + ((total * prior_w) / (2 * (total + prior_w))) * ((sample_mean - prior_m) ** 2)
    )
    posterior_mean = (total * sample_mean + prior_w * prior_m) / (total + prior_w)
    # rng.gamma takes a scale parameter, so pass 1/beta (not the rate beta).
    sig_2_post = 1 / rng.gamma(posterior_alpha, 1 / posterior_beta, sim_count)
    mu_post = rng.normal(posterior_mean, np.sqrt(sig_2_post / (total + prior_w)))
    return mu_post, sig_2_post
def lognormal_posteriors(
    total: int,
    sum_logs: float,
    sum_logs_2: float,
    sim_count: int = 20000,
    prior_m: Union[float, int] = 1,
    prior_a: Union[float, int] = 0,
    prior_b: Union[float, int] = 0,
    prior_w: Union[float, int] = 0.01,
    seed: Union[int, np.random.bit_generator.SeedSequence] = None,
) -> List[float]:
    """
    Simulate means of a lognormal distribution from aggregated log-data.

    Logarithms of lognormal data are normal, so the normal posterior is
    sampled first and every (mu, sigma^2) draw is mapped to the lognormal
    mean exp(mu + sigma^2 / 2).

    Parameters
    ----------
    total : Number of lognormal observations (e.g. conversions in sessions).
    sum_logs : Sum of logarithms of the original data.
    sum_logs_2 : Sum of squared logarithms of the original data.
    sim_count : Number of posterior draws.
    prior_m : Prior mean of the log-data.
    prior_a : Prior alpha of the inverse-gamma prior on the log-data variance.
    prior_b : Prior beta of the inverse-gamma prior on the log-data variance.
    prior_w : Prior effective sample size.
    seed : Random seed.

    Returns
    -------
    res : sim_count draws of the lognormal mean (all zeros when total <= 0).
    """
    # No observations: nothing to sample from, return a neutral result.
    if total <= 0:
        return list(np.zeros(sim_count))
    mus, sigmas_2 = normal_posteriors(
        total, sum_logs, sum_logs_2, sim_count, prior_m, prior_a, prior_b, prior_w, seed
    )
    # Mean of LogNormal(mu, sigma^2) is exp(mu + sigma^2 / 2).
    return np.exp(mus + (sigmas_2 / 2))
def dirichlet_posteriors(
    concentration: List[int],
    prior: List[Union[float, int]],
    sim_count: int = 20000,
    seed: Union[int, np.random.bit_generator.SeedSequence] = None,
) -> np.ndarray:
    """
    Sample the Dirichlet posterior of a single variant.

    Parameters
    ----------
    concentration : Observation counts per category
        (e.g. counts per face in a dice experiment).
    prior : Prior concentration per category.
    sim_count : Number of posterior draws.
    seed : Random seed.

    Returns
    -------
    res : Array of shape (sim_count, n_categories) with posterior draws.
    """
    rng = np.random.default_rng(seed)
    # Dirichlet is conjugate to the multinomial: posterior concentration is
    # the elementwise sum of prior and observed counts.
    posterior_concentration = [p + c for p, c in zip(prior, concentration)]
    return rng.dirichlet(posterior_concentration, sim_count)
|
# -*- coding: utf-8 -*- #
"""
Created on Thu Jun 8 14:00:08 2017
@author: grizolli
"""
# 3-D scatter demo: plot two random point clouds, then sweep the camera and
# save one PNG per angle (movie000.png, movie001.png, ...) for a movie.
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# First cloud: 50 uniform random points in the unit cube.
xx=np.random.rand(50)
yy=np.random.rand(50)
zz=np.random.rand(50)
ax.scatter(xx,yy,zz, marker='o', s=20, c="goldenrod", alpha=0.6)
# Second cloud, distinguished by colour and marker.
xx=np.random.rand(50)
yy=np.random.rand(50)
zz=np.random.rand(50)
ax.scatter(xx,yy,zz, c='b', marker='d', s=20, alpha=0.6)
ax.legend(['1', '2'])
ii_fname = 0
# Sweep ii over one full period (0 to 2*pi) in 201 steps.
for ii in 2*np.pi*np.linspace(0, 1, 201):
    # NOTE(review): ii is in radians although the on-figure label says
    # "deg" — confirm which unit was intended.
    text = ax.text2D(0.05, 0.95, str('{:.2}deg'.format(ii)),
                     transform=ax.transAxes)
    # Oscillate elevation and azimuth for a wobbling camera path.
    ax.view_init(elev= 0 + 40*np.sin(2*ii), azim=90*np.sin(ii))
    plt.savefig("movie{:03d}.png".format(ii_fname))
    ii_fname += 1
    # Remove the frame label so the next frame's label does not overlap it.
    text.remove()
plt.show()
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.db import models
from django.core import urlresolvers
from django.contrib.auth.models import User
from desktop.lib.djangothrift import ThriftField
from desktop.lib.thrift_util import simpler_string
from jobsub.server_models import *
import jobsubd.ttypes
from jobsubd.ttypes import SubmissionHandle
class TSubmissionPlan(jobsubd.ttypes.SubmissionPlan):
    """Wrapped submission class with simpler stringification."""
    def __str__(self):
        # Delegate to desktop.lib.thrift_util.simpler_string for a compact,
        # human-readable rendering of the Thrift struct.
        return simpler_string(self)
class JobDesign(models.Model):
    """
    Contains CMS information for "job designs".
    """
    # The user who created (and owns) the design.
    owner = models.ForeignKey(User)
    name = models.CharField(max_length=40)
    description = models.CharField(max_length=1024)
    # Refreshed automatically on every save.
    last_modified = models.DateTimeField(auto_now=True)
    # Type corresponds to a JobSubForm that gets registered in jobsub.forms.interface.registry
    type = models.CharField(max_length=128)
    # Data is serialized via JobSubFormInterface.serialize_[to|from]_string
    data = models.CharField(max_length=4096)
    def edit_url(self):
        # URLs are reversed by view name so the URL layout lives in urls.py only.
        return urlresolvers.reverse("jobsub.views.edit_design", kwargs=dict(id=self.id))
    def clone_url(self):
        return urlresolvers.reverse("jobsub.views.clone_design", kwargs=dict(id=self.id))
    def delete_url(self):
        return urlresolvers.reverse("jobsub.views.delete_design", kwargs=dict(id=self.id))
    def submit_url(self):
        return urlresolvers.reverse("jobsub.views.submit_design", kwargs=dict(id=self.id))
    def clone(self):
        """Insert and return a copy of this design (new primary key)."""
        # Copy every concrete field except the primary key.
        clone_kwargs = dict([(field.name, getattr(self, field.name)) for field in self._meta.fields if field.name != 'id']);
        return self.__class__.objects.create(**clone_kwargs)
    def to_jsonable(self):
        """Return a plain dict representation suitable for JSON output."""
        return {
            'owner': self.owner.username,
            'name': self.name,
            'last_modified': str(self.last_modified),
            'type': self.type,
            'data': repr(self.data)
        }
class Submission(models.Model):
    """
    Holds information on submissions from the web app to the daemon.
    The daemon should not update this directly.
    """
    owner = models.ForeignKey(User)
    # Set automatically when the row is first inserted.
    submission_date = models.DateTimeField(auto_now_add=True)
    name = models.CharField(max_length=40, editable=False)
    # Thrift structs persisted via ThriftField serialization.
    submission_plan = ThriftField(TSubmissionPlan, editable=False)
    submission_handle = ThriftField(SubmissionHandle)
    # Indexed so submissions can be filtered by state efficiently.
    last_seen_state = models.IntegerField(db_index=True)
    def last_seen_state_as_string(self):
        # Map the stored integer back to its Thrift enum name (None if unknown).
        return jobsubd.ttypes.State._VALUES_TO_NAMES.get(self.last_seen_state)
    def watch_url(self):
        return urlresolvers.reverse("jobsub.views.watch_submission", kwargs=dict(id=self.id))
class CheckForSetup(models.Model):
    """
    A model which should have at most one row, indicating
    whether jobsub_setup has run successfully.
    """
    # True once the jobsub_setup command has completed.
    setup_run = models.BooleanField()
|
# coding UTF-8
from sklearn import datasets
from sklearn import svm
from sklearn import metrics
import matplotlib.pyplot as plt
# Load the handwritten-digit dataset.
digits = datasets.load_digits()
# Inspect the data layout if needed.
# print (digits.data)
# print (digits.data.shape)
# Number of samples.
n = len(digits.data)
# Display images with their ground-truth labels (disabled).
# images = digits.images
# labels = digits.target
# for i in range(10):
#     plt.subplot(2, 5, i + 1)
#     plt.imshow(images[i], cmap=plt.cm.gray_r, interpolation="nearest")
#     plt.axis("off")
#     plt.title("Training: " + str(labels[i]))
# plt.show()
# Support vector machine classifier.
clf = svm.SVC(gamma=0.001, C=100.0)
# Train on the first 60% of the data (the remaining 40% is held out).
clf.fit(digits.data[:n*6//10 ], digits.target[:n*6//10])
# Sanity-check on the last 10 samples.
# Ground truth (negative indices count from the end).
# print (digits.target[-10:])
# Predicted digits.
# print (clf.predict(digits.data[-10:]))
# Evaluate on the held-out 40% of the images.
# Ground truth.
expected = digits.target[-n*4//10:]
# Predictions.
predicted = clf.predict(digits.data[-n*4//10:])
# Per-class precision/recall/F1 report.
print (metrics.classification_report(expected, predicted))
# Confusion matrix of misclassifications.
print(metrics.confusion_matrix(expected, predicted))
# Show a few test images with their predicted labels.
images = digits.images[-n*4//10:]
for i in range(12):
    plt.subplot(3, 4, i + 1)
    plt.axis("off")
    plt.imshow(images[i], cmap=plt.cm.gray_r, interpolation="nearest")
    plt.title("Guess: " + str(predicted[i]))
plt.show()
|
import matplotlib
from matplotlib.patches import Rectangle, Patch
import numpy as np
import matplotlib.pyplot as plt
import csv
import cv2
import os
from matplotlib.colors import LinearSegmentedColormap
import scipy.stats as stats
import seaborn as sns
from matplotlib import cm
from scipy.optimize import linear_sum_assignment
from scipy.spatial.distance import cdist
import subprocess
def filenames_from_folder(folder, filename_starts_with = None, filename_contains = None, filename_ends_with = None, filename_does_not_contain = None):
    '''
    Return the (folder-prefixed) paths of the files inside a folder.

    Optional filters restrict the result by what the file's basename starts
    with, ends with, contains (string or list of required substrings), or
    must not contain (string or list of forbidden substrings).

    Fixes over the original:
    - paths are built with os.path.join instead of a hard-coded "\\",
      so results are valid on every OS;
    - filters are applied to the basename, not the folder-prefixed path
      (startswith could previously never match, and a folder whose name
      contained a forbidden substring excluded every file).
    '''
    # Filter on basenames; the folder prefix is added once at the end.
    basenames = [os.path.basename(name) for name in os.listdir(folder)]
    if filename_starts_with is not None:
        basenames = [name for name in basenames if name.startswith(filename_starts_with)]
    if filename_contains is not None:
        # Accept either a single substring or a list of required substrings.
        required = filename_contains if isinstance(filename_contains, list) else [filename_contains]
        for item in required:
            basenames = [name for name in basenames if item in name]
    if filename_ends_with is not None:
        basenames = [name for name in basenames if name.endswith(filename_ends_with)]
    if filename_does_not_contain is not None:
        excluded = filename_does_not_contain if isinstance(filename_does_not_contain, list) else [filename_does_not_contain]
        for item in excluded:
            basenames = [name for name in basenames if item not in name]
    return [os.path.join(folder, name) for name in basenames]
def convert_video(video_filename):
    """Re-encode *video_filename* to an MJPEG AVI named "<stem>_converted.avi".

    Returns ffmpeg's exit code (0 on success). Requires ffmpeg on the PATH.
    """
    output_filename = f"{video_filename[:-4]}_converted.avi"
    # Pass an argument list instead of one shell string: a plain string
    # without shell=True fails on POSIX, and the list form also avoids any
    # quoting/shell-injection problems with unusual filenames.
    ffmpeg_command = ["ffmpeg", "-y", "-i", video_filename, "-vcodec", "mjpeg", output_filename]
    return subprocess.call(ffmpeg_command)
def extract_tracking_points_from_csv(csv_file):
    """Parse per-fish tracking points from a tracker-produced CSV.

    Each non-empty cell is expected to hold one fish's points serialized as
    a string of "(x, y)" tuples, e.g. "('(x1, y1)', '(x2, y2)', ...)".
    Returns an array indexed as [time][fish][point][x/y].
    """
    with open(csv_file) as f:
        # Drop empty cells (frames can have fewer detections than columns).
        tracking_points = [[column for column in row if len(column) > 0] for row in csv.reader(f)]
    # fish_data[3:-3] strips the "('(" prefix and ")')" suffix; the rest is
    # split on ")', '(" to recover each "x, y" pair, then on ", " for floats.
    tracking_points = np.array([np.array([np.array([point.split(', ') for point in fish_data[3:-3].split(")', '(")]).astype(float) for fish_data in data_point]) for data_point in tracking_points])
    return tracking_points
def extract_tail_curvature_from_csv(csv_file):
    """Parse per-fish tail-curvature traces from *csv_file*.

    Each non-empty cell holds one fish's curvature values as a bracketed,
    comma-separated list (e.g. "[1.0, 2.0]"). Returns an array indexed as
    [time][fish][segment].
    """
    with open(csv_file) as f:
        # Keep only non-empty cells of every row.
        rows = [[cell for cell in row if cell] for row in csv.reader(f)]
    parsed = []
    for row in rows:
        # Strip the surrounding brackets, split the values, convert to float.
        parsed.append(np.array([cell[1:-1].split(", ") for cell in row]).astype(float))
    return np.array(parsed)
def reorder_data_for_identities_tail_curvature(tracking_data, tail_curvature, n_fish, n_tracking_points):
    """Propagate consistent fish identities through time for both the
    tracking points and the tail-curvature traces.

    Each frame i+1 is matched against the already-reordered frame i, and
    both arrays are re-indexed with the resulting assignment, so fish keep
    a stable row index across the whole recording.
    """
    new_tracking_data = np.zeros((len(tracking_data), n_fish, n_tracking_points, 2))
    new_tail_curvature = np.zeros((len(tail_curvature), n_fish, n_tracking_points - 1))
    for i in range(len(tracking_data) - 1):
        if i == 0:
            # The first frame defines the reference identities.
            first_tracking_data_arr = tracking_data[0]
            new_tracking_data[0] = tracking_data[0]
            new_tail_curvature[0] = tail_curvature[0]
        second_tracking_data_arr = tracking_data[i + 1]
        tail_curvature_arr = tail_curvature[i + 1]
        new_tracking_data_arr, new_tail_curvature_arr, new_order = find_identities_tail_curvature(first_tracking_data_arr, second_tracking_data_arr, tail_curvature_arr, n_fish, n_tracking_points)
        # new_order[1] holds, for each reference fish, the matching row in
        # the (possibly padded) current frame.
        new_tracking_data[i + 1] = new_tracking_data_arr[new_order[1]]
        new_tail_curvature[i + 1] = new_tail_curvature_arr[new_order[1]]
        # The reordered frame becomes the reference for the next iteration.
        first_tracking_data_arr = new_tracking_data[i + 1]
    return [new_tracking_data, new_tail_curvature]
def find_identities_tail_curvature(first_tracking_data_arr, second_tracking_data_arr, tail_curvature_arr, n_fish, n_tracking_points):
    """Match fish between two consecutive frames (points + tail curvature).

    Uses the Hungarian algorithm on head-point distances. When the current
    frame has fewer detections than n_fish (e.g. two fish merged into one
    detection), the detection closest to the unmatched fish is duplicated
    and the matching is redone recursively until every fish has a row.
    """
    # Pairwise distances between head points (index 0 of each fish's points).
    cost = cdist(first_tracking_data_arr[:, 0], second_tracking_data_arr[:, 0])
    result = linear_sum_assignment(cost)
    if second_tracking_data_arr.shape[0] < n_fish:
        # Reference fish that received no assignment in this frame.
        missed_index = [i for i in range(len(first_tracking_data_arr)) if i not in result[0]][0]
        # Closest detection to the unmatched fish — assumed to be the merge.
        merged_index = np.where(cost[missed_index] == np.min(cost[missed_index]))[0][0]
        second_tracking_data_arr = np.append(second_tracking_data_arr, second_tracking_data_arr[merged_index]).reshape(-1, n_tracking_points, 2)
        tail_curvature_arr = np.append(tail_curvature_arr, tail_curvature_arr[merged_index]).reshape(-1, n_tracking_points - 1)
        second_tracking_data_arr, tail_curvature_arr, result = find_identities_tail_curvature(first_tracking_data_arr, second_tracking_data_arr, tail_curvature_arr, n_fish, n_tracking_points)
    return second_tracking_data_arr, tail_curvature_arr, result
def reorder_data_for_identities_tail_points(tracking_data, n_fish, n_tracking_points, start_index = 0):
    """Propagate consistent fish identities through time (tracking points only).

    Each frame i+1 is matched against the already-reordered frame i and
    re-indexed with the resulting assignment.
    """
    new_tracking_data = np.zeros((len(tracking_data), n_fish, n_tracking_points, 2))
    for i in range(len(tracking_data) - 1):
        if i == 0:
            # NOTE(review): start_index only selects the reference frame; the
            # loop still walks frames 1..N-1 from the start — confirm the
            # intended behaviour for start_index != 0.
            first_tracking_data_arr = tracking_data[start_index]
            new_tracking_data[0] = tracking_data[start_index]
        second_tracking_data_arr = tracking_data[i + 1]
        new_tracking_data_arr, new_order = find_identities_tail_points(first_tracking_data_arr, second_tracking_data_arr, n_fish, n_tracking_points)
        # new_order[1]: matching row in the current frame for each fish.
        new_tracking_data[i + 1] = new_tracking_data_arr[new_order[1]]
        first_tracking_data_arr = new_tracking_data[i + 1]
    return new_tracking_data
def find_identities_tail_points(first_tracking_data_arr, second_tracking_data_arr, n_fish, n_tracking_points):
    """Match fish between two consecutive frames by head-point distance.

    Runs the Hungarian algorithm on distances between head points. If the
    current frame has fewer detections than n_fish (two fish merged into one
    detection), the detection nearest the unmatched fish is duplicated and
    the matching is redone recursively until every fish has a row.

    Returns the (possibly padded) second-frame array and the assignment.
    """
    head_distances = cdist(first_tracking_data_arr[:, 0], second_tracking_data_arr[:, 0])
    assignment = linear_sum_assignment(head_distances)
    if second_tracking_data_arr.shape[0] < n_fish:
        # Reference fish that received no match in this frame.
        matched_rows = set(assignment[0])
        missing_row = next(i for i in range(len(first_tracking_data_arr)) if i not in matched_rows)
        # The closest detection is assumed to contain the merged fish.
        nearest_detection = int(np.argmin(head_distances[missing_row]))
        second_tracking_data_arr = np.append(
            second_tracking_data_arr, second_tracking_data_arr[nearest_detection]
        ).reshape(-1, n_tracking_points, 2)
        return find_identities_tail_points(first_tracking_data_arr, second_tracking_data_arr, n_fish, n_tracking_points)
    return second_tracking_data_arr, assignment
def find_tracking_errors(tracking_data, window = None):
    """Flag frames in which two or more fish share the same head coordinates.

    With window=None, only the offending frame is flagged per fish; with a
    window, a window-length span of frames around each offending frame is
    flagged (the scan skips half a window at each end of the recording).

    Returns a (time, fish) array of 0/1 flags.
    """
    def _duplicated_fish(fish_data):
        # Indices of fish whose head position occurs more than once.
        values, counts = np.unique(fish_data[:, 0], axis=0, return_counts=True)
        repeated = values[counts > 1]
        return [
            fish_index
            for fish_index, head in enumerate(fish_data[:, 0])
            if any(np.array_equal(head, dupe) for dupe in repeated)
        ]

    tracking_errors = np.zeros((tracking_data.shape[:2]))
    if window is None:
        for time_index, fish_data in enumerate(tracking_data):
            for fish_index in _duplicated_fish(fish_data):
                tracking_errors[time_index, fish_index] = 1
    else:
        half = int(window / 2)
        for time_index, fish_data in enumerate(tracking_data[half:-half]):
            for fish_index in _duplicated_fish(fish_data):
                # time_index is the slice offset, so this span is a window
                # centred on the flagged frame in the original indexing.
                tracking_errors[time_index : time_index + int(window), fish_index] = 1
    return tracking_errors
def remove_tracking_errors_from_tail_curvature(tail_curvature, tracking_errors):
    """Zero out the tail-curvature samples flagged as tracking errors.

    tracking_errors is a (time, fish) 0/1 array; every segment of a flagged
    (time, fish) pair is set to 0. The input array is left untouched.
    """
    # Broadcast the (time, fish) flag over the segment axis.
    flagged = (tracking_errors == 1)[..., np.newaxis]
    return np.where(flagged, 0, tail_curvature)
def load_tracking_data(folder, prefix, n_fish, n_tracking_points):
    """Load the tracking-points and tail-curvature CSVs for *prefix* from
    *folder* and reorder both so fish identities are consistent over time.

    Returns (tracking_data, tail_curvature) as identity-ordered arrays.
    """
    tracking_data_csv = filenames_from_folder(folder, filename_contains = [prefix, "tracking-results"], filename_ends_with = ".csv")[0]
    tail_curvature_csv = filenames_from_folder(folder, filename_contains = [prefix, "tail-curvature"], filename_ends_with = ".csv")[0]
    tracking_data = extract_tracking_points_from_csv(tracking_data_csv)
    tail_curvature = extract_tail_curvature_from_csv(tail_curvature_csv)
    # Fix: this module defines reorder_data_for_identities_tail_curvature;
    # the original called a non-existent reorder_data_for_identities, which
    # raised NameError on every invocation.
    tracking_data, tail_curvature = reorder_data_for_identities_tail_curvature(tracking_data, tail_curvature, n_fish, n_tracking_points)
    return tracking_data, tail_curvature
def load_image(folder, prefix, example_index):
    """Grab frame *example_index* from the first .avi matching *prefix*.

    Returns the frame as returned by cv2 (or None if the read failed).
    """
    video_path = filenames_from_folder(folder, filename_contains = [prefix, "video"], filename_ends_with = ".avi")[0]
    cap = cv2.VideoCapture(video_path)
    # Seek directly to the requested frame before reading.
    cap.set(cv2.CAP_PROP_POS_FRAMES, example_index)
    image = cap.read()[1]
    cap.release()
    return image
def plot_image_with_tracking_overlay(tracking_data, image, save_path = None, example_index = 0, index_factor = 1):
    """Show one video frame with each fish's head circled and labelled.

    index_factor converts example_index (a frame number) into an index into
    tracking_data when the two are sampled at different rates.
    """
    # One distinct hue per fish, from the cyclic "hsv" colormap.
    tracking_colours = np.array([cm.get_cmap("hsv")(plt.Normalize(0, tracking_data.shape[1])(i)) for i in range(tracking_data.shape[1])])
    new_image = image.copy()
    for fish_index, fish_tracking_points in enumerate(tracking_data[int(example_index * index_factor)]):
        print("Fish {0} - Colour {1}".format(fish_index + 1, tracking_colours[fish_index]*255))
        # Circle and label the head point (point 0) of each fish.
        cv2.circle(new_image, (int(float(fish_tracking_points[0, 0])), int(float(fish_tracking_points[0,1]))), 3, tracking_colours[fish_index] * 255, 1, cv2.LINE_AA)
        cv2.putText(new_image, "Fish {0}".format(fish_index + 1), (int(float(fish_tracking_points[0, 0])) + 10, int(float(fish_tracking_points[0,1])) + 10), cv2.FONT_HERSHEY_SIMPLEX, 1, tracking_colours[fish_index] * 255)
    fig = plt.figure(figsize = (10, 10), dpi = 300, constrained_layout = False)
    im_ax = fig.add_subplot(1, 1, 1)
    im_ax.imshow(new_image, aspect = "equal")
    im_ax.set_axis_off()
    if save_path is not None:
        plt.savefig(save_path)
    plt.show()
def calculate_paths(tracking_data, paths, linewidth):
    """Draw each fish's head trajectory into its own image in *paths*.

    Segment intensity encodes time: the value written for a segment is its
    frame index scaled to 0-255, so later segments are brighter.
    """
    for time_index in range(tracking_data.shape[0] - 1):
        for fish_index in range(tracking_data[time_index].shape[0]):
            # Head point (point 0) in this frame and the next.
            point1 = (int(tracking_data[time_index, fish_index, 0, 0]), int(tracking_data[time_index, fish_index, 0, 1]))
            point2 = (int(tracking_data[time_index + 1, fish_index, 0, 0]), int(tracking_data[time_index + 1, fish_index, 0, 1]))
            # Skip zero-length segments so stationary frames draw nothing.
            if point1 != point2:
                paths[fish_index] = cv2.line(paths[fish_index], point1, point2, (time_index + 1) / tracking_data.shape[0] * 255, linewidth)
    return paths
def plot_paths(tracking_data, image, linewidth = 1, save_path = None):
    """Overlay every fish's swim path on one figure, one transparent axes per
    fish, with segment brightness encoding time (see calculate_paths).
    """
    tracking_colours = np.array([cm.get_cmap("hsv")(plt.Normalize(0, tracking_data.shape[1])(i)) for i in range(tracking_data.shape[1])])
    # Per-fish colormap running light -> base colour -> dark, so the time
    # encoding from calculate_paths reads as a brightness gradient.
    path_colours = [LinearSegmentedColormap.from_list("cmap_{0}".format(fish_index + 1), [[np.min([1, tracking_colour[0] * 1.6 + 0.8]), np.min([1, tracking_colour[1] * 1.6 + 0.8]), np.min([1, tracking_colour[2] * 1.6 + 0.8]), 1], tracking_colour, [np.max([0, tracking_colour[0] * 0.6 - 0.2]), np.max([0, tracking_colour[1] * 0.6 - 0.2]), np.max([0, tracking_colour[2] * 0.6 - 0.2]), 1]]) for fish_index, tracking_colour in enumerate(tracking_colours)]
    # Values below vmin render fully transparent, hiding unpainted pixels.
    [path_colour.set_under(color = [1, 1, 1, 0]) for path_colour in path_colours]
    paths = np.zeros((tracking_colours.shape[0], image.shape[0], image.shape[1]))
    paths = calculate_paths(tracking_data, paths, linewidth)
    fig = plt.figure(figsize = (10, 10), dpi = 300, constrained_layout = False)
    # Stack one axes per fish at the same position (distinct labels keep
    # matplotlib from reusing a single axes).
    path_axes = [fig.add_subplot(1, 1, 1, label = "{0}".format(index)) for index, path in enumerate(paths)]
    [path_ax.imshow(path, cmap = path_colour, origin = "upper", vmin = 0.000000000001, vmax = 255, aspect = "equal") for path_ax, path, path_colour in zip(path_axes, paths, path_colours)]
    [path_ax.set_facecolor([1, 1, 1, 0]) for path_ax in path_axes]
    [path_ax.set_axis_off() for path_ax in path_axes]
    if save_path is not None:
        plt.savefig(save_path)
    plt.show()
def plot_colorbar(tracking_data, save_path = None):
    """Render a vertical grayscale bar (0-255 over the recording length) to
    accompany the time-encoded path plots."""
    # One gradient step per frame, as a single-column image.
    colorbar_data = np.linspace(0, 255, tracking_data.shape[0])[:, np.newaxis]
    fig = plt.figure(figsize = (0.5, 10), dpi = 300, constrained_layout = False)
    colorbar_ax = fig.add_subplot(1, 1, 1)
    colorbar_ax.imshow(colorbar_data, cmap = "gray", aspect = "auto")
    colorbar_ax.set_axis_off()
    if save_path is not None:
        plt.savefig(save_path)
    plt.show()
def plot_tail_curvature(tail_curvature, save_path = None, imaging_FPS = 332, tracking_errors = None):
    """Plot each fish's distal tail curvature as stacked, overlapping traces.

    Parameters
    ----------
    tail_curvature : Array indexed as [time][fish][segment].
    save_path : Optional path to save the figure to before showing it.
    imaging_FPS : Acquisition frame rate used to convert frames to seconds.
    tracking_errors : Optional per-fish 0/1 mask; flagged spans are shaded
        red. Assumes tracking_errors[fish] aligns with the time axis —
        TODO confirm against the caller.
    """
    fig = plt.figure(figsize = (30, 5), dpi = 300, constrained_layout = False)
    # Negative hspace makes the per-fish subplots overlap slightly.
    gs = fig.add_gridspec(ncols = 1, nrows = tail_curvature.shape[1], hspace = -0.3)
    tc_axes = [fig.add_subplot(gs[i, 0]) for i in range(tail_curvature.shape[1])]
    x_vals = np.linspace(0, tail_curvature.shape[0] / imaging_FPS, tail_curvature.shape[0])
    y_vals_baseline = np.zeros((x_vals.shape[0]))
    # Fix: the fish count must come from tail_curvature — the original read
    # `tracking_data.shape[1]`, a name not defined in this function, which
    # raised NameError on every call.
    tracking_colours = np.array([cm.get_cmap("hsv")(plt.Normalize(0, tail_curvature.shape[1])(i)) for i in range(tail_curvature.shape[1])])
    for tc_index, tc_ax in enumerate(tc_axes):
        # Mean curvature over the last three tail segments, plus a dashed
        # zero baseline for reference.
        tc_ax.plot(x_vals, np.mean(tail_curvature[:, tc_index, -3:], axis = 1), color = tracking_colours[tc_index], linewidth = 1, rasterized = True)
        tc_ax.plot(x_vals, y_vals_baseline, color = tracking_colours[tc_index], linewidth = 1, rasterized = True, alpha = 0.5, ls = "--")
        tc_ax.set_ylim(-150, 150)
        tc_ax.set_xlim(-0.01, x_vals[-1])
        tc_ax.spines['top'].set_visible(False)
        tc_ax.spines['right'].set_visible(False)
        tc_ax.set_yticks([])
        tc_ax.set_xticks([])
        tc_ax.set_facecolor([1, 1, 1, 0])
        if tc_index == len(tc_axes) - 1:
            # Only the bottom axes keeps spine segments as scale bars.
            tc_ax.spines['bottom'].set_bounds(0, 1)
            tc_ax.spines['bottom'].set_linewidth(5)
            tc_ax.spines['left'].set_bounds(0, 100)
            tc_ax.spines['left'].set_linewidth(5)
        else:
            tc_ax.spines['bottom'].set_visible(False)
            tc_ax.spines['left'].set_visible(False)
        if tracking_errors is not None:
            # Shade spans flagged as tracking errors in red.
            tc_ax.fill_between(x_vals, -150, 150, where = tracking_errors[tc_index] == 1, color = "red", alpha = 0.5, edgecolor = [1, 1, 1, 0])
    # Save the plot and show
    if save_path is not None:
        plt.savefig(save_path)
    plt.show()
def extract_stimulus_data(csv_file):
    """Parse stimulus parameters from *csv_file*.

    Each non-empty cell holds one stimulus's comma-separated float values;
    one row per time point. Returns an array indexed as
    [time][stimulus][value].
    """
    with open(csv_file) as f:
        # Keep only non-empty cells of every row.
        rows = [[cell for cell in row if cell] for row in csv.reader(f)]
    frames = []
    for row in rows:
        frames.append(np.array([cell.split(",") for cell in row]).astype(float))
    return np.array(frames)
def load_stimulus_data(folder, prefix):
    """Locate and parse the stimulus-data CSV for *prefix* in *folder*."""
    csv_path = filenames_from_folder(
        folder, filename_contains=[prefix, "stimulus-data"], filename_ends_with=".csv"
    )[0]
    return extract_stimulus_data(csv_path)
def load_data_from_filenames(filenames, dtype = float):
    """Load each whitespace-delimited text file into an array of *dtype*."""
    arrays = []
    for filename in filenames:
        arrays.append(np.loadtxt(filename, dtype=dtype))
    return arrays
def calculate_stimulus(stimulus_data, stimulus_image, example_index = 0, index_factor = 1):
    """Draw every stimulus of one time point as a filled white circle onto
    *stimulus_image*.

    stimulus_data is indexed as [time][stimulus][x, y, size] with x/y in
    normalised [-1, 1] coordinates; index_factor converts example_index
    (a frame number) into an index into stimulus_data.
    """
    stimulus_sizes = stimulus_data[:, :, 2]
    # Fix: scale the normalised coordinates by the target image's own shape.
    # The original referenced an undefined global `image`, raising NameError.
    stimulus_positions = np.moveaxis(
        np.array([
            stimulus_data[:, :, 0] * stimulus_image.shape[0] / 2 + stimulus_image.shape[0] / 2,
            stimulus_data[:, :, 1] * stimulus_image.shape[1] / 2 + stimulus_image.shape[1] / 2,
        ]), 0, -1)
    for stimulus_position, stimulus_size in zip(stimulus_positions[example_index * index_factor], stimulus_sizes[example_index * index_factor]):
        stimulus_image = cv2.circle(stimulus_image, (int(stimulus_position[0]), int(stimulus_position[1])), int(stimulus_size), [255, 255, 255], -1, cv2.LINE_AA)
    return stimulus_image
def plot_stimulus(stimulus_data, stimulus_image, save_path = None, example_index = 0, index_factor = 1):
    """Render the stimulus frame at example_index (see calculate_stimulus)."""
    stimulus_image = calculate_stimulus(stimulus_data, stimulus_image, example_index, index_factor)
    fig = plt.figure(figsize = (10, 10), dpi = 300, constrained_layout = False)
    im_ax = fig.add_subplot(1, 1, 1)
    im_ax.imshow(stimulus_image, cmap = "gray", vmin = 0, vmax = 255, aspect = "equal")
    im_ax.set_axis_off()
    if save_path is not None:
        plt.savefig(save_path)
    plt.show()
def interpolate_NaNs(data, skip_start = False, skip_end = False):
    """Replace NaNs in *data* (modified in place) by linear interpolation.

    NaN endpoints are first zeroed so the interpolation is anchored, unless
    skip_start/skip_end request leaving them to the interpolation (which
    then extends the nearest valid value).
    """
    if np.isnan(data[0]) and not skip_start:
        data[0] = 0
    if np.isnan(data[-1]) and not skip_end:
        data[-1] = 0
    nan_mask = np.isnan(data)
    if nan_mask.any():
        bad_indices = np.where(nan_mask)[0]
        good_indices = np.where(~nan_mask)[0]
        data[bad_indices] = np.interp(bad_indices, good_indices, data[good_indices])
    return data
def calculate_prey_yaw_angle(time, moving_prey_speed = 0.6):
    """Yaw angle of the oscillating prey stimulus at each time point.

    The raw angle is (-pi/3) * sin(t * moving_prey_speed); passing it through
    arctan2(sin, cos) wraps it into (-pi, pi].
    """
    angles = []
    for t in time:
        raw = (-np.pi / 3) * np.sin(t * moving_prey_speed)
        angles.append(np.arctan2(np.sin(raw), np.cos(raw)))
    return angles
def calculate_velocity(pos_x, pos_y, size_of_FOV_cm = 6, image_width = 1088, imaging_FPS = 332):
    """Per-frame speed (cm/s) for each trajectory.

    Converts pixel displacements between consecutive frames into cm/s using
    the field-of-view size, the image width, and the frame rate. x and y are
    truncated to their common length before differencing.
    """
    velocities = []
    for x, y in zip(pos_x, pos_y):
        length = np.min([len(x), len(y)])
        step_lengths = np.hypot(np.diff(x[:length]), np.diff(y[:length]))
        velocities.append(step_lengths * size_of_FOV_cm * imaging_FPS / image_width)
    return velocities
def calculate_paths2(pos_x, pos_y, image, colour, linewidth = 1, radius_threshold = 100):
    """Draw several trajectories into *image*, skipping segments whose end
    point lies outside a circle of radius_threshold around the image centre.

    NOTE(review): near-duplicate of calculate_trajectory (which handles a
    single trajectory) — consider sharing the implementation.
    """
    for x, y in zip(pos_x, pos_y):
        for i in range(len(x) - 1):
            point1 = (int(x[i]), int(y[i]))
            point2 = (int(x[i+1]), int(y[i+1]))
            # Skip segments leaving the circular arena.
            if np.sqrt((point2[0] - (image.shape[0] / 2))**2 + (point2[1] - (image.shape[1] / 2))**2) > radius_threshold:
                continue
            # Zero-length segments draw nothing.
            if point1 != point2:
                image = cv2.line(image, point1, point2, colour, linewidth)
    return image
def calculate_trajectory(x, y, image, colour, linewidth = 1, radius_threshold = 100):
    """Draw a single trajectory into *image*, skipping segments whose end
    point lies outside a circle of radius_threshold around the image centre.

    NOTE(review): near-duplicate of calculate_paths2 — consider sharing the
    implementation.
    """
    for i in range(len(x) - 1):
        point1 = (int(x[i]), int(y[i]))
        point2 = (int(x[i+1]), int(y[i+1]))
        # Skip segments leaving the circular arena.
        if np.sqrt((point2[0] - (image.shape[0] / 2))**2 + (point2[1] - (image.shape[1] / 2))**2) > radius_threshold:
            continue
        # Zero-length segments draw nothing.
        if point1 != point2:
            image = cv2.line(image, point1, point2, colour, linewidth)
    return image
def threshold_trajectory(pos_x, pos_y, image, radius_threshold = 100):
    """Mark, per trajectory, which frames are inside the circular arena.

    Each trajectory gets a boolean array that stays True until the next
    point first leaves a circle of radius_threshold centred on the image;
    from that frame on, everything is False.
    """
    centre_x = image.shape[0] / 2
    centre_y = image.shape[1] / 2
    thresh = [np.ones(x.shape).astype(bool) for x in pos_x]
    for i, (x, y) in enumerate(zip(pos_x, pos_y)):
        for j in range(len(x) - 1):
            next_point = (int(x[j + 1]), int(y[j + 1]))
            if np.sqrt((next_point[0] - centre_x) ** 2 + (next_point[1] - centre_y) ** 2) > radius_threshold:
                # Invalidate everything from this frame onwards.
                thresh[i][j:] = False
                break
    return thresh
def normalize_path_data(pos_x, pos_y, heading_angle, offset_x = 200, offset_y = 200):
    """Normalise each trajectory: translate it to start at the origin, rotate
    by the initial heading plus pi/2, then shift by (offset_x, offset_y).
    """
    new_pos_x, new_pos_y = [], []
    for x, y, ha in zip(pos_x, pos_y, heading_angle):
        theta = ha[0] + (np.pi / 2)
        cos_t = np.cos(theta)
        sin_t = np.sin(theta)
        # Translate so the trajectory starts at the origin.
        dx = x - x[0]
        dy = y - y[0]
        # Standard 2-D rotation followed by the offset shift.
        new_pos_x.append(dx * cos_t - dy * sin_t + offset_x)
        new_pos_y.append(dx * sin_t + dy * cos_t + offset_y)
    return new_pos_x, new_pos_y
def calculate_max_heading_angle(heading_angle):
    """Largest-magnitude heading angle (keeping its sign) per trajectory."""
    extremes = []
    for ha in heading_angle:
        peak = ha[np.argmax(ha)]
        trough = ha[np.argmin(ha)]
        # Keep whichever extreme is larger in absolute value.
        extremes.append(trough if np.abs(peak) < np.abs(trough) else peak)
    return extremes
def detect_peaks(data, delta, x = None):
    """Detect local maxima and minima in a 1-D signal with hysteresis.

    A maximum is committed once the signal subsequently falls more than
    *delta* below the running max (and symmetrically for minima) — the
    classic "peakdet" algorithm.

    Parameters
    ----------
    data : 1-D sequence of signal values.
    delta : Positive scalar; the minimum drop/rise required to confirm an
        extremum.
    x : Optional sample positions; defaults to the sample indices.

    Returns
    -------
    results : Array of (position, value) pairs of all detected extrema,
        sorted by position; an array of two NaNs when no maxima or no
        minima were found.

    Raises
    ------
    ValueError : On inconsistent inputs or a non-positive/non-scalar delta.
        (Fix: the original called sys.exit without importing sys, so the
        error paths raised NameError instead.)
    """
    maxtab = []
    mintab = []
    data = np.asarray(data)
    if x is None:
        x = np.arange(len(data))
    if len(data) != len(x):
        raise ValueError('Input vectors data and x must have same length')
    if not np.isscalar(delta):
        raise ValueError('Input argument delta must be a scalar')
    if delta <= 0:
        raise ValueError('Input argument delta must be positive')
    # np.inf/np.nan instead of np.Inf/np.NaN (aliases removed in NumPy 2.0).
    mn, mx = np.inf, -np.inf
    mnpos, mxpos = np.nan, np.nan
    lookformax = True
    for i in np.arange(len(data)):
        this = data[i]
        # Track the running extremes since the last committed peak.
        if this > mx:
            mx = this
            mxpos = x[i]
        if this < mn:
            mn = this
            mnpos = x[i]
        if lookformax:
            # Fell delta below the running max: commit it as a maximum.
            if this < mx - delta:
                maxtab.append((mxpos, mx))
                mn = this
                mnpos = x[i]
                lookformax = False
        else:
            # Rose delta above the running min: commit it as a minimum.
            if this > mn + delta:
                mintab.append((mnpos, mn))
                mx = this
                mxpos = x[i]
                lookformax = True
    maxtab = np.array(maxtab)
    mintab = np.array(mintab)
    if len(maxtab) > 0 and len(mintab) > 0:
        results = np.array(sorted(np.concatenate([maxtab, mintab], axis = 0), key = lambda pair: pair[0]))
    else:
        results = np.ones(2) * np.nan
    return results
def detect_peaks_2(data, bout_threshold):
    """Collect alternating extremum indices in a 1-D trace with hysteresis.

    An index is committed each time the signal moves more than
    bout_threshold away from the previous running extreme; if the trend
    continues on the very next sample, the last committed index slides
    forward instead of adding a new one.

    Returns the list of committed indices, or np.nan when none were found.
    """
    min_value = np.Inf
    max_value = -np.Inf
    find_max = True
    bout_counter = 0  # NOTE(review): assigned but never read in this function.
    peaks = []
    bout = False
    for index, value in enumerate(data):
        # Track running extremes; when the latest committed index is the
        # immediately preceding sample and the trend continues, slide it.
        if value > max_value:
            max_value = value
            if len(peaks) > 0:
                if not find_max and peaks[-1] == index - 1:
                    peaks[-1] = index
        if value < min_value:
            min_value = value
            if len(peaks) > 0:
                if find_max and peaks[-1] == index - 1:
                    peaks[-1] = index
        if bout:
            if find_max and value > min_value + bout_threshold:
                # Rose past the hysteresis band: commit this index.
                max_value = value
                find_max = False
                peaks.append(index)
            elif not find_max and value < max_value - bout_threshold:
                # Fell past the hysteresis band: commit this index.
                min_value = value
                find_max = True
                peaks.append(index)
        else:
            # Not in a bout yet: arm on the first threshold-sized excursion.
            if value > min_value + bout_threshold or value < max_value - bout_threshold:
                bout = True
                if value < max_value - bout_threshold:
                    find_max = False
    if len(peaks) > 0:
        return peaks
    else:
        return np.nan
def detect_bouts(data, bout_threshold, window_size):
    """Return a boolean mask marking samples that belong to activity bouts.

    A bout starts once the signal moves more than bout_threshold away from
    its running extreme, and ends after the signal has stayed quiet (no
    threshold-sized change between consecutive samples) for more than
    window_size samples.
    """
    min_value = np.Inf
    max_value = -np.Inf
    find_max = True
    bout_detected = np.zeros(len(data)).astype(bool)
    bout_counter = 0  # samples since the last threshold-sized jump
    prev_value = None
    bout = False
    for index, value in enumerate(data):
        # Track running extremes of the signal.
        if value > max_value:
            max_value = value
        if value < min_value:
            min_value = value
        if bout:
            # Flip the search direction whenever the signal crosses the
            # hysteresis band in the opposite direction.
            if not find_max and value > min_value + bout_threshold:
                max_value = value
                if not find_max:
                    find_max = True
            elif find_max and value < max_value - bout_threshold:
                min_value = value
                if find_max:
                    find_max = False
            # Reset the quiet counter on any large sample-to-sample change.
            if np.abs(value - prev_value) > bout_threshold:
                bout_counter = 0
            else:
                bout_counter += 1
        else:
            # Not in a bout yet: arm on the first threshold-sized excursion.
            if value > min_value + bout_threshold or value < max_value - bout_threshold:
                bout = True
                bout_counter = 0
                if value < max_value - bout_threshold:
                    find_max = False
        if bout_counter > window_size:
            # Quiet long enough: close the bout and reset all running state.
            min_value = np.Inf
            max_value = -np.Inf
            bout = False
            find_max = True
            bout_counter = 0
        prev_value = value
        bout_detected[index] = bout
    return bout_detected
def calculate_frequency_from_peaks(data, data_length, framerate):
    """Convert peak indices into an instantaneous frequency trace.

    Each inter-peak interval is interpreted as half an oscillation cycle,
    so the frequency between consecutive peaks is
    ``framerate / (2 * interval)``.

    Parameters
    ----------
    data : sequence of int, peak indices (as produced by the peak detectors).
    data_length : length of the output trace.
    framerate : acquisition rate used to convert sample counts to Hz.

    Returns
    -------
    np.ndarray of length ``data_length``: zero outside the peak intervals;
    the final element carries the last computed frequency.
    """
    new_data = np.zeros(data_length)
    frequency = None
    for peak0, peak1 in zip(data[:-1], data[1:]):
        # Two successive peaks span half a period of the oscillation.
        frequency = framerate / (2 * (peak1 - peak0))
        new_data[peak0:peak1] = frequency
    # Bug fix: with fewer than two peaks the original raised a NameError
    # here (``frequency`` was never bound); only back-fill the last sample
    # when a frequency was actually computed.
    if frequency is not None:
        new_data[-1] = frequency
    return new_data
def calculate_zscore(data):
    """Standardize *data* to zero mean and unit (population) standard deviation."""
    return (data - np.mean(data)) / np.std(data)
def find_outlier_indices(data, zscore_threshold = np.inf, max_val_threshold = np.inf, min_val_threshold = -np.inf):
    """Return indices of samples flagged as outliers.

    A sample is an outlier if its z-score exceeds ``zscore_threshold``, or
    its raw value exceeds ``max_val_threshold`` / falls below
    ``min_val_threshold``. Defaults use np.inf (the np.Inf alias was
    removed in NumPy 2.0), i.e. each criterion is disabled until set.

    Parameters
    ----------
    data : 1D numeric array.
    zscore_threshold : z-score above which a sample is flagged.
    max_val_threshold : raw value above which a sample is flagged.
    min_val_threshold : raw value below which a sample is flagged.

    Returns
    -------
    np.ndarray of int indices of the flagged samples.
    """
    zscore = calculate_zscore(data)
    max_vals = data > max_val_threshold
    min_vals = data < min_val_threshold
    zscore_vals = zscore > zscore_threshold
    # A sample is an outlier if any one criterion fires.
    outlier_indices = np.where(np.logical_or.reduce([zscore_vals, max_vals, min_vals]))[0]
    return outlier_indices
def plot_tbf_heatmap(frequency, save_path = None):
    """Render a tail-beat-frequency matrix as a heatmap over a bare scale axis.

    Parameters
    ----------
    frequency : 2D array (rows x time), drawn with the 'inferno' colormap
        clipped to the 0-40 Hz range.
    save_path : optional path; when given the figure is saved before showing.
    """
    figure = plt.figure(figsize = (20, 8), dpi = 300, constrained_layout = False)
    grid = figure.add_gridspec(ncols = 1, nrows = 2, height_ratios= [1, 0.02], hspace = 0)
    heatmap_axis = figure.add_subplot(grid[0, 0])
    scale_axis = figure.add_subplot(grid[1, 0])
    heatmap_axis.imshow(frequency, origin = 'upper', aspect = 'auto', interpolation = "none", vmin = 0, vmax = 40, cmap = 'inferno', rasterized = True)
    heatmap_axis.set_axis_off()
    # The thin lower axis only defines the horizontal extent; strip all marks.
    scale_axis.set_xlim(0, frequency.shape[1])
    scale_axis.set_xticks([])
    scale_axis.set_yticks([])
    for side in ('top', 'right', 'left', 'bottom'):
        scale_axis.spines[side].set_visible(False)
    # Save the plot (if requested) and show
    if save_path is not None:
        plt.savefig(save_path)
    plt.show()
def custom_smooth_data(data, thresh):
    """Blank out single-sample spikes and interpolate over the gaps.

    Any sample deviating from the mean of its two neighbours by more than
    ``thresh`` is replaced with NaN; ``interpolate_NaNs`` then fills the gaps.
    """
    smoothed = data.copy()
    triples = zip(data[:-2], data[1:-1], data[2:])
    for position, (left, centre, right) in enumerate(triples):
        neighbour_mean = np.mean([left, right])
        if np.abs(centre - neighbour_mean) > thresh:
            # position indexes the left neighbour; the centre sample is +1.
            smoothed[position + 1] = np.nan
    return interpolate_NaNs(smoothed)
def load_timestamped_data(file, timestamp_index = 0, format = "ms", skip_first_row = False):
    """Load a CSV file whose rows contain an ISO-like timestamp column.

    Parameters
    ----------
    file : path to the CSV file.
    timestamp_index : column holding the timestamp; -1 selects the last column.
    format : time unit passed to ``convert_timestamps`` ("ms", "s", "m" or "h").
    skip_first_row : skip a header row when True.

    Returns
    -------
    (timestamps, values): numeric timestamps (see ``convert_timestamps``)
    and the remaining columns as an array of strings.

    Raises
    ------
    ValueError if ``timestamp_index`` is out of range.
    """
    data = np.genfromtxt(file, dtype = str)
    n_indices = len(data[0].split(","))
    if timestamp_index >= n_indices:
        # Bug fix: previously printed an error and raised a bare Exception.
        raise ValueError("Timestamp index is greater than the number of columns in the data.")
    if timestamp_index == -1:
        timestamp_index = n_indices - 1
    rows = data[1:] if skip_first_row else data
    # Timestamps look like "<date>T<HH:MM:SS>-<zone>"; keep the HH:MM:SS triple.
    timestamps = np.array([row.split(",")[timestamp_index].split("T")[1].split("-")[0].split(":") for row in rows]).astype(float)
    value_indices = [index for index in range(n_indices) if index != timestamp_index]
    values = np.array([np.array(row.split(","))[value_indices] for row in rows])
    return convert_timestamps(timestamps, format), values
def convert_timestamps(timestamps, format = "ms"):
    """Convert (hours, minutes, seconds) triples to a single time unit.

    Parameters
    ----------
    timestamps : array of shape (n, 3) holding hour, minute, second columns.
    format : target unit, one of "ms", "s", "m" or "h".

    Returns
    -------
    1D array of times expressed in the requested unit.

    Raises
    ------
    ValueError for an unrecognized ``format`` (the original used `assert`,
    which is stripped when Python runs with -O).
    """
    hours, minutes, seconds = timestamps[:, 0], timestamps[:, 1], timestamps[:, 2]
    if format == "ms":
        return (hours * 60 * 60 * 1000) + (minutes * 60 * 1000) + (seconds * 1000)
    if format == "s":
        return (hours * 60 * 60) + (minutes * 60) + seconds
    if format == "m":
        return (hours * 60) + minutes + (seconds / 60)
    if format == "h":
        return hours + (minutes / 60) + (seconds / (60 * 60))
    raise ValueError("Argument format should be one of the following: ms, s, m, or h.")
def normalize_data(data):
    """Rescale *data* to the [0, 1] range, ignoring NaNs when finding extrema."""
    lowest = np.nanmin(data)
    highest = np.nanmax(data)
    return (data - lowest) / (highest - lowest)
def order_data_by_timestamps(data, timestamps):
    """Resample several irregularly-timestamped series onto a common time base.

    Parameters
    ----------
    data : sequence of per-series value arrays.
    timestamps : sequence of matching per-series timestamp arrays.

    Returns
    -------
    (ordered_data, relative_timestamps): a (series x time) array where each
    series carries its most recent value forward at every shared timestamp
    (NaN before the series' first sample), plus the union of timestamps
    shifted to start at zero.
    """
    # Union of every timestamp seen across all series (np.unique sorts it).
    ordered_timestamps = np.unique([val for arr in timestamps for val in arr])
    ordered_data = np.ones((len(data), ordered_timestamps.shape[0])) * np.nan
    for i in range(ordered_data.shape[0]):
        j = 0  # index of the next unconsumed sample of series i
        for k in range(ordered_data.shape[1]):
            if timestamps[i][j] <= ordered_timestamps[k]:
                if j == len(timestamps[i]) - 1:
                    # Last sample reached: hold its value to the end.
                    ordered_data[i, k:] = data[i][j]
                    break
                else:
                    j += 1
            if j > 0:
                # Forward-fill with the most recent sample at or before slot k.
                ordered_data[i, k] = data[i][j-1]
    return ordered_data, ordered_timestamps - ordered_timestamps[0]
def order_data_by_timestamps_with_NaNs(data, timestamps):
    """Place each series' values on the shared time base without forward-fill.

    Like ``order_data_by_timestamps`` but leaves NaN wherever a series has no
    sample at a shared timestamp instead of carrying the last value forward.
    """
    ordered_timestamps = np.unique([val for arr in timestamps for val in arr])
    ordered_data = np.ones((len(data), ordered_timestamps.shape[0])) * np.nan
    for i in range(ordered_data.shape[0]):
        j = 0  # index of the next unconsumed sample of series i
        for k in range(ordered_data.shape[1]):
            # Slot k receives a value only when a sample is due at or before it.
            if timestamps[i][j] <= ordered_timestamps[k]:
                if j == len(timestamps[i]) - 1:
                    ordered_data[i, k] = data[i][j]
                    break
                else:
                    ordered_data[i, k] = data[i][j]
                    j += 1
    return ordered_data, ordered_timestamps - ordered_timestamps[0]
# Old 1d closed-loop data analysis
def insert_neg_val_reshape(arr):
    """Insert a -1 marker after every 4th element, then reshape into rows of 50."""
    marker_positions = np.arange(4, len(arr) + 1, 4)
    with_markers = np.insert(arr, marker_positions, -1)
    return with_markers.reshape(-1, 50)
def load_data(folders, exclude = None):
    """Load all per-fish / per-trial CSVs of a closed-loop experiment.

    Filenames are collected from each folder and sorted chronologically using
    the date, fish number and (where present) trial number embedded in the
    file name. Returns [phase_csvs, phase, trial_num, trial_indices,
    tail_curvature, frequency, amplitude, instance, gain_values].

    NOTE(review): the sort keys split paths on "\\", so this assumes
    Windows-style path separators — confirm before running elsewhere.
    """
    # Get the list of filenames from each of the folders and sort them based on time of acquiring data
    phase_csvs = [filenames_from_folder(folder, filename_contains = ["_phase"], filename_ends_with = ".csv", filename_does_not_contain = exclude) for folder in folders]
    phase_csvs = [i for list in phase_csvs for i in list]
    phase_csvs = list(sorted(phase_csvs, key = lambda x: [int(x.split("\\")[-1].split("_")[0].split("-")[0]), int(x.split("\\")[-1].split("_")[0].split("-")[1]), int(x.split("\\")[-1].split("_")[0].split("-")[2]), int(x.split("\\")[-1][x.split("\\")[-1].index("fish"):].split("_")[0].split("-")[1])]))
    trial_num_csvs = [filenames_from_folder(folder, filename_contains = ["_trial-num"], filename_ends_with = ".csv", filename_does_not_contain = exclude) for folder in folders]
    trial_num_csvs = [i for list in trial_num_csvs for i in list]
    trial_num_csvs = list(sorted(trial_num_csvs, key = lambda x: [int(x.split("\\")[-1].split("_")[0].split("-")[0]), int(x.split("\\")[-1].split("_")[0].split("-")[1]), int(x.split("\\")[-1].split("_")[0].split("-")[2]), int(x.split("\\")[-1][x.split("\\")[-1].index("fish"):].split("_")[0].split("-")[1])]))
    tail_curvature_csvs = [filenames_from_folder(folder, filename_contains = ["_trial", "tail-curvature"], filename_ends_with = ".csv", filename_does_not_contain = exclude) for folder in folders]
    tail_curvature_csvs = [i for list in tail_curvature_csvs for i in list]
    tail_curvature_csvs = list(sorted(tail_curvature_csvs, key = lambda x: [int(x.split("\\")[-1].split("_")[0].split("-")[0]), int(x.split("\\")[-1].split("_")[0].split("-")[1]), int(x.split("\\")[-1].split("_")[0].split("-")[2]), int(x.split("\\")[-1][x.split("\\")[-1].index("fish"):].split("_")[0].split("-")[1]), int(x.split("\\")[-1][x.split("\\")[-1].index("trial"):].split("_")[0].split("-")[1])]))
    tail_kinematics_csvs = [filenames_from_folder(folder, filename_contains = ["_trial", "tail-kinematics"], filename_ends_with = ".csv", filename_does_not_contain = exclude) for folder in folders]
    tail_kinematics_csvs = [i for list in tail_kinematics_csvs for i in list]
    tail_kinematics_csvs = list(sorted(tail_kinematics_csvs, key = lambda x: [int(x.split("\\")[-1].split("_")[0].split("-")[0]), int(x.split("\\")[-1].split("_")[0].split("-")[1]), int(x.split("\\")[-1].split("_")[0].split("-")[2]), int(x.split("\\")[-1][x.split("\\")[-1].index("fish"):].split("_")[0].split("-")[1]), int(x.split("\\")[-1][x.split("\\")[-1].index("trial"):].split("_")[0].split("-")[1])]))
    # Calculate variables
    # Phase is differentiated so it represents the per-frame phase change.
    phase = np.array([np.diff(np.genfromtxt(phase_csv, delimiter = ",")) for phase_csv in phase_csvs])
    trial_num = np.array([np.genfromtxt(trial_num_csv, delimiter = ",") for trial_num_csv in trial_num_csvs])
    # Trial boundaries: frames where the trial counter changes, paired as (start, end).
    trial_indices = np.array([np.append(np.where(np.diff(trial_num_i))[0], len(trial_num_i) - 1).reshape(-1, 2) for trial_num_i in trial_num])
    tail_curvature = np.array([np.genfromtxt(tail_curvature_csv, delimiter = ",") for tail_curvature_csv in tail_curvature_csvs])
    # Kinematics rows are (frequency, amplitude, bout-instance flag).
    tail_kinematics = np.array([np.genfromtxt(tail_kinematics_csv, delimiter = ",", dtype = (float, float, bool)) for tail_kinematics_csv in tail_kinematics_csvs])
    frequency = np.array([np.array([value[0] for value in trial_tail_kinematics]) for trial_tail_kinematics in tail_kinematics])
    amplitude = np.array([np.array([value[1] for value in trial_tail_kinematics]) for trial_tail_kinematics in tail_kinematics])
    instance = np.array([np.array([value[2] for value in trial_tail_kinematics]).astype(int) for trial_tail_kinematics in tail_kinematics])
    # Get the order in which gain values were presented based on the gain value specified in the filename
    gain_values = np.array([float(x.split("\\")[-1][x.split("\\")[-1].index("gain"):].split("_")[0].split("-")[1]) for x in tail_curvature_csvs])
    # Insert a value of -1 after every 4 trials to represent the timeout period between successive blocks of trials
    tail_curvature = insert_neg_val_reshape(tail_curvature)
    frequency = insert_neg_val_reshape(frequency)
    amplitude = insert_neg_val_reshape(amplitude)
    instance = insert_neg_val_reshape(instance)
    gain_values = insert_neg_val_reshape(gain_values)
    return [phase_csvs, phase, trial_num, trial_indices, tail_curvature, frequency, amplitude, instance, gain_values]
def calculate_bout_indices_sum_instances_bout_detected(instance, frequency, amplitude, use_amplitude_exclusion = False):
    """Extract swim-bout start/end indices per fish and trial.

    Parameters
    ----------
    instance : per-fish, per-trial binary traces marking active bouts.
    frequency : matching tail-beat-frequency traces.
    amplitude : matching tail-beat-amplitude traces.
    use_amplitude_exclusion : when True, a zero-frequency bout is only
        discarded if its amplitude trace is also completely flat.

    Returns
    -------
    [all_bout_start_indices, all_bout_end_indices, sum_instances,
    bout_detected_in_trial]: per-trial index arrays, per-trial bout counts,
    and a 0/1 flag of whether any bout survived filtering in each trial.
    """
    all_bout_start_indices = [[np.array([]) for j in i] for i in np.zeros(instance.shape)]
    all_bout_end_indices = [[np.array([]) for j in i] for i in np.zeros(instance.shape)]
    sum_instances = np.zeros(instance.shape)
    bout_detected_in_trial = np.zeros(instance.shape)
    for fish_index, (fish_instance, fish_frequency, fish_amplitude) in enumerate(zip(instance, frequency, amplitude)):
        for trial_index, (trial_instance, trial_frequency, trial_amplitude) in enumerate(zip(fish_instance, fish_frequency, fish_amplitude)):
            # Rising (+1) and falling (-1) edges of the binary instance trace.
            diff_instances = np.diff(trial_instance.astype(int))
            diff_instances = np.insert(diff_instances, 0, 0)
            bout_start_indices = np.where(diff_instances == 1)[0]
            bout_end_indices = np.where(diff_instances == -1)[0]
            if bout_start_indices.shape[0] > 0 and bout_end_indices.shape[0] > 0:
                # Drop an unmatched bout end at the start of the trial.
                if bout_end_indices[0] < bout_start_indices[0]:
                    bout_end_indices = np.delete(bout_end_indices, 0)
                # Drop an unmatched bout start at the end of the trial.
                if bout_start_indices.shape[0] == bout_end_indices.shape[0] + 1:
                    bout_start_indices = np.delete(bout_start_indices, bout_start_indices.shape[0] - 1)
                if bout_start_indices.shape[0] != bout_end_indices.shape[0]:
                    # Starts/ends still unbalanced: skip the trial entirely.
                    print("Error!")
                    continue
                false_bouts = []
                for k, (start_index, end_index) in enumerate(zip(bout_start_indices, bout_end_indices)):
                    # A "bout" whose frequency is zero throughout is spurious.
                    if np.sum(trial_frequency[start_index:end_index]) == 0:
                        if use_amplitude_exclusion:
                            if np.max(trial_amplitude[start_index:end_index]) == np.min(trial_amplitude[start_index:end_index]):
                                false_bouts.append(k)
                        else:
                            false_bouts.append(k)
                if len(false_bouts) > 0:
                    false_bouts = np.array(false_bouts)
                    bout_start_indices = np.delete(bout_start_indices, false_bouts)
                    bout_end_indices = np.delete(bout_end_indices, false_bouts)
                all_bout_start_indices[fish_index][trial_index] = bout_start_indices
                all_bout_end_indices[fish_index][trial_index] = bout_end_indices
                n_bouts_detected = bout_start_indices.shape[0]
                sum_instances[fish_index][trial_index] = n_bouts_detected
                if n_bouts_detected > 0:
                    bout_detected_in_trial[fish_index][trial_index] = 1
    return [np.array(all_bout_start_indices), np.array(all_bout_end_indices), sum_instances, bout_detected_in_trial]
def calculate_bout_durations_interbout_durations_latency_mean_tail_beat_frequency(sum_instances, frequency, bout_start_indices, bout_end_indices):
    """Derive per-trial bout statistics (in frames) from bout indices.

    Parameters
    ----------
    sum_instances : per-fish, per-trial bout counts.
    frequency : per-fish, per-trial tail-beat-frequency traces.
    bout_start_indices, bout_end_indices : per-trial bout boundary indices.

    Returns
    -------
    [bout_durations, interbout_durations, latencies,
    mean_tail_beat_frequencies]; trials with no bouts keep empty arrays,
    and interbout durations require at least two bouts.
    """
    bout_durations = [[np.array([]) for j in i] for i in np.zeros(sum_instances.shape)]
    interbout_durations = [[np.array([]) for j in i] for i in np.zeros(sum_instances.shape)]
    latencies = [[np.array([]) for j in i] for i in np.zeros(sum_instances.shape)]
    mean_tail_beat_frequencies = [[np.array([]) for j in i] for i in np.zeros(sum_instances.shape)]
    for fish_index, fish_sum_instance in enumerate(sum_instances):
        for index, sum_instance in enumerate(fish_sum_instance):
            if sum_instance > 0:
                bout_durations[fish_index][index] = (bout_end_indices[fish_index][index] - bout_start_indices[fish_index][index])
                # Latency = frame index of the first bout start in the trial.
                latencies[fish_index][index] = np.array([bout_start_indices[fish_index][index][0]])
                mean_tail_beat_frequencies[fish_index][index] = np.array([np.mean(frequency[fish_index][index][bout_start_index:bout_end_index]) for bout_start_index, bout_end_index in zip(bout_start_indices[fish_index][index], bout_end_indices[fish_index][index])])
                if sum_instance > 1:
                    # Gap between consecutive bouts: next start minus previous end.
                    interbout_durations[fish_index][index] = bout_start_indices[fish_index][index][1:] - bout_end_indices[fish_index][index][:-1]
    return [np.array(bout_durations), np.array(interbout_durations), np.array(latencies), np.array(mean_tail_beat_frequencies)]
def sort_arr_by_gain(arr, gain_values, gain_levels = None):
    """Group per-trial data by the gain value used on each trial.

    Parameters
    ----------
    arr : per-fish, per-trial data (fish x trials x ...).
    gain_values : per-fish array holding the gain used on each trial.
    gain_levels : iterable of gain levels to group by; defaults to the
        experiment's levels 0.0, 0.5, 1.0, 1.5 (generalized from the
        previously hard-coded ``np.arange(0, 2, 0.5)``).

    Returns
    -------
    np.ndarray indexed as [gain][fish][trial], keeping only the trials run
    at each gain level.
    """
    if gain_levels is None:
        gain_levels = np.arange(0, 2, 0.5)
    return np.array([
        np.array([
            np.array([trial_arr for trial_index, trial_arr in enumerate(fish_arr) if fish_gains[trial_index] == level])
            for fish_arr, fish_gains in zip(arr, gain_values)
        ])
        for level in gain_levels
    ])
def get_index_of_file_basename(file_basename, phase_csvs):
    """Return the index of the first path in *phase_csvs* containing *file_basename*.

    Raises IndexError when no path matches.
    """
    matching_positions = [position for position, path in enumerate(phase_csvs) if file_basename in path]
    return matching_positions[0]
def generate_highlighted_regions(phase, trial_indices, gain_values):
    """Build a per-sample gain trace: -1 outside trials, the trial's gain inside.

    Parameters
    ----------
    phase : array whose length sets the size of the output trace.
    trial_indices : (start, end) index pairs, one per trial.
    gain_values : gain used in each trial, aligned with trial_indices.
    """
    regions_to_highlight = np.full(phase.shape[0], -1.0)
    for bounds, gain in zip(trial_indices, gain_values):
        regions_to_highlight[bounds[0]:bounds[1]] = gain
    return regions_to_highlight
def calculate_variables(data, imaging_FPS = 332):
    """Compute all per-gain behavioral variables from ``load_data`` output.

    Bout durations are converted to milliseconds; interbout durations and
    latencies to seconds, using ``imaging_FPS`` frames per second.
    """
    # Create useful variable names
    phase_csvs, phase, trial_num, trial_indices, tail_curvature, frequency, amplitude, instance, gain_values = data
    # Process data and calculate new variables
    sorted_tail_curvature = sort_arr_by_gain(tail_curvature, gain_values)
    sorted_frequency = sort_arr_by_gain(frequency, gain_values)
    sorted_amplitude = sort_arr_by_gain(amplitude, gain_values)
    sorted_instance = sort_arr_by_gain(instance, gain_values)
    bout_indices_sum_instances_bout_detected = np.array([calculate_bout_indices_sum_instances_bout_detected(gain_instance, gain_frequency, gain_amplitude) for gain_instance, gain_frequency, gain_amplitude in zip(sorted_instance, sorted_frequency, sorted_amplitude)])
    bout_durations_interbout_durations_latency_mean_tail_beat_frequency = np.array([calculate_bout_durations_interbout_durations_latency_mean_tail_beat_frequency(gain_bout_indices_sum_instances_bout_detected[2], gain_frequency, gain_bout_indices_sum_instances_bout_detected[0], gain_bout_indices_sum_instances_bout_detected[1]) for gain_frequency, gain_bout_indices_sum_instances_bout_detected in zip(sorted_frequency, bout_indices_sum_instances_bout_detected)])
    # Swap (gain, variable) axes so each variable can be unpacked across gains.
    bout_start_indices, bout_end_indices, sum_instances, bout_detected = np.swapaxes(bout_indices_sum_instances_bout_detected, 0, 1)
    bout_durations, interbout_durations, latency, mean_tail_beat_frequency = np.swapaxes(bout_durations_interbout_durations_latency_mean_tail_beat_frequency, 0, 1)
    # Frames -> milliseconds (bout durations) / seconds (interbouts, latency).
    bout_durations = bout_durations / imaging_FPS * 1000
    interbout_durations = interbout_durations / imaging_FPS
    latency = latency / imaging_FPS
    return [bout_start_indices, bout_end_indices, sum_instances, bout_detected, bout_durations, interbout_durations, latency, mean_tail_beat_frequency]
def calculate_means_and_sems(variables):
    """Aggregate per-fish metrics into per-gain means and SEMs.

    For each metric the per-fish value is the mean over all of that fish's
    bouts (all trials concatenated); the returned mean/SEM are then taken
    across fish within each gain condition.
    """
    # Create useful variable names
    bout_start_indices, bout_end_indices, sum_instances, bout_detected, bout_durations, interbout_durations, latency, max_tail_beat_frequency = variables
    # Calculate means and sems
    # N bouts: average the per-trial counts per fish, then across fish.
    mean_n_bouts = np.mean(np.mean(sum_instances, axis = 2), axis = 1)
    sem_n_bouts = np.array([stats.sem(gain_sum_instances) for gain_sum_instances in np.mean(sum_instances, axis = 2)])
    # For the remaining metrics, each fish contributes the mean of all its
    # bout values (ravel + concatenate flattens the per-trial arrays).
    mean_bout_durations = np.mean(np.array([[np.mean(np.concatenate(fish_bout_durations.ravel())) for fish_bout_durations in gain_bout_durations] for gain_bout_durations in bout_durations]), axis = 1)
    sem_bout_durations = np.array([stats.sem([np.mean(np.concatenate(fish_bout_durations.ravel())) for fish_bout_durations in gain_bout_durations]) for gain_bout_durations in bout_durations])
    mean_interbout_durations = np.mean(np.array([[np.mean(np.concatenate(fish_interbout_durations.ravel())) for fish_interbout_durations in gain_interbout_durations] for gain_interbout_durations in interbout_durations]), axis = 1)
    sem_interbout_durations = np.array([stats.sem([np.mean(np.concatenate(fish_interbout_durations.ravel())) for fish_interbout_durations in gain_interbout_durations]) for gain_interbout_durations in interbout_durations])
    mean_latency = np.mean(np.array([[np.mean(np.concatenate(fish_latency.ravel())) for fish_latency in gain_latency] for gain_latency in latency]), axis = 1)
    sem_latency = np.array([stats.sem([np.mean(np.concatenate(fish_latency.ravel())) for fish_latency in gain_latency]) for gain_latency in latency])
    mean_max_tail_beat_frequency = np.mean(np.array([[np.mean(np.concatenate(fish_max_tail_beat_frequency.ravel())) for fish_max_tail_beat_frequency in gain_max_tail_beat_frequency] for gain_max_tail_beat_frequency in max_tail_beat_frequency]), axis = 1)
    sem_max_tail_beat_frequency = np.array([stats.sem([np.mean(np.concatenate(fish_max_tail_beat_frequency.ravel())) for fish_max_tail_beat_frequency in gain_max_tail_beat_frequency]) for gain_max_tail_beat_frequency in max_tail_beat_frequency])
    return [mean_n_bouts, sem_n_bouts, mean_bout_durations, sem_bout_durations, mean_interbout_durations, sem_interbout_durations, mean_latency, sem_latency, mean_max_tail_beat_frequency, sem_max_tail_beat_frequency]
def plot_results(results, save_path = None):
    """Plot mean +/- SEM of each behavioral metric across gain conditions.

    Parameters
    ----------
    results : the ten mean/SEM arrays produced by ``calculate_means_and_sems``.
    save_path : optional path; when given the figure is saved before showing.
    """
    (mean_n_bouts, sem_n_bouts, mean_bout_durations, sem_bout_durations,
     mean_interbout_durations, sem_interbout_durations, mean_latency,
     sem_latency, mean_max_tail_beat_frequency, sem_max_tail_beat_frequency) = results
    figure = plt.figure(figsize = (20, 4))
    grid = figure.add_gridspec(ncols = 5, nrows = 1, wspace = 0.2)
    # One panel per metric, all sharing the same error-bar styling.
    panels = [
        (mean_n_bouts, sem_n_bouts),
        (mean_bout_durations, sem_bout_durations),
        (mean_interbout_durations, sem_interbout_durations),
        (mean_latency, sem_latency),
        (mean_max_tail_beat_frequency, sem_max_tail_beat_frequency),
    ]
    styling = dict(color = "blue", capsize = 10, elinewidth = 4, capthick = 1,
                   ecolor = "black", linewidth = 6, markerfacecolor = "white",
                   marker = "o", markersize = 20, markeredgewidth = 4)
    for column, (means, sems) in enumerate(panels):
        axis = figure.add_subplot(grid[0, column])
        axis.errorbar(range(len(means)), means, sems, **styling)
    # Save the plot (if requested) and show
    if save_path is not None:
        plt.savefig(save_path)
    plt.show()
def calculate_ANOVA(variables):
    """Print one-way ANOVA results comparing each metric across gains.

    Assumes exactly four gain conditions (indices 0-3). Per-fish values are
    the mean over all of that fish's bouts, mirroring
    ``calculate_means_and_sems``.
    """
    bout_start_indices, bout_end_indices, sum_instances, bout_detected, bout_durations, interbout_durations, latency, max_tbf = variables
    print("ANOVA RESULTS\n=============\n")
    # Per-fish mean bout count per gain.
    ANOVA_sum_instances = np.mean(sum_instances, axis = 2)
    print("N bouts: {}".format(stats.f_oneway(ANOVA_sum_instances[0], ANOVA_sum_instances[1], ANOVA_sum_instances[2], ANOVA_sum_instances[3])))
    ANOVA_bout_durations = np.array([[np.mean(np.concatenate(fish_bout_durations.ravel())) for fish_bout_durations in gain_bout_durations] for gain_bout_durations in bout_durations])
    print("Bout durations: {}".format(stats.f_oneway(ANOVA_bout_durations[0], ANOVA_bout_durations[1], ANOVA_bout_durations[2], ANOVA_bout_durations[3])))
    ANOVA_interbout_durations = np.array([[np.mean(np.concatenate(fish_interbout_durations.ravel())) for fish_interbout_durations in gain_interbout_durations] for gain_interbout_durations in interbout_durations])
    print("Interbout durations: {}".format(stats.f_oneway(ANOVA_interbout_durations[0], ANOVA_interbout_durations[1], ANOVA_interbout_durations[2], ANOVA_interbout_durations[3])))
    ANOVA_latency = np.array([[np.mean(np.concatenate(fish_latency.ravel())) for fish_latency in gain_latency] for gain_latency in latency])
    print("Latency: {}".format(stats.f_oneway(ANOVA_latency[0], ANOVA_latency[1], ANOVA_latency[2], ANOVA_latency[3])))
    ANOVA_tbf = np.array([[np.mean(np.concatenate(fish_tbf.ravel())) for fish_tbf in gain_tbf] for gain_tbf in max_tbf])
    print("Max Tail Beat Frequency: {}".format(stats.f_oneway(ANOVA_tbf[0], ANOVA_tbf[1], ANOVA_tbf[2], ANOVA_tbf[3])))
def calculate_pairwise_tukeyhsd(variables):
    """Print Tukey HSD post-hoc comparisons for each metric across gains.

    Assumes the four gain conditions 0.0/0.5/1.0/1.5 and the same per-fish
    aggregation (mean over all bouts) used by ``calculate_ANOVA``.
    """
    bout_start_indices, bout_end_indices, sum_instances, bout_detected, bout_durations, interbout_durations, latency, max_tbf = variables
    print("Tukey Post-Hoc Comparisons\n======================\n")
    # One label per fish per gain condition, aligned with the raveled data.
    group_labels = np.repeat(np.array(["Gain 0.0", "Gain 0.5", "Gain 1.0", "Gain 1.5"]), sum_instances.shape[1]).ravel()
    tukeyhsd_sum_instances = np.mean(sum_instances, axis = 2).ravel().astype(float)
    print("N Bouts: {}".format(pairwise_tukeyhsd(tukeyhsd_sum_instances, group_labels).summary()))
    tukeyhsd_bout_durations = np.array([[np.mean(np.concatenate(fish_bout_durations.ravel())) for fish_bout_durations in gain_bout_durations] for gain_bout_durations in bout_durations]).ravel()
    print("Bout Durations: {}".format(pairwise_tukeyhsd(tukeyhsd_bout_durations, group_labels).summary()))
    tukeyhsd_interbout_durations = np.array([[np.mean(np.concatenate(fish_interbout_durations.ravel())) for fish_interbout_durations in gain_interbout_durations] for gain_interbout_durations in interbout_durations]).ravel()
    print("Interbout Durations: {}".format(pairwise_tukeyhsd(tukeyhsd_interbout_durations, group_labels).summary()))
    tukeyhsd_latency = np.array([[np.mean(np.concatenate(fish_latency.ravel())) for fish_latency in gain_latency] for gain_latency in latency]).ravel()
    print("Latency: {}".format(pairwise_tukeyhsd(tukeyhsd_latency, group_labels).summary()))
    tukeyhsd_tbf = np.array([[np.mean(np.concatenate(fish_tbf.ravel())) for fish_tbf in gain_tbf] for gain_tbf in max_tbf]).ravel()
    print("Max Tail Beat Frequency: {}".format(pairwise_tukeyhsd(tukeyhsd_tbf, group_labels).summary()))
def calculate_1d_closed_loop_data(ordered_data, ordered_timestamps, gain_value, exclude_short_bouts = False, short_bout_threshold = None):
    """Summarize bout behavior for all trials run at one gain value.

    ordered_data rows used: row 0 = trial-active flag, row 1 = gain,
    row 5 = frequency trace, row 6 = bout-instance trace. Trials shorter
    than 29 time units are discarded. Durations assume 332 frames/second.

    Returns [n_bouts, durations, interbout_durations, mean_tbf, n_trials,
    responsive_trials] where the first four are means across trials/bouts.
    """
    # Trial boundaries: edges of "trial active AND at this gain".
    trial_starts = np.where(np.diff(np.logical_and(ordered_data[0] == 1, ordered_data[1] == gain_value).astype(int)) == 1)[0]
    trial_ends = np.where(np.diff(np.logical_and(ordered_data[0] == 1, ordered_data[1] == gain_value).astype(int)) == -1)[0]
    time_starts = [ordered_timestamps[start] for start in trial_starts]
    time_ends = [ordered_timestamps[end] for end in trial_ends]
    # Keep only trials lasting more than 29 time units.
    instances = [ordered_data[6][start:end] for i, (start, end) in enumerate(zip(trial_starts, trial_ends)) if time_ends[i]-time_starts[i]>29]
    frequency = [ordered_data[5][start:end] for i, (start, end) in enumerate(zip(trial_starts, trial_ends)) if time_ends[i]-time_starts[i]>29]
    # NOTE(review): trials with no rising/falling edge get a scalar np.nan
    # here; len()/np.delete on those entries below would raise — confirm the
    # inputs guarantee at least one bout per kept trial.
    bout_starts = [np.where(np.diff(inst) == 1)[0] if len(np.where(np.diff(inst) == 1)[0]) > 0 else np.nan for inst in instances]
    bout_ends = [np.where(np.diff(inst) == -1)[0] if len(np.where(np.diff(inst) == -1)[0]) > 0 else np.nan for inst in instances]
    for i, (starts, ends) in enumerate(zip(bout_starts, bout_ends)):
        if len(starts) > 0 and len(ends) > 0:
            # Drop unmatched boundaries at the trial edges.
            if ends[0] < starts[0]:
                bout_ends[i] = np.delete(bout_ends[i], 0)
            if starts[-1] > ends[-1]:
                bout_starts[i] = np.delete(bout_starts[i], -1)
    if exclude_short_bouts and short_bout_threshold is not None:
        for i, (starts, ends) in enumerate(zip(bout_starts, bout_ends)):
            exclusion_indices = []
            for j, (start, end) in enumerate(zip(starts, ends)):
                if end-start < short_bout_threshold:
                    exclusion_indices.append(j)
            bout_starts[i] = np.delete(bout_starts[i], exclusion_indices)
            bout_ends[i] = np.delete(bout_ends[i], exclusion_indices)
    n_trials = len(trial_starts)
    # NOTE(review): np.isnan(start) on an index *array* returns an array whose
    # truth value is ambiguous; this line only works for the scalar-nan case.
    n_bouts = [len(start) if not np.isnan(start) else np.nan for start in bout_starts]
    # NOTE(review): `val != 0 or not np.isnan(val)` is true for val == 0 as
    # well (the second clause), so every trial counts as responsive — the
    # intended operator was probably `and`.
    responsive_trials = np.sum([1 for val in n_bouts if val != 0 or not np.isnan(val)])
    # Frames -> milliseconds at the hard-coded 332 FPS imaging rate.
    durations = [(end-start)*1000/332 for start, end in zip(bout_starts, bout_ends)]
    interbout_durations = [(start[1:]-end[:-1])*1000/332 for start, end in zip(bout_starts, bout_ends)]
    mean_tbf = [[np.mean(frequency[i][bout_start:bout_end]) for bout_start, bout_end in zip(trial_starts, trial_ends)] for i, (trial_starts, trial_ends) in enumerate(zip(bout_starts, bout_ends))]
    # Collapse per-trial values into single means (NaN when nothing remains).
    n_bouts = np.mean(n_bouts)
    durations = np.mean([np.mean(val) for val in durations if len(val) > 0]) if len([np.mean(val) for val in durations if len(val) > 0]) > 0 else np.nan
    interbout_durations = np.mean([np.mean(val) for val in interbout_durations if len(val) > 0]) if len([np.mean(val) for val in interbout_durations if len(val) > 0]) > 0 else np.nan
    mean_tbf = np.mean([np.mean(val) for val in mean_tbf if len(val) > 0]) if len([np.mean(val) for val in mean_tbf if len(val) > 0]) else np.nan
    return [n_bouts, durations, interbout_durations, mean_tbf, n_trials, responsive_trials]
def remove_NaNs_from_data(data):
    """Return a list containing the entries of *data* with all NaNs dropped."""
    cleaned = []
    for value in data:
        if not np.isnan(value):
            cleaned.append(value)
    return cleaned
|
# Demonstrate the use of cloudmesh.common.Shell.
# Bug fix: import the Shell *class* from the cloudmesh.common.Shell module.
# `from cloudmesh.common import Shell` binds the Shell submodule rather than
# the class unless the package re-exports it, making Shell.execute fail —
# TODO confirm against the installed cloudmesh-common version.
from cloudmesh.common.Shell import Shell

if __name__ == "__main__":
    # NOTE(review): "dir" is a Windows shell command; use "ls" on POSIX systems.
    result = Shell.execute("dir")
    print(result)
import time
from nose.plugins.attrib import attr
from dxlclient import ResponseCallback, UuidGenerator, ServiceRegistrationInfo, Request
from dxlclient.test.base_test import BaseClientTest
from dxlclient.test.test_service import TestService
class AsyncCallbackTimeoutTest(BaseClientTest):
    """Verify that response callbacks registered for async requests are
    eventually removed (timed out) from the client's callback registry."""

    @attr('manual')
    def test_execute_async_callback_timeout(self):
        # TODO: Set SYSPROP_ASYNC_CALLBACK_CHECK_INTERVAL = 10000 when it is available
        def resp_callback():
            pass
        cb = ResponseCallback()
        cb.on_response = resp_callback
        with self.create_client() as client:
            client.connect()
            req_topic = UuidGenerator.generate_id_as_string()
            missing_topic = UuidGenerator.generate_id_as_string()
            test_service = TestService(client, 1)

            def empty_on_request(request):
                pass
            test_service.on_request = empty_on_request
            reg_info = ServiceRegistrationInfo(client, "async_callback_test_service")
            reg_info.add_topic(req_topic, test_service)
            # Register the service
            client.register_service_sync(reg_info, self.DEFAULT_TIMEOUT)
            async_req = Request(destination_topic=req_topic)
            client.async_request(async_req, cb)  # TODO: Use the method with timeout when it is available
            for _ in range(0, 10):
                req = Request(destination_topic=req_topic)
                client.async_request(req, cb)  # TODO: Use the updated method with timeout when it is available
            # Request against a topic with no service and no callback:
            # it must not add to the async callback count.
            req_for_error = Request(destination_topic=missing_topic)
            client.async_request(req_for_error)
            # 1 initial + 10 looped requests registered callbacks.
            async_callback_count = client._get_async_callback_count()
            # Bug fix: assertEquals is a deprecated alias of assertEqual.
            self.assertEqual(11, async_callback_count)
            for _ in range(0, 20):
                # Bug fix: Python 2 `print` statement replaced with the function form.
                print("asyncCallbackCount = " + str(client._get_async_callback_count()))
                time.sleep(1)
            req = Request(destination_topic=req_topic)
            client.async_request(req, cb)
            # Bug fix: re-read the live count. The original re-asserted the
            # stale value captured before the wait loop (always 11 there, so
            # the assertion could never pass).
            self.assertEqual(1, client._get_async_callback_count())
            # TODO: Restore the value of SYSPROP_ASYNC_CALLBACK_CHECK_INTERVAL
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-07-25 14:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the ``image_comments`` many-to-many relation from Image to Profile."""
    dependencies = [
        ('insta', '0008_auto_20180725_1214'),
    ]
    operations = [
        migrations.AddField(
            model_name='image',
            name='image_comments',
            # NOTE(review): `default` has no effect on a ManyToManyField
            # (Django ignores defaults for M2M relations). Also confirm the
            # relation was meant to target Profile rather than a Comment model.
            field=models.ManyToManyField(default=False, to='insta.Profile'),
        ),
    ]
|
from vee.pipeline.base import PipelineStep
class DeferredStep(PipelineStep):
    """Pipeline step that claims packages whose URL uses the ``deferred:`` scheme."""

    factory_priority = 9999

    @classmethod
    def factory(cls, step, pkg):
        # Only claim deferred: URLs; return None so another factory can
        # handle everything else.
        is_deferred = pkg.url.startswith('deferred:')
        return cls() if is_deferred else None

    def init(self, pkg):
        # Deferred packages need no initialization.
        pass
|
import tensorflow as tf
from .var_layer import VarLayer
class FC(VarLayer):
    # Fully-connected (dense) layer built on VarLayer's weight/bias variables.
    def __init__(self, in_channels, out_channels, dropout=None, **kwargs):
        # dropout: fraction of units to drop (keep_prob = 1 - dropout under
        # TF1 semantics); None disables dropout entirely.
        self.dropout = dropout
        super(FC, self).__init__(
            weight_shape=[in_channels, out_channels],
            bias_shape=[out_channels],
            **kwargs)

    def _call(self, inputs):
        # NOTE(review): `.get_shape()[0].value` and `tf.nn.dropout(x, keep_prob)`
        # are TensorFlow 1.x APIs; this code will not run unchanged on TF2.
        in_channels = self.vars['weights'].get_shape()[0].value
        # Flatten all leading dimensions so inputs become (batch, in_channels).
        outputs = tf.reshape(inputs, [-1, in_channels])
        if self.dropout is not None:
            outputs = tf.nn.dropout(outputs, 1 - self.dropout)
        outputs = tf.matmul(outputs, self.vars['weights'])
        if self.bias:
            outputs = tf.nn.bias_add(outputs, self.vars['bias'])
        # self.act is the activation supplied by VarLayer.
        return self.act(outputs)
|
import re
from rdflib import Graph, Literal, XSD, RDF
"""
Post processing functions. The JSON-LD pre-processor is model independent -- the transformations, while having certain
intimate knowledge of the FHIR resource structure, do not draw on individual schema definitions.
The post processor handles context specific information that has been injected in the JSON-LD transformation process
and uses it to tweak context sensitive fields
"""
# The following Regular expressions come from the FHIR datatypes model
# All are end-anchored ($); .match() anchors the start. Year patterns forbid "0000".
gYear_re = re.compile(r'([0-9]([0-9]([0-9][1-9]|[1-9]0)|[1-9]00)|[1-9]000)$')
# Year plus a mandatory two-digit month (01-12).
gYearMonth_re = re.compile(r'([0-9]([0-9]([0-9][1-9]|[1-9]0)|[1-9]00)|[1-9]000)(-(0[1-9]|1[0-2]))$')
# Date with optional month and optional day (01-31).
date_re = re.compile(r'([0-9]([0-9]([0-9][1-9]|[1-9]0)|[1-9]00)|[1-9]000)(-(0[1-9]|1[0-2])(-(0[1-9]|[1-2][0-9]|3[0-1]))?)?$')
# Full timestamp: date, time (leap-second 60 allowed, optional fraction),
# mandatory zone (Z or +/-HH:MM offset up to 14:00).
dateTime_re = re.compile(r'([0-9]([0-9]([0-9][1-9]|[1-9]0)|[1-9]00)|[1-9]000)'
                         r'(-(0[1-9]|1[0-2])(-(0[1-9]|[1-2][0-9]|3[0-1])'
                         r'(T([01][0-9]|2[0-3]):[0-5][0-9]:([0-5][0-9]|60)(\.[0-9]+)?'
                         r'(Z|(\+|-)((0[0-9]|1[0-3]):[0-5][0-9]|14:00))))?)$')
# Clock time on its own; allows a leap-second value of 60.
time_re = re.compile(r'([01][0-9]|2[0-3]):[0-5][0-9]:([0-5][0-9]|60)(\.[0-9]+)?$')
def test_re():
    """Spot-check the FHIR date/time regular expressions."""
    assert gYear_re.match('2018')
    assert gYear_re.match('0001')
    assert not gYear_re.match('2018-12')
    # Extended coverage for the remaining patterns.
    assert gYearMonth_re.match('2018-12')
    assert not gYearMonth_re.match('2018-13')
    assert date_re.match('2018-12-25')
    assert not date_re.match('2018-12-32')
    assert time_re.match('12:30:00')
    assert not time_re.match('24:00:00')
def post_process(g: Graph) -> Graph:
    """Narrow xsd:dateTime literals to the most specific XSD date type.

    FHIR serializes all dates as xsd:dateTime; literals whose text is really
    a year, year-month or date are rewritten with xsd:gYear, xsd:gYearMonth
    or xsd:date respectively.
    """
    # Snapshot the triples so the graph can be mutated while scanning.
    for subj, pred, obj in list(g):
        # Massage date time tags
        if not isinstance(obj, Literal) or obj.datatype != XSD.dateTime:
            continue
        text = str(obj)
        if gYear_re.match(text):
            narrowed = XSD.gYear
        elif gYearMonth_re.match(text):
            narrowed = XSD.gYearMonth
        elif date_re.match(text):
            narrowed = XSD.date
        else:
            narrowed = None
        if narrowed:
            g.remove((subj, pred, obj))
            g.add((subj, pred, Literal(text, datatype=narrowed)))
    return g
if __name__ == '__main__':
    # Demo: a dateTime literal that holds only a year should be narrowed
    # from xsd:dateTime to xsd:gYear by post_process.
    g = Graph()
    g.add( (RDF.subject, RDF.predicate, Literal('2004', datatype=XSD.dateTime)))
    for t in post_process(g):
        print(str(t))
|
import time
import ast
import matplotlib.pyplot as plt
import numpy as np
# Parse per-iteration timing lines ("one_it ..., mpc_id ...") from the
# dynamic-following log into a list of dicts.
# NOTE(review): hard-coded Windows path — parameterize if this is reused.
with open('C:/Git/dyn_fol_times2', 'r') as f:
    data = f.read().split('\n')
times = []
for line in data:
    if line == '':
        continue
    # Quote the bare keys so each line becomes a valid dict literal.
    line = '{' + line.replace('one_it', '"one_it"').replace('mpc_id', '"mpc_id"') + '}'
    try:
        times.append(ast.literal_eval(line))
    # Bug fix: catch only the parse errors literal_eval actually raises;
    # the original bare `except:` silently swallowed every failure
    # (including KeyboardInterrupt).
    except (ValueError, SyntaxError):
        pass
|
# proxy module
from pyface.image_resource import *
|
import re
from typing import Dict, List, Set, Tuple
def part_one(lines: List[str]) -> int:
all_ingredients, all_allergens, ingredient_allergen_map = process(lines)
count: int = 0
allergens: Set[str] = set()
for allergen in ingredient_allergen_map.values():
allergens |= allergen
safe_ingredients: Set[str] = set(all_ingredients) - allergens
for ingredient in all_ingredients:
if ingredient in safe_ingredients:
count += 1
return count
def process(lines: List[str]) -> Tuple[List[str], List[str], Dict[str, Set[str]]]:
    """Parse puzzle input lines of the form
    "<ingredient...> (contains <allergen>, <allergen>...)".

    Returns
    -------
    all_ingredients : every ingredient occurrence, duplicates kept.
    all_allergens : every allergen occurrence, duplicates kept.
    ingredient_allergen_map : allergen -> set of candidate ingredients
        (intersection over all lines that list that allergen).

    Note: the original annotated the return as ``[List[str], Set[str]]``,
    which is not a valid type and did not match the three returned values.
    """
    all_ingredients: List[str] = []
    all_allergens: List[str] = []
    ingredient_allergen_map: Dict[str, Set[str]] = dict()
    # Raw string so \s and \( are regex escapes, not Python string escapes.
    pattern = re.compile(r'(?P<ingredients>.*)\s+\(contains\s+(?P<allergens>.*)\)')
    for line in lines:
        match = pattern.match(line)
        ingredients: List[str] = match.groupdict()['ingredients'].split()
        all_ingredients.extend(ingredients)
        allergens: List[str] = match.groupdict()['allergens'].replace(',', '').split()
        all_allergens.extend(allergens)
        for allergen in allergens:
            existing_ingredients = ingredient_allergen_map.get(allergen)
            if existing_ingredients is not None:
                # Narrow: only ingredients present on every line mentioning
                # this allergen can actually contain it.
                ingredient_allergen_map[allergen] = set(ingredients) & existing_ingredients
            else:
                ingredient_allergen_map[allergen] = set(ingredients)
    return all_ingredients, all_allergens, ingredient_allergen_map
def part_two(lines: List[str]) -> str:
    """Resolve every allergen to its unique ingredient and return the
    canonical dangerous-ingredient list: the ingredients joined by commas,
    ordered alphabetically by their allergen."""
    all_ingredients, all_allergens, ingredient_allergen_map = process(lines)
    sizes = [len(candidates) for candidates in ingredient_allergen_map.values()]
    # Constraint propagation: once an allergen is pinned to one ingredient,
    # remove that ingredient from every other allergen's candidate set.
    while max(sizes) > 1:
        for allergen in ingredient_allergen_map.keys():
            if len(ingredient_allergen_map[allergen]) == 1:
                continue
            for other in ingredient_allergen_map.keys():
                if len(ingredient_allergen_map[allergen]) == 1:
                    break
                if allergen == other:
                    continue
                if len(ingredient_allergen_map[other]) == 1:
                    ingredient_allergen_map[allergen] -= ingredient_allergen_map[other]
        sizes = [len(candidates) for candidates in ingredient_allergen_map.values()]
    return ",".join(
        next(iter(ingredient_allergen_map[allergen]))
        for allergen in sorted(set(all_allergens))
    )
# Entry point: read the puzzle input and print both answers.
# readlines() keeps trailing newlines; the parsing regex in process() has
# no trailing anchor, so they are harmless.
with open('input.txt', 'r') as file:
    lines: List[str] = file.readlines()
print(part_one(lines))
print(part_two(lines))
|
import click
import os
import json
# from pyfiglet import Figlet
# f = Figlet(font='big')
# print(f.renderText('Android JS'))
import time
import sys
CWD = os.getcwd()
@click.group()
def cli():
    # Root command group; subcommands attach via @cli.command() below.
    pass
@cli.command()
@click.option('--app-name', prompt='App Name ? ', help='New App Name')
@click.option('--force', is_flag=True, default=False, help='Force to reload resources.')
@click.option('--debug', is_flag=True, default=False, help='Enable debug logs.')
def init(app_name, force, debug):
    # Scaffold a new project.  (Documented in comments: click would surface
    # a docstring as the command's --help text.)
    # NOTE(review): force/debug are accepted but not used yet.
    click.echo('Creating new project: ' + app_name)
    click.echo(CWD)
@cli.command()
@click.option('--force', is_flag=True, default=False, help='Force to reload resources.')
@click.option('--debug', is_flag=True, default=False, help='Enable debug logs.')
@click.option('--release', is_flag=True, default=False, help='Build Apk in release mode.')
def build(force, debug, release):
    # Build the project after loading its JSON config.
    click.echo('Building project')
    config_path = os.path.join(CWD, 'test-data.json')
    if os.path.exists(config_path):
        # Bug fix: the original checked CWD/test-data.json but then opened
        # './test-data.json', which diverges if the process chdir'd after
        # import.  Open the exact path that was checked.
        with open(config_path) as pkg:
            package = json.load(pkg)
        print(package)
    else:
        # Bug fix: the message named "package.json" although the file
        # actually looked for is test-data.json.
        print("test-data.json not found")
@cli.command()
@click.option('--force', is_flag=True, default=False, help='Force to reload resources.')
@click.option('--debug', is_flag=True, default=False, help='Enable debug logs.')
def update(force, debug):
    # Placeholder: only announces the SDK update for now.
    click.echo('Update sdk')
if __name__ == "__main__":
    cli()
# coding: UTF-8
from .parser import ShiftReduceParser
|
import json
import pymongo
import sys
def connect_to_db_collection(db_name, collection_name):
    '''
    Return collection of a given database name and collection name
    '''
    # Connect to the local mongod on the default port and index straight
    # through to the requested collection.
    client = pymongo.Connection('localhost', 27017)
    return client[db_name][collection_name]
def main():
if len(sys.argv) != 2:
usage_message = """
usage: %s db_name
Create problem_ids collection\n
"""
sys.stderr.write(usage_message % sys.argv[0])
sys.exit(1)
problem_ids_collection = connect_to_db_collection(source_db, 'problem_ids')
problem_ids_collection.drop()
tracking_collection = connect_to_db_collection(source_db, 'tracking')
user_id_map_collection = connect_to_db_collection(source_db, 'user_id_map')
cursor = tracking_collection.find({'event_type' : 'problem_check',
'event_source' : 'server'})
for document in cursor:
doc_result = {}
username = document['username']
if username.isdigit():
username = int(username)
doc_result['username'] = username
user_id_map = user_id_map_collection.find_one({'username' : username})
if not user_id_map:
print "Username {0} not found in collection user_id_map".format(username)
continue
doc_result['user_id'] = user_id_map['id']
doc_result['hash_id'] = user_id_map['hash_id']
doc_result['problem_id'] = document['event']['problem_id']
doc_result['course_id'] = document['context']['course_id']
doc_result['module'] = document['context']['module']
doc_result['time'] = document['time']
doc_result['event'] = document['event']
collection['problem_ids'].insert(doc_result)
if __name__ == '__main__':
main()
|
from checkov.common.vcs.vcs_schema import VCSSchema
class BranchRestrictionsSchema(VCSSchema):
    """JSON schema (draft-04) for Bitbucket branch-restrictions API pages.

    The original inlined three byte-identical item schemas in the
    tuple-style ``items`` list; the entry schema is now built once and
    repeated, which validates exactly the same documents.
    """
    def __init__(self):
        # Schema for a single branch-restriction entry.
        value_item = {
            "type": "object",
            "properties": {
                "kind": {"type": "string"},
                "users": {"type": "array", "items": {}},
                "links": {
                    "type": "object",
                    "properties": {
                        "self": {
                            "type": "object",
                            "properties": {"href": {"type": "string"}},
                            "required": ["href"],
                        }
                    },
                    "required": ["self"],
                },
                "pattern": {"type": "string"},
                "branch_match_kind": {"type": "string"},
                "groups": {"type": "array", "items": {}},
                "type": {"type": "string"},
                "id": {"type": "integer"},
            },
            "required": [
                "kind",
                "users",
                "links",
                "pattern",
                "branch_match_kind",
                "groups",
                "type",
                "id",
            ],
        }
        schema = {
            "$schema": "http://json-schema.org/draft-04/schema#",
            "type": "object",
            "properties": {
                "pagelen": {"type": "integer"},
                # Tuple-style "items": the first three array elements are
                # each validated against the same entry schema, exactly as
                # the original three inline copies did.
                "values": {"type": "array", "items": [value_item] * 3},
                "page": {"type": "integer"},
                "size": {"type": "integer"},
            },
            "required": ["pagelen", "values", "page", "size"],
        }
        super().__init__(schema=schema)
schema = BranchRestrictionsSchema()
|
"""
Controller for the bookmarks
By: Tom Orth
"""
from app.bookmarks.model import BookmarkTranscript
from app.utils import convert_full_transcripts_to_json, check_if_parsed_transcripts_are_bookmarked
from app.bookmarks.form import SearchForm
from app.setup import conn
from flask import Blueprint, render_template, request, current_app, redirect, url_for, flash, Response, send_from_directory
from flask_login import login_required, current_user
bookmarks = Blueprint("bookmarks", __name__, url_prefix="/bookmarks")
# Search page for transcripts
@bookmarks.route("/list", methods=["GET", "POST"])
@login_required
def list():
    """Search/list the current user's bookmarked transcripts.

    POST redirects back to GET with the search encoded in the query
    string (post/redirect/get); GET runs the search and renders the page.
    """
    search_form = SearchForm(request.form)  # Search Form
    # Bug fix: validate_on_submit is a method; the original referenced it
    # without calling it, so the check was always truthy.
    if request.method == "POST" and search_form.validate_on_submit():
        return redirect(url_for("bookmarks.list", category=search_form.category.data, search=search_form.search.data))
    else:
        # SECURITY NOTE(review): these queries interpolate user_id and the
        # user-supplied search string directly into SQL -- an injection
        # risk.  run_and_return_many is called here as (conn, query) only;
        # switch to bind parameters once the helper supports them.
        search_query = f"SELECT * FROM user_transcript_view WHERE user_id={current_user.user_id};"
        category = request.args.get("category")
        search_string = request.args.get("search")
        if category == "title":
            search_query = f"SELECT * FROM user_transcript_view WHERE user_id={current_user.user_id} AND title ILIKE \'%{search_string}%\'"
        elif category == "content":
            search_query = f"SELECT * FROM user_transcript_view WHERE user_id={current_user.user_id} AND text_content ILIKE \'%{search_string}%\'"
        elif category == "summary":
            search_query = f"SELECT * FROM user_transcript_view WHERE user_id={current_user.user_id} AND summary ILIKE \'%{search_string}%\'"
        # Convert rows to dicts and strip fields the view exposes that must
        # not reach the template.
        transcripts_res = BookmarkTranscript.run_and_return_many(conn, search_query)
        transcripts_dicts = [transcript.__dict__ for transcript in transcripts_res]
        for transcript in transcripts_dicts:
            del transcript["password_hash"]
            del transcript["user_id"]
        return render_template("bookmarks/list.html", title="Bookmarks", form=search_form, loggedin=current_user.is_authenticated, email=current_user.email, data=transcripts_dicts)
|
# coding: utf-8
"""
simple implementation of Longest common subsequence algorithm
https://en.wikipedia.org/wiki/Longest_common_subsequence_problem
"""
from __future__ import print_function
def lcs_length(str1, str2):
    """Build the LCS dynamic-programming tables.

    Returns (C, B), both (len1+1) x (len2+1): C[i][j] is the LCS length of
    str1[:i] and str2[:j]; B holds back-pointers ('↖' = match,
    '↑' = drop a char of str1, '←' = drop a char of str2).
    """
    rows = len(str1) + 1
    cols = len(str2) + 1
    C = [[0] * cols for _ in range(rows)]
    B = [[0] * cols for _ in range(rows)]
    for i in range(1, rows):
        for j in range(1, cols):
            if str1[i - 1] == str2[j - 1]:
                # Characters match: extend the diagonal subsequence.
                C[i][j] = C[i - 1][j - 1] + 1
                B[i][j] = '↖'
            elif C[i - 1][j] >= C[i][j - 1]:
                C[i][j] = C[i - 1][j]
                B[i][j] = '↑'
            else:
                C[i][j] = C[i][j - 1]
                B[i][j] = '←'
    return C, B
def print_lcs(B, str1, i, j):
    """Reconstruct the LCS string by walking the back-pointer table."""
    if i == 0 or j == 0:
        # Ran off either string: nothing left to emit.
        return ''
    direction = B[i][j]
    if direction == '↖':
        # Match cell: this character is part of the subsequence.
        return print_lcs(B, str1, i - 1, j - 1) + str1[i - 1]
    if direction == '↑':
        return print_lcs(B, str1, i - 1, j)
    return print_lcs(B, str1, i, j - 1)
def lcs(str1, str2):
    """Return a longest common subsequence of the two strings."""
    _, back = lcs_length(str1, str2)
    return print_lcs(back, str1, len(str1), len(str2))
if __name__ == '__main__':
    # Bug fix: `__name__ in '__main__'` is a substring test, which would
    # also fire for a module named e.g. 'main'; use equality.
    for WORD1, WORD2 in [('thisisatest', 'testing123testing'),
                         ('ABCBDAB', 'BDCABA')]:
        SUB = lcs(WORD1, WORD2)
        print('LCS between words \'{}\' \'{}\':'.format(WORD1, WORD2), SUB)
|
from .model_600_basicSaVs import BasicSaVs
|
import numpy as np
from psychopy.visual.grating import GratingStim
class GazeStim(GratingStim):
    """Stimulus linked to eyetracker that shows gaze location."""
    def __init__(self, win, tracker):
        # Keep a handle on the tracker so draw() can poll gaze each frame.
        self.tracker = tracker
        stim_params = dict(
            autoDraw=True,
            autoLog=False,
            color="skyblue",
            mask="gauss",
            size=1,
            tex=None,
        )
        super(GazeStim, self).__init__(win, **stim_params)
    def draw(self):
        """Place the marker at the current gaze point; hide it when the
        tracker reports non-finite (invalid) coordinates."""
        gaze = self.tracker.read_gaze(log=False, apply_offsets=False)
        valid = np.isfinite(gaze).all()
        if valid:
            self.pos = gaze
        self.opacity = 1 if valid else 0
        super(GazeStim, self).draw()
|
#!/usr/bin/env python3
import os, sys, json, re, math, logging, traceback, pickle, hashlib
from lxml.etree import parse
from osgeo import gdal
import numpy as np
import isce
from iscesys.Component.ProductManager import ProductManager as PM
from isceobj.Orbit.Orbit import Orbit
from utils.time_utils import getTemporalSpanInDays
gdal.UseExceptions() # make GDAL raise python exceptions
log_format = "[%(asctime)s: %(levelname)s/%(funcName)s] %(message)s"
logging.basicConfig(format=log_format, level=logging.INFO)
logger = logging.getLogger('update_met_json')
SENSING_RE = re.compile(r'(S1-IFG_.*?_(\d{4})(\d{2})(\d{2})T(\d{2})(\d{2})(\d{2})-(\d{4})(\d{2})(\d{2})T(\d{2})(\d{2})(\d{2}).*?orb)')
MISSION_RE = re.compile(r'^S1(\w)$')
def get_raster_corner_coords(vrt_file):
    """Return raster corner coordinates as [lat, lon] pairs."""
    # go to directory where vrt exists to extract from image
    # (GDAL resolves relative source paths inside a VRT against the CWD)
    cwd =os.getcwd()
    data_dir = os.path.dirname(os.path.abspath(vrt_file))
    os.chdir(data_dir)
    # extract geo-coded corner coordinates
    ds = gdal.Open(os.path.basename(vrt_file))
    gt = ds.GetGeoTransform()  # affine transform: origin + pixel size/rotation
    cols = ds.RasterXSize
    rows = ds.RasterYSize
    ext = []
    lon_arr = [0, cols]
    lat_arr = [0, rows]
    for px in lon_arr:
        for py in lat_arr:
            # pixel/line -> georeferenced coordinates via the geotransform
            lon = gt[0] + (px * gt[1]) + (py * gt[2])
            lat = gt[3] + (px * gt[4]) + (py * gt[5])
            ext.append([lat, lon])
        # reversing the row order makes the four corners come out as a
        # ring rather than a zig-zag
        lat_arr.reverse()
    os.chdir(cwd)
    return ext
def load_product(int_file):
    """Load product from fine interferogram xml file."""
    manager = PM()
    manager.configure()
    return manager.loadProduct(int_file)
def get_orbit():
    """Return a freshly configured Orbit object."""
    orbit = Orbit()
    orbit.configure()
    return orbit
def get_aligned_bbox(prod, orb):
    """Return estimate of 4 corner coordinates of the
    track-aligned bbox of the product."""
    # create merged orbit: seed with the first burst's state vectors
    burst = prod.bursts[0]
    #Add first burst orbit to begin with
    for sv in burst.orbit:
        orb.addStateVector(sv)
    ##Add all state vectors from the remaining bursts, skipping any whose
    ##time already falls inside the merged orbit's span (avoids duplicates)
    for bb in prod.bursts:
        for sv in bb.orbit:
            if (sv.time< orb.minTime) or (sv.time > orb.maxTime):
                orb.addStateVector(sv)
        bb.orbit = orb  # every burst now shares the merged orbit
    # extract bbox: geolocate the four (time, slant-range) extremes at
    # height 0 via the orbit
    ts = [prod.sensingStart, prod.sensingStop]
    rngs = [prod.startingRange, prod.farRange]
    pos = []
    for tim in ts:
        for rng in rngs:
            llh = prod.orbit.rdr2geo(tim, rng, height=0.)
            pos.append(llh)
    pos = np.array(pos)
    # reorder corners into a ring and keep lat/lon only (drop height)
    bbox = pos[[0, 1, 3, 2], 0:2]
    return bbox.tolist()
def update_met_json(orbit_type, scene_count, swath_num, master_mission,
                    slave_mission, pickle_dir, int_file, vrt_file,
                    xml_file, json_file):
    """Write product metadata json.

    Gathers geocoded extents from the unw.geo XML, sensing times from the
    XML filename, polarization from the fine interferogram XML, baselines
    and IPF versions from the computeBaselines pickle, then merges it all
    into the existing metadata json (rewritten in place).
    """
    xml_file = os.path.abspath(xml_file)
    with open(xml_file) as f:
        doc = parse(f)
    # coordinate1 = longitude axis: size/start/delta give the lon extent
    coordinate1 = doc.xpath('.//component[@name="coordinate1"]')[0]
    width = float(coordinate1.xpath('.//property[@name="size"]/value')[0].text)
    startLon = float(coordinate1.xpath('.//property[@name="startingvalue"]/value')[0].text)
    deltaLon = float(coordinate1.xpath('.//property[@name="delta"]/value')[0].text)
    endLon = startLon + deltaLon*width
    # coordinate2 = latitude axis
    coordinate2 = doc.xpath('.//component[@name="coordinate2"]')[0]
    length = float(coordinate2.xpath('.//property[@name="size"]/value')[0].text)
    startLat = float(coordinate2.xpath('.//property[@name="startingvalue"]/value')[0].text)
    deltaLat = float(coordinate2.xpath('.//property[@name="delta"]/value')[0].text)
    endLat = startLat + deltaLat*length
    # deltas may be negative, so sort endpoints into min/max explicitly
    minLat = min(startLat,endLat)
    maxLat = max(startLat,endLat)
    minLon = min(startLon,endLon)
    maxLon = max(startLon,endLon)
    # sensing start/stop are encoded in the product filename
    match = SENSING_RE.search(xml_file)
    if not match:
        raise RuntimeError("Failed to extract sensing times: %s" % xml_file)
    archive_filename = match.groups()[0]
    sensing_start, sensing_stop = sorted(["%s-%s-%sT%s:%s:%s" % match.groups()[1:7],
                                          "%s-%s-%sT%s:%s:%s" % match.groups()[7:]])
    # get temporal_span
    temporal_span = getTemporalSpanInDays(sensing_stop, sensing_start)
    #get polarization from ifg xml (best effort: 'ERR' on any failure)
    try:
        fin = open(int_file, 'r')
        ifgxml = fin.read()
        fin.close()
        rslt = re.search(
            '<property name="polarization">[\s]*?<value>(HV|HH|VV|HH\+HV|VV\+VH)</value>', ifgxml, re.M)
        if rslt:
            polarization = rslt.group(1)
        else:
            logger.warn("Failed to get polarization from fine_interferogram.xml")
            polarization = 'ERR'
    except Exception as e:
        logger.warn("Failed to get polarization: %s" % traceback.format_exc())
        polarization = 'ERR'
    # load product and extract track-aligned bbox;
    # fall back to raster corner coords (not track aligned)
    try:
        prod = load_product(int_file)
        orb = get_orbit()
        bbox = get_aligned_bbox(prod, orb)
    except Exception as e:
        logger.warn("Failed to get aligned bbox: %s" % traceback.format_exc())
        logger.warn("Getting raster corner coords instead.")
        bbox = get_raster_corner_coords(vrt_file)
    #extract bperp and bpar from the computeBaselines catalog pickle
    cb_pkl = os.path.join(pickle_dir, "computeBaselines")
    with open(cb_pkl, 'rb') as f:
        catalog = pickle.load(f)
    bperp = catalog['baseline']['IW-{} Bperp at midrange for first common burst'.format(swath_num)]
    bpar = catalog['baseline']['IW-{} Bpar at midrange for first common burst'.format(swath_num)]
    ipf_version_master = catalog['master']['sensor']['processingsoftwareversion']
    ipf_version_slave = catalog['slave']['sensor']['processingsoftwareversion']
    # get mission char (the 'A'/'B' suffix of e.g. 'S1A')
    mis_char_master = MISSION_RE.search(master_mission).group(1)
    mis_char_slave = MISSION_RE.search(slave_mission).group(1)
    missions = [
        "Sentinel-1%s" % mis_char_master,
        "Sentinel-1%s" % mis_char_slave,
    ]
    # update metadata
    with open(json_file) as f:
        metadata = json.load(f)
    #update direction to ascending/descending (normalize asc/dsc shorthand)
    if 'direction' in list(metadata.keys()):
        direct = metadata['direction']
        if direct == 'asc':
            direct = 'ascending'
        elif direct == 'dsc':
            direct = 'descending'
        metadata['direction'] = direct
    metadata.update({
        "tiles": True,
        "tile_layers": [ "amplitude", "interferogram" ],
        "archive_filename": archive_filename,
        "spacecraftName": missions,
        "platform": missions,
        "sensor": "SAR-C Sentinel1",
        "sensingStart": sensing_start,
        "sensingStop": sensing_stop,
        "temporal_span": temporal_span,
        "inputFile": "sentinel.ini",
        "product_type": "interferogram",
        "orbit_type": orbit_type,
        "polarization": polarization,
        "scene_count": int(scene_count),
        "imageCorners":{
            "minLat":minLat,
            "maxLat":maxLat,
            "minLon":minLon,
            "maxLon":maxLon
        },
        "bbox": bbox,
        # ogr_bbox uses [lon, lat] ordering, hence the swap
        "ogr_bbox": [[x, y] for y, x in bbox],
        "swath": [int(swath_num)],
        "perpendicularBaseline": bperp,
        "parallelBaseline": bpar,
        "version": [ipf_version_master, ipf_version_slave],
        "beamMode": "IW",
        "sha224sum": hashlib.sha224(str.encode(os.path.basename(json_file))).hexdigest(),
    })
    # remove outdated fields
    if 'verticalBaseline' in metadata: del metadata['verticalBaseline']
    if 'horizontalBaseline' in metadata: del metadata['horizontalBaseline']
    if 'totalBaseline' in metadata: del metadata['totalBaseline']
    # remove orbit; breaks index into elasticsearch because of it's format
    if 'orbit' in metadata: del metadata['orbit']
    # write final file
    with open(json_file, 'w') as f:
        json.dump(metadata, f, indent=2)
if __name__ == "__main__":
    # CLI entry point: all ten positional arguments are required.
    if len(sys.argv) != 11:
        raise SystemExit("usage: %s <orbit type used> <scene count> <swath num> <master_mission> <slave_mission> <pickle dir> <fine int file> <vrt file> <unw.geo.xml file> <output json file>" % sys.argv[0])
    orbit_type = sys.argv[1]
    scene_count = sys.argv[2]
    swath_num = sys.argv[3]
    master_mission = sys.argv[4]
    slave_mission = sys.argv[5]
    pickle_dir = sys.argv[6]
    int_file = sys.argv[7]
    vrt_file = sys.argv[8]
    xml_file = sys.argv[9]
    json_file = sys.argv[10]
    update_met_json(orbit_type, scene_count, swath_num, master_mission,
                    slave_mission, pickle_dir, int_file, vrt_file,
                    xml_file, json_file)
|
"""OpenAPI2(Swagger) with Starlette
"""
import pytest
from starlette.applications import Starlette
from starlette.endpoints import HTTPEndpoint
from starlette.requests import Request
from starlette.responses import JSONResponse
from starlette.testclient import TestClient
from uvicorn import run
from apiman.starlette import Apiman
# Main app serves the spec/docs; sub_app is mounted onto it further down.
app = Starlette()
sub_app = Starlette()
apiman = Apiman(template="./examples/docs/cat_template.yml")
apiman.init_app(app)
# define data (in-memory "database" for the example)
CATS = {
    1: {"id": 1, "name": "DangDang", "age": 2},
    2: {"id": 2, "name": "DingDing", "age": 1},
}
# add schema definition
apiman.add_schema("cat_age", {"type": "integer", "minimum": 0, "maximum": 3000})
apiman.add_schema(
    "Cat",
    {
        "properties": {
            "id": {"description": "global unique", "type": "integer"},
            "name": {"type": "string"},
            # reuse the cat_age schema registered above
            "age": {"$ref": "#/definitions/cat_age"},
        },
        "type": "object",
    },
)
# define routes and schema(in doc string)
# Registered on the *main* app (the sub_app routes below are mounted later).
@app.route("/cat/{id}/")
class Cat(HTTPEndpoint):
    # NOTE(review): the docstrings of this endpoint are parsed by apiman as
    # OpenAPI 2 fragments -- their text is part of the served spec, so it
    # must not be reworded.
    """
    Declare multi method
    ---
    get:
      summary: Get single cat
      tags:
      - cat
      parameters:
      - name: id
        type: string
        in: path
        required: True
      - name: test_param1
        type: string
        in: header
        required: True
      - name: test_param2
        type: string
        in: query
        required: True
      responses:
        "200":
          description: OK
          schema:
            $ref: '#/definitions/Cat'
        "404":
          description: Not found
    """
    def get(self, req: Request):
        # Validate path/header/query params against the class docstring spec.
        apiman.validate_request(req)
        return JSONResponse(CATS[int(req.path_params["id"])])
    def delete(self, req: Request):
        """
        Declare single method
        ---
        summary: Delete single cat
        tags:
        - cat
        parameters:
        - name: id
          type: integer
          in: path
          required: True
        responses:
          "204":
            description: OK
            schema:
              $ref: '#/definitions/Cat'
          "404":
            description: Not found
        """
        # Remove the cat and echo it back (KeyError -> 500 if absent).
        cat = CATS.pop(int(req.path_params["id"]))
        return JSONResponse(cat)
# define doc by yaml or json file
# define doc by yaml or json file
@sub_app.route("/cats/", methods=["GET"])
@apiman.from_file("./examples/docs/cats_get.yml")
def list_cats(req: Request):
    # Spec comes from the YAML file above; the handler just dumps all cats.
    return JSONResponse(list(CATS.values()))
@sub_app.route("/cats/", methods=["POST"])
@apiman.from_file("./examples/docs/cats_post.json")
async def create_cat(req: Request):
    # Validate the JSON body against the spec loaded from file, then store.
    await apiman.async_validate_request(req)
    cat = await req.json()
    CATS[cat["id"]] = cat
    return JSONResponse(cat)
@sub_app.route("/cats_form/", methods=["POST"])
async def create_cat_by_form(req: Request):
    # NOTE(review): this docstring is parsed by apiman as the OpenAPI spec
    # for the route; its text must not be reworded.
    """
    summary: create cat by request form data
    tags:
    - cats
    parameters:
    - name: id
      type: string
      in: formData
      required: True
    - name: name
      type: string
      in: formData
      required: True
    - name: age
      type: string
      in: formData
      required: True
    responses:
      "200":
        description: OK
        schema:
          $ref: '#/definitions/Cat'
    """
    # Presumably primes Starlette's cached form body so the synchronous
    # validate_request below can read it -- TODO confirm.
    await req.form()
    apiman.validate_request(req)
    cat = dict(await req.form())
    # Form values arrive as strings; coerce the numeric fields.
    cat["id"] = int(cat["id"])
    cat["age"] = int(cat["age"])
    CATS[cat["id"]] = cat
    return JSONResponse(cat)
app.mount("/", sub_app)
def test_app():
    # End-to-end check: spec endpoints are served and request validation
    # accepts/rejects the right requests.
    client = TestClient(app)
    spec = apiman.load_specification(app)
    apiman.validate_specification()
    assert client.get(apiman.config["specification_url"]).json() == spec
    assert client.get(apiman.config["swagger_url"]).status_code == 200
    assert client.get(apiman.config["redoc_url"]).status_code == 200
    # -- missing required header/query params must fail validation
    with pytest.raises(Exception):
        client.get("/cat/1/")
    with pytest.raises(Exception):
        client.get("/cat/1/?test_param2=test")
    assert (
        client.get(
            "/cat/1/?test_param2=test", headers={"test_param1": "test"}
        ).status_code
        == 200
    )
    # -- body rejected (presumably a field required by cats_post.json is
    # missing -- the spec file is not visible here); form and full JSON pass
    with pytest.raises(Exception):
        client.post("/cats/", json={"name": "test", "id": 3})
    assert (
        client.post(
            "/cats_form/",
            data={"name": "test", "id": "3", "age": "4"},
            headers={"Content-Type": "application/x-www-form-urlencoded"},
        ).status_code
        == 200
    )
    assert (
        client.post("/cats/", json={"name": "test", "id": 3, "age": 4}).status_code
        == 200
    )
if __name__ == "__main__":
run(app)
|
def transform(dataset, rot_center=0, tune_rot_center=True):
    """Reconstruct sinograms using the tomopy gridrec algorithm

    Typically, a data exchange file would be loaded for this
    reconstruction. This operation will attempt to perform
    flat-field correction of the raw data using the dark and
    white background data found in the data exchange file.

    This operator also requires either the tomviz/tomopy-pipeline
    docker image, or a python environment with tomopy installed.

    Parameters: rot_center is the rotation center in pixels (0 = find it
    automatically); tune_rot_center refines a supplied center before use.
    Returns {'reconstruction': child_dataset}.
    """
    import numpy as np
    import tomopy
    # Get the current volume as a numpy array.
    array = dataset.active_scalars
    dark = dataset.dark
    white = dataset.white
    angles = dataset.tilt_angles
    tilt_axis = dataset.tilt_axis
    # TomoPy wants the tilt axis to be zero, so ensure that is true
    if tilt_axis == 2:
        order = [2, 1, 0]
        array = np.transpose(array, order)
        if dark is not None and white is not None:
            dark = np.transpose(dark, order)
            white = np.transpose(white, order)
    if angles is not None:
        # tomopy wants radians
        theta = np.radians(angles)
    else:
        # Assume it is equally spaced between 0 and 180 degrees
        theta = tomopy.angles(array.shape[0])
    # Perform flat-field correction of raw data
    if white is not None and dark is not None:
        array = tomopy.normalize(array, white, dark, cutoff=1.4)
    if rot_center == 0:
        # Try to find it automatically, starting the search mid-image
        init = array.shape[2] / 2.0
        rot_center = tomopy.find_center(array, theta, init=init, ind=0,
                                        tol=0.5)
    elif tune_rot_center:
        # Tune the center
        rot_center = tomopy.find_center(array, theta, init=rot_center, ind=0,
                                        tol=0.5)
    # Calculate -log(array)
    array = tomopy.minus_log(array)
    # Remove nan, neg, and inf values so gridrec only sees finite data
    array = tomopy.remove_nan(array, val=0.0)
    array = tomopy.remove_neg(array, val=0.00)
    array[np.where(array == np.inf)] = 0.00
    # Perform the reconstruction
    array = tomopy.recon(array, theta, center=rot_center, algorithm='gridrec')
    # Mask each reconstructed slice with a circle.
    array = tomopy.circ_mask(array, axis=0, ratio=0.95)
    # Set the transformed array on a child dataset and return it
    child = dataset.create_child_dataset()
    child.active_scalars = array
    return_values = {}
    return_values['reconstruction'] = child
    return return_values
|
import zss
class TreeDistanceComparator:
    """Compare two parsed trees with the Zhang-Shasha edit distance,
    using depth-weighted costs and an optional comparison-depth cutoff."""
    def __init__(self, tree1, tree2, maxComparisonDepth=None):
        self.__tree1 = tree1.root
        self.__tree2 = tree2.root
        self.__treeHeight = max(tree1.getMaxDepth(), tree2.getMaxDepth())
        self.__maxDepth = maxComparisonDepth
        # Lazily computed by compare().
        self.__treeEditDistance = None
        self.__treeEditOperations = None
    def __defaultComparator(self, node1, node2):
        """Cost over (label, depth) pairs; '' marks a missing node."""
        if node1 == '' and node2 != '':
            # Insert: cheaper the deeper the node sits in the tree.
            return 1 + self.__treeHeight - node2[1]
        if node1 != '' and node2 == '':
            # Remove: symmetric to insert.
            return 1 + self.__treeHeight - node1[1]
        label_a, depth_a = node1[0], node1[1]
        label_b, depth_b = node2[0], node2[1]
        depth_gap = max(depth_a, depth_b) - min(depth_a, depth_b)
        if label_a == label_b and depth_gap == 0:
            # Identical node: free update.
            return 0
        if depth_gap == 0 and ('X' in (label_a, label_b)):
            # Small cost to turn any node into a wildcard "X".
            return 1
        if label_a == 'X' or label_b == 'X':
            return 1 + 2 * depth_gap
        return 2 + 2 * depth_gap
    def __getLabel(self, node):
        # Root nodes get a fixed label pinned at depth 0.
        if node.__class__.__name__ == "RootNode":
            return ["Root", 0]
        return [node.getId(), node.getDepth()]
    def __getChildren(self, node):
        # Truncate traversal below the configured comparison depth.
        if (self.__maxDepth is not None) and (node.getDepth() >= self.__maxDepth):
            return []
        return node.children
    def compare(self, comparator=None):
        """Run zss.simple_distance and cache distance + edit operations."""
        cost_fn = self.__defaultComparator if comparator is None else comparator
        self.__treeEditDistance, self.__treeEditOperations = zss.simple_distance(
            self.__tree1, self.__tree2, self.__getChildren, self.__getLabel,
            cost_fn, return_operations=True
        )
    def getTreeEditDistance(self):
        if self.__treeEditDistance is None:
            self.compare()
        return self.__treeEditDistance
    def getTreeEditOperations(self):
        if self.__treeEditOperations is None:
            self.compare()
        return self.__treeEditOperations
|
# script to segregate training and validation data
import cv2
import os
def load_images_from_folder(folder):
    """Read every decodable image in `folder`.

    cv2.imread returns None for files it cannot decode; those entries
    are skipped, so the result only holds valid image arrays.
    """
    loaded = []
    for name in os.listdir(folder):
        image = cv2.imread(os.path.join(folder, name))
        if image is not None:
            loaded.append(image)
    return loaded
# Split the loaded "Sad" images: every 10th goes to validation, the rest
# to training.  Output filenames are offset (+4000 / +20000) so they do
# not clobber files already in the target folders.
image_list = load_images_from_folder("Sad")
c1=0  # validation count
c2=0  # train count
i=0
for i in range(len(image_list)):
    if(i%10==0):
        c1+=1
        path="/home/mohit/EmotionRecognition/data/validation/Sad"
        cv2.imwrite(os.path.join(path,'n'+str(i+4000)+'.jpg'),image_list[i])
    else:
        c2+=1
        path="/home/mohit/EmotionRecognition/data/train/Sad"
        cv2.imwrite(os.path.join(path,'n'+str(i+20000)+'.jpg'),image_list[i])
print(c1,c2)
import numpy as np
# Refer to magmom_handler.py for usage of variables defined here
# Atomic numbers of s/p-block metals (alkali + alkaline-earth columns).
spblock_metals = [3, 4, 11, 12, 19, 20, 37, 38, 55, 56, 87, 88]
# d-block (transition) metals: four rows of nine elements each.
dblock_metals = (
    np.arange(21, 30).tolist()
    + np.arange(39, 48).tolist()
    + np.arange(71, 80).tolist()
    + np.arange(103, 112).tolist()
)
# f-block metals: lanthanides and actinides.
fblock_metals = np.arange(57, 71).tolist() + np.arange(89, 103).tolist()
nonmetal_list = [1, 2, 6, 7, 8, 9, 10, 15, 16, 17, 18, 34, 35, 36, 53, 54, 86]
# Everything in Z = 1..118 that is not a nonmetal is treated as a metal.
# (np.arange is kept here so element types stay numpy ints, as before.)
metal_list = [val for val in np.arange(1, 119, 1) if val not in nonmetal_list]
# Metals that may carry a magnetic moment: everything but the s/p block.
mag_list = [metal for metal in metal_list if metal not in spblock_metals]
nomag_list = [val for val in np.arange(1, 119, 1) if val not in mag_list]
# "Poor" (post-transition) metals: metals outside the d-, f- and s/p-blocks.
poor_metals = [metal for metal in metal_list if metal not in dblock_metals + fblock_metals + spblock_metals]
|
#! /usr/bin/python
import time
import random
from sds import SDS, decode_mig_status
def main():
sds = SDS('sds')
# sds.capture(16)
if 1:
print "counts 0x%08x" % sds.read_soc_reg(0x212)
decode_mig_status(sds.read_soc_reg(0x211))
if 1:
print "Reset"
sds.write_soc_reg(0x200, 1)
print "ctrl 0x%08x" % sds.read_soc_reg(0x200)
sds.write_soc_reg(0x200, 0)
print "ctrl 0x%08x" % sds.read_soc_reg(0x200)
time.sleep(0.1)
print "ctrl 0x%08x" % sds.read_soc_reg(0x200)
print
decode_mig_status(sds.read_soc_reg(0x211))
n = 3
o = 10
if 1:
print "write to FIFO"
for i in range(n):
sds.write_soc_reg(0x218, 0xf00f0000 + i)
time.sleep(0.1)
print "counts 0x%08x" % sds.read_soc_reg(0x212)
decode_mig_status(sds.read_soc_reg(0x211))
print "write to DDR"
sds.write_soc_reg(0x210, o | ((n-1)<<24) | (0<<30))
time.sleep(0.1)
print "counts 0x%08x" % sds.read_soc_reg(0x212)
decode_mig_status(sds.read_soc_reg(0x211))
sds.write_ddr(20, [ 0xdeadbeef, 0xfeedf00f ])
n = 31
o = 0
if 1:
print "read from DDR"
sds.write_soc_reg(0x210, o | ((n-1)<<24) | (1<<30))
time.sleep(0.1)
print "counts 0x%08x" % sds.read_soc_reg(0x212)
decode_mig_status(sds.read_soc_reg(0x211))
print "read from FIFO"
for i in range(n):
print "rd %2d -> 0x%08x" % (i, sds.read_soc_reg(0x218))
time.sleep(0.1)
print "counts 0x%08x" % sds.read_soc_reg(0x212)
decode_mig_status(sds.read_soc_reg(0x211))
data = sds.read_ddr(0, 32)
for i in range(len(data)):
print "%2d -> 0x%08x" % (i, data[i])
n = 0x100
o = 0x100
wr_data = [ random.randrange(1<<32) for _ in range(n) ]
sds.write_ddr(o, wr_data)
rd_data = sds.read_ddr(o, n)
assert all(wr_data == rd_data)
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
"""Model unit tests."""
import datetime as dt
import pytest
from flask_server.models import Admin, Article, Module
from .factories import UserFactory, ArticleFactory
@pytest.mark.usefixtures('db')
class TestUser:
    """User tests."""
    def test_get_by_id(self):
        """Get user by ID."""
        user = Admin(username='foo', password='foo@bar.com')
        user.save()
        retrieved = Admin.query.get_or_404(user.id)
        assert retrieved == user
    def test_created_at_defaults_to_datetime(self):
        """Test creation date."""
        user = Admin(username='foo', password='foo@bar.com')
        user.save()
        # _created_at should be populated by the model on save
        assert bool(user._created_at)
        assert isinstance(user._created_at, dt.datetime)
    def test_password_is_nullable(self):
        """Test null password."""
        user = Admin(username='foo')
        user.save()
        assert user.password is None
    def test_factory(self, db):
        """Test user factory."""
        user1 = UserFactory(password='myprecious')
        user2 = UserFactory()
        db.session.commit()
        assert bool(user1.username)
        assert bool(user1.name)
        assert bool(user1.created_at)
        assert bool(user2.password)
        # the factory hashes the supplied plaintext password
        assert user1.verify_password('myprecious')
    def test_check_password(self):
        """Check password."""
        user = Admin.create(username='foo', name='foo@bar.com', password='foobarbaz123')
        assert user.verify_password('foobarbaz123') is True
        assert user.verify_password('barfoobaz') is False
@pytest.mark.usefixtures('db')
class TestArticle:
    """Article tests."""
    def test_get_by_id(self):
        """Get Article by ID."""
        article = Article(title='foo', content='somethingstrange')
        article.save()
        retrieved = Article.query.get_or_404(article.id)
        assert retrieved == article
    def test_factory(self, db):
        """Test Article factory."""
        # Factory should fill in every displayed field with a sane default.
        article = ArticleFactory()
        db.session.commit()
        assert bool(article.title)
        assert bool(article.content)
        assert isinstance(article.order, int)
        assert bool(article.thumb_pic)
@pytest.mark.usefixtures('db')
class TestModule:
    """Module tests."""
    def test_get_by_id(self):
        """Get Module by ID."""
        module = Module(title='foo', order=5, template_id='i_1')
        module.save()
        retrieved = Module.query.get_or_404(module.id)
        assert retrieved == module
    def test_add_article_with_module(self):
        """Add a module to an article."""
        module = Module(title='module_article')
        module.save()
        article = ArticleFactory()
        article.module = module
        article.save()
        # Assigning the relationship should populate the FK after save.
        assert isinstance(article.module_id, int)
|
import math, time
import o80
from handle_contacts import MUJOCO_ID, ROBOT_SEGMENT_ID, BALL_SEGMENT_ID
from handle_contacts import get_table_contact_handle
handle = get_table_contact_handle()
ball = handle.frontends[BALL_SEGMENT_ID]
def _distance(p1, p2):
return math.sqrt([(a - b) ** 2 for a, b in zip(p1, p2)])
def _velocity(p1, p2, duration):
return [(a - b) / duration for a, b in zip(p2, p1)]
def _dropping(start, end, duration):
    """Drop the simulated ball from ``start`` to ``end`` over ``duration`` seconds.

    Contacts are disabled while teleporting to the start position, then
    reset and re-enabled so the drop registers a clean contact event.
    """
    global handle, ball
    # deactivating contacts while repositioning the ball
    handle.deactivate_contact(BALL_SEGMENT_ID)
    # going to start position (zero velocity)
    ball.add_command(
        start, [0, 0, 0], o80.Duration_us.milliseconds(400), o80.Mode.QUEUE
    )
    ball.pulse_and_wait()
    # starting with clean contact
    handle.reset_contact(BALL_SEGMENT_ID)
    # reactivating contacts
    handle.activate_contact(BALL_SEGMENT_ID)
    ## going to end point at the average velocity for the trajectory
    velocity = _velocity(start, end, duration)
    ball.add_command(end, velocity, o80.Duration_us.seconds(duration), o80.Mode.QUEUE)
    ball.pulse_and_wait()
    ## breathing a bit between drops
    time.sleep(1)
# Drop the ball from several start positions toward one fixed end point.
starts = [
    (1.0, 1.5, 0.5),
    (1.5, 1.5, 0.5),
    (0.5, 1.5, 0.5),
    (1.1, 0.5, 0.5),
    (0.0, 0.3, 0.5),
]
for start in starts:
    duration = 1
    end = (1.1, 0.5, -1.0)
    _dropping(start, end, duration)
|
from . import style_reader, lists
def read_options(options):
    """Normalise a conversion-options dict in place and return it.

    ``ignore_empty_paragraphs`` defaults to True, and ``style_map`` is
    replaced by its parsed form, with the default mappings appended
    unless ``include_default_style_map`` is falsy.
    """
    custom_style_map = _read_style_map(options.get("style_map") or "")
    if options.get("include_default_style_map", True):
        defaults = _default_style_map
    else:
        defaults = []
    options["ignore_empty_paragraphs"] = options.get("ignore_empty_paragraphs", True)
    options["style_map"] = custom_style_map + defaults
    return options
def _read_style_map(style_text):
    """Parse newline-separated style-map text into style objects.

    Blank lines and '#' comment lines are dropped before parsing.
    """
    usable = filter(None, (_get_line(raw) for raw in style_text.split("\n")))
    return lists.map(style_reader.read_style, usable)
def _get_line(line):
line = line.strip()
if line.startswith("#"):
return None
else:
return line
_default_style_map = _read_style_map("""
p[style-name='Normal'] => p:fresh
p.Heading1 => h1:fresh
p.Heading2 => h2:fresh
p.Heading3 => h3:fresh
p.Heading4 => h4:fresh
p[style-name='Heading 1'] => h1:fresh
p[style-name='Heading 2'] => h2:fresh
p[style-name='Heading 3'] => h3:fresh
p[style-name='Heading 4'] => h4:fresh
p[style-name='heading 1'] => h1:fresh
p[style-name='heading 2'] => h2:fresh
p[style-name='heading 3'] => h3:fresh
p[style-name='heading 4'] => h4:fresh
p[style-name='heading 4'] => h4:fresh
p[style-name='footnote text'] => p
r[style-name='footnote reference'] =>
p[style-name='endnote text'] => p
r[style-name='endnote reference'] =>
# LibreOffice
p[style-name='Footnote'] => p
r[style-name='Footnote anchor'] =>
p[style-name='Endnote'] => p
r[style-name='Endnote anchor'] =>
p:unordered-list(1) => ul > li:fresh
p:unordered-list(2) => ul|ol > li > ul > li:fresh
p:unordered-list(3) => ul|ol > li > ul|ol > li > ul > li:fresh
p:unordered-list(4) => ul|ol > li > ul|ol > li > ul|ol > li > ul > li:fresh
p:unordered-list(5) => ul|ol > li > ul|ol > li > ul|ol > li > ul|ol > li > ul > li:fresh
p:ordered-list(1) => ol > li:fresh
p:ordered-list(2) => ul|ol > li > ol > li:fresh
p:ordered-list(3) => ul|ol > li > ul|ol > li > ol > li:fresh
p:ordered-list(4) => ul|ol > li > ul|ol > li > ul|ol > li > ol > li:fresh
p:ordered-list(5) => ul|ol > li > ul|ol > li > ul|ol > li > ul|ol > li > ol > li:fresh
""")
|
from functools import wraps
from flask import request
from flask.ext import restful
from . import api
from .errors import BadRequest
from .jwt import jwt_required
class Resource(restful.Resource):
    """Base API resource: every HTTP handler requires a valid JWT."""
    # Flask-RESTful applies these to each get/post/... method.
    method_decorators = [jwt_required]
def link(rel, resource, method="GET", **kwargs):
href = api.url_for(resource, **kwargs)
return {"rel": rel, "href": href, "method": method}
class Link(object):
    """A single named hyperlink to a resource, guarded by a condition."""

    def __init__(self, name, resource, condition, method):
        self.name = name
        self.resource = resource
        self.condition = condition
        self.method = method

    def to_url(self, args):
        """Resolve the resource URL with the given keyword arguments."""
        return api.url_for(self.resource, **args)

    def to_dict(self, args):
        """Serialise the link as a {rel, href, method} mapping."""
        return {
            "rel": self.name,
            "href": self.to_url(args),
            "method": self.method,
        }

    def should_be_added(self, resource):
        """Return whether the condition accepts this resource item."""
        return self.condition(resource)
class Links(object):
    """Decorator that appends a ``_links`` section to returned dict items."""

    def __init__(self, condition=lambda x: True, **args):
        self.links = []
        self.to_pass = args  # link-arg name -> key looked up in the item
        self.condition = condition

    def add(self, name, condition=None, method="GET"):
        """Class decorator registering ``cls`` as the target of link ``name``."""
        chosen = self.condition if condition is None else condition

        def wrapper(cls):
            self.links.append(Link(name, cls, chosen, method))
            return cls

        return wrapper

    def process(self, thing):
        """Attach matching links when ``thing`` is a plain dict; else pass through."""
        if type(thing) is not dict:
            return thing
        args = {dest: thing.get(src) for dest, src in self.to_pass.items()}
        thing["_links"] = [
            lk.to_dict(args) for lk in self.links if lk.should_be_added(thing)
        ]
        return thing

    def __call__(self, func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            ret = func(*args, **kwargs)
            if type(ret) is list:
                return [self.process(item) for item in ret]
            return self.process(ret)

        return wrapper
|
#!/usr/bin/python
# 2020, @oottppxx
import os
import re
import sys
import zlib
VERSION='202006141136'
# Default TFTP server used in generated STARTUP boot lines (arg 3 overrides).
TFTPD='192.168.0.50'
# 3726MiB max across partitions.
# for alignment, 7632895 works better?!
MAX_LBA=7634910
# GPT label given to the recovery kernel partition.
RECO_KLABEL='kernel'
# Input image file name; prefixed with the directory given as arg 1.
IMG='disk.img'
# Scratch directory where intermediate images are created and mounted.
TMP_PARTY='/mnt/hdd/party'
NEW_IMG=os.path.join(TMP_PARTY, 'new_disk.img')
# Pieces extracted from the stock image.
TMP_GPT=os.path.join(TMP_PARTY, 'gpt')
TMP_BOOT=os.path.join(TMP_PARTY, 'boot')
TMP_KERNEL=os.path.join(TMP_PARTY, 'kernel')
TMP_ROOTFS=os.path.join(TMP_PARTY, 'rootfs')
# Mount points (mboot/mreco/mrootfs) and the copied STARTUP file.
TMP_MBOOT=os.path.join(TMP_PARTY, 'mboot')
TMP_STARTUP=os.path.join(TMP_PARTY, 'startup')
TMP_MRECO=os.path.join(TMP_PARTY, 'mreco')
TMP_MROOTFS=os.path.join(TMP_PARTY, 'mrootfs')
# Per-partition image files assembled into the new disk image.
NEW_GPT=os.path.join(TMP_PARTY, 'new_gpt')
NEW_BOOT=os.path.join(TMP_PARTY, 'new_boot')
NEW_KERNEL=os.path.join(TMP_PARTY, 'new_kernel')
NEW_ROOTFS=os.path.join(TMP_PARTY, 'new_rootfs')
NEW_SWAP=os.path.join(TMP_PARTY, 'new_swap')
NEW_RECO=os.path.join(TMP_PARTY, 'new_reco')
NEW_KRECO=os.path.join(TMP_PARTY, 'new_kreco')
# Boxmode option scraped from the original STARTUP boot line.
BOXMODE=''
BOXMODE_RE='^.*(?P<boxmode> [^ ]*)\'$'
# Template for generated STARTUP_<n> boot lines.
BLINE=('ifconfig eth0 -auto && batch %(TFTPD)s:TFTPBOOT\n'
       'testenv -n INT && '
       'boot emmcflash0.%(LABELINDEX)s \'brcm_cma=440M@328M brcm_cma=192M@768M'
       ' root=/dev/mmcblk0p%(ROOTINDEX)s'
       ' rootsubdir=linuxrootfs%(SUBDIRINDEX)s'
       ' kernel=/dev/mmcblk0p%(KERNELINDEX)s'
       ' rw rootwait%(BOXMODE)s\'\n')
###
# Parser state: which partition kinds have been seen, and counters.
PBOOT=False
PRECO=False
PKRECO=False
PLINUXROOTFS=False
PUSERDATA=False
PSWAP=False
KNUM=0
FSNUM=0
# Stores tuples of (label, size in MiB).
GPT=[]
# Stores file names to be concatenated as to create the new image.
FILES=[]
# GPT partition type GUIDs (mixed-endian on-disk byte order).
BOOT_GUID='\xa2\xa0\xd0\xeb\xe5\xb9\x33\x44\x87\xc0\x68\xb6\xb7\x26\x99\xc7'
LINUX_GUID='\xaf\x3d\xc6\x0f\x83\x84\x72\x47\x8e\x79\x3d\x69\xd8\x47\x7d\xe4'
SWAP_GUID='\x6d\xfd\x57\x06\xab\xa4\xc4\x43\x84\xe5\x09\x33\xc8\x4b\x4f\x4f'
# 15 bytes only: create_gpt() appends chr(index) as the 16th byte to make
# each partition's unique GUID distinct.
UNIQUE_GUID='\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
ZERO_4='\x00\x00\x00\x00'
ZERO_8='\x00\x00\x00\x00\x00\x00\x00\x00'
# Exit codes.
OK=0
E_ARGTOOFEW=1
E_ARGTOOMANY=2
E_READDISK=3
E_PARTYMKDIR=4
E_GPTWRITE=5
E_BOOTWRITE=6
E_KERNELWRITE=7
E_ROOTFSWRITE=8
E_LOOPMOUNT=9
E_COPYSTARTUP=10
E_UMOUNT=11
E_READSTARTUP=12
E_NEWBOOT=13
E_NEWRECO=14
E_NEWKERNEL=15
E_NEWROOTFS=16
E_NEWSWAP=17
E_ARGSCHEME=18
E_NOBOOT=19
E_NOKERNEL=20
E_NOFS=21
E_TOOMANY=22
E_CREATESTARTUP=23
E_EXCEEDLBA=24
E_CREATEGPT=25
E_CREATEIMAGE=26
E_PATCH=254
def number(s):
    """Consume the leading decimal digits of ``s``.

    Returns (value, remainder): the integer value of the digit prefix
    (0 when there is none) and the unconsumed tail of the string.
    """
    value = 0
    rest = s
    while rest and '0' <= rest[0] <= '9':
        value = value * 10 + (ord(rest[0]) - ord('0'))
        rest = rest[1:]
    return value, rest
def boot(s):
    """Consume a 'b<size>' token: create and register the FAT boot partition.

    Creates a zeroed <size>-MiB image, formats it FAT and mounts it at
    TMP_MBOOT for the later STARTUP-file generation.  Returns (err, rest);
    on err=True the second element is an error message instead of the
    remaining layout string.
    """
    global GPT
    global FILES
    global PBOOT
    label='boot'
    if PBOOT:
        return True, 'only 1 boot partition allowed!'
#    if len(GPT):
#        return True, 'boot partition must be the 1st partition exactly!'
    size, s = number(s[1:])
    if not size:
        return True, 'boot partition requires a size > 0!'
    bs = 2048*512  # 1 MiB blocks, so count == size in MiB
    count = size
    try:
        error = os.system('dd if=/dev/zero of=%s bs=%d count=%d && mkfs.fat %s && mount %s %s' % (
            NEW_BOOT, bs, count, NEW_BOOT, NEW_BOOT, TMP_MBOOT))
    except:
        error = -1
    if error:
        print 'Error: can\'t create %s file!' % NEW_BOOT
        sys.exit(E_NEWBOOT)
    GPT.append((label, size))
    FILES.append(NEW_BOOT)
    print '%s(%d)' % (label, size)
    PBOOT = True
    return False, s
def unpatch_63():
    """Undo patch_63(): move linuxrootfs1/* back to the rootfs top level,
    leaving a compatibility symlink linuxrootfs1 -> /."""
    try:
        error = os.system('cd %s && mv linuxrootfs1/* . && rmdir linuxrootfs1 && ln -sf / linuxrootfs1' % TMP_MROOTFS)
    except:
        error = -1
    if error:
        print 'Error: can\'t unpatch 6.3 rootfs in %s!' % TMP_MROOTFS
        sys.exit(E_PATCH)
def recovery(s):
    """Consume an 'r<size>' token: build the recovery rootfs partition.

    Must be exactly the 2nd partition and precede any kernel partition.
    Creates an ext4 image, mounts it and copies a minimal subset of the
    stock rootfs into it (core directories plus selected /usr pieces),
    switching inittab to runlevel 5.  Returns (err, rest).
    """
    global GPT
    global FILES
    global PRECO
    label='recovery'
    if PRECO:
        return True, 'only 1 recovery partition allowed!'
    if KNUM > 0:
        return True, 'recovery partition must come before any kernel partition!'
    if len(GPT) != 1:
        return True, 'recovery partition must be the 2nd partition exactly!'
    size, s = number(s[1:])
    if not size:
        return True, 'recovery partition requires a size > 0!'
    bs = 2048*512  # 1 MiB blocks
    count = size
    try:
        error = os.system('umount -f %s ; dd if=/dev/zero of=%s bs=%d count=%d && mkfs.ext4 %s && mount %s %s' % (
            TMP_MRECO, NEW_RECO, bs, count, NEW_RECO, NEW_RECO, TMP_MRECO))
    except:
        error = -1
    if error:
        print 'Error: can\'t create %s file!' % NEW_RECO
        sys.exit(E_NEWRECO)
    # Loop-mount the extracted stock rootfs as the copy source.
    try:
        error = os.system('umount -f %s ; mount %s %s' % (TMP_MROOTFS, TMP_ROOTFS, TMP_MROOTFS))
    except:
        error = -1
    if error:
        print 'Error: can\'t loop mount %s file in the %s directory!' % (TMP_ROOTFS, TMP_MROOTFS)
        sys.exit(E_LOOPMOUNT)
    # Populate the recovery fs from the (patched, see patch_63) rootfs.
    try:
        error = os.system('cd %s/linuxrootfs1 && cp -aRf bin boot dev etc home lib proc run sbin sys tmp var %s' % (
            TMP_MROOTFS, TMP_MRECO))
        error += os.system('mkdir -p %s/usr && cd %s/linuxrootfs1/usr && cp -aRf bin sbin libexec %s/usr' % (
            TMP_MRECO, TMP_MROOTFS, TMP_MRECO))
        error += os.system('mkdir -p %s/usr/share && cd %s/linuxrootfs1/usr/share && cp -aRf udhcpc %s/usr/share' % (
            TMP_MRECO, TMP_MROOTFS, TMP_MRECO))
        error += os.system('mkdir -p %s/usr/lib && cp -af %s/linuxrootfs1/usr/lib/lib* %s/usr/lib' % (
            TMP_MRECO, TMP_MROOTFS, TMP_MRECO))
        # Boot the recovery system into runlevel 5 instead of 3.
        error += os.system('sed -i -e "s/id:3/id:5/" %s/etc/inittab' % TMP_MRECO)
    except:
        error = -1
    if error:
        print 'Error: can\'t create recovery rootfs in %s!' % TMP_MRECO
        sys.exit(E_NEWRECO)
    unpatch_63()
    try:
        error = os.system('cd / ; umount %s ; umount %s' % (TMP_MROOTFS, TMP_MRECO))
    except:
        error = -1
    if error:
        print 'Error: can\'t unmount %s or %s!' % (TMP_MROOTFS, TMP_MRECO)
        sys.exit(E_UMOUNT)
    GPT.append((label, size))
    FILES.append(NEW_RECO)
    PRECO = True
    return False, s
def rkernel(s):
    """Consume a 'K<size>' token: the recovery kernel partition.

    When no regular kernel exists yet (KNUM == 0) the stock kernel image
    is copied in; otherwise a zero-filled image is produced.
    NOTE(review): on the KNUM > 0 branch the local ``label``/``kernel``
    names are renamed, yet FILES always receives NEW_KRECO -- this looks
    like copy/paste from kernel(); confirm which file is intended.
    """
    global GPT
    global FILES
    global PKRECO
    label = RECO_KLABEL
    kernel = NEW_KRECO
    if PKRECO:
        return True, 'only 1 recovery kernel partition allowed!'
    size, s = number(s[1:])
    if not size:
        return True, 'kernel partition requires a size > 0!'
    bs = 2048*512  # 1 MiB blocks
    count = size
    try:
        error = 0
        if not KNUM:
            error = os.system('dd if=%s of=%s bs=%d count=%d' % (TMP_KERNEL, kernel, bs, count))
        else:
            label = '%s%d' % (label, KNUM+1)
            kernel = '%s%d' % (kernel, KNUM+1)
            # Skip creation once boot + data partitions already fix the layout.
            if not (PBOOT and bool(FSNUM)):
                error = os.system('dd if=/dev/zero of=%s bs=%d count=%d' % (kernel, bs, count))
    except:
        error = -1
    if error:
        print 'Error: can\'t create %s file!' % kernel
        sys.exit(E_NEWKERNEL)
    print '%s(%d)' % (label, size)
    GPT.append((label, size))
    FILES.append(NEW_KRECO)
    PKRECO = True
    return False, s
def kernel(s):
    """Consume a 'k<size>' token: a linuxkernel partition.

    The first kernel receives the stock kernel image; later ones get
    zero-filled images and numbered labels (linuxkernel2, ...).  If a
    recovery partition exists but no recovery kernel file was made, a
    shadow RECO_KLABEL GPT entry of the same size is appended so the
    recovery entry shares the first kernel's location (see create_gpt).
    """
    global GPT
    global FILES
    global KNUM
    label = 'linuxkernel'
    kernel = NEW_KERNEL
    size, s = number(s[1:])
    if not size:
        return True, 'kernel partition requires a size > 0!'
    bs = 2048*512  # 1 MiB blocks
    count = size
    try:
        error = 0
        if not KNUM:
            error = os.system('dd if=%s of=%s bs=%d count=%d' % (TMP_KERNEL, kernel, bs, count))
        else:
            label = '%s%d' % (label, KNUM+1)
            kernel = '%s%d' % (kernel, KNUM+1)
            # Skip creation once boot + data partitions already fix the layout.
            if not (PBOOT and bool(FSNUM)):
                error = os.system('dd if=/dev/zero of=%s bs=%d count=%d' % (kernel, bs, count))
    except:
        error = -1
    if error:
        print 'Error: can\'t create %s file!' % kernel
        sys.exit(E_NEWKERNEL)
    print '%s(%d)' % (label, size)
    GPT.append((label, size))
    if PRECO and not bool(KNUM) and not PKRECO:
        # Shadow recovery-kernel entry sharing the first kernel's range.
        GPT.append((RECO_KLABEL, size))
    if not (PBOOT and bool(KNUM) and bool(FSNUM)):
        FILES.append(kernel)
    KNUM += 1
    return False, s
def data(s, l):
    """Shared handler for the data-partition tokens ('l'/'u'), label ``l``.

    The first data partition receives the stock rootfs image; a second
    one is zero-filled into a '<rootfs>2' file.  Image creation is
    skipped when boot, kernel and a data partition already exist.
    Returns (err, rest).
    """
    global GPT
    global FILES
    global FSNUM
    label = l
    rootfs = NEW_ROOTFS
    size, s = number(s[1:])
    if not size:
        return True, 'rootfs partition requires a size > 0!'
    bs = 2048*512  # 1 MiB blocks
    count = size
    try:
        error = 0
        if not FSNUM:
            error = os.system('dd if=%s of=%s bs=%d count=%d' % (TMP_ROOTFS, rootfs, bs, count))
            pass
        else:
            if not (PBOOT and bool(KNUM)):
                rootfs = '%s2' % rootfs
                error = os.system('dd if=/dev/zero of=%s bs=%d count=%d' % (rootfs, bs, count))
    except:
        error = -1
    if error:
        print 'Error: can\'t create %s file!' % rootfs
        sys.exit(E_NEWROOTFS)
    print '%s(%d)' % (label, size)
    GPT.append((label, size))
    if not (PBOOT and bool(KNUM) and bool(FSNUM)):
        FILES.append(rootfs)
    FSNUM += 1
    return False, s
def linuxrootfs(s):
    """Consume an 'l<size>' token: the single linuxrootfs data partition."""
    global PLINUXROOTFS
    if PLINUXROOTFS:
        return True, 'only 1 linuxrootfs partition allowed!'
    PLINUXROOTFS = True
    return data(s, 'linuxrootfs')
def userdata(s):
    """Consume a 'u<size>' token: the single userdata data partition."""
    global PUSERDATA
    if PUSERDATA:
        return True, 'only 1 userdata partition allowed!'
    PUSERDATA = True
    return data(s, 'userdata')
def swap(s):
    """Consume an 's<size>' token: a zero-filled swap partition.

    Creation is skipped when boot, kernel and a data partition already
    exist (the image layout is fixed).  Returns (err, rest).
    """
    global GPT
    global FILES
    global PSWAP
    if PSWAP:
        return True, 'only 1 swap partition allowed!'
    label = 'swap'
    swap = NEW_SWAP
    size, s = number(s[1:])
    if not size:
        return True, 'swap partition requires a size > 0!'
    bs = 2048*512  # 1 MiB blocks
    count = size
    try:
        error = 0
        if not (PBOOT and bool(KNUM) and bool(FSNUM)):
            error = os.system('dd if=/dev/zero of=%s bs=%d count=%d' % (swap, bs, count))
    except:
        error = -1
    if error:
        print 'Error: can\'t create %s file!' % swap
        sys.exit(E_NEWSWAP)
    print '%s(%d)' % (label, size)
    GPT.append((label, size))
    if not (PBOOT and bool(KNUM) and bool(FSNUM)):
        FILES.append(swap)
    PSWAP = True
    return False, s
def error(s):
    """Fallback handler for unrecognised partition type codes."""
    message = '%s unknown partition type, aborting...' % s[0]
    return (True, message)
# Decimal digit lookup used by number() to parse partition sizes.
NUMBER = {str(digit): digit for digit in range(10)}
# Dispatch table: layout-string type code -> partition handler.
FUN={
    'b': boot,
    'r': recovery,
    'K': rkernel,
    'k': kernel,
    'l': linuxrootfs,
    'u': userdata,
    's': swap,
}
def partition(s):
    """Parse the layout string, dispatching each type code through FUN."""
    while s:
        f = FUN.get(s[0], error)
        err, s = f(s)
        if err:
            # On error the handler returns a message in place of the rest.
            print 'Error: %s' % s
            sys.exit(E_ARGSCHEME)
def patch_63():
    """Adapt a 6.3-layout rootfs image to the multiboot layout.

    Pads the extracted rootfs image with zeros, remounts it, then moves
    its contents into a linuxrootfs1/ subdirectory (keeping lost+found
    at the top level).  Reversed later by unpatch_63().
    """
    try:
        error = os.system('dd if=/dev/zero of=%s/zeropadfs bs=29360128 count=8' % TMP_PARTY)
        error += os.system('cat %s %s/zeropadfs > %s/zprootfs && mv %s/zprootfs %s' % (
            TMP_ROOTFS, TMP_PARTY, TMP_PARTY, TMP_PARTY, TMP_ROOTFS))
        error += os.system('umount -f %s ; mount %s %s' % (TMP_MROOTFS, TMP_ROOTFS, TMP_MROOTFS))
        error += os.system('cd %s && mkdir linuxrootfs1 && mv * linuxrootfs1 ; mv linuxrootfs1/lost+found .' % TMP_MROOTFS)
    except:
        error = -1
    if error:
        print 'Error: can\'t patch 6.3 rootfs in %s!' % TMP_MROOTFS
        sys.exit(E_PATCH)
def create_party():
    """Split the stock disk image into its parts under TMP_PARTY.

    Stock image layout in 1-MiB blocks: block 0 GPT, blocks 1-3 boot
    (FAT), blocks 4-11 kernel, rootfs from 12 MiB onward.  Also extracts
    the STARTUP file from the boot partition for get_boxmode().
    """
    try:
        error = os.system('mkdir -p %s && mkdir -p %s && mkdir -p %s && mkdir -p %s' % (
            TMP_PARTY, TMP_MBOOT, TMP_MRECO, TMP_MROOTFS))
    except:
        error = -1
    if error:
        print 'Error: can\'t create %s directory!' % TMP_PARTY
        sys.exit(E_PARTYMKDIR)
    bs = 2048*512  # 1 MiB blocks
    skip = 0
    count = 1
    # GPT: the first block.
    try:
        error = os.system('dd if=%s of=%s bs=%d skip=%d count=%d' % (IMG, TMP_GPT, bs, skip, count))
    except:
        error = -1
    if error:
        print 'Error: can\'t create %s file from %s!' % (TMP_GPT, IMG)
        sys.exit(E_GPTWRITE)
    skip = 1
    count = 3
    # Boot partition: blocks 1-3.
    try:
        error = os.system('dd if=%s of=%s bs=%d skip=%d count=%d' % (IMG, TMP_BOOT, bs, skip, count))
    except:
        error = -1
    if error:
        print 'Error: can\'t create %s file from %s!' % (TMP_BOOT, IMG)
        sys.exit(E_BOOTWRITE)
    skip = 4
    count = 8
    # Kernel: blocks 4-11.
    try:
        error = os.system('dd if=%s of=%s bs=%d skip=%d count=%d' % (IMG, TMP_KERNEL, bs, skip, count))
    except:
        error = -1
    if error:
        print 'Error: can\'t create %s file from %s!' % (TMP_KERNEL, IMG)
        sys.exit(E_KERNELWRITE)
    bs = 12*2048*512
    skip = 1
    # Rootfs: everything after the first 12 MiB.
    try:
        error = os.system('dd if=%s of=%s bs=%d skip=%d' % (IMG, TMP_ROOTFS, bs, skip))
    except:
        error = -1
    if error:
        print 'Error: can\'t create %s file from %s!' % (TMP_ROOTFS, IMG)
        sys.exit(E_ROOTFSWRITE)
    patch_63()
    # Mount the boot FAT image read-only and copy out the STARTUP file.
    try:
        error = os.system('umount -f %s ; mount -t vfat,ro,loop %s %s' % (TMP_MBOOT, TMP_BOOT, TMP_MBOOT))
    except:
        error = -1
    if error:
        print 'Error: can\'t loop mount %s file in the %s directory!' % (TMP_BOOT, TMP_MBOOT)
        sys.exit(E_LOOPMOUNT)
    try:
        error = os.system('cp %s %s' % (os.path.join(TMP_MBOOT, 'STARTUP'), TMP_STARTUP))
    except:
        error = -1
    if error:
        print 'Error: can\'t copy STARTUP file in the %s directory to %s!' % (TMP_MBOOT, TMP_STARTUP)
        sys.exit(E_COPYSTARTUP)
    try:
        error = os.system('umount %s' % TMP_MBOOT)
    except:
        error = -1
    if error:
        print 'Error: can\'t unmount %s!' % TMP_MBOOT
        sys.exit(E_UMOUNT)
def get_boxmode():
    """Extract the trailing boxmode option from the copied STARTUP boot line.

    Sets the global BOXMODE (used verbatim in generated boot lines);
    missing boxmode is only a warning.
    """
    global BOXMODE
    try:
        with open(TMP_STARTUP, 'r') as startup:
            boot_line = startup.readlines()[0]
    except:
        print 'Error: can\'t read %s!' % TMP_STARTUP
        sys.exit(E_READSTARTUP)
    m = re.match(BOXMODE_RE, boot_line)
    if m:
        BOXMODE = m.group('boxmode')
        print 'Found boxmode: %s' % BOXMODE
    else:
        print 'Warning: couldn\'t find boxmode in boot line!'
def create_startup():
    """Write STARTUP_<n> boot files onto the boot partition.

    For each linuxkernel entry in GPT a STARTUP_<n> file is generated
    whose boot line points at that kernel partition and at the
    linuxrootfs (image 1) or userdata partition, with
    rootsubdir=linuxrootfs<n>.  STARTUP_1 also becomes the default
    STARTUP, after which the boot partition is synced and unmounted.
    """
    linuxrootfs_index = 0
    userdata_index = 0
    partition_index = 0
    kernels = []
    labels = []
    # Locate the data partitions and every kernel (1-based GPT indexes).
    for l, _ in GPT:
        partition_index += 1
        if 'linuxrootfs' in l:
            linuxrootfs_index = partition_index
        if 'userdata' in l:
            userdata_index = partition_index
        if 'linuxkernel' in l:
            kernels.append(partition_index)
            labels.append(l)
    if not (linuxrootfs_index or userdata_index):
        print 'Error: at least 1 linuxrootfs or userdata partition is required!'
        sys.exit(E_NOFS)
    label_index = 0
    for k in kernels:
        label = str(labels[label_index])
        label_index += 1
        subdir_index = label_index
        kernel_index = k
        # Image 1 boots from linuxrootfs; the rest use userdata if present.
        if (label_index == 1 and linuxrootfs_index) or not userdata_index:
            root_index = linuxrootfs_index
        else:
            root_index = userdata_index
        startup_file = os.path.join(TMP_MBOOT, 'STARTUP_%d' % subdir_index)
        try:
            with open(startup_file, 'w') as f:
                line = BLINE % {'TFTPD': TFTPD,
                                'LABELINDEX': label,
                                'ROOTINDEX': root_index,
                                'SUBDIRINDEX': subdir_index,
                                'KERNELINDEX': kernel_index,
                                'BOXMODE': BOXMODE}
                f.writelines([line])
        except:
            print 'Error: couldn\'t create file %s!' % startup_file
            sys.exit(E_CREATESTARTUP)
    # STARTUP_1 doubles as the default STARTUP.
    startup_file = os.path.join(TMP_MBOOT, 'STARTUP')
    startup_1_file = os.path.join(TMP_MBOOT, 'STARTUP_1')
    try:
        error = os.system('cp %s %s' % (startup_1_file, startup_file))
    except:
        error = -1
    if error:
        print 'Error: couldn\'t create file %s!' % startup_file
        sys.exit(E_CREATESTARTUP)
    try:
        error = os.system('sync && umount %s' % TMP_MBOOT)
    except:
        error = -1
    if error:
        print 'Error: couldn\'t unmount %s!' % TMP_MBOOT
        sys.exit(E_UMOUNT)
def le32(x):
    """Pack the low 32 bits of ``x`` as a 4-character little-endian string."""
    return ''.join(chr((x >> shift) & 0xff) for shift in (0, 8, 16, 24))
def lba_block(first_lba, s):
    """Encode the GPT first/last LBA pair for an ``s``-MiB partition.

    ``s`` MiB == s*2048 512-byte sectors.  Returns the 16-character
    little-endian encoding of the two 64-bit LBAs, plus the first LBA
    available to the next partition.
    """
    sectors = s * 2048
    next_lba = first_lba + sectors
    last_lba = next_lba - 1
    encoded = (
        le32(first_lba & 0xffffffff)
        + le32((first_lba >> 32) & 0xffffffff)
        + le32(last_lba & 0xffffffff)
        + le32((last_lba >> 32) & 0xffffffff)
    )
    return encoded, next_lba
def label_block(label):
    """Encode a partition label as the fixed 72-byte GPT name field.

    Python 2 semantics: encode('utf-16') prepends a 2-byte BOM, stripped
    by the [2:] slice, leaving UTF-16LE; the result is NUL-padded to 72
    bytes (36 UTF-16 code units max).
    """
    if len(label) > 36:
        print 'Warning: label %s too big, truncating to 36 characters!' % label
    ulabel = label[:36].encode('utf-16')[2:]
    pad = 72-len(ulabel)
    return ulabel+pad*'\x00'
def fix_gpt_crc(gpt):
    """Recompute and patch the GPT partition-entry and header CRC32s.

    Python 2's zlib.crc32 may return a signed value, so negatives are
    normalised to unsigned.  The entries CRC (over 0x400-0x4400) is
    stored little-endian at header offset 0x258; the header CRC field at
    0x210 is zeroed before its own CRC (over 0x200-0x25c) is computed
    and written back.
    """
    pte_crc = zlib.crc32(gpt[0x400:0x4400])
    if pte_crc < 0:
        pte_crc = 0x100000000+pte_crc
    pc4 = chr((pte_crc & 0xff000000) >> 24)
    pc3 = chr((pte_crc & 0x00ff0000) >> 16)
    pc2 = chr((pte_crc & 0x0000ff00) >> 8)
    pc1 = chr((pte_crc & 0x000000ff))
    gpt = gpt[:0x258]+pc1+pc2+pc3+pc4+gpt[0x25c:]
    # Header CRC is computed with its own field zeroed.
    gpt = gpt[:0x210]+ZERO_4+gpt[0x214:]
    head_crc = zlib.crc32(gpt[0x200:0x25c])
    if head_crc < 0:
        head_crc = 0x100000000+head_crc
    hc4 = chr((head_crc & 0xff000000) >> 24)
    hc3 = chr((head_crc & 0x00ff0000) >> 16)
    hc2 = chr((head_crc & 0x0000ff00) >> 8)
    hc1 = chr((head_crc & 0x000000ff))
    return gpt[:0x210]+hc1+hc2+hc3+hc4+gpt[0x214:]
def create_gpt():
    """Rebuild the GPT from the collected (label, size) list into NEW_GPT.

    Keeps the original first 0x400 bytes (protective MBR + header) and
    appends one 128-byte entry per GPT item: a type GUID chosen by
    label, a unique GUID derived from the entry index, and sequential
    LBA ranges starting at LBA 2048.  A recovery-kernel entry with no
    backing file (not PKRECO) shares the LBA range of the following
    partition.  The blob is padded to 1 MiB, CRCs fixed, and NEW_GPT is
    prepended to FILES.
    """
    global FILES
    try:
        with open(TMP_GPT, 'rb') as f:
            gpt = f.read(0x400)
        with open(NEW_GPT, 'wb') as f:
            previous_first_lba = 0
            first_lba = 2048
            index = 0
            for l, s in GPT:
                # Pick the partition type GUID from the label.
                if 'recovery' == l or RECO_KLABEL == l or 'linuxrootfs' in l or 'userdata' == l or 'linuxkernel' in l:
                    guid = LINUX_GUID
                if 'boot' == l:
                    guid = BOOT_GUID
                if 'swap' == l:
                    guid = SWAP_GUID
                # Advance the LBA window, except for a shadow recovery-kernel
                # entry (which reuses the previous window).
                if not RECO_KLABEL == l or PKRECO:
                    previous_first_lba = first_lba
                print '>>> %s %d %d %d' % (l, s, previous_first_lba, first_lba)
                lba, first_lba = lba_block(previous_first_lba, s)
                print '<<< %s %d %d %d' % (l, s, previous_first_lba, first_lba)
                if first_lba > MAX_LBA:
                    print 'Error: your partition layout exceeds the available space!'
                    sys.exit(E_EXCEEDLBA)
                label = label_block(l)
                index += 1
                # 16B type GUID + 15B zeros + index byte (unique GUID) +
                # 16B LBA range + 8B attributes (zero) + 72B name.
                gpt += (guid+UNIQUE_GUID+chr(index)+lba+ZERO_8+label)
            pad = 2048*512-len(gpt)
            gpt += (pad*'\x00')
            f.write(fix_gpt_crc(gpt))
    except:
        print 'Error: can\'t generate new GPT!'
        sys.exit(E_CREATEGPT)
    FILES.insert(0, NEW_GPT)
def create_image():
sources = ''
for f in FILES:
sources += ' %s' % f
try:
error = os.system('cat %s > %s' % (sources, NEW_IMG))
except:
error = -1
if error:
print 'Error: can\'t generate new image file %s!'
sys.exit(E_CREATEIMAGE)
###
if len(sys.argv) < 3:
print 'I need a path to disk.img and a partition layout string!'
print 'Optionally, you can also indicate the TFTP server address'
sys.exit(E_ARGTOOFEW)
if len(sys.argv) > 4:
print 'Too many arguments!'
sys.exit(E_ARGTOOMANY)
if len(sys.argv) > 3:
TFTPD=sys.argv[3]
IMG = os.path.join(sys.argv[1], IMG)
create_party()
get_boxmode()
partition(sys.argv[2])
if not PBOOT:
print 'Error: boot partition is required!'
sys.exit(E_NOBOOT)
if not KNUM:
print 'Error: at least 1 kernel partition is required!'
sys.exit(E_NOKERNEL)
if not FSNUM:
print 'Error: at least 1 linuxrootfs or userdata partition is required!'
sys.exit(E_NOFS)
if len(NEW_GPT) > 128:
print 'Error: too many partitions, max is 128!'
sys.exit(E_TOOMANY)
print GPT
print FILES
create_startup()
create_gpt()
create_image()
print 'Ok'
sys.exit(OK)
|
from django.urls import path
from . import views
urlpatterns = [
    # localhost:8000/ -> greeting index view
    path('',views.index,name='El saludo'),
    # /<n1>/<n2> -> addition view
    path('<int:numero1>/<int:numero2>',views.Sumando,name='sumando'),
]
# Copyright (c) 2021, VRAI Labs and/or its affiliates. All rights reserved.
#
# This software is licensed under the Apache License, Version 2.0 (the
# "License") as published by the Apache Software Foundation.
#
# You may not use this file except in compliance with the License. You may
# obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from typing import Any, Dict, Union, List
from supertokens_python.async_to_sync_wrapper import sync
from ..types import User
def create_email_verification_token(user_id: str, user_context: Union[None, Dict[str, Any]] = None):
    """Blocking wrapper around the thirdparty asyncio implementation."""
    from supertokens_python.recipe.thirdparty.asyncio import (
        create_email_verification_token as _impl,
    )
    return sync(_impl(user_id, user_context))
def verify_email_using_token(token: str, user_context: Union[None, Dict[str, Any]] = None):
    """Blocking wrapper around the thirdparty asyncio implementation."""
    from supertokens_python.recipe.thirdparty.asyncio import (
        verify_email_using_token as _impl,
    )
    return sync(_impl(token, user_context))
def is_email_verified(user_id: str, user_context: Union[None, Dict[str, Any]] = None):
    """Blocking wrapper around the thirdparty asyncio implementation."""
    from supertokens_python.recipe.thirdparty.asyncio import (
        is_email_verified as _impl,
    )
    return sync(_impl(user_id, user_context))
def unverify_email(user_id: str, user_context: Union[None, Dict[str, Any]] = None):
    """Blocking wrapper around the thirdparty asyncio implementation."""
    from supertokens_python.recipe.thirdparty.asyncio import (
        unverify_email as _impl,
    )
    return sync(_impl(user_id, user_context))
def revoke_email_verification_tokens(user_id: str, user_context: Union[None, Dict[str, Any]] = None):
    """Blocking wrapper around the thirdparty asyncio implementation.

    Fix: this was declared ``async def`` in a module of synchronous
    wrappers, so callers received a coroutine instead of the result;
    declared plain ``def`` like every sibling wrapper here.
    """
    from supertokens_python.recipe.thirdparty.asyncio import \
        revoke_email_verification_tokens
    return sync(revoke_email_verification_tokens(user_id, user_context))
def get_user_by_id(user_id: str, user_context: Union[None, Dict[str, Any]] = None) -> Union[User, None]:
    """Blocking wrapper around the thirdparty asyncio implementation."""
    from supertokens_python.recipe.thirdparty.asyncio import (
        get_user_by_id as _impl,
    )
    return sync(_impl(user_id, user_context))
def get_users_by_email(email: str, user_context: Union[None, Dict[str, Any]] = None) -> List[User]:
    """Blocking wrapper around the thirdparty asyncio implementation.

    Fix: this was declared ``async def`` in a module of synchronous
    wrappers, so callers received a coroutine instead of the user list;
    declared plain ``def`` like every sibling wrapper here.
    """
    from supertokens_python.recipe.thirdparty.asyncio import get_users_by_email
    return sync(get_users_by_email(email, user_context))
def get_user_by_third_party_info(
        third_party_id: str, third_party_user_id: str, user_context: Union[None, Dict[str, Any]] = None):
    """Blocking wrapper around the thirdparty asyncio implementation."""
    from supertokens_python.recipe.thirdparty.asyncio import (
        get_user_by_third_party_info as _impl,
    )
    return sync(_impl(third_party_id, third_party_user_id, user_context))
def sign_in_up(third_party_id: str, third_party_user_id: str,
               email: str, email_verified: bool, user_context: Union[None, Dict[str, Any]] = None):
    """Blocking wrapper around the thirdparty asyncio implementation."""
    from supertokens_python.recipe.thirdparty.asyncio import (
        sign_in_up as _impl,
    )
    return sync(_impl(third_party_id, third_party_user_id, email,
                      email_verified, user_context))
|
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import cv2
import numpy as np
def draw_keypoints(kps, verbose=False):
    """Plot a 16-joint MPII-style skeleton with matplotlib.

    kps: sequence of 16 keypoints, each indexable as (x, y, ...).
    Coordinates are negated before plotting.  When ``verbose`` is set,
    each joint is annotated with its name.  Calls plt.show().
    """
    skeleton_edges = [
        (0, 1), (1, 2), (2, 6), (6, 3), (3, 4), (4, 5), (6, 7), (7, 8),
        (8, 9), (10, 11), (11, 12), (12, 7), (7, 13), (13, 14), (14, 15),
    ]
    joint_names = {
        0: 'right_ankle', 1: 'right_knee', 2: 'right_hip', 3: 'left_hip',
        4: 'left_knee', 5: 'left_ankle', 6: 'pelvis', 7: 'thorax',
        8: 'upper_neck', 9: 'head_top', 10: 'right_wrist',
        11: 'right_elbow', 12: 'right_shoulder', 13: 'left_shoulder',
        14: 'left_elbow', 15: 'left_wrist',
    }
    for idx, joint in enumerate(kps):
        x = joint[0] * (-1)
        y = joint[1] * (-1)
        plt.scatter(x, y, s=10, c='red', marker='o', label=idx)
        if verbose:
            plt.annotate(joint_names[idx], (x, y))
    for a, b in skeleton_edges:
        xs = [kps[a][0] * (-1), kps[b][0] * (-1)]
        ys = [kps[a][1] * (-1), kps[b][1] * (-1)]
        plt.plot(xs, ys)
    plt.show()
|
from dictfetcher import DictFetcherMixin
# Nested fixture data for the tests below; note the literal "x.y" key
# used to exercise dotted-key handling.
d = {
    "a": {
        "b": {
            "abc": "what",
            "aba": "the",
            "aa": "kkk",
            "a": "lala"
        },
        "a": "somevalue",
        "c": {
            "x.y": "except",
            "x": "bug",
            "y": "ssss"
        },
        "g": "deep",
    },
    "b": "to",
    "c": {
        "d": {
            "f": {
                "g": "1234"
            }
        }
    }
}
class DictFetcher(dict, DictFetcherMixin):
    """dict augmented with the mixin's path-expression lookup."""
    pass


# Shared fetcher instance used by every test case below.
df = DictFetcher(d)
def test_simple_dot_expression():
    """Dotted paths drill into nested dicts; results keep the full path as key."""
    assert df["c.d.f.g"]["c.d.f.g"] == "1234"
def test_simple_dot_expty_expression():
    """A path whose middle segment is missing yields an empty result."""
    assert df["c.a.f.g"] == {}
def test_simple_star_expression():
    """'*' in the last segment matches any key suffix."""
    assert is_dict_same(df["a.b.ab*"], {"a.b.abc": "what", "a.b.aba": "the"})
def test_question_mark_expression():
    """'?' matches exactly one level with any key name."""
    assert is_dict_same(df["a.?.x"], {"a.c.x": "bug"})
def test_char_choose_expression():
    """'[chars]' matches a single character from the given set."""
    assert is_dict_same(df["a.b.ab[abc]"], {"a.b.abc": "what", "a.b.aba": "the"})
    assert is_dict_same(df["a.b.ab[ab]"], {"a.b.aba": "the"})
def test_key_has_dot_expression():
    """A literal 'x.y' key is NOT reachable via the dotted-string form."""
    assert is_dict_same(df["a.c.x.y"], {})
def test_list_expression():
    """A tuple of segments escapes dots, reaching the literal 'x.y' key."""
    assert is_dict_same(df[("a", "c", "x.y")], {"a.c.x.y": "except"})
def test_empty_result():
    """An entirely unknown key yields an empty result."""
    assert df["not"] == {}
def is_dict_same(d1, d2):
    """Return True when two mappings have the same keys AND values.

    Fix: the original compared ``sorted(d1) == sorted(d2)``, which sorts
    only the *keys*, so dicts with equal keys but different values were
    reported as "same".  Compare full contents instead.
    """
    return dict(d1) == dict(d2)
|
from libra_client.canoser import Struct, RustEnum
from libra_client.lbrtypes.ledger_info import LedgerInfoWithSignatures
from libra_client.lbrtypes.transaction import TransactionWithProof, TransactionListWithProof
from libra_client.lbrtypes.account_state_blob import AccountStateWithProof
from libra_client.lbrtypes.contract_event import EventWithProof
from libra_client.lbrtypes.epoch_change import EpochChangeProof
from libra_client.lbrtypes.proof.definition import AccumulatorConsistencyProof
from libra_client.lbrtypes.trusted_state import TrustedState
class GetAccountTransactionBySequenceNumber(Struct):
    """Response payload: one account transaction plus a proof of the
    account's current sequence number."""
    _fields = [
        ("transaction_with_proof", TransactionWithProof),
        ("proof_of_current_sequence_number", AccountStateWithProof)
    ]
class GetAccountState(Struct):
    """Response payload: an account state blob with its proof."""
    _fields = [
        ("account_state_with_proof", AccountStateWithProof)
    ]
    @classmethod
    def from_proto(cls, proto):
        """Build this struct from its protobuf counterpart."""
        ret = cls()
        ret.account_state_with_proof = AccountStateWithProof.from_proto(proto.account_state_with_proof)
        return ret
class GetEventsByEventAccessPath(Struct):
    """Response payload: proved events plus a proof of the latest event."""
    _fields = [
        ("events_with_proof", [EventWithProof]),
        ("proof_of_latest_event", AccountStateWithProof)
    ]
class GetTransactions(Struct):
    """Response payload: a proved list of transactions."""
    _fields = [
        ("txn_list_with_proof", TransactionListWithProof)
    ]
class ResponseItem(RustEnum):
    """Tagged union of the per-request response payload types."""
    _enums = [
        ("get_account_transaction_by_sequence_number_response", GetAccountTransactionBySequenceNumber),
        ("get_account_state_response", GetAccountState),
        ("get_events_by_event_access_path_response", GetEventsByEventAccessPath),
        ("get_transactions_response", GetTransactions)
    ]
    @classmethod
    def from_proto(cls, proto):
        """Select the variant matching whichever oneof field is set.

        Returns None when no known field is present.
        """
        if proto.HasField("get_account_transaction_by_sequence_number_response"):
            return ResponseItem("get_account_transaction_by_sequence_number_response", GetAccountTransactionBySequenceNumber.from_proto(proto.get_account_transaction_by_sequence_number_response))
        if proto.HasField("get_account_state_response"):
            return ResponseItem("get_account_state_response", GetAccountState.from_proto(proto.get_account_state_response))
        if proto.HasField("get_events_by_event_access_path_response"):
            return ResponseItem("get_events_by_event_access_path_response", GetEventsByEventAccessPath.from_proto(proto.get_events_by_event_access_path_response))
        if proto.HasField("get_transactions_response"):
            return ResponseItem("get_transactions_response", GetTransactions.from_proto(proto.get_transactions_response))
class UpdateToLatestLedgerResponse(Struct):
    """Top-level RPC response: per-request items plus ledger proofs."""
    _fields = [
        ("response_items", [ResponseItem]),
        ("ledger_info_with_sigs", LedgerInfoWithSignatures),
        ("epoch_change_proof", EpochChangeProof),
        ("ledger_consistency_proof", AccumulatorConsistencyProof)
    ]
    @classmethod
    def from_proto(cls, proto):
        """Build from protobuf; the signed ledger info arrives serialized."""
        ret = cls()
        ret.response_items = [ResponseItem.from_proto(item) for item in proto.response_items]
        ret.ledger_info_with_sigs = LedgerInfoWithSignatures.deserialize(proto.ledger_info_with_sigs.bytes)
        ret.epoch_change_proof = EpochChangeProof.from_proto(proto.epoch_change_proof)
        ret.ledger_consistency_proof = AccumulatorConsistencyProof.from_proto(proto.ledger_consistency_proof)
        return ret
    # NOTE(review): in the flattened source this import and method follow the
    # class body; they are rendered as class members here because
    # get_ledger_info reads self.ledger_info_with_sigs -- confirm placement.
    from libra_client.lbrtypes.ledger_info import LedgerInfo
    def get_ledger_info(self) -> LedgerInfo:
        """Return the LedgerInfo contained in the signed ledger info."""
        return self.ledger_info_with_sigs.get_ledger_info()
|
# -*- coding: utf-8 -*-
"""
Microsoft-Windows-Shell-Search-UriHandler
GUID : 606c6fe0-a9dc-4a9d-bdea-830aff6716e7
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("606c6fe0-a9dc-4a9d-bdea-830aff6716e7"), event_id=101, version=0)
class Microsoft_Windows_Shell_Search_UriHandler_101_0(Etw):
    # Event 101: payload is a single URI string.
    pattern = Struct(
        "Uri" / WString
    )
@declare(guid=guid("606c6fe0-a9dc-4a9d-bdea-830aff6716e7"), event_id=102, version=0)
class Microsoft_Windows_Shell_Search_UriHandler_102_0(Etw):
    # Event 102: URI plus a 32-bit result code.
    pattern = Struct(
        "Uri" / WString,
        "Result" / Int32ul
    )
@declare(guid=guid("606c6fe0-a9dc-4a9d-bdea-830aff6716e7"), event_id=201, version=0)
class Microsoft_Windows_Shell_Search_UriHandler_201_0(Etw):
    # Event 201: payload is a single URI string.
    pattern = Struct(
        "Uri" / WString
    )
@declare(guid=guid("606c6fe0-a9dc-4a9d-bdea-830aff6716e7"), event_id=202, version=0)
class Microsoft_Windows_Shell_Search_UriHandler_202_0(Etw):
    # Event 202: payload is a URI scheme string.
    pattern = Struct(
        "Scheme" / WString
    )
@declare(guid=guid("606c6fe0-a9dc-4a9d-bdea-830aff6716e7"), event_id=203, version=0)
class Microsoft_Windows_Shell_Search_UriHandler_203_0(Etw):
    # Event 203: payload is a search-view string.
    pattern = Struct(
        "SearchView" / WString
    )
@declare(guid=guid("606c6fe0-a9dc-4a9d-bdea-830aff6716e7"), event_id=204, version=0)
class Microsoft_Windows_Shell_Search_UriHandler_204_0(Etw):
    # Event 204: URI plus a 32-bit result code.
    pattern = Struct(
        "Uri" / WString,
        "Result" / Int32ul
    )
@declare(guid=guid("606c6fe0-a9dc-4a9d-bdea-830aff6716e7"), event_id=301, version=0)
class Microsoft_Windows_Shell_Search_UriHandler_301_0(Etw):
    # Event 301: payload is a single parameter name.
    pattern = Struct(
        "Parameter" / WString
    )
@declare(guid=guid("606c6fe0-a9dc-4a9d-bdea-830aff6716e7"), event_id=302, version=0)
class Microsoft_Windows_Shell_Search_UriHandler_302_0(Etw):
    # Event 302: parameter name and its value.
    pattern = Struct(
        "Parameter" / WString,
        "ParameterValue" / WString
    )
@declare(guid=guid("606c6fe0-a9dc-4a9d-bdea-830aff6716e7"), event_id=303, version=0)
class Microsoft_Windows_Shell_Search_UriHandler_303_0(Etw):
    # Event 303: parameter name and its value (same shape as 302).
    pattern = Struct(
        "Parameter" / WString,
        "ParameterValue" / WString
    )
@declare(guid=guid("606c6fe0-a9dc-4a9d-bdea-830aff6716e7"), event_id=304, version=0)
class Microsoft_Windows_Shell_Search_UriHandler_304_0(Etw):
    # Event 304: parameter, value, and a 32-bit result code.
    pattern = Struct(
        "Parameter" / WString,
        "Value" / WString,
        "Result" / Int32ul
    )
@declare(guid=guid("606c6fe0-a9dc-4a9d-bdea-830aff6716e7"), event_id=305, version=0)
class Microsoft_Windows_Shell_Search_UriHandler_305_0(Etw):
    # Event 305: parameter with old and new values.
    pattern = Struct(
        "Parameter" / WString,
        "OldValue" / WString,
        "NewValue" / WString
    )
@declare(guid=guid("606c6fe0-a9dc-4a9d-bdea-830aff6716e7"), event_id=306, version=0)
class Microsoft_Windows_Shell_Search_UriHandler_306_0(Etw):
pattern = Struct(
"Parameter" / WString,
"Result" / Int32ul
)
@declare(guid=guid("606c6fe0-a9dc-4a9d-bdea-830aff6716e7"), event_id=402, version=0)
class Microsoft_Windows_Shell_Search_UriHandler_402_0(Etw):
pattern = Struct(
"Result" / Int32ul
)
|
import json
from channels.generic.websocket import WebsocketConsumer
from asgiref.sync import async_to_sync
from backend.views import check_is_admin
from backend.models import Event
class ImportConsumer(WebsocketConsumer):
    """Websocket consumer that relays import-progress messages for an event.

    Clients connect at a URL carrying an ``event_id``; only admins of that
    event may join.  All sockets for the same event share one channel-layer
    group named ``event_<id>_import`` so a broadcast reaches every admin.
    """

    def connect(self):
        user = self.scope["user"]
        event_id = self.scope['url_route']['kwargs']['event_id']
        try:
            event = Event.objects.get(pk=event_id)
        except Exception:
            # Unknown or malformed event id: refuse the connection and STOP.
            # Bug fix: the original fell through after close(), referencing
            # the unbound ``event`` and joining the group anyway.
            self.close()
            return
        if not check_is_admin(user, event):
            self.close()
            return
        self.group_name = 'event_%s_import' % event_id
        # Join group
        async_to_sync(self.channel_layer.group_add)(
            self.group_name,
            self.channel_name
        )
        self.accept()

    def disconnect(self, close_code):
        # Leave group.  ``group_name`` is only set when connect() succeeded,
        # so guard against connections that were rejected early.
        if hasattr(self, 'group_name'):
            async_to_sync(self.channel_layer.group_discard)(
                self.group_name,
                self.channel_name
            )

    # Receive message from room group and forward it to the WebSocket client.
    def import_message(self, event):
        message = event['message']
        # Send message to WebSocket
        self.send(text_data=json.dumps(message))
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from .data_controller import *
from .get_data_controller import *
from .get_postgres_instance import *
from .get_sql_managed_instance import *
from .get_sql_server import *
from .get_sql_server_instance import *
from .get_sql_server_registration import *
from .postgres_instance import *
from .sql_managed_instance import *
from .sql_server import *
from .sql_server_instance import *
from .sql_server_registration import *
from ._inputs import *
from . import outputs
|
"""
Check id() contra cmp()
"""
import support
try:
a={[1,2]:3}
if not a.has_key([1,2]):
raise support.TestError("Lists hash inconsistently")
except TypeError, e:
pass
else:
raise support.TestError("Should raise a TypeError")
|
import numpy as np
import open3d as o3d
import time
from utils.utils import prepare_dataset, draw_registration_result
def ransac_registration(source_down: o3d.geometry.PointCloud, target_down: o3d.geometry.PointCloud,
                        source_fpfh: o3d.pipelines.registration.Feature,
                        target_fpfh: o3d.pipelines.registration.Feature, voxel_size: np.float64) -> \
        o3d.pipelines.registration.RegistrationResult:
    '''
    Global RANSAC registration of downsampled clouds via FPFH feature matching.

    (Translated from the original Russian comment:)
    RANSACConvergenceCriteria defines the convergence criterion: RANSAC stops
    when the iteration count reaches max_iteration or validation has run
    max_validation times.  Validation is the most computationally expensive
    step and dominates the algorithm's run time.
    '''
    # Liberal correspondence cutoff: 1.5x the downsampling voxel size.
    distance_threshold = voxel_size * 1.5
    print(":: RANSAC registration on downsampled point clouds.")
    print(" Since the downsampling voxel size is %.3f," % voxel_size)
    print(" we use a liberal distance threshold %.3f." % distance_threshold)
    # NOTE(review): PointToPlane estimation is unusual for feature-based
    # RANSAC (PointToPoint is the common choice) -- confirm it is intended.
    result = o3d.pipelines.registration.registration_ransac_based_on_feature_matching(
        source_down, target_down, source_fpfh, target_fpfh, distance_threshold,
        o3d.pipelines.registration.TransformationEstimationPointToPlane(),
        20, [
            o3d.pipelines.registration.CorrespondenceCheckerBasedOnEdgeLength(
                2),
            o3d.pipelines.registration.CorrespondenceCheckerBasedOnDistance(
                voxel_size)  # (distance_threshold)
        ], o3d.pipelines.registration.RANSACConvergenceCriteria(4000000, 500))  # (4000000, 500)
    return result
def fast_ransac_registration(source_down: o3d.geometry.PointCloud,
target_down: o3d.geometry.PointCloud,
source_fpfh: o3d.pipelines.registration.Feature,
target_fpfh: o3d.pipelines.registration.Feature,
voxel_size: np.float64) -> \
o3d.pipelines.registration.RegistrationResult:
distance_threshold = voxel_size * 1.5
print(":: Apply fast global registration with distance threshold %.3f" \
% distance_threshold)
result = o3d.pipelines.registration.registration_fast_based_on_feature_matching(
source_down, target_down, source_fpfh, target_fpfh,
o3d.pipelines.registration.FastGlobalRegistrationOption(
maximum_correspondence_distance=distance_threshold
)
)
return result
def icp_point_to_point_registration(source, target, voxel_size, result_ransac):
    """Refine a coarse RANSAC alignment with ICP on the original clouds.

    Uses a strict correspondence threshold (0.4x voxel size) and the RANSAC
    transformation as the initial guess.
    NOTE(review): despite the function name, the estimation method used is
    TransformationEstimationPointToPlane (point-to-PLANE, which matches the
    printed banner) -- confirm which ICP variant is intended.
    """
    distance_threshold = voxel_size * 0.4
    print(":: Point-to-plane ICP registration is applied on original point")
    print(" clouds to refine the alignment. This time we use a strict")
    print(" distance threshold %.3f." % distance_threshold)
    result = o3d.pipelines.registration.registration_icp(
        source, target, distance_threshold, result_ransac.transformation,
        o3d.pipelines.registration.TransformationEstimationPointToPlane())
    return result
def demo_ransac_registration():
    """Demo pipeline: global RANSAC alignment followed by ICP refinement.

    Loads the demo dataset, runs feature-based RANSAC on the downsampled
    clouds, then refines on the full-resolution clouds with ICP, drawing the
    result after each stage.
    """
    print("Demo for manual RANSAC")
    voxel_size = np.float64(0.2)
    # visualization no. 1 happens inside prepare_dataset (translated: "отрисовка 1")
    source, target, source_down, target_down, source_fpfh, target_fpfh, source_s = prepare_dataset(voxel_size)
    source.paint_uniform_color([1, 0.706, 0])
    target.paint_uniform_color([0, 0.651, 0.929])
    start = time.time()
    # result_fast = fast_ransac_registration(source_down, target_down,
    #                                        source_fpfh, target_fpfh,
    #                                        voxel_size)
    # print("Fast global registration took %.3f sec.\n" % (time.time() - start))
    # print(result_fast)
    # draw_registration_result(source_down, target_down, result_fast.transformation)
    print("3. RANSAC registration.")
    result_ransac = ransac_registration(source_down, target_down, source_fpfh, target_fpfh, voxel_size)
    print("::RANSAC registration took %.3f sec.\n" % (time.time() - start))
    print(result_ransac, '\n')
    # visualization no. 2: coarse RANSAC alignment (translated: "отрисовка 2")
    draw_registration_result(source_down, target_down, result_ransac.transformation)
    result_icp = icp_point_to_point_registration(source, target, voxel_size, result_ransac)
    print(result_icp)
    # visualization no. 3: refined ICP alignment (translated: "отрисовка 3")
    draw_registration_result(source_s, target, result_icp.transformation)


if __name__ == "__main__":
    demo_ransac_registration()
|
# Copyright 2019 Regents of the University of Minnesota.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import signal
import subprocess
import grpc
import pytest
def pytest_configure(config):
    """Register the custom ``consul`` and ``integration`` pytest markers."""
    for marker in ("markers", "consul"), ("markers", "integration"):
        config.addinivalue_line(*marker)
def pytest_addoption(parser):
    """Add the opt-in ``--consul`` and ``--integration`` command-line flags."""
    specs = (
        ("--consul", "runs integration tests that require consul"),
        ("--integration", "runs integration tests"),
    )
    for flag, help_text in specs:
        parser.addoption(flag, action="store_true", default=False, help=help_text)
def pytest_collection_modifyitems(config, items):
    """Skip marker-gated tests unless their opt-in flag was supplied.

    Tests marked ``consul`` run only when ``--consul`` was passed; tests
    marked ``integration`` run only with ``--integration``.  All others are
    unaffected.
    """
    if not config.getoption("--consul"):
        skip_consul = pytest.mark.skip(reason="need --consul option to run")
        for item in items:
            if "consul" in item.keywords:
                item.add_marker(skip_consul)
    if not config.getoption("--integration"):
        skip_integration = pytest.mark.skip("need --integration option to run")
        for item in items:
            if "integration" in item.keywords:
                item.add_marker(skip_integration)
@pytest.fixture(name='processor_watcher')
def fixture_processor_watcher():
    """Provide a helper that waits for a processor subprocess to serve gRPC.

    The returned ``func`` is a generator function: it verifies the subprocess
    is still alive, blocks until its gRPC endpoint at ``address`` is ready
    (20 s timeout), yields the address, and on teardown sends SIGINT to the
    process and prints its captured output.
    NOTE(review): callers must drive the generator (e.g. wrap it as a fixture
    or context manager) for the finally-block cleanup to run -- confirm at
    call sites.
    """
    def func(address, process):
        try:
            # Fail fast if the subprocess already exited.
            if process.returncode is not None:
                raise ValueError('subprocess terminated')
            with grpc.insecure_channel(address, [('grpc.enable_http_proxy', False)]) as channel:
                future = grpc.channel_ready_future(channel)
                future.result(timeout=20)
            yield address
        finally:
            # Always attempt to stop the processor and surface its output.
            process.send_signal(signal.SIGINT)
            try:
                stdout, _ = process.communicate(timeout=1)
                print("processor exited with code: ", process.returncode)
                print(stdout.decode('utf-8'))
            except subprocess.TimeoutExpired:
                print("timed out waiting for processor to terminate")
    return func
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import argparse
import time
import cv2
import torch
from utils.config import opt
from imageio import imread
from model.rpn.bbox_transform import clip_boxes
from utils.dataset import CusDataset
from torchvision.ops import nms
from model.rpn.bbox_transform import bbox_transform_inv
from model.utils.net_utils import save_net, load_net, vis_detections
from model.faster_rcnn.vgg16 import VGG16
from utils import torch_utils
# specify visible GPUs (restricts CUDA to device 0 for this process)
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
def parse_args() -> argparse.Namespace:
    """
    Parse input arguments for the Faster R-CNN demo.

    Returns an argparse.Namespace with dataset/config/net selection, model
    load and image/save directories, CUDA and multi-GPU switches, checkpoint
    identifiers, batch size and visualization options.
    """
    parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')
    parser.add_argument('--dataset', dest='dataset',
                        help='training dataset',
                        default='pascal_voc', type=str)
    parser.add_argument('--cfg', dest='cfg_file',
                        help='optional config file',
                        default='cfgs/vgg16.yml', type=str)
    parser.add_argument('--net', dest='net',
                        help='vgg16, res50, res101, res152',
                        default='vgg16', type=str)
    parser.add_argument('--set', dest='set_cfgs',
                        help='set config keys', default=None,
                        nargs=argparse.REMAINDER)
    parser.add_argument('--load_dir', dest='load_dir',
                        help='directory to load models',
                        default="output")
    parser.add_argument('--image_dir', dest='image_dir',
                        help='directory to load images for demo',
                        default="samples")
    parser.add_argument('--save_dir', dest='save_dir',
                        help='directory to load images for demo',
                        default="images")
    parser.add_argument('--cuda', dest='cuda',
                        help='whether use CUDA',
                        action='store_true')
    parser.add_argument('--mGPUs', dest='mGPUs',
                        help='whether use multiple GPUs',
                        action='store_true')
    parser.add_argument('--cag', dest='class_agnostic',
                        help='whether perform class_agnostic bbox regression',
                        action='store_true')
    parser.add_argument('--parallel_type', dest='parallel_type',
                        help='which part of model to parallel, 0: all, 1: model before roi pooling',
                        default=0, type=int)
    parser.add_argument('--checkepoch', dest='checkepoch',
                        help='checkepoch to load network',
                        default=0, type=int)
    parser.add_argument('--checkpoint', dest='checkpoint',
                        help='checkpoint to load network',
                        default=5010, type=int)
    parser.add_argument('--bs', dest='batch_size',
                        help='batch_size',
                        default=1, type=int)
    parser.add_argument('--vis', dest='vis',
                        help='visualization mode',
                        action='store_true')
    parser.add_argument('--webcam_num', dest='webcam_num',
                        help='webcam ID number',
                        default=-1, type=int)
    args = parser.parse_args()
    return args
if __name__ == '__main__':
    # Entry point: load a trained Faster R-CNN checkpoint and run detection
    # over a dataset of images, writing annotated copies to args.save_dir.
    args = parse_args()
    device = torch_utils.select_device()
    print('Called with args:')
    print(args)
    input_dir = args.load_dir + "/" + args.net
    if not os.path.exists(input_dir):
        raise Exception('There is no input directory for loading network from ' + input_dir)
    load_name = os.path.join(input_dir, 'faster_rcnn_{}_{}.pth'.format(args.net, args.checkepoch))
    # NOTE(review): the computed checkpoint path above is immediately
    # overwritten by this hard-coded absolute path -- looks like a debug
    # leftover; confirm before shipping.
    load_name = 'E:/condaDev/faster_rcnn_1_20_5010.pth'
    pascal_classes = np.asarray(['__background__',
                                 'aeroplane', 'bicycle', 'bird', 'boat',
                                 'bottle', 'bus', 'car', 'cat', 'chair',
                                 'cow', 'diningtable', 'dog', 'horse',
                                 'motorbike', 'person', 'pottedplant',
                                 'sheep', 'sofa', 'train', 'tvmonitor'])
    # initilize the network here.
    if args.net == 'vgg16':
        fasterRCNN = VGG16(pascal_classes, pretrained=False, class_agnostic=args.class_agnostic)
    elif args.net == 'res101':
        # NOTE(review): ``resnet`` is not imported in this file; the res*
        # branches would raise NameError if selected.
        fasterRCNN = resnet(pascal_classes, 101, pretrained=False, class_agnostic=args.class_agnostic)
    elif args.net == 'res50':
        fasterRCNN = resnet(pascal_classes, 50, pretrained=False, class_agnostic=args.class_agnostic)
    elif args.net == 'res152':
        fasterRCNN = resnet(pascal_classes, 152, pretrained=False, class_agnostic=args.class_agnostic)
    else:
        # NOTE(review): execution continues after this message, so the later
        # ``fasterRCNN`` reference raises NameError for unknown nets.
        print("network is not defined")
    fasterRCNN.create_architecture()
    print("load checkpoint {}".format(load_name))
    try:  # GPU
        checkpoint = torch.load(load_name)
    except:  # CPU -- deliberate broad fallback: retry with tensors mapped to CPU
        checkpoint = torch.load(load_name, map_location=lambda storage, loc: storage)
    fasterRCNN.load_state_dict(checkpoint['model'])
    if 'pooling_mode' in checkpoint.keys():
        pooling_mode = checkpoint['pooling_mode']
    print('load model successfully!')
    data_set = CusDataset()
    data_loader = torch.utils.data.DataLoader(data_set, batch_size=1, num_workers=0)
    print('Loaded Photo: {} images.'.format(len(data_loader)))
    fasterRCNN.to(device)
    fasterRCNN.eval()  # inference mode
    start = time.time()
    max_per_image = 100
    thresh = 0.05  # minimum class score for a detection to be considered
    for step, (im, im_data, im_info, gt_boxes, num_boxes, im_scales, pth) in enumerate(data_loader):
        im_data, im_info, gt_boxes, num_boxes, im_scales = im_data.to(device), im_info.to(device), \
            gt_boxes.to(device), num_boxes.to(device), im_scales.to(device)
        # pdb.set_trace()
        det_tic = time.time()
        rois, cls_prob, bbox_pred, rpn_loss_cls, rpn_loss_box, RCNN_loss_cls, RCNN_loss_bbox, rois_label\
            = fasterRCNN(im_data, im_info, gt_boxes, num_boxes)
        scores = cls_prob.data
        boxes = rois.data[:, :, 1:5]
        # Apply bounding-box regression deltas (de-normalized with config stats)
        box_deltas = bbox_pred.data
        box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(opt.bbox_normalize_stds).to(device) \
            + torch.FloatTensor(opt.bbox_normalize_means).to(device)
        box_deltas = box_deltas.view(1, -1, 4 * len(pascal_classes))
        pred_boxes = bbox_transform_inv(boxes, box_deltas, 1)
        pred_boxes = clip_boxes(pred_boxes, im_info.data, 1)
        pred_boxes /= im_scales[0]  # map boxes back to original image scale
        scores = scores.squeeze()
        pred_boxes = pred_boxes.squeeze()
        det_toc = time.time()
        detect_time = det_toc - det_tic
        misc_tic = time.time()
        im2show = np.copy(im)
        # Per-class thresholding + NMS; class 0 (background) is skipped.
        for j in range(1, len(pascal_classes)):
            inds = torch.nonzero(scores[:, j] > thresh).view(-1)
            # if there is det
            if inds.numel() > 0:
                cls_scores = scores[:, j][inds]
                _, order = torch.sort(cls_scores, 0, True)
                if args.class_agnostic:
                    cls_boxes = pred_boxes[inds, :]
                else:
                    cls_boxes = pred_boxes[inds][:, j * 4:(j + 1) * 4]
                cls_dets = torch.cat((cls_boxes, cls_scores.unsqueeze(1)), 1)
                cls_dets = cls_dets[order]
                keep = nms(cls_boxes[order, :], cls_scores[order], opt.TEST_NMS)
                cls_dets = cls_dets[keep.view(-1).long()]
                im2show = vis_detections(im2show, pascal_classes[j], cls_dets.cpu().numpy(), 0.5)
        misc_toc = time.time()
        nms_time = misc_toc - misc_tic
        print('im_detect: {:d}/{:d} detect_time:{:.3f}s nms_time:{:.3f}s'
              .format(step + 1, len(data_loader), detect_time, nms_time))
        result_path = os.path.join(args.save_dir, pth[0])
        cv2.imwrite(result_path, im2show)
        # very slow, ~10s per image
'''Read and write PCM 16 bits files.'''
import numpy as np
# open raw PCM audio file, return float array
def read_raw_pcm_16bits(filename):
    """Read a headerless 16-bit signed PCM file into a float array.

    Parameters
    ----------
    filename : str or path-like
        Path to the raw PCM file (native-endian int16 samples, no header).

    Returns
    -------
    numpy.ndarray
        1-D float array with one entry per 16-bit sample.
    """
    # ``with`` guarantees the descriptor is closed even if fromfile raises
    # (the original leaked the handle on error).
    with open(filename, 'rb') as fd:
        data = np.fromfile(fd, dtype=np.int16)
    return data.astype(float)
# write a raw PCM audio file
def write_raw_pcm_16bits(filename, data):
    """Write ``data`` to ``filename`` as headerless 16-bit signed PCM.

    Parameters
    ----------
    filename : str or path-like
        Destination file path (overwritten if it exists).
    data : array-like of numbers
        Samples; values are cast to ``numpy.int16`` (floats are truncated,
        out-of-range values wrap per numpy cast semantics).
    """
    # ``with`` guarantees the descriptor is flushed and closed on all paths
    # (the original leaked the handle if tofile raised).
    with open(filename, 'wb') as fd:
        np.array(data, dtype=np.int16).tofile(fd)
|
''' Handling the data io '''
import argparse
import torch
import transformer.Constants as Constants
def read_instances_from_file(inst_file, max_sent_len, keep_case):
    ''' Convert file into word seq lists and vocab '''
    word_insts = []
    trimmed_sent_count = 0
    with open(inst_file) as f:
        for sent in f:
            # Normalize case unless the caller asked to preserve it.
            text = sent if keep_case else sent.lower()
            words = text.split()
            if len(words) > max_sent_len:
                trimmed_sent_count += 1
            clipped = words[:max_sent_len]
            # Empty lines become ``None`` placeholders so counts stay aligned
            # with the parallel corpus; real sentences get BOS/EOS wrappers.
            if clipped:
                word_insts.append([Constants.BOS_WORD] + clipped + [Constants.EOS_WORD])
            else:
                word_insts.append(None)
    print('[Info] Get {} instances from {}'.format(len(word_insts), inst_file))
    if trimmed_sent_count > 0:
        print('[Warning] {} instances are trimmed to the max sentence length {}.'
              .format(trimmed_sent_count, max_sent_len))
    return word_insts
def build_vocab_idx(word_insts, min_word_count):
    ''' Trim vocab by number of occurence '''
    full_vocab = set(w for sent in word_insts for w in sent)
    print('[Info] Original Vocabulary size =', len(full_vocab))
    # Special tokens always occupy their reserved indices.
    word2idx = {
        Constants.BOS_WORD: Constants.BOS,
        Constants.EOS_WORD: Constants.EOS,
        Constants.PAD_WORD: Constants.PAD,
        Constants.UNK_WORD: Constants.UNK}
    # Count occurrences of every word across all instances.
    word_count = dict.fromkeys(full_vocab, 0)
    for sent in word_insts:
        for word in sent:
            word_count[word] += 1
    ignored_word_count = 0
    # Keep only words seen strictly more than ``min_word_count`` times.
    for word, count in word_count.items():
        if word in word2idx:
            continue
        if count > min_word_count:
            word2idx[word] = len(word2idx)
        else:
            ignored_word_count += 1
    print('[Info] Trimmed vocabulary size = {},'.format(len(word2idx)),
          'each with minimum occurrence = {}'.format(min_word_count))
    print("[Info] Ignored word count = {}".format(ignored_word_count))
    return word2idx
def convert_instance_to_idx_seq(word_insts, word2idx):
    ''' Mapping words to idx sequence. '''
    # Bind the lookup once; unknown words fall back to the UNK index.
    lookup = word2idx.get
    return [[lookup(word, Constants.UNK) for word in sentence] for sentence in word_insts]
def main():
    ''' Preprocess parallel corpora into index sequences and pickle them.

    Reads train/valid source and target files, truncates/aligns them, builds
    (or loads) vocabularies -- optionally shared between source and target --
    converts every sentence to index sequences, and saves settings, dicts and
    both splits in a single ``torch.save`` archive.
    '''
    parser = argparse.ArgumentParser()
    parser.add_argument('-train_src', required=True)
    parser.add_argument('-train_tgt', required=True)
    parser.add_argument('-valid_src', required=True)
    parser.add_argument('-valid_tgt', required=True)
    parser.add_argument('-save_data', required=True)
    parser.add_argument('-max_len', '--max_word_seq_len', type=int, default=50)
    parser.add_argument('-min_word_count', type=int, default=5)
    parser.add_argument('-keep_case', action='store_true')
    parser.add_argument('-share_vocab', action='store_true')
    parser.add_argument('-vocab', default=None)
    opt = parser.parse_args()
    opt.max_token_seq_len = opt.max_word_seq_len + 2  # include the <s> and </s>
    # Training set
    train_src_word_insts = read_instances_from_file(
        opt.train_src, opt.max_word_seq_len, opt.keep_case)
    train_tgt_word_insts = read_instances_from_file(
        opt.train_tgt, opt.max_word_seq_len, opt.keep_case)
    # Keep the parallel corpus aligned by trimming to the shorter side.
    if len(train_src_word_insts) != len(train_tgt_word_insts):
        print('[Warning] The training instance count is not equal.')
        min_inst_count = min(len(train_src_word_insts), len(train_tgt_word_insts))
        train_src_word_insts = train_src_word_insts[:min_inst_count]
        train_tgt_word_insts = train_tgt_word_insts[:min_inst_count]
    #- Remove empty instances (pairs where either side was a blank line)
    train_src_word_insts, train_tgt_word_insts = list(zip(*[
        (s, t) for s, t in zip(train_src_word_insts, train_tgt_word_insts) if s and t]))
    # Validation set
    valid_src_word_insts = read_instances_from_file(
        opt.valid_src, opt.max_word_seq_len, opt.keep_case)
    valid_tgt_word_insts = read_instances_from_file(
        opt.valid_tgt, opt.max_word_seq_len, opt.keep_case)
    if len(valid_src_word_insts) != len(valid_tgt_word_insts):
        print('[Warning] The validation instance count is not equal.')
        min_inst_count = min(len(valid_src_word_insts), len(valid_tgt_word_insts))
        valid_src_word_insts = valid_src_word_insts[:min_inst_count]
        valid_tgt_word_insts = valid_tgt_word_insts[:min_inst_count]
    #- Remove empty instances
    valid_src_word_insts, valid_tgt_word_insts = list(zip(*[
        (s, t) for s, t in zip(valid_src_word_insts, valid_tgt_word_insts) if s and t]))
    # Build vocabulary: load a pre-defined one, share one, or build per side.
    if opt.vocab:
        predefined_data = torch.load(opt.vocab)
        assert 'dict' in predefined_data
        print('[Info] Pre-defined vocabulary found.')
        src_word2idx = predefined_data['dict']['src']
        tgt_word2idx = predefined_data['dict']['tgt']
    else:
        if opt.share_vocab:
            print('[Info] Build shared vocabulary for source and target.')
            word2idx = build_vocab_idx(
                train_src_word_insts + train_tgt_word_insts, opt.min_word_count)
            src_word2idx = tgt_word2idx = word2idx
        else:
            print('[Info] Build vocabulary for source.')
            src_word2idx = build_vocab_idx(train_src_word_insts, opt.min_word_count)
            print('[Info] Build vocabulary for target.')
            tgt_word2idx = build_vocab_idx(train_tgt_word_insts, opt.min_word_count)
    # word to index
    print('[Info] Convert source word instances into sequences of word index.')
    train_src_insts = convert_instance_to_idx_seq(train_src_word_insts, src_word2idx)
    valid_src_insts = convert_instance_to_idx_seq(valid_src_word_insts, src_word2idx)
    print('[Info] Convert target word instances into sequences of word index.')
    train_tgt_insts = convert_instance_to_idx_seq(train_tgt_word_insts, tgt_word2idx)
    valid_tgt_insts = convert_instance_to_idx_seq(valid_tgt_word_insts, tgt_word2idx)
    data = {
        'settings': opt,
        'dict': {
            'src': src_word2idx,
            'tgt': tgt_word2idx},
        'train': {
            'src': train_src_insts,
            'tgt': train_tgt_insts},
        'valid': {
            'src': valid_src_insts,
            'tgt': valid_tgt_insts}}
    print('[Info] Dumping the processed data to pickle file', opt.save_data)
    torch.save(data, opt.save_data)
    print('[Info] Finish.')


if __name__ == '__main__':
    main()
|
import tensorflow as tf
import numpy as np
class TextCNN(object):
    """
    Two-channel CNN for text classification (Kim-2014 style: one static and
    one fine-tuned copy of the pre-trained embeddings).

    Graph: embedding lookup (2 channels) -> parallel conv + max-pool per
    filter size -> concat -> dropout -> linear softmax layer with optional
    L2 regularization on the output weights.
    """
    def __init__(self, sentence_len, vocab_size, embedding_size, num_classes,
                 static_embedding_filter, filter_sizes, num_filters, l2_reg_lambda = 0.0):
        # Initialize placeholders
        self.inputs = tf.placeholder(tf.int32, [None, sentence_len], name = "inputs")
        self.labels = tf.placeholder(tf.float32, [None, num_classes], name = "labels")
        self.dropout_keep_prob = tf.placeholder(tf.float32, name = "dropout_keep_prop")
        # Embed words into 2-channel vectors: static + trainable copies
        with tf.device("/cpu:0"), tf.name_scope("embedding"):
            # Non-static channel starts from the pre-trained weights but is trainable.
            nonstatic_embedding_filter = tf.get_variable("nonstatic_filter", initializer = static_embedding_filter)
            self.nonstatic_embedding = tf.nn.embedding_lookup(nonstatic_embedding_filter, self.inputs)
            self.static_embedding = tf.nn.embedding_lookup(static_embedding_filter, self.inputs)
            self.embedded_sentences = tf.concat([self.nonstatic_embedding, self.static_embedding], -1)
            self.embedded_layer = tf.reshape(self.embedded_sentences, [-1, sentence_len, embedding_size, 2])
        # Convolutional and maxpooling layers, one branch per filter size
        pooled_layer = []
        for filter_size in filter_sizes:
            filter_shape = [filter_size, embedding_size, 2, num_filters]
            W = tf.Variable(tf.truncated_normal(filter_shape, stddev = 0.1), name = "weights")
            b = tf.Variable(tf.constant(0.1, shape = [num_filters]), name = "bias")
            # Convolutional layer
            conv = tf.nn.conv2d(
                self.embedded_layer,
                W,
                strides = [1, 1, 1, 1],
                padding = "VALID",
                name = "conv")
            # Add bias and apply rectifier linear unit function
            h = tf.nn.relu(tf.nn.bias_add(conv, b), name = "relu")
            # Max-pool over all positions for this filter size
            pooled = tf.nn.max_pool(
                h,
                ksize = [1, sentence_len - filter_size + 1, 1, 1],
                strides = [1, 1, 1, 1],
                padding = "VALID",
                name = "pooled")
            pooled_layer.append(pooled)
        total_num_filters = num_filters * len(filter_sizes)
        self.h_pool = tf.concat(pooled_layer, 3, name = "h_pool")
        self.h_flat = tf.reshape(self.h_pool, [-1 , total_num_filters], name = "h_flat")
        # Add dropout
        self.h_drop = tf.nn.dropout(self.h_flat, self.dropout_keep_prob, name = "h_drop")
        l2_loss = 0.0
        # Outputs layer
        with tf.name_scope("outputs"):
            W = tf.get_variable("weights", shape = [total_num_filters, num_classes],
                                initializer = tf.contrib.layers.xavier_initializer())
            b = tf.Variable(tf.constant(0.1, shape = [num_classes]), name = "bias")
            # Bug fix: the output layer previously consumed ``self.h_flat``,
            # so the dropout computed above was never actually applied.
            self.scores = tf.nn.xw_plus_b(self.h_drop, W, b, name = "scores")
            self.predictions = tf.argmax(self.scores, 1)
            l2_loss += tf.nn.l2_loss(W) + tf.nn.l2_loss(b)
        # Compute loss
        losses = tf.nn.softmax_cross_entropy_with_logits(labels = self.labels, logits = self.scores)
        self.loss = tf.add(tf.reduce_mean(losses), l2_loss * l2_reg_lambda, name = "loss")
        # Compute accuracy
        correct_predictions = tf.equal(self.predictions, tf.argmax(self.labels, 1))
        self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name = "accuracy")
from email.mime import image
from django.shortcuts import render
from django.http import HttpResponse
from .models import Images, Location
from images.models import Images
# Create your views here.
def home(request):
    """Render the landing page with every image and all known locations."""
    context = {
        "images": Images.objects.all(),
        "location_list": Location.objects.all(),
    }
    return render(request, 'index.html', context)
def search_category(request):
    """Search images by the category given in the ``image`` GET parameter.

    Renders ``search.html`` with the matching images, or with an explanatory
    message when no search term was supplied.
    """
    location_list = Location.objects.all()
    if 'image' in request.GET and request.GET['image']:
        search_category = request.GET.get('image')
        searched_category = Images.search_image_by_category(search_category)
        message = f"{search_category}"
        return render(request, 'search.html', {"message": message, "images": searched_category, "location_list":location_list})
    else:
        # Bug fix: corrected user-facing typo "catrgory" -> "category".
        message = "You haven't searched for any category"
        return render(request, 'search.html', {"message": message, "location_list":location_list})
def filter_by_location(request, location):
    """Render all images taken at ``location`` along with the location menu."""
    context = {
        "images": Images.filter_by_location(location),
        "location_list": Location.objects.all(),
    }
    return render(request, 'location.html', context)
|
"""route.py
Linux parsers for the following commands:
* route
"""
# python
import re
# metaparser
from genie.metaparser import MetaParser
from genie.metaparser.util.schemaengine import Schema, Any, Optional
# =======================================================
# Schema for 'route'
# =======================================================
class RouteSchema(MetaParser):
    """Schema for route

    Example line being modelled:
    # Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
    # 0.0.0.0         192.168.1.1     0.0.0.0         UG    0      0        0 wlo1
    """

    # Bug fix: a flat one-entry-per-destination schema was assigned here and
    # then immediately overwritten by the nested one below; the dead
    # assignment has been removed.  The effective schema keys routes by
    # destination, then mask, then a 1-based next-hop index.
    schema = {
        'routes': {
            Any(): {  # 'destination'
                'mask': {
                    Any(): {
                        'nexthop': {
                            Any(): {  # index: 1, 2, 3, etc
                                'interface': str,
                                'flags': str,
                                'gateway': str,
                                'metric': int,
                                'ref': int,
                                'use': int
                            }
                        }
                    }
                }
            }
        }
    }
# =======================================================
# Parser for 'route'
# =======================================================
class Route(RouteSchema):
    """Parser for
        * route
        * route -4 -n
        * route -4n
        * route -n4
        * route -n -4
    """

    cli_command = ['route', 'route {flag}']

    def cli(self, flag=None, output=None):
        """Parse ``route`` output into the RouteSchema dictionary.

        Args:
            flag: optional flag combination ('-4 -n', '-4n' or '-n4') that
                selects the flagged command variant.
            output: pre-captured command output; when given, no device
                command is executed.
        """
        if output is None:
            cmd = self.cli_command[0]
            if flag in ['-4 -n', '-4n', '-n4']:
                # Bug fix: the flagged command was built into a separate
                # variable but the plain 'route' was executed; honour the
                # requested flag.
                cmd = self.cli_command[1].replace('{flag}', flag)
            out = self.device.execute(cmd)
        else:
            out = output

        # Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
        # 192.168.1.0     0.0.0.0         255.255.255.0   U     600    0        0 wlo1
        p1 = re.compile(r'(?P<destination>[a-z0-9\.\:]+)'
                        ' +(?P<gateway>[a-z0-9\.\:_]+)'
                        ' +(?P<mask>[a-z0-9\.\:]+)'
                        ' +(?P<flags>[a-zA-Z]+)'
                        ' +(?P<metric>(\d+))'
                        ' +(?P<ref>(\d+))'
                        ' +(?P<use>(\d+))'
                        ' +(?P<interface>\S+)'
                        )

        # Initializes the Python dictionary variable
        parsed_dict = {}

        # Pattern-match each line of output
        for line in out.splitlines():
            line = line.strip()

            # 192.168.1.0     0.0.0.0         255.255.255.0   U     600    0        0 wlo1
            m = p1.match(line)
            if m:
                routes = parsed_dict.setdefault('routes', {})
                group = m.groupdict()
                index_dict = {}
                for str_k in ['interface', 'flags', 'gateway']:
                    index_dict[str_k] = group[str_k]
                for int_k in ['metric', 'ref', 'use']:
                    index_dict[int_k] = int(group[int_k])
                # Bug fix: the original used a possibly-unbound ``index``
                # variable and a no-op ``setdefault`` when a destination/mask
                # pair already existed, silently dropping additional
                # next-hops.  Always append under the next free 1-based index.
                nexthops = (routes.setdefault(group['destination'], {})
                                  .setdefault('mask', {})
                                  .setdefault(group['mask'], {})
                                  .setdefault('nexthop', {}))
                nexthops[len(nexthops) + 1] = index_dict
                continue

        return parsed_dict
# =======================================================
# Parser for 'netstat -rn'
# =======================================================
class ShowNetworkStatusRoute(Route, RouteSchema):
    """Parser for
        * netstat -rn
    """

    cli_command = ['netstat -rn']

    def cli(self, output=None):
        """Capture 'netstat -rn' if needed, then delegate parsing to Route.cli."""
        captured = output
        if captured is None:
            captured = self.device.execute(self.cli_command[0])
        return super().cli(output=captured)
|
"""
Main File for Program
"""
from libs.url_utils import send_msg_url as s_url
from libs.wsp import WebWSP as Wsp
from os.path import join as os_join
from os import getcwd
def sing_msg_multi_num(numb_path,
                       msg_path):
    """Send one (multi-line) message to every phone number listed in a file.

    Args:
        numb_path: text file with one phone number per line.
        msg_path: text file whose lines are joined into a single message.

    NOTE(review): unlike multi_msg_multi_num, the numbers here are not
    ``.strip()``-ed, and ``readlines()`` keeps each line's trailing newline
    before the join -- confirm the resulting URL/message formatting is
    intended.
    """
    with open(numb_path) as numbers:
        with open(msg_path) as msg_chunks:
            msg = msg_chunks.readlines()
            m = "\n".join(msg)
            # One browser session drives every send.
            browser = Wsp(
                driver_path=os_join(
                    getcwd(),
                    'chromedriver'
                )
            )
            browser.pressEnter(
                [
                    s_url(
                        n,
                        m
                    ) for n in numbers
                ]
            )
def multi_msg_multi_num(number_path, msg_path):
    """Pair each phone number (one per line) with each ';'-separated message.

    ``zip`` stops at the shorter of the two sequences, so extra numbers or
    extra messages are silently ignored.
    """
    wsp = Wsp(driver_path=os_join(getcwd(), 'chromedriver'))
    with open(number_path) as numbers:
        with open(msg_path) as messages:
            data = list(map(lambda x: s_url(x[0], x[1]), zip([n.strip() for n in numbers], messages.read().split(';'))))
            # print(data)
            wsp.pressEnter(data)
if __name__ == '__main__':
    from time import time
    # Time the batch send and report elapsed time in seconds or minutes.
    t_start = time()
    multi_msg_multi_num('tmp/Fonos_Corte 200121.txt', 'tmp/Mensaje Corte 200121.txt')
    total = time() - t_start
    if total < 60:
        print(f'Elapsed Time: {total}s')
    else:
        print(f'Elapsed Time: {total/60}m')
|
from rest_framework import serializers
from location.models import GeoLocation
class GeoLocationSerializer(serializers.ModelSerializer):
    """Serializer for GeoLocation exposing only the uploaded ``geofile``.

    The ``data`` property is overridden to bypass DRF's normal field
    serialization entirely: it returns the model's generated coordinate
    payload plus the URL of its coordinate file.
    """

    class Meta:
        model = GeoLocation
        fields = ('geofile',)

    @property
    def data(self):
        # NOTE(review): requires ``self.instance`` to be set; constructing
        # this serializer with ``data=`` only would fail here -- confirm
        # call sites always pass an instance.
        return dict(
            data=self.instance.create_coordinate_file(),
            filepath=self.instance.coordinate_file.url)
|
"""empty message
Revision ID: 1df82b5937b
Revises: None
Create Date: 2016-06-08 11:11:54.020641
"""
# revision identifiers, used by Alembic.
revision = '1df82b5937b'  # this migration's identifier
down_revision = None  # first migration in the chain: downgrade empties the schema
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the initial blog schema: image, post, tag, user and the
    post<->tag association table."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('image',
    sa.Column('image_id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=80), nullable=True),
    sa.Column('md5_name', sa.String(length=32), nullable=True),
    sa.Column('type', sa.String(length=40), nullable=True),
    sa.Column('data', sa.LargeBinary(), nullable=True),
    sa.Column('time', sa.DateTime(), nullable=True),
    sa.Column('modify_time', sa.DateTime(), nullable=True),
    sa.PrimaryKeyConstraint('image_id'),
    sa.UniqueConstraint('md5_name')
    )
    op.create_table('post',
    sa.Column('post_id', sa.Integer(), nullable=False),
    sa.Column('zh_title', sa.String(length=200), nullable=True),
    sa.Column('en_title', sa.String(length=200), nullable=True),
    sa.Column('md_content', sa.String(length=9000), nullable=True),
    sa.Column('html_content', sa.String(length=9000), nullable=True),
    sa.Column('is_top', sa.Boolean(), nullable=True),
    sa.Column('time', sa.DateTime(), nullable=True),
    sa.Column('modify_time', sa.DateTime(), nullable=True),
    sa.PrimaryKeyConstraint('post_id'),
    sa.UniqueConstraint('en_title')
    )
    op.create_table('tag',
    sa.Column('tag_id', sa.Integer(), nullable=False),
    sa.Column('tag_name', sa.String(length=50), nullable=True),
    sa.Column('create_time', sa.DateTime(), nullable=True),
    sa.PrimaryKeyConstraint('tag_id'),
    sa.UniqueConstraint('tag_name')
    )
    op.create_table('user',
    sa.Column('user_id', sa.Integer(), nullable=False),
    sa.Column('username', sa.String(length=50), nullable=True),
    sa.Column('email', sa.String(length=100), nullable=True),
    sa.Column('password', sa.String(length=32), nullable=True),
    sa.PrimaryKeyConstraint('user_id'),
    sa.UniqueConstraint('email'),
    sa.UniqueConstraint('username')
    )
    # Association table; created last because it references post and tag.
    op.create_table('post_tag',
    sa.Column('pt_id', sa.Integer(), nullable=False),
    sa.Column('tag_id', sa.Integer(), nullable=True),
    sa.Column('post_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['post_id'], ['post.post_id'], ),
    sa.ForeignKeyConstraint(['tag_id'], ['tag.tag_id'], ),
    sa.PrimaryKeyConstraint('pt_id')
    )
    ### end Alembic commands ###
def downgrade():
    """Drop every table created by upgrade(), FK-dependent table first."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('post_tag')
    op.drop_table('user')
    op.drop_table('tag')
    op.drop_table('post')
    op.drop_table('image')
    ### end Alembic commands ###
|
import numpy as np
import matplotlib.pylab as plt
import time
from IPython import display
## separating the input data to test and train
def create_test_train(finalOffense_tweet, none_tweet, ratio_to_train=0.75):
    """Combine offensive and non-offensive tweets into one labelled matrix
    and split the rows randomly into train and test sets.

    Parameters
    ----------
    finalOffense_tweet : sequence of 1-element rows, each holding the text of
        an offensive tweet, e.g. [['tweet text'], ...].
    none_tweet : same shape, holding non-offensive tweets.
        NOTE(review): indexed by range(len(finalOffense_tweet)) below, so it
        is assumed to be at least that long -- confirm with callers.
    ratio_to_train : probability that a given row lands in the train set.

    Returns
    -------
    (test, train) : two 2-column numpy string arrays; column 0 is the tweet
        text, column 1 the class label ('offense' or 'none').
    """
    # Fixed seed so the split is reproducible across runs.
    np.random.seed(0)
    ## texts arrive wrapped in single-element rows; unwrap them
    tweet_offensive = [row[0] for row in finalOffense_tweet]
    tweet_none = [none_tweet[i][0] for i in range(len(finalOffense_tweet))]
    ## combining the arrays: offensive rows first, then 'none' rows
    full_data_mat = np.array(tweet_offensive + tweet_none)
    ## class labels aligned with the concatenation order above
    classes_array = ["offense"] * len(finalOffense_tweet) + ["none"] * len(none_tweet)
    ## adding classes as a new column
    full_data_mat = np.c_[full_data_mat, classes_array]
    ## randomly flag each row for train (True) or test (False)
    temp = np.random.uniform(0, 1, len(full_data_mat)) <= ratio_to_train
    ## np.c_ on a string array coerces the booleans to 'True'/'False' strings
    full_data_mat = np.c_[full_data_mat, temp]
    ## select rows on the marker column, then drop that column
    train = full_data_mat[full_data_mat[:, -1] == 'True'][:, :-1]
    test = full_data_mat[full_data_mat[:, -1] == 'False'][:, :-1]
    return (test, train)
## plot static data in live mode
def static_data_live_plot(x0,y0,x1,y1,label0='offense',label1='none'):
    """Animate a scatter plot of two static point sets in a notebook,
    revealing one more point of each class per frame via IPython display.

    A KeyboardInterrupt (Ctrl-C) skips the animation: the full scatter is
    drawn at once and the loop stops.
    """
    fig = plt.figure()
    ax1 = fig.add_subplot(111)
    for i in range(len(x0)):
        try:
            # Redraw the first i points of each class on every frame.
            ax1.scatter(x0[0:i], y0[0:i], s=10, c='red', marker="x", label=label0)
            ax1.scatter(x1[0:i],y1[0:i], s=10, c='black', marker="o", label=label1)
            # scatter() is called repeatedly with the same labels, so the
            # legend entries must be deduplicated before display.
            handles, labels = ax1.get_legend_handles_labels()
            handle_list, label_list = [], []
            for handle, label in zip(handles, labels):
                if label not in label_list:
                    handle_list.append(handle)
                    label_list.append(label)
            plt.legend(handle_list, label_list)
            # Push the updated figure to the notebook output cell and
            # clear the previous frame.
            display.display(plt.gcf())
            display.clear_output(wait=True)
            time.sleep(0.1)
        except KeyboardInterrupt:
            # Abort the animation: plot everything in one shot.
            ax1.scatter(x0, y0, s=10, c='red', marker="x", label=label0)
            ax1.scatter(x1,y1, s=10, c='black', marker="o", label=label1)
            break
## voting matrix function
import operator
import pandas as pd
from sklearn.metrics import accuracy_score
def voting_matrix(test_class, RFpredictions, pre_svm, pre_nb):
    """Majority-vote ensemble of three classifiers (Naive Bayes, SVM, RF).

    Parameters
    ----------
    test_class : true labels; factorized to integer codes.
    RFpredictions : Random Forest predictions, already 0/1 integer codes.
    pre_svm : SVM predictions; the first element is forced to code 1 and the
        remainder factorized (preserved from the original implementation --
        NOTE(review): confirm this offset handling is intended).
    pre_nb : Naive Bayes predictions; factorized to integer codes.

    Returns
    -------
    (vot_mat, vot_cnf_mat, vot_acc_score) : the per-sample vote DataFrame,
        the confusion matrix (crosstab), and the voting accuracy.
    """
    THE_CLASS = list(pd.factorize(test_class)[0])
    NB_class = list(pd.factorize(pre_nb)[0])
    # Fix: the original passed order=False, a keyword argument that was
    # removed from pandas.factorize long ago and raises TypeError on any
    # modern pandas.  The default (sort=False) gives first-seen ordering.
    SVM_class = list([1] + list(pd.factorize(pre_svm[1:])[0]))
    RF_class = list(RFpredictions)
    # Sum the three votes per sample ...
    Voting_class = list(map(operator.add, NB_class, SVM_class))
    Voting_class = list(map(operator.add, Voting_class, RF_class))
    # ... and apply a 2-of-3 majority threshold.
    Voting_class = [1 if votes > 1 else 0 for votes in Voting_class]
    d = {"Real_Class": THE_CLASS, "NB_class": NB_class, "SVM_class": SVM_class,
         "RF_class": RF_class, "Voting_Class": Voting_class}
    vot_mat = pd.DataFrame(data=d, columns=["Real_Class", "NB_class", "SVM_class", "RF_class", "Voting_Class"])
    vot_cnf_mat = pd.crosstab(np.array(vot_mat['Real_Class']), np.array(vot_mat['Voting_Class']),
                              rownames=['Actual class'], colnames=['Predicted class'])
    ## accuracy score: fraction of exact matches -- numerically identical to
    ## sklearn.metrics.accuracy_score, without the sklearn dependency.
    vot_acc_score = float(np.mean(np.array(vot_mat['Real_Class']) == np.array(vot_mat['Voting_Class'])))
    return vot_mat, vot_cnf_mat, vot_acc_score
## reading tweet live
from tweepy import OAuthHandler
from tweepy import Stream
from tweepy import API
import io
from tweepy.streaming import StreamListener
import json
import pickle
from HAL_RF_functions import new_tweet_RF
from BoGClassfier import BOGTweet_live
#Variables that contains the user credentials to access Twitter API
# SECURITY NOTE(review): live API credentials are hard-coded in source
# control.  They should be revoked and re-issued, then loaded from
# environment variables or a config file excluded from the repository.
access_token = "900251987161305089-HgGGuGNtSfRCAGdMqWiRkaak42RIQ4V"
access_token_secret = "QBpZd9hlSc1qVp2SguVP4KcUotjVQVEKn0b17e7D9UyMA"
consumer_key = "c8ueHMou4lOZWRMyVxzcLNENQ"
consumer_secret = "E5AbdRPJxoNCnl8TR1XCkhDg5I749yWcCB89HFuq56iCxEpIvO"
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Demo of torchknickknacks.modelutils.Recorder: hooking AlexNet layers to
# capture layer outputs, inputs, gradients and parameters.
import torch
from torchknickknacks import modelutils
# Pretrained AlexNet fetched from the torchvision hub (network access needed).
model = torch.hub.load('pytorch/vision:v0.10.0', 'alexnet', pretrained = True)
# Register a recorder to the 4th layer of the features part of AlexNet
# Conv2d(64, 192, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
# and record the output of the layer during the forward pass
layer = list(model.features.named_children())[3][1]
recorder = modelutils.Recorder(layer, record_output = True, backward = False)
data = torch.rand(64, 3, 224, 224)
output = model(data)
print(recorder.recording)#tensor of shape (64, 192, 27, 27)
recorder.close()#remove the recorder
# Record input to the layer during the forward pass
recorder = modelutils.Recorder(layer, record_input = True, backward = False)
data = torch.rand(64, 3, 224, 224)
output = model(data)
print(recorder.recording)#tensor of shape (64, 64, 27, 27)
recorder.close()#remove the recorder
# Register a recorder to the 3rd layer of the features part of AlexNet
# MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
# and record the output of the layer in the backward pass
layer = list(model.features.named_children())[2][1]
# Record output to the layer during the backward pass
recorder = modelutils.Recorder(layer, record_output = True, backward = True)
data = torch.rand(64, 3, 224, 224)
output = model(data)
loss = torch.nn.CrossEntropyLoss()
labels = torch.randint(1000, (64,))#random labels just to compute a backward pass
l = loss(output, labels)
l.backward()
print(recorder.recording[0])#tensor of shape (64, 64, 27, 27)
recorder.close()#remove the recorder
# Register a recorder to the 4th layer of the features part of AlexNet
# Conv2d(64, 192, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
# and record the parameters of the layer in the forward pass
layer = list(model.features.named_children())[3][1]
recorder = modelutils.Recorder(layer, record_params = True, backward = False)
data = torch.rand(64, 3, 224, 224)
output = model(data)
print(recorder.recording)#list of tensors of shape (192, 64, 5, 5) (weights) (192,) (biases)
recorder.close()#remove the recorder
# A custom function can also be passed to the recorder and perform arbitrary
# operations. In the example below, the custom function prints the kwargs that
# are passed along with the custom function and also return 1 (stored in the recorder)
def custom_fn(*args, **kwargs):#signature of any custom fn
    """Demo hook for Recorder: print each keyword argument, then return 1."""
    print('custom called')
    for name in kwargs:
        print('\nkey argument:', name)
        print('\nvalue argument:', kwargs[name])
    return 1
recorder = modelutils.Recorder(layer,
                               backward = False,
                               custom_fn = custom_fn,
                               print_value = 5)
data = torch.rand(64, 3, 224, 224)
output = model(data)
print(recorder.recording)# value(s) returned by custom_fn above (here: 1)
recorder.close()#remove the recorder
# Record output to the layer during the forward pass and store it in folder
layer = list(model.features.named_children())[3][1]
recorder = modelutils.Recorder(
    layer,
    record_params = True,
    backward = False,
    save_to = '/Users/alexandrosgoulas/Data/work-stuff/python-code/projects/test_recorder'#create the folder before running this example!
)
for _ in range(5):#5 passes e.g. batches, thus 5 stored "recorded" tensors
    data = torch.rand(64, 3, 224, 224)
    output = model(data)
recorder.close()#remove the recorder
#!/usr/bin/python
from ftplib import FTP
import os, time
import zipfile
from os.path import join, exists
import subprocess, hashlib
from datetime import datetime
from time import gmtime
class DataGetter:
def __init__(self):
self.updatedFiles = []
self.newFiles = []
self.errorFiles = []
self.ftpHost = "ftp.cpc.ncep.noaa.gov"
self.ftp = FTP(self.ftpHost)
self.ftp.login()
def processDataset(self, datasetName, localDir):
self.localDir = localDir
self.yearList = []
self.updatedFiles = []
self.newFiles = []
self.errorFiles = []
self.remoteDir = "/precip/CMORPH_V1.0/RAW/"
print "Processing dataset..." + datasetName
os.chdir(self.localDir + datasetName) # go to dataset dir
#print os.getcwd()
self.ftp.cwd(self.remoteDir + datasetName)
#print "ftp ", self.ftp.pwd()
self.ftp.retrlines("LIST", self.yearList.append)
for year in self.yearList:
print year[-4:]
mnList = self.doDirectory(year,True)
if mnList and year[-4:]=="2016":
#if mnList:
locmd5=self.read_localmd5()
baseDir = os.getcwd()
for f in mnList:
self.handleFile(baseDir, f, locmd5)
self.ftp.cwd("../") # get out of year dir
os.chdir("../") #get out of year dir
else:
fileList = []
fileList = self.ftp.retrlines("LIST", fileList.append)
baseDir = os.getcwd()
locmd5={}
for f in fileList:
self.handleFile(baseDir, f, locmd5)
os.chdir("../") # get out of year dir to skip or containing only files
#print os.getcwd()
self.ftp.cwd("../")
#print "ftp ", self.ftp.pwd()
os.chdir("../") # get out of dataset dir
#print os.getcwd()
self.ftp.cwd("../")
#print "ftp ", self.ftp.pwd()
print "======================================================="
print "Summary for " + datasetName
print "======================================================="
print "These files were updated: "
for f in self.updatedFiles:
print f
print "======================================================="
print "These are new files: "
for f in self.newFiles:
print f
print "======================================================="
print "These files and problems: "
for f in self.errorFiles:
print f
def doDirectory(self, dirLine, makedir):
if(dirLine[0] == 'd'):
dirName = dirLine[(dirLine.rindex(" ") + 1):]
if makedir:
if(not os.path.exists(dirName)):
os.mkdir(dirName)
#print os.path.exists(dirName), dirName
os.chdir(dirName) # go to "year" dir
#print os.getcwd()
self.ftp.cwd(dirName)
#print "ftp ", self.ftp.pwd()
lineList = []
self.ftp.retrlines("LIST", lineList.append)
return lineList
def handleFile(self, baseDir, fileLine, locmd5):
if fileLine[0]== '-' :
try:
line = fileLine[(fileLine.rindex(" ") + 1):]
filename = line.split(" ")[-1]
if filename[-4:]==".tar":
#self.fileList.append(filename)
self.doFile(baseDir, filename, locmd5)
except ValueError:
pass
def doFile(self, baseDir, filename, locmd5):
curDir = os.getcwd()
if(os.path.exists(filename)):
local_md5 = locmd5[filename]
if not self.check_md5sum(filename, local_md5):
print "file exists to update", filename
if(self.downloadFile(filename, True)):
self.updatedFiles.append(os.path.abspath(filename))
else:
if(self.downloadFile(filename, False)):
self.newFiles.append(os.path.abspath(filename))
def check_md5sum(self, filename, local_md5):
''' Execute md5sum on file on ftp and return True,if same as read from local file '''
m = hashlib.md5()
self.ftp.retrbinary('RETR %s' % filename, m.update)
ftp_md5 = m.hexdigest()
print local_md5, ftp_md5, filename
return local_md5 == ftp_md5
def read_localmd5(self):
'''Read local md5sum from file and load as dictionary'''
locmd5={}
fmd5=open('original_md5sum.txt','r')
for line in fmd5.readlines():
md5,name=line.split(" ")
name=name.replace("\n","")
locmd5[name]=md5
return locmd5
def downloadFile(self, filename, isUpdate):
newFile = None
#print filename
if(isUpdate):
#newFile = open(filename + ".1", "wb")
newFile = open(filename, "wb")
else:
newFile = open(filename, "wb")
try:
try:
print "Trying to download file... " + filename
self.ftp.retrbinary("RETR " + filename, newFile.write)
os.popen("chmod g+rxX " + filename).readline()
os.popen("chgrp ua8 " + filename).readline()
if(isUpdate):
lines = os.popen("mv " + filename + ".1 " + filename).readlines()
if(len(lines) != 0):
print lines
self.errorFiles.append(filename + " counld not move file")
return False
return True
except Exception, e:
self.errorFiles.append(filename + " could not be downloaded:")
print e
return False
finally:
newFile.close()
#os.popen("gunzip " + filename)
def close(self):
self.ftp.quit()
if __name__ == "__main__":
    # Mirror the 8km-30min CMORPH dataset into the local archive root.
    getter = DataGetter()
    getter.processDataset("8km-30min", "/g/data1/ua8/CMORPH/CMORPH_V1.0/raw/")
    getter.close()
|
# -*- coding:utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
r"""
==============
JSON formatter
==============
This formatter outputs the issues in JSON.
:Example:
.. code-block:: javascript
{
"errors": [],
"generated_at": "2015-12-16T22:27:34Z",
"metrics": {
"_totals": {
"CONFIDENCE.HIGH": 1,
"CONFIDENCE.LOW": 0,
"CONFIDENCE.MEDIUM": 0,
"CONFIDENCE.UNDEFINED": 0,
"SEVERITY.HIGH": 0,
"SEVERITY.LOW": 0,
"SEVERITY.MEDIUM": 1,
"SEVERITY.UNDEFINED": 0,
"loc": 5,
"nosec": 0
},
"examples/yaml_load.py": {
"CONFIDENCE.HIGH": 1,
"CONFIDENCE.LOW": 0,
"CONFIDENCE.MEDIUM": 0,
"CONFIDENCE.UNDEFINED": 0,
"SEVERITY.HIGH": 0,
"SEVERITY.LOW": 0,
"SEVERITY.MEDIUM": 1,
"SEVERITY.UNDEFINED": 0,
"loc": 5,
"nosec": 0
}
},
"results": [
{
"code": "4 ystr = yaml.dump({'a' : 1, 'b' : 2, 'c' : 3})\n5
y = yaml.load(ystr)\n6 yaml.dump(y)\n",
"filename": "examples/yaml_load.py",
"issue_confidence": "HIGH",
"issue_severity": "MEDIUM",
"issue_text": "Use of unsafe yaml load. Allows instantiation of
arbitrary objects. Consider yaml.safe_load().\n",
"line_number": 5,
"line_range": [
5
],
"more_info": "https://docs.openstack.org/bandit/latest/",
"test_name": "blacklist_calls",
"test_id": "B301"
}
]
}
.. versionadded:: 0.10.0
"""
# Necessary so we can import the standard library json module while continuing
# to name this file json.py. (Python 2 only)
from __future__ import absolute_import
import datetime
import json
import logging
import operator
import sys
from bandit.core import docs_utils
from bandit.core import test_properties
LOG = logging.getLogger(__name__)
@test_properties.accepts_baseline
def report(manager, fileobj, sev_level, conf_level, lines=-1):
    '''Prints issues in JSON format

    :param manager: the bandit manager object
    :param fileobj: The output file object, which may be sys.stdout
    :param sev_level: Filtering severity level
    :param conf_level: Filtering confidence level
    :param lines: Number of lines to report, -1 for all
    '''
    machine_output = {'results': [], 'errors': []}
    # Files bandit could not analyse are reported under 'errors'.
    for (fname, reason) in manager.get_skipped():
        machine_output['errors'].append({'filename': fname,
                                         'reason': reason})
    results = manager.get_issue_list(sev_level=sev_level,
                                     conf_level=conf_level)
    # A baseline comparison yields a mapping of issue -> candidates,
    # while a plain run yields a list of issues.
    baseline = not isinstance(results, list)
    if baseline:
        collector = []
        for r in results:
            d = r.as_dict()
            d['more_info'] = docs_utils.get_url(d['test_id'])
            # More than one candidate means the baseline could not pair
            # the issue uniquely; include all candidates in the output.
            if len(results[r]) > 1:
                d['candidates'] = [c.as_dict() for c in results[r]]
            collector.append(d)
    else:
        collector = [r.as_dict() for r in results]
        for elem in collector:
            elem['more_info'] = docs_utils.get_url(elem['test_id'])
    itemgetter = operator.itemgetter
    # Sort by test name when aggregating by vulnerability, else by file.
    if manager.agg_type == 'vuln':
        machine_output['results'] = sorted(collector,
                                           key=itemgetter('test_name'))
    else:
        machine_output['results'] = sorted(collector,
                                           key=itemgetter('filename'))
    machine_output['metrics'] = manager.metrics.data
    # timezone agnostic format
    TS_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
    time_string = datetime.datetime.utcnow().strftime(TS_FORMAT)
    machine_output['generated_at'] = time_string
    result = json.dumps(machine_output, sort_keys=True,
                        indent=2, separators=(',', ': '))
    with fileobj:
        fileobj.write(result)
    if fileobj.name != sys.stdout.name:
        LOG.info("JSON output written to file: %s", fileobj.name)
|
class ModelInfo(object):
    """Interface stub for queries about a model's role and view needs.

    Every method is an unimplemented placeholder; the intended semantics
    must come from implementers (NOTE(review): nothing in this file defines
    them -- confirm against the subclasses/callers).
    """
    def is_primary(self):
        """Stub: whether this model is the primary one (not implemented)."""
        pass
    def has_owner(self):
        """Stub: whether this model has an owner (not implemented)."""
        pass
    def is_secondary(self):
        """Stub: whether this model is secondary (not implemented)."""
        pass
    def primary_model(self):
        """Stub: the associated primary model (not implemented)."""
        pass
    def needs_view(self):
        """Stub: whether a view is required (not implemented)."""
        pass
    def get_verbs(self):
        """Stub: supported verbs (not implemented)."""
        pass
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#pylint: disable=I0011,W0231
"""Missile class"""
import item
class Missile(item.Item):
    """Missile fired by a player; moves up one step per game tick."""
    sprite = "|"
    def __init__(self, x, y, player):
        """init missile at (x, y), keeping a reference to the owning player"""
        item.Item.__init__(self, x, y)
        self.player = player
    def tick(self):
        """action in tick - move missile up"""
        self.move_up()
    def event_discard(self):
        """discard missile, decrease the owning player's missile counter"""
        self.player.missile['current'] -= 1
    def can_hit(self):
        """missile can hit something"""
        return True
    def is_hit(self, target):
        """missile cant be hit"""
        return False
|
#
# CreateStatusGraph.py
# Reads a status file (pickle), calculates code
# coverage and graphs the bitmap in PNG format
#
import sys
import pickle
try:
import png
HAS_PYPNG = True
except:
print "[!] PyPNG library not found."
print "[*] Install via PIP: pip install pypng"
HAS_PYPNG = False
def populate_array(bitmap):
    """ Array of RGB values for the PNG file """
    # The flat bitmap is interpreted as a width x height grid, row-major:
    # one (R, G, B) tuple per cell, black for never-hit blocks.
    width = 256
    height = 256
    p = []
    for i in xrange(height):
        row = []
        for j in xrange(width):
            # Fix: the row stride of a row-major layout is the *width*; the
            # original used `i * height + j`, which only worked here because
            # width == height.
            idx = i * width + j
            n = bitmap[idx]
            if not n:
                rgb = (0, 0, 0)
            else:
                rgb = get_rgb_from_value(n)
            row.append(rgb)
        p.append(row)
    return p
def get_rgb_from_value(n):
    """Map a basic-block hit count to an (R, G, B) colour bin."""
    # Ascending thresholds with their colours; counts >= 128 fall through
    # to white.
    bins = ((2, 0xFF0000),   # red
            (4, 0xFFFF00),   # yellow
            (8, 0x009900),   # green
            (32, 0x0080FF),  # blue
            (128, 0x00FFFF)) # cyan
    k = 0xFFFFFF
    for limit, color in bins:
        if n < limit:
            k = color
            break
    # Split the 24-bit colour into its three 8-bit channels.
    return ((k >> 16) & 0xFF, (k >> 8) & 0xFF, k & 0xFF)
def get_coverage(bitmap):
    """ Consider only not null values """
    # Percentage of basic blocks with at least one recorded hit.
    size = len(bitmap)
    hits = size - bitmap.count(0)
    # (+ 0.0 forces float division; expression order kept identical to the
    # original so the result is bit-for-bit the same)
    return ((hits + 0.0) / size) * 100
def main():
if len(sys.argv) != 2:
print "python %s <filename>" % sys.argv[0]
sys.exit(1)
try:
filename = sys.argv[1]
with open(filename, 'rb') as f:
saved_state = pickle.load(f)
bitmap = saved_state['bitmap']
except:
print "[!] Could not load bitmap"
sys.exit(1)
# Get rough code coverage value
coverage = get_coverage(bitmap)
print "[*] Code coverage (basic block calls): %.2f" % coverage
if HAS_PYPNG:
# Create PNG image from bitmap values
p = populate_array(bitmap)
img = png.from_array(p, 'RGB')
img.save('status_graph.png')
print "[*] Created PNG file"
if __name__ == '__main__':
main()
|
# Read a string, then apply n reversal queries: each query (l, r) reverses
# the 1-indexed, inclusive substring s[l..r].
s = input()
n = int(input())
for _ in range(n):
    l, r = map(int, input().split())
    lstr = s[:l-1]      # prefix before the window
    mstr = s[l-1:r]     # the window to reverse
    mstr = mstr[::-1]
    rstr = s[r:]        # suffix after the window
    s = lstr + mstr + rstr
print(s)
|
import optparse
import Utils
import gensim
import numpy
def doc2vec(m, document):
    """Embed a document as the sum of its in-vocabulary word vectors.

    Returns None when no token of *document* is present in the model *m*.
    """
    known = [m[tok] for tok in document if tok in m]
    if not known:
        return None
    return numpy.sum(known, axis=0)
def main():
    """Load tokenized documents, embed each as the sum of its word vectors,
    and write the resulting matrix to '<dataset>-vectors.mtx'."""
    parser = optparse.OptionParser()
    parser.add_option('-d', '--dataset', default='sample')
    options, args = parser.parse_args()
    documents = list(Utils.read_json('%s-tokenized.json' % options.dataset))
    m = gensim.models.word2vec.Word2Vec.load('%s-word-vector-model' % options.dataset)
    vectors = [doc2vec(m, document) for document in documents]
    # Documents with no in-vocabulary tokens come back as None; replace them
    # with a zero vector of matching dimensionality.
    # NOTE(review): assumes vectors[0] is not None -- confirm for corpora
    # whose first document has no known tokens.
    vectors = [v if v is not None else numpy.zeros_like(vectors[0]) for v in vectors]
    vectors = numpy.array(vectors)
    Utils.write_matrix('%s-vectors.mtx' % options.dataset, vectors)
main()
|
#! /usr/bin/env python
# Copyright 2017, RackN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pip install requests
import requests, argparse, json, urllib3, os
'''
Usage: https://github.com/digitalrebar/provision/tree/master/integration/ansible
example: ansible -i inventory.py all -a "uname -a"
'''
# Children Group Support
# 1. Create a "ansible-children" parameter
# 2. Add that parameter to the parent profile
# 3. Set the "ansible-children" parameter in the parent profile to the list of children's profiles
def main():
    """Emit an Ansible dynamic inventory (JSON on stdout) built from the
    machines and profiles registered in a DigitalRebar endpoint."""
    inventory = { "_meta": { "hostvars": {} } }
    # change these values to match your DigitalRebar installation
    addr = os.getenv('RS_ENDPOINT', "https://127.0.0.1:8092")
    ups = os.getenv('RS_KEY', "rocketskates:r0cketsk8ts")
    arr = ups.split(":")
    user = arr[0]
    password = arr[1]
    # Argument parsing
    parser = argparse.ArgumentParser(description="Ansible dynamic inventory via DigitalRebar")
    parser.add_argument("--list", help="Ansible inventory of all of the deployments",
                        action="store_true", dest="list_inventory")
    parser.add_argument("--host",
                        help="Ansible inventory of a particular host", action="store",
                        dest="ansible_host", type=str)
    cli_args = parser.parse_args()
    list_inventory = cli_args.list_inventory
    ansible_host = cli_args.ansible_host
    Headers = {'content-type': 'application/json'}
    # Self-signed certs are common on DigitalRebar endpoints, hence
    # verify=False below; silence the resulting TLS warnings.
    urllib3.disable_warnings()
    inventory["_meta"]["rebar_url"] = addr
    inventory["_meta"]["rebar_user"] = user
    # profiles:      profile name -> list of member machine names
    # profiles_vars: profile name -> the profile's parameter dict
    profiles = {}
    profiles_vars = {}
    profiles_raw = requests.get(addr + "/api/v3/profiles",headers=Headers,auth=(user,password),verify=False)
    if profiles_raw.status_code == 200:
        for profile in profiles_raw.json():
            profiles[profile[u"Name"]] = []
            profiles_vars[profile[u"Name"]] = profile[u"Params"]
    else:
        raise IOError(profiles_raw.text)
    # --host narrows the machine query to a single name; --list (and the
    # no-argument default) fetches everything.
    if list_inventory:
        URL = addr + "/api/v3/machines"
    elif ansible_host:
        URL = addr + "/api/v3/machines?Name=" + ansible_host
    else:
        URL = addr + "/api/v3/machines"
    raw = requests.get(URL,headers=Headers,auth=(user,password),verify=False)
    if raw.status_code == 200:
        for machine in raw.json():
            name = machine[u'Name']
            # TODO, should we only show machines that are in local bootenv? others could be transistioning
            # if the machine has profiles, collect them
            if machine[u"Profiles"]:
                for profile in machine[u"Profiles"]:
                    profiles[profile].append(name)
            inventory["_meta"]["hostvars"][name] = {"ansible_ssh_user": "root", "ansible_host": machine[u"Address"]}
    else:
        raise IOError(raw.text)
    # Build one inventory section per profile that has hosts, children
    # or vars; empty profiles are omitted.
    for profile in profiles:
        section = {}
        if len(profiles[profile]) > 0:
            section["hosts"] = []
            for machine in profiles[profile]:
                section["hosts"].extend([machine])
        if profiles_vars[profile] is None:
            pass # do nothing: the profile declares no parameters
        elif u'ansible-children' in profiles_vars[profile].keys():
            section["children"] = []
            for child in profiles_vars[profile][u'ansible-children']:
                section["children"].extend([child])
        elif len(profiles_vars[profile]) > 0:
            section["vars"] = {}
            for param in profiles_vars[profile]:
                value = profiles_vars[profile][param]
                section["vars"][param] = value
        if len(section.keys()) > 0:
            inventory[profile] = section
    print json.dumps(inventory)

if __name__ == "__main__":
    main()
|
from compas.geometry import Pointcloud
from compas.utilities import i_to_red, pairwise
from compas_plotters import Plotter
# Generate a random point cloud and draw the unit vector between every pair
# of consecutive points, coloured by the point index; save the figure to
# the documentation images folder.
plotter = Plotter(figsize=(8, 5))
pointcloud = Pointcloud.from_bounds(8, 5, 0, 10)
for index, (a, b) in enumerate(pairwise(pointcloud)):
    vector = b - a
    vector.unitize()  # normalise to unit length before drawing
    plotter.add(vector, point=a, draw_point=True, color=i_to_red(max(index / 10, 0.1), normalize=True))
    plotter.add(b, size=10, edgecolor=(1, 0, 0))
plotter.zoom_extents()
plotter.save('docs/_images/tutorial/plotters_vector-options.png', dpi=300)
|
"""Test the list command."""
# mypy: ignore-errors
# flake8: noqa
import argparse
from unittest.mock import patch
import pytest
from dfetch.commands.list import List
from tests.manifest_mock import mock_manifest
# Namespace mimicking parsed CLI arguments with no project filter applied.
DEFAULT_ARGS = argparse.Namespace()
DEFAULT_ARGS.projects = []
@pytest.mark.parametrize(
    "name, projects",
    [
        ("empty", []),
        ("single_project", [{"name": "my_project"}]),
        ("two_projects", [{"name": "first"}, {"name": "second"}]),
    ],
)
def test_list(name, projects):
    """The list command prints one info line per project in the manifest,
    and nothing when the manifest is empty."""
    # Fix: the original named this local 'list', shadowing the builtin.
    list_command = List()
    with patch("dfetch.manifest.manifest.get_manifest") as mocked_get_manifest:
        with patch("dfetch.log.DLogger.print_info_line") as mocked_print_info_line:
            mocked_get_manifest.return_value = (mock_manifest(projects), "/")
            list_command(DEFAULT_ARGS)
            if projects:
                for project in projects:
                    mocked_print_info_line.assert_any_call("project", project["name"])
            else:
                mocked_print_info_line.assert_not_called()
|
########################################
#
# Program Code for Fred Inmoov
# Of the Cyber_One YouTube Channel
#
# This is version 3 with MarySpeech TTS,
# ProgramAB as the brain and Sphinx
# Speech Recognition
#
########################################
# generate random integer values
from random import seed
from random import randint
# seed random number generator
seed(1)
# start the service
raspi = Runtime.createAndStart("raspi","RasPi")
# Head.setController("RasPi","1","0x40")
Head = Runtime.createAndStart("Head","Adafruit16CServoDriver")
Head.attach("raspi","1","0x40")
# Start the clock services
BlinkClock = Runtime.createAndStart("BlinkClock","Clock")
AwakeClock = Runtime.createAndStart("AwakeClock","Clock")
Awake = False
# Change the names of the servos and the pin numbers to your usage
# Each servo below follows the same recipe: create the service, attach it
# to a PWM pin on the head driver board, then configure limits, mapping,
# rest position, direction, speed and auto-disable.
RightEyeLR = Runtime.createAndStart("RightEyeLR", "Servo")
# attach it to the pwm board - pin 15
RightEyeLR.attach(Head,15)
# Next lets set the various limits and mappings.
RightEyeLR.setMinMax(0,180)
RightEyeLR.map(0,180,1,180)
RightEyeLR.setRest(90)
RightEyeLR.setInverted(False)
RightEyeLR.setVelocity(60)
RightEyeLR.setAutoDisable(True)
RightEyeLR.rest()
RightEyeUD = Runtime.createAndStart("RightEyeUD", "Servo")
# attach it to the pwm board - pin 14
RightEyeUD.attach(Head,14)
RightEyeUD.setMinMax(0,180)
RightEyeUD.map(0,180,1,180)
RightEyeUD.setRest(90)
RightEyeUD.setInverted(False)
RightEyeUD.setVelocity(120)
RightEyeUD.setAutoDisable(True)
RightEyeUD.rest()
# NOTE(review): the service name "LefttEyeLR" is misspelled; left as-is
# because other scripts may look the service up by this exact name.
LeftEyeLR = Runtime.createAndStart("LefttEyeLR", "Servo")
# attach it to the pwm board - pin 13
LeftEyeLR.attach(Head,13)
LeftEyeLR.setMinMax(0,180)
LeftEyeLR.map(0,180,1,180)
LeftEyeLR.setRest(90)
LeftEyeLR.setInverted(False)
LeftEyeLR.setVelocity(120)
LeftEyeLR.setAutoDisable(True)
LeftEyeLR.rest()
LeftEyeUD = Runtime.createAndStart("LeftEyeUD", "Servo")
# attach it to the pwm board - pin 12
LeftEyeUD.attach(Head,12)
LeftEyeUD.setMinMax(0,180)
LeftEyeUD.map(0,180,1,180)
LeftEyeUD.setRest(90)
LeftEyeUD.setInverted(False)
LeftEyeUD.setVelocity(60)
LeftEyeUD.setAutoDisable(True)
LeftEyeUD.rest()
# Eyelid servos use velocity -1 (full speed) and stay powered so blinking
# is always responsive.
UpperEyeLid = Runtime.createAndStart("UpperEyeLid", "Servo")
# attach it to the pwm board - pin 11
UpperEyeLid.attach(Head,11)
UpperEyeLid.setMinMax(60,180)
UpperEyeLid.map(0,180,60,180)
UpperEyeLid.setRest(45)
UpperEyeLid.setInverted(False)
UpperEyeLid.setVelocity(-1)
UpperEyeLid.setAutoDisable(False)
# UpperEyeLid.rest()
LowerEyeLid = Runtime.createAndStart("LowerEyeLid", "Servo")
# attach it to the pwm board - pin 10
LowerEyeLid.attach(Head,10)
LowerEyeLid.setMinMax(0,120)
LowerEyeLid.map(0,180,0,120)
LowerEyeLid.setRest(30)
LowerEyeLid.setInverted(False)
LowerEyeLid.setVelocity(-1)
LowerEyeLid.setAutoDisable(False)
# LowerEyeLid.rest()
Jaw = Runtime.createAndStart("Jaw", "Servo")
# attach it to the pwm board - pin 9
Jaw.attach(Head,9)
Jaw.setMinMax(0,180)
Jaw.map(0,180,1,180)
Jaw.setRest(90)
Jaw.setInverted(False)
Jaw.setVelocity(-1)
Jaw.setAutoDisable(True)
#Jaw.rest()
HeadYaw = Runtime.createAndStart("HeadYaw", "Servo")
# attach it to the pwm board - pin 8
HeadYaw.attach(Head,8)
HeadYaw.setMinMax(0,180)
HeadYaw.map(0,180,1,180)
HeadYaw.setRest(90)
HeadYaw.setInverted(False)
HeadYaw.setVelocity(120)
HeadYaw.setAutoDisable(True)
HeadYaw.rest()
HeadPitch = Runtime.createAndStart("HeadPitch", "Servo")
# attach it to the pwm board - pin 7
HeadPitch.attach(Head,7)
HeadPitch.setMinMax(0,180)
HeadPitch.map(0,180,1,180)
HeadPitch.setRest(90)
HeadPitch.setInverted(False)
HeadPitch.setVelocity(120)
HeadPitch.setAutoDisable(True)
HeadPitch.rest()
HeadRoll = Runtime.createAndStart("HeadRoll", "Servo")
# attach it to the pwm board - pin 6
HeadRoll.attach(Head,6)
HeadRoll.setMinMax(0,180)
HeadRoll.map(0,180,1,180)
HeadRoll.setRest(90)
HeadRoll.setInverted(False)
HeadRoll.setVelocity(120)
HeadRoll.setAutoDisable(True)
HeadRoll.rest()
# TTS speech
mouth = Runtime.createAndStart("mouth", "MarySpeech")
#mouth.setVoice("cmu-bdl-hsmm") # Mark
mouth.setVoice("cmu-rms-hsmm") # Henry
#mouth.setVoice("dfki-obadiah-hsmm") # Obadiah
#mouth.setVoice("dfki-spike-hsmm") # Spike
#mouth.installComponentsAcceptLicense("dfki-obadiah-hsmm") #Use this line to install more voice files
mouth.setVolume(100.0)
# create ear Speech Recognition Service
ear = Runtime.createAndStart("ear","Sphinx")
# prevent infinite loop - this will suppress the
# recognition when speaking - default behavior
# when attaching an ear to a mouth :)
ear.attach(mouth)
# Jaw control based on speech.
mouthcontrol = Runtime.create("mouthcontrol","MouthControl")
mouthcontrol.setJaw(Jaw)
mouthcontrol.setMouth(mouth)
mouthcontrol.setmouth(77, 120)
mouthcontrol.setdelays(60, 60, 70)
mouthcontrol.startService()
# create a ProgramAB service and start a session
Fred = Runtime.start("Fred", "ProgramAB")
Fred.startSession("Ray")
# create a route which sends published Responses to the
# mouth.speak(String) method
Fred.addTextListener(mouth)
# Next lets create a route that sends the speech our
# robot has heard to the ProgramAB
ear.addTextListener(Fred)
# Define Wakeup and sleep routines.
def WakeSleep(timedata):
    """Toggle the robot's awake state, starting or stopping the blink clock."""
    # Fix: Awake is assigned below, so without this declaration Python
    # treats it as a local and the initial read raises UnboundLocalError.
    global Awake
    if Awake == False:
        BlinkClock.startClock()
        Awake = True # need to add a wake up sequence here.
    else:
        BlinkClock.stopClock()
        Awake = False # need to add a going to sleep sequence here.
# Routine to create the blinking motion
def blink(timedata):
    """Close both eyelids briefly, reopen them, then randomise the delay
    before the next blink."""
    UpperEyeLid.moveTo(150) # close the upper eye lid
    LowerEyeLid.moveTo(150) # close the lower eye lid
    sleep(0.5)
    UpperEyeLid.moveTo(45) # Open the upper eye lid
    LowerEyeLid.moveTo(45) # Open the lower eye lid
    BlinkClock.setInterval(randint(5000, 10000)) # Set a new random time for the next blink
    # NOTE(review): this 10 s sleep blocks the clock callback for its whole
    # duration -- confirm it is intentional.
    sleep(10.0)
# Wire the clock's pulse event to blink() and start it at a 10 s interval.
BlinkClock.addListener("pulse", python.name, "blink")
BlinkClock.setInterval(10000)
BlinkClock.startClock()
def eyesLR(eyesLRpos):
    """Point both eyes to the same left/right servo position."""
    RightEyeLR.moveTo(eyesLRpos)
    LeftEyeLR.moveTo(eyesLRpos)
def eyesUD(eyesUDpos):
    """Point both eyes to the same up/down servo position."""
    RightEyeUD.moveTo(eyesUDpos)
    LeftEyeUD.moveTo(eyesUDpos)
# Startup sequence: open the eyelids, sweep the eyes left to right,
# recentre, then greet and start listening.
UpperEyeLid.moveTo(45)
LowerEyeLid.moveTo(45)
Jaw.moveTo(77)
eyesLR(0)
sleep(2.0)
eyesLR(180)
sleep(2.0)
eyesLR(90)
UpperEyeLid.moveTo(45)
LowerEyeLid.moveTo(45)
mouth.speakBlocking(u"Hello world, I am awake.")
# begin listening
ear.startListening()
sleep(10)
Fred.getResponse(u"What time is it?")
#mouth.speak(u"I wonder if non blocking is a good idea?")
|
from country_builder import CountryBuilder
from game_builder import GameBuilder
from event_builder import EventBuilder
from medal_builder import MedalBuilder
from team_builder import TeamBuilder
from coach_builder import CoachBuilder
from athletes_builder import AthletesBuilder
import os.path
my_path = os.path.abspath(os.path.dirname(__file__))
path = os.path.join(my_path, "../olympics/")
# todo improve the load of files
input_file_path = path
output_file_path = path+'/output/'
# Note! The order of execution matters!  Each builder is instantiated and
# run in dependency order, reading from the olympics folder and writing
# its output next to it.
for builder_class in (
    CountryBuilder,
    GameBuilder,
    EventBuilder,
    MedalBuilder,
    TeamBuilder,
    CoachBuilder,
    AthletesBuilder,
):
    builder_class().build(input_file_path, output_file_path)
print("Process done!")
#!/usr/bin/env python
# coding: utf-8
# Binary Search and Linear Search on a large number along with Time
import time
# Binary Search
def binary_search(given_list, item):
    """Binary-search *given_list* (must be sorted ascending) for *item*.

    Returns a tuple (message, elapsed_seconds): the message reports the
    matching index, or that the item was not found.

    Fixes over the original: the loop now terminates when the search range
    is exhausted (the original spun forever on a missing item), and the
    dead `elif given_list[mid] == item` branch -- unreachable because the
    while condition already excluded equality -- is gone.
    """
    start = time.time()
    low = 0
    high = len(given_list) - 1
    while low <= high:
        mid = (low + high) // 2
        if given_list[mid] == item:
            return "{} matched at {}".format(item, mid), time.time() - start
        elif given_list[mid] < item:
            low = mid + 1
        else:
            high = mid - 1
    return "{} not found".format(item), time.time() - start
# Demo: search a 10-million element sorted list and time it.
# NOTE(review): time.time() deltas are in seconds, although the message
# below says milliseconds.
list_n = [x for x in range(10000000)]
Output, Time = binary_search((list_n),622253)
print(Output)
# 622253 matched at 622253
print()
print("It took {:.8f} milliseconds to search".format(Time))
# It took 0.00000000 milliseconds to search
# Linear Search
def linear_search(given_list, item):
    """Scan *given_list* front to back for *item*.

    Parameters
    ----------
    given_list : sequence of items.
    item : the value to look for.

    Returns
    -------
    tuple : (message, elapsed_seconds) where message is
        "<item> matched at <index>" on success or "<item> not found".

    enumerate() supplies the index directly; the original called
    ``given_list.index(num)``, a second O(n) scan of the list.
    The original also fell off the end and returned None when the
    item was missing, crashing the caller's tuple unpacking.
    """
    start = time.time()
    for index, num in enumerate(given_list):
        if num == item:
            return "{} matched at {}".format(item, index), time.time() - start
    return "{} not found".format(item), time.time() - start
# Demo: time a linear search over ten million integers.
list_n = list(range(10000000))
Output, Time = linear_search(list_n, 622253)
print(Output)
# 622253 matched at 622253
print()
# time.time() differences are in SECONDS; the old message said "milliseconds".
print("It took {:.8f} seconds to search".format(Time))
# e.g. It took 0.04587674 seconds to search
|
# -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
import mock
import pytest
from data_pipeline.tools.introspector.info.namespace import NamespaceInfoCommand
from data_pipeline.tools.introspector.info.source import SourceInfoCommand
from data_pipeline.tools.introspector.info.topic import TopicInfoCommand
from tests.tools.introspector.base_test import TestIntrospectorBase
class TestTopicInfoCommand(TestIntrospectorBase):
    """Tests for the introspector's 'info topic' command."""

    @pytest.fixture
    def command(self, containers):
        cmd = TopicInfoCommand("data_pipeline_introspector_info_topic")
        # Silence the command's logging with mocks.
        cmd.log.debug = mock.Mock()
        cmd.log.info = mock.Mock()
        cmd.log.warning = mock.Mock()
        return cmd

    def test_list_schemas(
        self,
        command,
        schematizer,
        topic_one_active,
        schema_one_active
    ):
        """list_schemas must yield exactly the topic's latest schema."""
        topic_name = topic_one_active.name
        schemas = command.list_schemas(topic_name)
        latest = schematizer.get_latest_schema_by_topic_name(topic_name)
        assert len(schemas) == 1
        self._assert_schema_equals_schema_dict(
            topic_schema=latest,
            schema_obj=schema_one_active,
            schema_dict=schemas[0]
        )

    def test_info_topic(
        self,
        command,
        schematizer,
        schema_one_active,
        namespace_one,
        source_one_active,
        topic_one_active
    ):
        """info_topic must report the topic itself plus its schemas."""
        topic_name = topic_one_active.name
        latest = schematizer.get_latest_schema_by_topic_name(topic_name)
        topic_dict = command.info_topic(topic_name)
        self._assert_topic_equals_topic_dict(
            topic=topic_one_active,
            topic_dict=topic_dict,
            namespace_name=namespace_one,
            source_name=source_one_active,
            is_active=True
        )
        schemas = topic_dict['schemas']
        assert len(schemas) == 1
        self._assert_schema_equals_schema_dict(
            topic_schema=latest,
            schema_obj=schema_one_active,
            schema_dict=schemas[0]
        )
class TestSourceInfoCommand(TestIntrospectorBase):
    """Tests for the introspector's 'info source' command."""

    @pytest.fixture
    def command(self, containers):
        cmd = SourceInfoCommand("data_pipeline_introspector_info_source")
        # Silence the command's logging with mocks.
        cmd.log.debug = mock.Mock()
        cmd.log.info = mock.Mock()
        cmd.log.warning = mock.Mock()
        return cmd

    def test_info_source_id(
        self,
        command,
        topic_one_inactive,
        topic_one_inactive_b,
        source_one_inactive,
        namespace_one
    ):
        """Looking a source up by id must list both of its topics."""
        source = topic_one_inactive.source
        source_dict = command.info_source(
            source_id=source.source_id,
            source_name=None,
            namespace_name=None,
            active_sources=False
        )
        self._assert_source_equals_source_dict(
            source=source,
            source_dict=source_dict,
            namespace_name=namespace_one,
            source_name=source_one_inactive
        )
        topics = source_dict['topics']
        assert len(topics) == 2
        # Pair expected and actual topics after sorting both by topic id.
        expected = sorted(
            [topic_one_inactive, topic_one_inactive_b],
            key=lambda topic: topic.topic_id
        )
        actual = sorted(topics, key=lambda topic_dict: topic_dict['topic_id'])
        for expected_topic, actual_dict in zip(expected, actual):
            self._assert_topic_equals_topic_dict(
                topic=expected_topic,
                topic_dict=actual_dict,
                namespace_name=namespace_one,
                source_name=source_one_inactive,
                is_active=False
            )

    def test_info_source_missing_source_name(
        self,
        command,
        namespace_one
    ):
        """An unknown source name must raise a descriptive ValueError."""
        with pytest.raises(ValueError) as e:
            command.info_source(
                source_id=None,
                source_name="this_source_will_not_exist",
                namespace_name=namespace_one
            )
        assert e.value.args
        assert "Given SOURCE_NAME|NAMESPACE_NAME doesn't exist" in e.value.args[0]

    def test_info_source_name_pair(
        self,
        command,
        topic_one_active,
        source_one_active,
        namespace_one
    ):
        """Looking a source up by (name, namespace) must list its topic."""
        source_dict = command.info_source(
            source_id=None,
            source_name=source_one_active,
            namespace_name=namespace_one,
            active_sources=True
        )
        source = topic_one_active.source
        self._assert_source_equals_source_dict(
            source=source,
            source_dict=source_dict,
            namespace_name=namespace_one,
            source_name=source_one_active,
            active_topic_count=1
        )
        topics = source_dict['topics']
        assert len(topics) == 1
        self._assert_topic_equals_topic_dict(
            topic=topic_one_active,
            topic_dict=topics[0],
            namespace_name=namespace_one,
            source_name=source_one_active,
            is_active=True
        )
class TestNamespaceInfoCommand(TestIntrospectorBase):
    """Tests for the introspector's 'info namespace' command."""

    @pytest.fixture
    def command(self, containers):
        cmd = NamespaceInfoCommand("data_pipeline_introspector_info_namespace")
        # Silence the command's logging with mocks.
        cmd.log.debug = mock.Mock()
        cmd.log.info = mock.Mock()
        cmd.log.warning = mock.Mock()
        return cmd

    def test_info_namespace_missing(
        self,
        command
    ):
        """An unknown namespace must raise a descriptive ValueError."""
        with pytest.raises(ValueError) as e:
            command.info_namespace("not_a_namespace")
        assert e.value.args
        assert "Given namespace doesn't exist" in e.value.args[0]

    @pytest.mark.parametrize(
        "assert_active_counts",
        [True, False],
        ids=['with_active_namespaces', 'without_active_namespaces']
    )
    def test_info_namespace(
        self,
        command,
        namespace_two,
        source_two_inactive,
        source_two_active,
        topic_two_active,
        assert_active_counts
    ):
        """info_namespace must report the namespace and both its sources."""
        namespace_obj = topic_two_active.source.namespace
        namespace_dict = command.info_namespace(
            namespace_two,
            active_namespaces=assert_active_counts
        )
        self._assert_namespace_equals_namespace_dict(
            namespace=namespace_obj,
            namespace_dict=namespace_dict,
            namespace_name=namespace_two,
            assert_active_counts=assert_active_counts
        )
        sources = namespace_dict['sources']
        assert len(sources) == 2
        # Both sources must be present; their order is not guaranteed.
        assert {s['name'] for s in sources} == {
            source_two_active,
            source_two_inactive,
        }
|
class Solution:
    def search(self, nums: List[int], target: int) -> int:
        """Return the index of *target* in a rotated sorted array, or -1.

        Modified binary search: at every step at least one half of the
        current window is sorted, so we can tell which half could
        contain the target and discard the other.
        """
        lo, hi = 0, len(nums) - 1
        while lo <= hi:
            mid = (lo + hi) // 2
            if nums[mid] == target:
                return mid
            if nums[lo] <= nums[mid]:
                # The left half [lo, mid] is sorted.
                if nums[lo] <= target < nums[mid]:
                    hi = mid - 1
                else:
                    lo = mid + 1
            else:
                # The right half [mid, hi] is sorted.
                if nums[mid] <= target <= nums[hi]:
                    lo = mid + 1
                else:
                    hi = mid - 1
        return -1
"""Provides helper functions for dates."""
import datetime as dt
from typing import List, Optional
import pytz
BERLIN = pytz.timezone("Europe/Berlin")
UTC = pytz.UTC
def local_now() -> dt.datetime:
    """Return the current timezone-aware datetime in the Berlin zone.

    Returns:
        dt.datetime: Current datetime (Europe/Berlin).
    """
    return dt.datetime.now(BERLIN)
def local_today() -> dt.date:
    """Return today's date in the Berlin time zone.

    Returns:
        dt.date: Today's date.
    """
    berlin_now = dt.datetime.now(BERLIN)
    return berlin_now.date()
def local_yesterday() -> dt.date:
    """Return yesterday's date in the Berlin time zone.

    Returns:
        dt.date: Yesterday's date.
    """
    one_day = dt.timedelta(days=1)
    return local_today() - one_day
def date_range(start: dt.date, end: dt.date) -> List[dt.date]:
    """Build the list of all dates from *start* through *end*, inclusive
    on both sides. An empty list is returned when end precedes start.

    Args:
        start (dt.date): First date of the range.
        end (dt.date): Last date of the range (included).

    Returns:
        List[dt.date]: Every date between start and end.
    """
    one_day = dt.timedelta(days=1)
    dates = []
    current = start
    while current <= end:
        dates.append(current)
        current += one_day
    return dates
def date_param(
    date: Optional[dt.date],
    *,
    default: Optional[dt.date] = None,
    earliest: Optional[dt.date] = None,
    latest: Optional[dt.date] = None,
) -> Optional[dt.date]:
    """Normalise an optional date parameter: fall back to a default and
    clamp the value into the [earliest, latest] window.

    Args:
        date (Optional[dt.date]): The date to normalise.
        default (Optional[dt.date]): Fallback when ``date`` is None.
        earliest (Optional[dt.date]): Lower bound, if any.
        latest (Optional[dt.date]): Upper bound, if any.

    Returns:
        Optional[dt.date]: The clamped date, or None if both ``date``
        and ``default`` are ``None``.
    """
    if date is None:
        return default
    if earliest and date < earliest:
        date = earliest
    if latest and date > latest:
        date = latest
    return date
def to_timedelta(seconds: Optional[int]) -> Optional[dt.timedelta]:
    """Convert a number of seconds to a timedelta, passing None through.

    Args:
        seconds (Optional[int]): Seconds to convert, or None.

    Returns:
        Optional[dt.timedelta]: The timedelta, or None for None input.
    """
    if seconds is None:
        return None
    return dt.timedelta(seconds=seconds)
def as_local_tz(
    date_time: Optional[str], tz: pytz.timezone = BERLIN
) -> Optional[dt.datetime]:
    """Attach a timezone to a timezone-naive isoformat date/time string.

    Args:
        date_time (Optional[str]): Timezone-naive isoformat date/time
            string, or None.
        tz (pytz.timezone, optional): Timezone to attach. Defaults to
            BERLIN.

    Returns:
        Optional[dt.datetime]: Localised datetime, or None for None input.
    """
    if date_time is None:
        return None
    naive = dt.datetime.fromisoformat(date_time)
    return tz.localize(naive)
|
from matplotlib import pyplot as plt
import numpy as np
from tensorflow.keras.preprocessing import image
from PIL import Image, ImageOps
# Models to play around with:
from tensorflow.keras.applications.mobilenet_v2 import MobileNetV2, preprocess_input, decode_predictions
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.applications.vgg16 import VGG16
def image_to_array(image_path='/images/aquarium.jpg',
                   image_width=996,
                   add_border=True):
    """Load an image, resize it to *image_width* (keeping the aspect
    ratio), optionally add a grey border, and return a NumPy array.

    Parameters
    ----------
    image_path : path of the image file to load.
    image_width : target width in pixels; height scales proportionally.
    add_border : when True, pad the image with a 50-pixel grey border.

    Returns
    -------
    Numpy array produced by keras' ``image.img_to_array`` (rows, cols,
    channels).
    """
    img = Image.open(image_path)
    # Scale the height by the same factor as the width.
    scale_factor = image_width / float(img.size[0])
    new_height = int(float(img.size[1]) * scale_factor)
    # Image.ANTIALIAS was an alias of LANCZOS and was removed in
    # Pillow 10; LANCZOS gives identical output.
    img = img.resize((image_width, new_height), Image.LANCZOS)
    if add_border:
        img = ImageOps.expand(img, border=50, fill='grey')
    return image.img_to_array(img)
def image_tile_slicer(img_array=None, steps_for_x_frames=14,
                      steps_for_y_frames=14, tile_size=224):
    """Slice *img_array* into square tiles of ``tile_size`` pixels.

    Parameters
    ----------
    img_array : image as a NumPy array (rows, cols, channels). When
        None, the default image is loaded lazily via ``image_to_array()``.
        (The original evaluated ``image_to_array()`` as the default
        argument, which ran the whole image load at import time.)
    steps_for_x_frames : stride in pixels along the first (row) axis.
    steps_for_y_frames : stride in pixels along the second (col) axis.
    tile_size : edge length of the square tiles; 224 matches the input
        size of the ImageNet models used downstream.

    Returns
    -------
    list of (tile_size, tile_size, 3) NumPy arrays; partial tiles at
    the edges are dropped.
    """
    if img_array is None:
        img_array = image_to_array()
    tiles = []
    # Iterate over the actual image extent instead of a hard-coded
    # 1920x1080 frame, so images larger than that are fully covered.
    for x in range(0, img_array.shape[0], steps_for_x_frames):
        for y in range(0, img_array.shape[1], steps_for_y_frames):
            tile = img_array[x:x + tile_size, y:y + tile_size, :]
            # Keep only complete tiles (edge remnants are smaller).
            if tile.shape == (tile_size, tile_size, 3):
                tiles.append(tile)
    return tiles
def make_predictions(chosen_model=MobileNetV2, tiles=None):
    """Run a pretrained ImageNet classifier over a list of 224x224 tiles.

    Parameters
    ----------
    chosen_model : a pretrained Keras application class
        (e.g. MobileNetV2, ResNet50 or VGG16).
    tiles : list of (224, 224, 3) tiles. When None, tiles are produced
        lazily via ``image_tile_slicer()``. (The original evaluated
        ``image_tile_slicer()`` as the default argument, which ran the
        whole image pipeline at import time.)

    Returns
    -------
    Decoded top predictions: one list of (class_id, class_name, score)
    tuples per tile.
    """
    if tiles is None:
        tiles = image_tile_slicer()
    model = chosen_model(weights='imagenet', include_top=True)
    # NOTE(review): compile() is not needed for inference; kept for
    # parity with the original behaviour.
    model.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    batch = np.array(tiles)
    return decode_predictions(model.predict(batch))
def nemo_classifier(predictions=None, accuracy_threshold=0.9):
    """Count anemone-fish ("Nemo") detections among per-tile predictions
    and print a summary report.

    Parameters
    ----------
    predictions : decoded predictions — one list of
        (class_id, class_name, score) tuples per tile, best guess first.
        When None, ``make_predictions()`` is called lazily. (The original
        evaluated ``make_predictions()`` as the default argument, which
        downloaded and ran the model at import time.)
    accuracy_threshold : minimum top-guess score counted as "confident"
        (default is 90%).

    Returns
    -------
    tuple : (nemo_count, confidence_count, confident_nemo_count) —
        frames whose top guess is 'anemone_fish', frames whose top score
        beats the threshold, and frames satisfying both. (The original
        returned None; the counts are now returned as well so callers
        can use them programmatically.)
    """
    if predictions is None:
        predictions = make_predictions()
    nemo_count = 0
    confidence_count = 0
    confident_nemo_count = 0
    # enumerate() replaces the original's manually incremented counter.
    for frame_nr, frame_predictions in enumerate(predictions):
        top_name = frame_predictions[0][1]
        top_score = frame_predictions[0][2]
        if top_name == 'anemone_fish':
            nemo_count += 1
            if top_score > accuracy_threshold:
                confident_nemo_count += 1
                print('Anemoe fish found in the frame nr.: '+str(frame_nr))
                print()
        if top_score > accuracy_threshold:
            confidence_count += 1
    if confident_nemo_count == 0:
        print("No Anemoe fish was found with the given accuracy threshold of "+str(accuracy_threshold)+'\n')
    print('=========================================================\n')
    print('Total number of 224x224 frames: '+str(len(predictions)))
    print('Frames with Nemos: '+str(nemo_count))
    print('Frames with accuracy threshold over '+str(accuracy_threshold)+': '+str(confidence_count))
    print('Frames with accuracy of Nemo over '+str(accuracy_threshold)+': '+str(confident_nemo_count)+'\n')
    print('=========================================================\n')
    return nemo_count, confidence_count, confident_nemo_count
def show_top_predictions(predictions=make_predictions(),
accuracy_threshold=0.9):
"""
Shows top two predictions for every 224 by 224 tile
created from the original picture,
with the selected accuracy_threshold (default is 90%) for the prediction.
"""
prediction_nr = 0
for i in predictions:
if predictions[prediction_nr][0][2] > accuracy_threshold:
print('Prediction for frame nr.: ' + str(prediction_nr))
print('Best guess is: '+str(predictions[prediction_nr][0][2])+str(' ')+str(predictions[prediction_nr][0][1]))
print('Second best guess is: '+str(predictions[prediction_nr][1][2])+str(' ')+str(predictions[prediction_nr][1][1]))
print('---\n')
prediction_nr += 1 |
# -*- coding: utf-8 -*-
"""
ui/gridbase.py
Last updated: 2021-10-10
Widget with tiles on grid layout (QGraphicsScene/QGraphicsView).
=+LICENCE=============================
Copyright 2021 Michael Towers
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=-LICENCE========================================
"""
##### Configuration #####################
FONT_DEFAULT = 'Droid Sans'     # default font family for cell text
FONT_SIZE_DEFAULT = 11          # default font size (points)
FONT_COLOUR = '442222' # rrggbb — default text colour
BORDER_COLOUR = '000088' # rrggbb — default cell-border colour
MARK_COLOUR = 'E00000' # rrggbb — "marked" (alternative) text colour
# Line width for borders
UNDERLINE_WIDTH = 3.0   # the thick "underline" border style
BORDER_WIDTH = 1.0      # normal cell-border line width
SCENE_MARGIN = 10.0 # Margin around content in GraphicsView widgets
#####################
### Messages
# German user-facing message templates (runtime strings):
# _TILE_OUT_OF_BOUNDS — a tile lies outside the table area;
# _NOTSTRING — a string was expected in <grid::Tile>.
_TILE_OUT_OF_BOUNDS = ("Kachel außerhalb Tabellenbereich:\n"
        " Zeile {row}, Höhe {rspan}, Spalte {col}, Breite {cspan}")
_NOTSTRING = "In <grid::Tile>: Zeichenkette erwartet: {val}"
#####################################################
import sys, os, copy
from PySide6.QtWidgets import QGraphicsView, QGraphicsScene, \
QGraphicsRectItem, QGraphicsSimpleTextItem, QGraphicsLineItem
from PySide6.QtGui import (QFont, QPen, QColor, QBrush, QTransform,
QPainter, QPdfWriter, QPageLayout)
from PySide6.QtCore import Qt, QMarginsF, QRectF, QBuffer, QByteArray
class GridError(Exception):
    """Raised for grid-construction and tile errors (e.g. a tile placed
    outside the table area, or a non-string cell value).
    """
### ---
class GridView(QGraphicsView):
    """This is the "view" widget for the grid.
    The actual grid is implemented as a "scene".
    """
    def __init__(self):
        # Current zoom factor (1.0 = unscaled); adjusted by scale().
        self._scale = 1.0
        super ().__init__()
        # Change update mode: The default, MinimalViewportUpdate, seems
        # to cause artefacts to be left, i.e. it updates too little.
        # Also BoundingRectViewportUpdate seems not to be 100% effective.
        #self.setViewportUpdateMode(self.BoundingRectViewportUpdate)
        self.setViewportUpdateMode(self.FullViewportUpdate)
        # Logical dots-per-inch; used for the mm -> point conversion.
        self.ldpi = self.logicalDpiX()
        if self.logicalDpiY() != self.ldpi:
            # NOTE(review): REPORT is not defined or imported in this
            # file — presumably injected globally elsewhere in the
            # project; confirm, otherwise this raises NameError.
            REPORT('WARNING', "LOGICAL DPI different for x and y")
        # Conversion factor: millimetres -> view points at this DPI.
        self.MM2PT = self.ldpi / 25.4
#
    def set_scene(self, scene):
        """Set the QGraphicsScene for this view. The size will be fixed
        to that of the initial <sceneRect> (to prevent it from being
        altered by pop-ups).
        <scene> may be <None>, to remove the current scene.
        """
        self.setScene(scene)
        if scene:
            # Freeze the view's scene rectangle to the scene's initial size.
            self.setSceneRect(scene._sceneRect)
#
    def mousePressEvent(self, event):
        """Dispatch a mouse press to the items under the cursor.

        Topmost first, each item's leftclick()/rightclick() is tried;
        an item returning a false value stops the propagation. Items
        without these methods are skipped (AttributeError).
        """
        point = event.pos()
        # print("POS:", point, self.mapToGlobal(point), self.itemAt(point))
        # The Tile may not be the top item.
        items = self.items(point)
        button = event.button()
        if items:
            for item in items:
                # Give all items at this point a chance to react, starting
                # with the topmost. An item can break the chain by
                # returning a false value.
                try:
                    if button == Qt.LeftButton:
                        if not item.leftclick():
                            return
                    elif button == Qt.RightButton:
                        if not item.rightclick():
                            return
                except AttributeError:
                    # No click handler on this item — try the next one down.
                    pass
#
    ### View scaling
    def scaleUp (self):
        """Zoom in by one step (+10% of the current scale)."""
        self.scale(1)
#
    def scaleDn (self):
        """Zoom out by one step (-10% of the current scale)."""
        self.scale(-1)
#
    def scale(self, delta):
        """Adjust the view's zoom by <delta> tenths of the current scale.

        NOTE(review): this overrides QGraphicsView.scale(), which has a
        different (two-argument) signature — confirm nothing calls the
        base version on this view.
        """
        t = QTransform()
        self._scale += self._scale * delta / 10
        t.scale(self._scale, self._scale)
        self.setTransform(t)
### ---------------
###
class GridViewRescaling(GridView):
    """A <GridView> that automatically adjusts the scaling of its scene
    to fill the viewing window whenever the widget is resized.
    """
    def __init__(self):
        super().__init__()
        # Apparently it is a good idea to disable scrollbars when using
        # this resizing scheme. With this resizing scheme they would not
        # appear anyway, so this doesn't lose any features!
        self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)

    def resizeEvent(self, event):
        """Refit the scene on every widget resize."""
        self.resize()
        return super().resizeEvent(event)

    def resize(self, qrect=None):
        """Scale the view so <qrect> (default: the whole scene rectangle)
        fits the viewport, preserving the aspect ratio.
        """
        # 'is None' instead of '== None': the equality form would invoke
        # QRectF's overloaded __eq__; identity is both correct and idiomatic.
        if qrect is None:
            qrect = self.sceneRect()
        self.fitInView(qrect, Qt.KeepAspectRatio)
###
class GridBase(QGraphicsScene):
    """The "scene" holding the grid of tiles.

    Column/row boundaries are precomputed in view points as <xmarks> /
    <ymarks>; cell styles are managed by name in <_styles>.
    """
    def __init__(self, gview, rowheights, columnwidths):
        """Set the grid size.
        <gview>: the view widget this scene is shown in (provides the
            mm -> point conversion factor MM2PT).
        <columnwidths>: a list of column widths (mm)
        <rowheights>: a list of row heights (mm)
        Rows and columns are 0-indexed.
        """
        super().__init__()
        self._gview = gview
        # '*' is the base style from which others may be derived.
        self._styles = {'*': CellStyle(FONT_DEFAULT, FONT_SIZE_DEFAULT,
                align = 'c', border = 1, mark = MARK_COLOUR)
        }
        # Cumulative column boundaries (points): xmarks[i] is the left
        # edge of column i; xmarks[-1] is the right edge of the grid.
        self.xmarks = [0.0]
        x = 0.0
        for c in columnwidths:
            x += c * self._gview.MM2PT
            self.xmarks.append(x)
        # Cumulative row boundaries (points), analogous to xmarks.
        self.ymarks = [0.0]
        y = 0.0
        for r in rowheights:
            y += r * self._gview.MM2PT
            self.ymarks.append(y)
        # Allow a little margin
        self._sceneRect = QRectF(-SCENE_MARGIN, -SCENE_MARGIN,
                x + 2 * SCENE_MARGIN, y + 2 * SCENE_MARGIN)
#
    def style(self, name):
        """Return the registered <CellStyle> with the given name."""
        return self._styles[name]
#
    def new_style(self, name, base = None, **params):
        """Register a new cell style under <name>.
        <base>: name of an existing style to copy and modify; when not
            given, a fresh <CellStyle> is built from <params> ('font'
            and 'size' entries are consumed, the rest passed through).
        """
        if base:
            style0 = self._styles[base]
            self._styles[name] = style0.copy(**params)
        else:
            self._styles[name] = CellStyle(params.pop('font', None),
                    params.pop('size', None), **params)
#
    def ncols(self):
        """Return the number of columns in the grid."""
        return len(self.xmarks) - 1
#
    def nrows(self):
        """Return the number of rows in the grid."""
        return len(self.ymarks) - 1
#
    def screen_coordinates(self, x, y):
        """Return the screen coordinates of the given scene point.
        """
        viewp = self._gview.mapFromScene(x, y)
        return self._gview.mapToGlobal(viewp)
#
    def basic_tile(self, row, col, tag, text, style, cspan = 1, rspan = 1):
        """Add a basic tile to the grid, checking coordinates and
        converting row + col to x + y point-coordinates for the
        <Tile> class.
        Raises <GridError> if the (possibly spanned) cell does not fit
        in the grid. Returns the new <Tile>.
        """
        # Check bounds
        if (row < 0 or col < 0
                or (row + rspan) >= len(self.ymarks)
                or (col + cspan) >= len(self.xmarks)):
            raise GridError(_TILE_OUT_OF_BOUNDS.format(
                    row = row, col = col, cspan = cspan, rspan = rspan))
        x = self.xmarks[col]
        y = self.ymarks[row]
        w = self.xmarks[col + cspan] - x
        h = self.ymarks[row + rspan] - y
        t = Tile(self, tag, x, y, w, h, text, self._styles[style])
        self.addItem(t)
        return t
#
    ### pdf output
    def setPdfMargins(self, left = 15, top = 15, right = 15, bottom = 15):
        """Set the page margins (mm) used for pdf output; returns them."""
        self._pdfmargins = (left, top, right, bottom)
        return self._pdfmargins
#
    def pdfMargins(self):
        """Return the pdf page margins, installing the defaults first
        if none have been set yet.
        """
        try:
            return self._pdfmargins
        except AttributeError:
            return self.setPdfMargins()
#
    def to_pdf(self, filepath):
        """Produce and save a pdf of the table.
        The output orientation is selected according to the aspect ratio
        of the table. If the table is too big for the page area, it will
        be shrunk to fit.
        Returns the (possibly '.pdf'-suffixed) file path.
        """
        if not filepath.endswith('.pdf'):
            filepath += '.pdf'
        printer = QPdfWriter(filepath)
        printer.setPageSize(printer.A4)
        printer.setPageMargins(QMarginsF(*self.pdfMargins()),
                QPageLayout.Millimeter)
        sceneRect = self._sceneRect
        sw = sceneRect.width()
        sh = sceneRect.height()
        if sw > sh:
            # Wider than high: switch to landscape orientation.
            printer.setPageOrientation(QPageLayout.Orientation.Landscape)
        painter = QPainter()
        painter.begin(printer)
        # Ratio of the pdf device resolution to the view's logical dpi.
        scaling = printer.logicalDpiX() / self._gview.ldpi
        # Do drawing with painter
        page_layout = printer.pageLayout()
        pdf_rect = page_layout.paintRect(QPageLayout.Point)
        pdf_w = pdf_rect.width()
        pdf_h = pdf_rect.height()
        if sw > pdf_w or sh > pdf_h:
            # Shrink to fit page
            self.render(painter)
        else:
            # Scale resolution to keep size
            pdf_rect.setWidth(sw * scaling)
            pdf_rect.setHeight(sh * scaling)
            self.render(painter, pdf_rect)
        painter.end()
        return filepath
#
    # An earlier, alternative implementation of the pdf writer:
    def to_pdf0(self, filepath):
        """Produce and save a pdf of the table.
        The output orientation is selected according to the aspect ratio
        of the table. If the table is too big for the page area, it will
        be shrunk to fit.
        (Alternative implementation: renders into an in-memory buffer
        and writes the file out at the end.)
        """
        qbytes = QByteArray()
        qbuf = QBuffer(qbytes)
        qbuf.open(qbuf.WriteOnly)
        printer = QPdfWriter(qbuf)
        printer.setPageSize(printer.A4)
        printer.setPageMargins(QMarginsF(*self.pdfMargins()),
                QPageLayout.Millimeter)
        sceneRect = self._sceneRect
        sw = sceneRect.width()
        sh = sceneRect.height()
        if sw > sh:
            # Wider than high: switch to landscape orientation.
            printer.setPageOrientation(QPageLayout.Orientation.Landscape)
        # Resolutions in dots-per-mm relate scene size to pdf size.
        pdf_dpmm = printer.resolution() / 25.4 # pdf resolution, dots per mm
        scene_dpmm = self._gview.MM2PT # scene resolution, dots per mm
        natural_scale = pdf_dpmm / scene_dpmm
        page_layout = printer.pageLayout()
        pdf_rect = page_layout.paintRect(QPageLayout.Millimeter)
        swmm = sw / self._gview.MM2PT
        shmm = sh / self._gview.MM2PT
        painter = QPainter(printer)
        pdf_wmm = pdf_rect.width()
        pdf_hmm = pdf_rect.height()
        if swmm > pdf_wmm or shmm > pdf_hmm:
            # Shrink to fit page
            self.render(painter)
        else:
            # Scale resolution to keep size
            pdf_rect.setWidth(sw * natural_scale)
            pdf_rect.setHeight(sh * natural_scale)
            self.render(painter, pdf_rect)
        painter.end()
        qbuf.close()
        # Write resulting file
        if not filepath.endswith('.pdf'):
            filepath += '.pdf'
        with open(filepath, 'wb') as fh:
            fh.write(bytes(qbytes))
        return filepath
###
class CellStyle:
    """Handle various aspects of cell styling.
    Also manage caches for fonts, pens and brushes (class-wide, shared
    by all styles).
    """
    _fonts = {}
    _brushes = {}
    _pens = {}
#
    @classmethod
    def getFont(cls, fontFamily, fontSize, fontBold, fontItalic):
        """Return a cached QFont for the given family/size/weight/slant."""
        ftag = (fontFamily, fontSize, fontBold, fontItalic)
        try:
            return cls._fonts[ftag]
        except KeyError:
            # Not cached yet — build it below.
            pass
        font = QFont()
        if fontFamily:
            font.setFamily(fontFamily)
        if fontSize:
            font.setPointSizeF(fontSize)
        if fontBold:
            font.setBold(True)
        if fontItalic:
            font.setItalic(True)
        cls._fonts[ftag] = font
        return font
#
    @classmethod
    def getPen(cls, width, colour = None):
        """Manage a cache for pens of different width and colour.
        <width> is the line width; a false value selects the shared
        "no pen" instance. <colour> is 'RRGGBB', default <BORDER_COLOUR>.
        """
        if width:
            wc = (width, colour or BORDER_COLOUR)
            try:
                return cls._pens[wc]
            except KeyError:
                pass
            pen = QPen('#FF' + wc[1])
            pen.setWidthF(wc[0])
            cls._pens[wc] = pen
            return pen
        else:
            try:
                return cls._noPen
            except AttributeError:
                cls._noPen = QPen()
                cls._noPen.setStyle(Qt.NoPen)
                return cls._noPen
#
    @classmethod
    def getBrush(cls, colour):
        """Manage a cache for brushes of different colour.
        <colour> is a colour in the form 'RRGGBB'; None/'' selects the
        default <FONT_COLOUR>.
        """
        # Cache under the same key that is looked up. The original
        # stored under <colour> but looked up <colour or FONT_COLOUR>,
        # so the default brush missed the cache on every call.
        key = colour or FONT_COLOUR
        try:
            return cls._brushes[key]
        except KeyError:
            pass
        brush = QBrush(QColor('#FF' + key))
        cls._brushes[key] = brush
        return brush
#
    def __init__(self, font, size, align = 'c', highlight = None,
            bg = None, border = 1, border_colour = None, mark = None):
        """
        <font> is the name of the font (<None> => default, not recommended,
        unless the cell is to contain no text).
        <size> is the size of the font (<None> => default, not recommended,
        unless the cell is to contain no text).
        <align> is the horizontal (l, c or r) OR vertical (b, m, t) alignment.
        Vertical alignment is for rotated text (-90° only).
        <highlight> can set bold, italic and font colour: 'bi:RRGGBB'. All bits
        are optional, but the colon must be present if a colour is given.
        <bg> can set the background colour ('RRGGBB').
        <border>: Only three border types are supported here:
            0: none
            1: all sides
            2: (thicker) underline
        <border_colour>: 'RRGGBB', default is <BORDER_COLOUR>.
        <mark> is a colour ('RRGGBB') which can be selected as an
        "alternative" font colour.
        """
        # Font
        self.setFont(font, size, highlight)
        self.colour_marked = mark
        # Alignment
        self.setAlign(align)
        # Background colour
        self.bgColour = self.getBrush(bg) if bg else None
        # Border
        self.border = border
        self.border_colour = border_colour
#
    def setFont(self, font, size, highlight):
        """Set font and text colour from a 'bi:RRGGBB' highlight spec."""
        self._font, self._size, self._highlight = font, size, highlight
        try:
            emph, clr = highlight.split(':')
        except (AttributeError, ValueError):
            # <highlight> is None (AttributeError) or contains no colon,
            # so the 1-element split fails to unpack (ValueError).
            emph, clr = highlight or '', None
        self.fontColour = self.getBrush(clr)
        self.font = self.getFont(font, size, 'b' in emph, 'i' in emph)
#
    def setAlign(self, align):
        """Set horizontal ('l'/'c'/'r') alignment, or — for rotated
        text — vertical ('b'/'m'/'t') alignment.
        """
        if align in 'bmt':
            # Vertical
            self.alignment = ('c', align, True)
        else:
            self.alignment = (align, 'm', False)
#
    def copy(self, font = None, size = None, align = None,
            highlight = None, mark = None, bg = None, border = None):
        """Make a copy of this style, but with changes specified by the
        parameters.
        Note that a change to a 'None' parameter value is not possible.
        """
        newstyle = copy.copy(self)
        if font or size or highlight:
            newstyle.setFont(font or self._font,
                    size or self._size, highlight or self._highlight)
        if mark:
            newstyle.colour_marked = mark
        if align:
            newstyle.setAlign(align)
        if bg:
            newstyle.bgColour = self.getBrush(bg)
        if border is not None:
            newstyle.border = border
        return newstyle
###
class Tile(QGraphicsRectItem):
    """The graphical representation of a table cell.
    This cell can span rows and columns.
    It contains a simple text element.
    Both cell and text can be styled to a limited extent (see <CellStyle>).
    """
    def __init__(self, grid, tag, x, y, w, h, text, style):
        """<grid>: the owning scene (a <GridBase>).
        <tag>: an identifier for this cell.
        <x>, <y>, <w>, <h>: position and size in scene (point) coordinates.
        <text>: initial cell text (None -> '').
        <style>: the <CellStyle> controlling appearance.
        """
        self._style = style
        self._grid = grid
        self.tag = tag
        # Unscaled cell dimensions, used by setText() for text fitting.
        self.height0 = h
        self.width0 = w
        super().__init__(0, 0, w, h)
        self.setFlag(self.ItemClipsChildrenToShape, True)
        self.setPos(x, y)
        # Background colour
        if style.bgColour != None:
            self.setBrush(style.bgColour)
        # Border
        if style.border == 1:
            # Set the pen for the rectangle boundary
            pen0 = CellStyle.getPen(BORDER_WIDTH, style.border_colour)
        else:
            # No border for the rectangle
            pen0 = CellStyle.getPen(None)
        if style.border != 0:
            # Thick underline
            line = QGraphicsLineItem(self)
            line.setPen(CellStyle.getPen(UNDERLINE_WIDTH,
                    style.border_colour))
            line.setLine(0, h, w, h)
        self.setPen(pen0)
        # Alignment and rotation
        self.halign, self.valign, self.rotation = style.alignment
        # Text
        self.textItem = QGraphicsSimpleTextItem(self)
        self.textItem.setFont(style.font)
        self.textItem.setBrush(style.fontColour)
        self.setText(text or '')
#
    def mark(self):
        """Switch the text to the style's "marked" colour, if one is set."""
        if self._style.colour_marked:
            self.textItem.setBrush(self._style.getBrush(self._style.colour_marked))
#
    def unmark(self):
        """Restore the style's normal text colour."""
        self.textItem.setBrush(self._style.fontColour)
#
    def margin(self):
        """Return the text margin inside the cell (0.4 mm, in points)."""
        return 0.4 * self._grid._gview.MM2PT
#
    def value(self):
        """Return the cell's current text."""
        return self._text
#
    def setText(self, text):
        """Set the cell text, scaling it down (to a minimum factor of
        0.6) so it fits the cell; if even that is too small, '###' is
        shown instead. Handles horizontal and rotated (-90°) text, then
        positions the text according to the style's alignment.
        Raises <GridError> if <text> is not a string.
        """
        if type(text) != str:
            raise GridError(_NOTSTRING.format(val = repr(text)))
        self._text = text
        self.textItem.setText(text)
        self.textItem.setScale(1)
        w = self.textItem.boundingRect().width()
        h = self.textItem.boundingRect().height()
        if text:
            scale = 1
            # Space available inside the cell, minus the margins.
            maxw = self.width0 - self.margin() * 2
            maxh = self.height0 - self.margin() * 2
            if self.rotation:
                # Rotated text: its width runs along the cell's height.
                maxh -= self.margin() * 4
                if w > maxh:
                    scale = maxh / w
                if h > maxw:
                    _scale = maxw / h
                    if _scale < scale:
                        scale = _scale
                if scale < 0.6:
                    # Too small to read: show a placeholder instead.
                    self.textItem.setText('###')
                    scale = (maxh /
                            self.textItem.boundingRect().width())
                if scale < 1:
                    self.textItem.setScale(scale)
                trf = QTransform().rotate(-90)
                self.textItem.setTransform(trf)
            else:
                maxw -= self.margin() * 4
                if w > maxw:
                    scale = maxw / w
                if h > maxh:
                    _scale = maxh / h
                    if _scale < scale:
                        scale = _scale
                if scale < 0.6:
                    # Too small to read: show a placeholder instead.
                    self.textItem.setText('###')
                    scale = (maxw /
                            self.textItem.boundingRect().width())
                if scale < 1:
                    self.textItem.setScale(scale)
        # This print line can help find box size problems:
        # print("BOX-SCALE: %5.3f (%s) *** w: %6.2f / %6.2f *** h: %6.2f / %6.2f"
        #         % (scale, text, w, maxw, h, maxh))
        # Position the (possibly scaled/rotated) text inside the cell.
        bdrect = self.textItem.mapRectToParent(
                self.textItem.boundingRect())
        yshift = - bdrect.top() if self.rotation else 0.0
        w = bdrect.width()
        h = bdrect.height()
        xshift = 0.0
        if self.halign == 'l':
            xshift += self.margin()
        elif self.halign == 'r':
            xshift += self.width0 - self.margin() - w
        else:
            xshift += (self.width0 - w) / 2
        if self.valign == 't':
            yshift += self.margin()
        elif self.valign == 'b':
            yshift += self.height0 - self.margin() - h
        else:
            yshift += (self.height0 - h) / 2
        self.textItem.setPos(xshift, yshift)
#
    def leftclick(self):
        """Delegate a left click to the grid (see GridView.mousePressEvent)."""
        return self._grid.tile_left_clicked(self)
#
    def rightclick(self):
        """Delegate a right click to the grid (see GridView.mousePressEvent)."""
        return self._grid.tile_right_clicked(self)
#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#
#TODO ...
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# ade:
# Asynchronous Differential Evolution.
#
# Copyright (C) 2018-20 by Edwin A. Suominen,
# http://edsuom.com/ade
#
# See edsuom.com for API documentation as well as information about
# Ed's background and other projects, software and otherwise.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS
# IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""
The L{Constraints} base class makes it easier for you to enforce
parameter constraints. If you have any highly correlated parameters,
an instance of L{RelationsChecker} may be helpful, too.
"""
import numpy as np
from ade.util import *
class RelationsChecker(object):
    """
    Checks that the linear relation between pairs of parameters stays
    within the limits imposed by the 'relations' dict-of-dicts
    specification.

    Each entry of the dict, keyed by a first parameter name, is another
    dict keyed by a second parameter name. Each of those entries is a
    3-tuple (m, b, yMaxErr): the second parameter's value must lie
    within yMaxErr of the line M{y2(y1) = m*y1 + b}.
    """
    def __init__(self, relations):
        self.relations = relations

    def __call__(self, params):
        for xName, ys in self.relations.items():
            xValue = params[xName]
            for yName, (m, b, yMaxErr) in ys.items():
                expected = m * xValue + b
                # Reject as soon as one relation is violated.
                if abs(expected - params[yName]) > yMaxErr:
                    return False
        return True
class Constraints(object):
    """
    Subclass me and define one or more constraint-checking methods.

    Register the methods to be used with a given instance of that
    subclass by defining a I{registry} dict in your subclass, keyed by
    method name. Each entry must have a 2-sequence, with the first
    item being linear parameter names (or C{None}) and log-space
    parameter names (or C{None}) for the constraint method.

    You can define instance attributes via constructor keywords. Any
    constructor arguments are supplied to the L{setup} method you can
    override in your subclass, which gets called during construction
    right after instance attributes get set by any constructor
    keywords.

    To just add a raw constraint function that gets called without any
    parameter transformations, use L{append}.

    Log-space parameter values are used in the author's ongoing
    circuit simulation project and are supported in this module, but
    not yet implemented at the level of the I{ade} package otherwise.

    @cvar debug: Set C{True} to have failing constraints shown with
        parameters. (Debugging only.)
    """
    debug = False

    def __init__(self, *args, **kw):
        for name in kw:
            setattr(self, name, kw[name])
        self.cList = []
        self.setup(*args)
        for methodName in self.registry:
            func = getattr(self, methodName)
            self.cList.append([func]+list(self.registry[methodName]))
        self.fList = []

    def setup(self, *args):
        """
        Override this to do setup with any constructor arguments and with
        instance attributes set via any constructor keywords.
        """
        pass

    def __len__(self):
        # Class-wide checkers plus any raw functions added with append()
        return len(self.cList) + len(self.fList)

    def __bool__(self):
        return bool(self.cList) or bool(self.fList)

    def _makeWrapper(self, func, linearParams, logParams):
        """
        Returns a wrapper of constraint-checking I{func} that transforms
        any log parameters into their linear values before calling the
        wrapped function with a revised parameter dict.

        If a required parameter is not present, the wrapper forces a
        C{True} "constraint satisfied" result so that setting a
        parameter to known doesn't cause bogus constraint checking.
        """
        def wrapper(params):
            # BUG FIX: a fresh dict is built on every call. The previous
            # implementation shared one dict across all wrappers and calls
            # ("if name not in newParams"), so later calls reused stale
            # parameter values from earlier ones.
            newParams = {}
            if linearParams:
                if not linearParams.issubset(params):
                    return True
                for name in linearParams:
                    newParams[name] = params[name]
            if logParams:
                if not logParams.issubset(params):
                    return True
                for name in logParams:
                    newParams[name] = np.power(10.0, params[name])
            result = func(newParams)
            if self.debug and not result:
                # BUG FIX: only failing constraints are shown, per the
                # class docstring; previously every call printed "BOGUS".
                print(sub("BOGUS: {}, {}", func.__name__, newParams))
            return result
        return wrapper

    def __iter__(self):
        """
        Iterating over an instance of me yields wrappers of my class-wide
        constraint-checking functions registered in my I{cList}, plus
        any functions registered after setup in my I{fList}.

        You can register a function in I{fList} after setup by calling
        L{append} with the callable as the sole argument. It will be
        treated exactly the same except called after class-wide
        functions.

        You can treat an instance of me sort of like a list, using
        iteration and appending. But you can't get or set individual
        items by index.
        """
        # BUG FIX: each wrapper is built by the _makeWrapper factory so it
        # binds its own (func, linearParams, logParams). The previous
        # closure-over-loop-variable version made every yielded wrapper
        # call the LAST registered function once the loop had finished.
        for func, linearParams, logParams in self.cList:
            yield self._makeWrapper(func, linearParams, logParams)
        for func in self.fList:
            yield func

    def append(self, func):
        """
        Registers callable I{func} to be yielded after my class-wide
        constraint checkers; it is called with the raw parameter dict.
        """
        self.fList.append(func)
|
import sys
import cv2
import math
import numpy as np
# H: 0-179, S: 0-255, V: 0-255 for opencv color ranges
class TrackedObj:
    """A colour-tracked object: an HSV detection range plus tracking metadata."""
    # Class-level defaults; every one is assigned per instance in __init__.
    __rgb_color = None
    __tolerance = None
    lower_color = None      # lower HSV bound used when masking
    upper_color = None      # upper HSV bound used when masking
    name = None
    coords = None           # [x, y, z] relative coordinates (None until set)
    contour_filter = None   # optional predicate applied to candidate contours
    def __init__(self, name, rgb_color, coords=None, tolerance=10, contour_filter=None):
        self.name = name
        self.__rgb_color = rgb_color
        self.__tolerance = tolerance
        self.contour_filter = contour_filter
        # Convert the reference RGB colour to HSV (OpenCV hue axis is 0-179).
        hsv_color = cv2.cvtColor(np.uint8([[ rgb_color ]]), cv2.COLOR_RGB2HSV)[0][0]
        # NOTE(review): np.uint8 arithmetic wraps modulo 256, so the negative
        # offsets below rely on unsigned wraparound; saturation/value bounds
        # can wrap unexpectedly for dark or unsaturated colours -- verify.
        self.lower_color = hsv_color + np.uint8([ -tolerance, -200, -200 ])
        self.upper_color = hsv_color + np.uint8([ tolerance, 0, 0 ])
        # Re-wrap hue components that fell outside OpenCV's 0-179 hue axis.
        if (self.lower_color[0] > 179):
            self.lower_color[0] = 180 - (255 - self.lower_color[0])
        if (self.upper_color[0] > 179):
            self.upper_color[0] = 180 - (255 - self.upper_color[0])
        if coords is None:
            self.coords = [None, None, None]
        else:
            self.coords = coords
    def copy(self):
        # NOTE(review): the coords list is shared by reference between the
        # copy and the original; add_coords* rebinds rather than mutates in
        # two of three cases, so this is mostly benign -- confirm intended.
        return TrackedObj(
            self.name,
            self.__rgb_color,
            coords=self.coords,
            tolerance=self.__tolerance,
            contour_filter=self.contour_filter
        )
    def __repr__(self):
        return '<TrackedObj: name={}, lower={}, upper={}>'.format(self.name, self.lower_color, self.upper_color)
    def add_coords_xz(self, coords):
        # Take x and z from an XZ-view measurement, keep the existing y.
        self.coords = [coords[0], self.coords[1], coords[2]]
    def add_coords_yz(self, coords):
        # Take y and z from a YZ-view measurement, keep the existing x.
        self.coords = [self.coords[0], coords[1], coords[2]]
    def add_coords(self, coords):
        # Overwrite all three coordinates in place.
        self.coords[0], self.coords[1], self.coords[2] = coords
    def test_contour(self, contour):
        """Return True if the contour passes this object's filter (or no filter is set)."""
        if self.contour_filter is None:
            return True
        return self.contour_filter(contour)
def contour_center(contour):
    """Return the (x, y) centroid of a contour from its image moments.

    NOTE(review): raises ZeroDivisionError for degenerate contours whose
    m00 (area) moment is zero; callers should filter those out first.
    """
    m = cv2.moments(contour)
    cX = int(m["m10"] / m["m00"])
    cY = int(m["m01"] / m["m00"])
    return (cX, cY)
def find_contours(image, lower_color, upper_color, circularity=0):
    """Return the contours of regions of `image` inside the given HSV range.

    :param image: BGR image to scan
    :param lower_color: lower HSV bound (uint8 triple)
    :param upper_color: upper HSV bound (uint8 triple)
    :param circularity: unused; kept for caller compatibility
    :return: list of contours as produced by cv2.findContours
    """
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    if lower_color[0] > upper_color[0]:
        # lower is higher in the HSV space than the upper (the hue range
        # wraps around the end of the axis), so we OR together two ranges
        mask_u = cv2.inRange(
            hsv,
            np.uint8([ 0, lower_color[1], lower_color[2] ]),
            upper_color
        )
        mask_l = cv2.inRange(
            hsv,
            lower_color,
            np.uint8([ 179, upper_color[1], upper_color[2] ])
        )
        # combine the masks
        mask = cv2.bitwise_or( mask_u, mask_l )
    else:
        mask = cv2.inRange(hsv, lower_color, upper_color)
    #cv2.imwrite('mask.png', mask)
    # BUG FIX: OpenCV 3 returns (image, contours, hierarchy) while OpenCV 4
    # returns (contours, hierarchy); the old 3-way unpack raised ValueError
    # on OpenCV >= 4. Indexing from the end works on both versions.
    result = cv2.findContours(
        mask,
        cv2.RETR_TREE,
        cv2.CHAIN_APPROX_SIMPLE
    )
    return result[-2]
def get_circularity(contour):
    """Return how circular a contour is: equal-area-circle perimeter over
    actual perimeter. 1.0 means a perfect circle; lower means less round.

    Degenerate contours with zero perimeter yield 0.0 instead of raising
    ZeroDivisionError.
    """
    perim = cv2.arcLength(contour, True)
    area = cv2.contourArea(contour)
    if perim == 0:
        # single-point or empty contour: not circular in any useful sense
        return 0.0
    # equal area circle perimeter
    eq_r = math.sqrt( float(area) / math.pi)
    eq_perim = 2.0 * math.pi * eq_r
    return eq_perim / float(perim)
def show_contours(image, contours, color=(255,255,255), name=None):
    """Draw a bounding box, centroid dot and optional label for each contour.

    Mutates `image` in place and also returns it.

    NOTE(review): cv2.moments returns m00 == 0 for degenerate contours,
    making the centroid division raise ZeroDivisionError; callers in this
    file catch that with a blanket except (see markup_image).
    """
    for c in contours:
        m = cv2.moments(c)
        b = cv2.boundingRect(c)
        # 1-pixel bounding rectangle around the contour
        cv2.rectangle(
            image,
            (b[0], b[1]),
            (b[0] + b[2], b[1] + b[3]),
            color,
            1,
            0
        )
        # centroid from the image moments
        cX = int(m["m10"] / m["m00"])
        cY = int(m["m01"] / m["m00"])
        cv2.circle(image, (cX, cY), 1, color, -1)
        if name is not None:
            # label drawn just to the right of the bounding box
            cv2.putText(
                image,
                name,
                (b[0] + b[2] + 2, b[1] + int(b[2] / 2)),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.5,
                (255, 255, 255),
                1
            )
    return image
def resolve_objects_naive(xz_image, yz_image, tracked_objects):
    """
    Resolve objects within an image
    :param xz_image: XZ image
    :param yz_image: YZ image
    :param tracked_objects: [TrackedObj] array to track
    """
    # scale objects
    x_scale = xz_image.shape[1] # shape is (rows, cols, [color depth])
    y_scale = yz_image.shape[1]
    z_scale = xz_image.shape[0]
    processed = []
    for tracked in tracked_objects:
        print('scanning for {}'.format(tracked.name))
        # find the contours from the xz and yz images
        contours_xz = find_contours(
            xz_image.copy(),
            tracked.lower_color,
            tracked.upper_color
        )
        contours_yz = find_contours(
            yz_image.copy(),
            tracked.lower_color,
            tracked.upper_color
        )
        # handle edge cases (e.g. more than one tracked object,
        # obfuscated objects, etc.)
        if len(contours_xz) != len(contours_yz):
            print('!!! obfuscated object')
            # test
        else:
            # NOTE(review): zip pairs contours by list position only; there
            # is no matching of which xz contour corresponds to which yz
            # contour when more than one is present.
            for contour_xz, contour_yz in zip(contours_xz, contours_yz):
                # copy the target (in case there are multiple)
                current_target = tracked.copy()
                # NOTE(review): the z taken from the XZ view is immediately
                # overwritten by the z from the YZ view on the next line,
                # and the normalised (x, y, z) below is printed but never
                # stored on current_target -- confirm intended behaviour.
                x, z = contour_center(contour_xz)
                y, z = contour_center(contour_yz)
                # convert everything to floats, and convert to relative coords
                x = float(x) / float(x_scale)
                y = float(y) / float(y_scale)
                z = float(z) / float(z_scale)
                print('\textracted {}'.format(
                    (x, y, z)
                ))
                processed.append(current_target)
    return processed
def resolve_object_stereo_contours(yz_image, xz_image, tracked_obj):
    """Return positionally-paired (yz, xz) contours for one tracked object.

    Contours are detected in each view with the object's HSV range and
    filtered through its contour test before pairing.
    """
    def matching_contours(view):
        detected = find_contours(
            view.copy(),
            tracked_obj.lower_color,
            tracked_obj.upper_color
        )
        return [c for c in detected if tracked_obj.test_contour(c)]

    yz_matches = matching_contours(yz_image)
    xz_matches = matching_contours(xz_image)
    return list(zip(yz_matches, xz_matches))
def markup_image(img, tracked_objects=None):
    """Return a copy of `img` annotated with every tracked object found.

    :param img: BGR image to scan (not modified)
    :param tracked_objects: optional [TrackedObj] list; when None, the
        standard joint/floater set below is used
    :return: annotated copy of `img`
    """
    # BUG FIX: the passed-in list was previously ignored -- it was
    # unconditionally overwritten with the default set, so callers could
    # never supply their own objects.
    if tracked_objects is None:
        tracked_objects = [
            TrackedObj(
                'BlueJoint',
                np.array([0,0,255]),
            ),
            TrackedObj(
                'GreenJoint',
                np.array([0,255,0]),
            ),
            TrackedObj(
                'RedJoint',
                np.array([255,0,0]),
            ),
            TrackedObj(
                'YellowJoint',
                np.array([255,255,0]),
                tolerance=5
            ),
            TrackedObj(
                'Sphere Floater',
                np.array([255,165,0]),
                tolerance=5,
                contour_filter=lambda c: get_circularity(c) > 0.9
            ),
            TrackedObj(
                'Cyl Floater',
                np.array([255,165,0]),
                tolerance=5,
                contour_filter=lambda c: get_circularity(c) < 0.9
            )
        ]
    marked = img.copy()
    for tracked in tracked_objects:
        contours = find_contours(img.copy(), tracked.lower_color, tracked.upper_color)
        # quick n dirty filter
        contours = [contour for contour in contours if tracked.test_contour(contour)]
        try:
            marked = show_contours(marked, contours, name=tracked.name)
        except Exception:
            # best-effort: skip objects whose contours can't be drawn
            # (e.g. degenerate zero-area moments in show_contours)
            pass
    return marked
if __name__=="__main__":
    # Ad-hoc smoke test: detect every tracked object in one stereo capture
    # pair and write an annotated copy of the YZ view to test.png.
    img_yz = cv2.imread('../captures/cam1_yz_pose1.png')
    img_xz = cv2.imread('../captures/cam2_xz_pose1.png')
    #marked = img.copy()
    #print('image dimensions:', img.shape)
    tracked_objects = [
        TrackedObj(
            'BlueJoint',
            np.array([0,0,255]),
        ),
        TrackedObj(
            'GreenJoint',
            np.array([0,255,0]),
        ),
        TrackedObj(
            'RedJoint',
            np.array([255,0,0]),
        ),
        TrackedObj(
            'YellowJoint',
            np.array([255,255,0]),
            tolerance=5
        ),
        TrackedObj(
            'Sphere Floater',
            np.array([255,165,0]),
            tolerance=5,
            contour_filter=lambda c: get_circularity(c) > 0.9
        ),
        TrackedObj(
            'Cyl Floater',
            np.array([255,165,0]),
            tolerance=5,
            contour_filter=lambda c: get_circularity(c) < 0.9
        )
    ]
    img = img_yz.copy()
    marked = img_yz.copy()
    for tracked in tracked_objects:
        #print('resolving %s' % tracked.name)
        print('resolving %s' % tracked)
        contours = find_contours(img.copy(), tracked.lower_color, tracked.upper_color)
        # quick n dirty filter
        contours = [contour for contour in contours if tracked.test_contour(contour)]
        try:
            marked = show_contours(marked, contours, name=tracked.name)
        except Exception as e:
            print('\tsomething went wrong with %s' % tracked.name)
    cv2.imwrite('test.png', marked)
    #im2, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    #for c in contours:
    # calculate moments for each contour
    #M = cv2.moments(c)
    # calculate x,y coordinate of center
    #cX = int(M["m10"] / M["m00"])
    #cY = int(M["m01"] / M["m00"])
    #cv2.circle(img, (cX, cY), 5, (255, 255, 255), -1)
    #cv2.putText(img, "centroid", (cX - 25, cY - 25),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
    # display the image
    #cv2.imshow("Image", img)
    #cv2.waitKey(0)
|
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Colab-specific IPython.core.history.HistoryManager."""
import hashlib
import json
import time
from IPython import display
from IPython.core import history
class ColabHistoryManager(history.HistoryManager):
    """Colab-specific history manager to store cell IDs with executions.

    This allows us to associate code executions with the cell which was
    executed in Colab's UI.
    """

    # Class-level list intentionally mutated in place (see reset) so that any
    # existing references stay valid; entry 0 is a placeholder so indexes line
    # up with IPython's 1-based execution counts.
    _input_hist_cells = [{'code': '', 'cell_id': '', 'start_time': 0}]

    # TODO(b/193678454): Remove this in early 2022 when we no longer need
    # backwards compatibility.
    _supports_cell_ran = True

    def reset(self, new_session=True):
        """Reset history; clears stored cell entries when a new session starts."""
        super(ColabHistoryManager, self).reset(new_session=new_session)
        if new_session:
            # Slice-assign rather than rebind so shared references see the reset.
            self._input_hist_cells[:] = [{'code': '', 'cell_id': '', 'start_time': 0}]

    def store_inputs(self, line_num, source, source_raw=None):
        """Variant of HistoryManager.store_inputs which also stores the cell ID."""
        super(ColabHistoryManager, self).store_inputs(
            line_num, source, source_raw=source_raw)
        # The parent_header on the shell is the message that resulted in the code
        # execution request. Grab the cell ID out of that.
        cell_id = self.shell.parent_header.get('metadata',
                                               {}).get('colab', {}).get('cell_id')
        self._input_hist_cells.append({
            'code': source_raw,
            'cell_id': cell_id,
            'start_time': time.time(),
        })

    def _history_with_cells_as_json(self):
        """Utility accessor to allow frontends an expression to fetch history.

        Returns:
          A Javascript display object with the execution history.
        """
        # To be able to access the raw string as an expression we need to transfer
        # the plain string rather than the quoted string representation. The
        # Javascript display wrapper is used for that.
        return display.Javascript(json.dumps(self._input_hist_cells))

    def _executed_cells_as_json(self, include_source_hash=False):
        """Provides frontends an expression to fetch previously executed cells.

        Args:
          include_source_hash: If true, include a hash of the code that ran.

        Returns:
          A Javascript display object of a dict of the executed cell IDs to their
          execution index. If include_source was specified, the items in the dict
          are dicts with 'executionCount' and 'sourceHash' fields.
        """
        cells = dict()
        for i, cell in enumerate(self._input_hist_cells):
            if include_source_hash:
                # LINT.IfChange(execution_count)
                cells[cell['cell_id']] = {
                    'executionCount':
                        i,
                    'sourceHash':
                        hashlib.md5(cell['code'].encode('utf8')).hexdigest()[:10]
                }
                # LINT.ThenChange()
            else:
                cells[cell['cell_id']] = i
        # To be able to access the raw string as an expression we need to transfer
        # the plain string rather than the quoted string representation. The
        # Javascript display wrapper is used for that.
        return display.Javascript(json.dumps(cells))
|
# Models
from django.contrib.auth.models import User
from django.test import TestCase
from users.views import spawn_user
from .models import MessageThread
from .views import send_message, find_group
# useful utility function to find members by username
# I should maybe actually put this in like users.members
def get_member(*usernames):
    """Return a tuple of member profiles, one per given username."""
    return tuple(
        User.objects.get(username=username).member
        for username in usernames
    )
class Messaging(TestCase):
    """Integration tests for message threads and message sending."""

    def setUp(self):
        """Create Alice, Bob and Eve plus the threads connecting them."""
        a_user = spawn_user('Alice', 'alice@tagarople.com', 'ySLLe8uy')
        b_user = spawn_user('Bob', 'bob@tagarople.com', 'X3u9C4bp')
        e_user = spawn_user('Eve', 'eve@shady.site', '5rX6zrQm')
        alice = a_user.member
        bob = b_user.member
        eve = e_user.member
        ab = MessageThread()
        ab.save()
        ab.participants.add(alice, bob)
        ae = MessageThread()
        ae.save()
        ae.participants.add(alice, eve)
        abe = MessageThread()
        abe.save()
        abe.participants.add(alice, bob, eve)
        # m1 = Message(thread=ab, sender=alice, content='Hello')

    def test_get_threads(self):
        """Placeholder for thread retrieval checks (assertions still TODO)."""
        # BUG FIX: removed three dead User.objects.get lookups whose results
        # were immediately overwritten by this get_member call.
        alice, bob, eve = get_member('Alice', 'Bob', 'Eve')
        # print("test_get_threads(alice, bob)")
        # print(MessageThread.get_thread(alice, bob))

    def test_get_threads_create(self):
        """Placeholder for thread creation checks (assertions still TODO)."""
        alice, bob, eve = get_member('Alice', 'Bob', 'Eve')
        # print("test_get_threads(bob, eve)")
        # print(MessageThread.get_thread(bob, eve))

    def test_send_message(self):
        """A sent message is the only message stored on its thread."""
        alice, bob = get_member('Alice', 'Bob')
        thread = find_group(alice, bob)
        # make sure there is only one message in thread so that get doesn't fail
        thread.message_set.all().delete()
        m = send_message(alice, thread, 'Hi Bob')
        # makes sure that the thread contains exactly the message
        self.assertEqual(str(thread.message_set.get()), str(m))

    def test_messagethread_str(self):
        """A thread's str() lists its participants' names."""
        print("test_messagethread_str")
        alice, bob = get_member('Alice', 'Bob')
        thread = find_group(alice, bob)
        # make sure it's clear :P
        thread.message_set.all().delete()
        send_message(alice, thread, 'Hi Bob')
        send_message(bob, thread, 'Hi Alice')
        self.assertEqual(str(thread), 'Alice, Bob')
|
#!/usr/bin/env python
# encoding: utf-8
"""
基础配置文件
"""
DEBUG = False
CONNECT_TIMEOUT = 10
REQUEST_TIMEOUT = 10
|
from infinitystone.models.domains import infinitystone_domain
from infinitystone.models.endpoints import infinitystone_endpoint
from infinitystone.models.roles import infinitystone_role
from infinitystone.models.tenants import infinitystone_tenant
from infinitystone.models.users import infinitystone_user
from infinitystone.models.user_roles import infinitystone_user_role
from infinitystone.models.elements import infinitystone_element
from infinitystone.models.elements import infinitystone_element_interface
from infinitystone.models.elements import infinitystone_element_tag
|
from Environment import Environment
# import Environment as ev
# ev.generate_graph()
# L = Environment()
# print(L.generate_actions(L.state.drivers[0]))
# print(L.generate_all_actions())
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
def create_pages(apps, schema_editor):
    """Create the 'Topics' TopicListPage beneath the home page.

    Runs as a data migration, so all models are fetched through `apps`
    (historical model state) rather than imported directly.
    """
    Page = apps.get_model("wagtailcore", "Page")
    TopicListPage = apps.get_model("articles", "TopicListPage")
    home_page = Page.objects.get(slug="home")
    ContentType = apps.get_model("contenttypes", "ContentType")
    topic_list_page_content_type, created = ContentType.objects.get_or_create(
        model='topiclistpage',
        app_label='articles'
    )
    # Create topics page
    # NOTE(review): path/depth are hard-coded Wagtail treebeard values
    # ('000100010004', depth 3), which assumes the home page already has
    # exactly three children -- verify against the actual page tree.
    topics_page = TopicListPage.objects.create(
        title="Topics",
        slug='topics',
        content_type_id=topic_list_page_content_type.pk,
        path='000100010004',
        depth=3,
        numchild=0,
        url_path='/home/topics/',
    )
    # Keep the parent's cached child count consistent with the page just added.
    home_page.numchild += 1
    home_page.save()
class Migration(migrations.Migration):
    """Data migration that adds the 'Topics' listing page under home."""

    dependencies = [
        ('contenttypes', '__latest__'),
        ('articles', '0005_auto_20150626_1948'),
    ]

    operations = [
        # Provide a no-op reverse so this data migration can be unapplied
        # (otherwise `migrate` backwards past it raises IrreversibleError).
        migrations.RunPython(create_pages, migrations.RunPython.noop),
    ]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
class NaptConnectionEventArgs(object):
    """Event payload for a NAPT connection event.

    Carries the connection plus an optional data buffer window described
    by an offset and a size.
    """
    def __init__(self, conn, data = None, offset = 0, size = 0):
        # The connection the event refers to.
        self.connection = conn
        # Optional buffer associated with the event; offset/size delimit
        # the relevant window within it.
        self.data = data
        self.offset = offset
        self.size = size
|
#!/usr/bin/env python3
import sys
import math
import numpy as np
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from scipy.stats import norm
import subprocess as sp
from tqdm import tqdm
from kitti import *
#Z: 1.71389 0.379994
#H: 1.52715 0.136921
#W: 1.62636 0.102967
#L: 3.88716 0.430096
#T: -0.0743265 1.71175
# Accumulators for per-box statistics gathered over the training set.
Z = []
H = []
W = []
L = []
T = []
sp.check_call('mkdir -p stats', shell=True)
# train.txt holds one integer sample id per line.
with open('train.txt', 'r') as f:
    tasks = [int(l.strip()) for l in f]
for pk in tqdm(tasks):
    sample = Sample(pk, LOAD_LABEL2)
    #[z, x, y, h, w, l, obj.rot, _])
    boxes = sample.get_voxelnet_boxes(["Car"])
    for box in boxes:
        # NOTE(review): this unpacks as (x, y, z, ...) while the commented
        # line above suggests (z, x, y, ...) -- verify against
        # kitti.Sample.get_voxelnet_boxes.
        x, y, z, h, w, l, t, _ = box
        Z.append(z)
        H.append(h)
        W.append(w)
        L.append(l)
        T.append(t)
        pass
    pass
def hist_plot (X, label):
    """Histogram the values in X and overlay a fitted normal density.

    Saves the figure to stats/<label>.png and prints the fitted mean and
    standard deviation. (Original comment, translated: plot a histogram of
    X and estimate a normal distribution over it.)
    """
    fig = Figure()
    FigureCanvas(fig)
    ax = fig.add_subplot(1, 1, 1)
    mu, sigma = norm.fit(X)
    n, bins, _ = ax.hist(X, 100, density=True)
    # BUG FIX: matplotlib.mlab.normpdf was deprecated in 2.2 and removed in
    # 3.1; scipy.stats.norm.pdf (norm is already imported in this module)
    # computes the identical density.
    y = norm.pdf(bins, mu, sigma)
    ax.plot(bins, y)
    ax.set_xlabel(label)
    print('%s: %g %g' % (label, mu, sigma))
    fig.savefig('stats/%s.png' % label)
# One histogram + normal fit per measured quantity.
hist_plot(Z, 'Z')
hist_plot(H, 'H')
hist_plot(W, 'W')
hist_plot(L, 'L')
hist_plot(T, 'T')
|
from django import test
from myapp import tests
from myapp import utils
class IsReadOnlyUserTest(test.TestCase):
    """Tests for utils.is_read_only_user."""
    def test_read_only_user(self):
        """Test a user who is in the group Read_Only and ensure this returns True.
        """
        user = tests.setup_test_user(True)
        results = utils.is_read_only_user(user)
        self.assertTrue(results)
    def test_regular_user(self):
        """Test a user not setup in the group Read_Only and ensure this returns False.
        """
        user = tests.setup_test_user()
        results = utils.is_read_only_user(user)
        self.assertFalse(results)
class PopulateCountryListTest(test.TestCase):
    """Tests that utils.populate_country_list expands region/country codes
    into the full list of associated ISO country codes."""
    def test_carribean_islands(self):
        """Ensure correct countries are returned when Eastern Caribbean Islands is specified as
        a country.
        """
        results = utils.populate_country_list(["CE"])
        expected = ["AI", "AG", "DM", "GD", "MS", "KN", "LC", "VC"]
        self.assertListEqual(results, expected)
    def test_west_africa(self):
        """Ensure correct countries are returned when West Africa CFA is specified as a country.
        """
        results = utils.populate_country_list(["WA"])
        expected = ["BF", "BJ", "CI", "GW", "ML", "NE", "SN", "TG"]
        self.assertListEqual(results, expected)
    def test_central_africa(self):
        """Ensure correct countries are returned when Central Africa CFA is specified as a country.
        """
        results = utils.populate_country_list(["AA"])
        expected = ["CF", "CG", "CM", "GA", "GQ", "TD"]
        self.assertListEqual(results, expected)
    def test_united_states(self):
        """Ensure correct countries are returned when United States of America is specified as a country.
        """
        results = utils.populate_country_list(["US"])
        expected = ["AS", "BQ", "FM", "GU", "MH", "MP", "PR", "TC", "UM", "US", "VG", "VI"]
        self.assertListEqual(results, expected)
    def test_finland(self):
        """Ensure correct countries are returned when Finland is specified as a country.
        """
        results = utils.populate_country_list(["FI"])
        expected = ["AX", "FI"]
        self.assertListEqual(results, expected)
    def test_france(self):
        """Ensure correct countries are returned when France is specified as a country.
        """
        results = utils.populate_country_list(["FR"])
        expected = ["BL", "FR", "MC", "MF", "MQ", "PM", "RE", "TF", "YT"]
        self.assertListEqual(results, expected)
    def test_norway(self):
        """Ensure correct countries are returned when Norway is specified as a country.
        """
        results = utils.populate_country_list(["NO"])
        expected = ["BV", "NO", "SJ"]
        self.assertListEqual(results, expected)
    def test_australia(self):
        """Ensure correct countries are returned when Australia is specified as a country.
        """
        results = utils.populate_country_list(["AU"])
        expected = ["AU", "CC", "CX", "HM", "NF", "NR", "TV"]
        self.assertListEqual(results, expected)
    def test_denmark(self):
        """Ensure correct countries are returned when Denmark is specified as a country.
        """
        results = utils.populate_country_list(["DK"])
        expected = ["DK", "FO", "GL"]
        self.assertListEqual(results, expected)
    def test_united_kingdom(self):
        """Ensure correct countries are returned when United Kingdom is specified as a country.
        """
        results = utils.populate_country_list(["GB"])
        expected = ["GB", "GS", "IO"]
        self.assertListEqual(results, expected)
    def test_new_zealand(self):
        """Ensure correct countries are returned when New Zealand is specified as a country.
        """
        results = utils.populate_country_list(["NZ"])
        expected = ["NU", "NZ", "PN", "TK"]
        self.assertListEqual(results, expected)
    def test_israel(self):
        """Ensure correct countries are returned when Israel is specified as a country.
        """
        results = utils.populate_country_list(["IL"])
        expected = ["IL", "PS"]
        self.assertListEqual(results, expected)
|
import time

from selenium import webdriver
# BUG FIX: the class is named Keys (capitalized); importing `keys` raises
# ImportError at module load time.
from selenium.webdriver.common.keys import Keys
class botTwitter:
    """Minimal Twitter bot driven through a Selenium Firefox session."""
    def __init__(self, username, password):
        self.username = username
        self.password = password
        self.bot = webdriver.Firefox()

    def login(self):
        """Open twitter.com and log in with the stored credentials."""
        bot = self.bot
        # BUG FIX: `bot.get = ('https://twitter.com')` assigned a string over
        # the get method instead of navigating; call the method.
        bot.get('https://twitter.com')
        time.sleep(3)
        email = bot.find_element_by_class_name('email-input')
        password = bot.find_element_by_name('session[password]')
        email.clear()
        password.clear()
        email.send_keys(self.username)
        password.send_keys(self.password)
        # BUG FIX: Keys (capitalized) is the selenium class; `keys.RETURN`
        # raised NameError/ImportError.
        password.send_keys(Keys.RETURN)
        time.sleep(3)

    def like_Twitter(self, hashtag):
        """Open the search results for `hashtag` and scroll to load tweets."""
        bot = self.bot
        # BUG FIX: URL typo 'seach' -> 'search'
        bot.get('https://twitter.com/search?q=' + hashtag + '&src=typd')
        time.sleep(3)
        for i in range(1,3):
            bot.execute_script('window.scrollTo(0, document.body.scrollHeight)')
            time.sleep(3)
# NOTE(review): credentials are hard-coded here; move them to environment
# variables or a config file before committing a real account.
matheus = botTwitter('matheus.2018@gmail.com', 'admin1234' )
matheus.login()
|
from pylab import *
import IPython
import fileio
from os import listdir
from os.path import isfile, join
def subplots(label):
    """Draw a 2x2 grid of M, E, chi and Cv against T and save it.

    Relies on the module-level arrays T, M, E, chi and Cv having been
    loaded by the file-selection code below.

    :param label: figure title; also names the output file plots/<label>.png
    """
    data = [M,E,chi,Cv]
    labels = ["$\overline{M}$","$\overline{E}$","$\overline{\chi}$","$\overline{C}_V$"]
    # BUG FIX: xrange is Python 2 only; the rest of this file uses
    # Python 3 print()/input() so use range.
    for j in range(len(labels)):
        subplot(2,2,j+1)
        plot(T,data[j])
        xlabel("$T$")
        ylabel(labels[j])
    fig = gcf()
    fig.suptitle(label)
    # BUG FIX: previously saved under the global loop variable files[i],
    # ignoring this function's own label argument.
    savefig("plots/%s.png" % label)
# List available result files and drop into an IPython shell with one loaded.
files = [f for f in listdir("results") if isfile(join("results",f)) and f[:7] == "results"]
files.sort()
ion()
if len(files) > 0:
    print("Available data:")
    # BUG FIX: xrange is Python 2 only; use range.
    for i in range(len(files)):
        print("(%d) %s" % (i,files[i]))
    # BUG FIX: on Python 3 input() returns a string, which cannot index a
    # list; convert the chosen index explicitly.
    file_num = int(input("File: "))
    filename = "results/%s" % files[file_num]
    T,E,M,Mtheory,Cv,chi = fileio.readdata(filename)
    IPython.embed()
else:
    print("No data available yet! Use postprocess.py to generate some.")
|
import os
import time
import docker
import pytest
import requests
from hamcrest import *
def build_helper_image(client):
    """Build the chaos-swarm-helper test image from the repository root.

    The Dockerfile is expected one directory above this test file.
    """
    basedir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    client.images.build(
        tag='chaos-swarm-helper-test',
        path=basedir,
        rm=True,  # remove intermediate containers after a successful build
    )
def create_helper_service(client):
    """Deploy the helper image as a global (one-per-node) swarm service.

    The Docker socket is mounted so the helper can manage containers, and
    port 8080 is published in host mode for direct node access.
    """
    return client.services.create(
        image='chaos-swarm-helper-test',
        mounts=['/var/run/docker.sock:/var/run/docker.sock:rw'],
        labels={'chaos-swarm-helper': 'v1'},
        endpoint_spec=docker.types.EndpointSpec(mode='dnsrr', ports={8080: (8080, 'tcp', 'host')}),
        mode=docker.types.ServiceMode('global')
    )
def await_helpers_healthy():
    """Poll the helper's /health endpoint until it answers 200.

    Raises RuntimeError if the service is not healthy within 20 seconds.
    """
    deadline = time.time() + 20
    while time.time() < deadline:
        try:
            response = requests.get('http://localhost:8080/health', timeout=0.25)
            if response.status_code == 200:
                break
        except (requests.exceptions.ConnectionError,
                requests.exceptions.ReadTimeout):
            # service not up yet; fall through to the retry delay
            pass
        # IMPROVEMENT: a refused connection returns instantly, so without a
        # delay this loop busy-spun; pause briefly between attempts.
        time.sleep(0.25)
    else:
        raise RuntimeError('Timeout waiting for helper service')
def await_task(client, service, timeout=10):
    """Block until `service` has a task with a reported container status.

    Raises AssertionError on timeout. `client` is unused but kept for
    signature compatibility with existing callers.
    """
    deadline = time.time() + timeout
    while time.time() < deadline:
        # IMPROVEMENT: fetch the task list once per iteration; the original
        # queried the Docker API up to twice per spin of a busy loop.
        tasks = service.tasks()
        if tasks and 'ContainerStatus' in tasks[0]['Status']:
            return
        # poll instead of busy-spinning against the Docker API
        time.sleep(0.1)
    raise AssertionError('Timeout waiting for tasks on %s' % service.name)
def await_container_status(client, id, status, timeout=10):
    """Block until container `id` reaches `status`.

    Raises AssertionError (with the last observed attrs) on timeout.
    """
    deadline = time.time() + timeout
    # BUG FIX: initialize so the failure message below cannot raise
    # UnboundLocalError when the deadline elapses before the first fetch.
    container = None
    while time.time() < deadline:
        container = client.containers.get(id)
        if container is not None and container.status == status:
            return
        # poll instead of busy-spinning against the Docker API
        time.sleep(0.1)
    raise AssertionError(
        'Timeout awaiting status %s on %s: %s'
        % (status, id, container.attrs if container is not None else 'never fetched')
    )
def print_log_tail(service):
    """Print the service's combined stdout/stderr log, one stripped line each."""
    log_lines = service.logs(stdout=True, stderr=True)
    for raw_line in log_lines:
        print(raw_line.decode().rstrip())
@pytest.fixture(scope='module')
def client():
    """Module-scoped Docker client built from the local environment."""
    return docker.from_env()
@pytest.fixture(scope='module', autouse=True)
def ensure_helpers(client):
    """Build and (re)deploy the helper service for the whole test module.

    Any pre-existing helper service is removed first; after the module's
    tests finish, the logs are dumped and the service is torn down.
    """
    filters = {
        'label': 'chaos-swarm-helper=v1'
    }
    installed = client.services.list(filters=filters)
    if len(installed) == 1:
        installed[0].remove()
    elif len(installed) > 1:
        raise RuntimeError('more than one helper service')
    build_helper_image(client)
    helpers = create_helper_service(client)
    await_helpers_healthy()
    yield helpers
    # teardown: surface helper output for debugging, then remove the service
    print_log_tail(helpers)
    helpers.remove()
@pytest.fixture()
def test_service(client):
    """A throwaway redis service with at least one scheduled task."""
    service = client.services.create(image='redis')
    await_task(client, service)
    yield service
    service.remove()
def test_kill_one_task(client, test_service):
    """Submitting a kill action targets and stops the service's container."""
    first_container_id = test_service.tasks()[0]['Status']['ContainerStatus']['ContainerID']
    response = requests.post(
        'http://localhost:8080/submit',
        json={
            'selector': {'services': {'id': test_service.id}},
            'targets': 1,
            'action': ['pumba', 'kill', 'container'],
        }
    )
    assert response.status_code == 200
    assert response.json()['executions'][0]['target'] == first_container_id
    # the killed container should eventually report as exited
    await_container_status(client, first_container_id, 'exited')
def test_submit_finds_no_targets():
    """A selector matching no services yields a 400 'no targets found' failure."""
    response = requests.post(
        'http://localhost:8080/submit',
        json={
            'selector': {'services': {'name': 'no-such-service'}},
            'targets': 1,
            'action': ['pumba'],
        }
    )
    assert response.status_code == 400
    assert response.json()['status'] == 'failure'
    assert response.json()['message'].startswith('no targets found')
def test_submit_encounters_error(test_service):
    """An unknown action name yields a failure listing the known actions."""
    response = requests.post(
        'http://localhost:8080/submit',
        json={
            'selector': {'services': {'id': test_service.id}},
            'targets': 1,
            'action': ['no-such-command'],
        }
    )
    assert response.status_code == 500 # TODO: 400
    assert response.json()['status'] == 'failure'
    assert response.json()['message'].startswith('known actions')
|
from importlib import import_module
import inspect
from django.test import TestCase
from django.urls import reverse, resolve
def test_events_urls():
    """Check that each events URL resolves to the expected name and view,
    and that view signatures accept the URL kwargs they are routed with."""
    # Order of elements in tuples of urls:
    # 0: url name
    # 1: url route
    # 2: view function/class name
    # -1: args/kwargs
    # for kwargs, integers have to written as string -> some issue with how resolve() works
    urls = [
        ('events', '/events/', 'EventList'),
        ('event_create', '/events/create-event/', 'EventCreate'),
        ('event_detail', '/events/some-slug/', 'EventDetail', {'slug': 'some-slug'}),
        ('modify_event_registration', '/events/modify-registration/10/', 'modify_registration', {'pk': '10'}),
        ('event_update', '/events/modify-event/some-slug/', 'EventUpdate', {'slug': 'some-slug'}),
        ('event_delete', '/events/delete-event/some-slug/', 'delete_event', {'slug': 'some-slug'}),
        (
            'modify_event_descr_truncate_num',
            '/events/modify-event-truncate-num/10/',
            'modify_descr_truncate_num',
            {'pk': '10'},
        ),
        (
            'modify_event_truncated_descr',
            '/events/modify-event-truncated-descr/10/',
            'modify_truncated_descr',
            {'pk': '10'},
        ),
    ]
    with_args_len = max([len(t) for t in urls])
    for tup in urls:
        resolver = resolve(tup[1])
        # BUG FIX: the previous f-strings had no placeholders (lint F541)
        # and so carried no diagnostic information on failure.
        assert resolver.url_name == tup[0], (
            f'URL name error: expected {tup[0]!r}, got {resolver.url_name!r}')
        assert resolver.func.__name__ == tup[2], (
            f'View class/function name error: expected {tup[2]!r}, got {resolver.func.__name__!r}')
        if len(tup) == with_args_len:
            # assert resolver.kwargs == tup[-1], f'URL kwargs error'
            module_name = resolver.func.__module__
            object_name = resolver.func.__name__
            imported_object = getattr(import_module(module_name), object_name)
            if inspect.isclass(imported_object):
                bases = imported_object.__bases__
                # Only single-inheritance DetailView subclasses are checked;
                # they must consume the slug kwarg declared in urls.py.
                if len(bases) == 1 and bases[0].__name__ == 'DetailView':
                    assert imported_object.slug_url_kwarg in tup[-1].keys(), (
                        f'slug_url_kwarg in {object_name} does not match url kwarg in urls.py')
            elif inspect.isfunction(imported_object):
                # inspect.signature used instead of inspect.getfullargspec because the latter does not show parameters after 'request' -> reason unknown
                for key in tup[-1].keys():
                    try:
                        inspect.signature(imported_object).parameters[key]
                    except KeyError:
                        raise KeyError(f'{object_name} does not have a parameter named {key}')
|
from rest_framework import serializers
from .models import *
class UserVeriCodeSerializer(serializers.ModelSerializer):
    """Serializes phone-verification code records."""
    class Meta:
        model = UserVeriCodeModel
        fields = ('uniq_id', 'user_mobile_no', 'verification_code', 'cre_date', 'upt_date')
class NaverCloudLogSerializer(serializers.ModelSerializer):
    """Serializes Naver Cloud API call log entries."""
    class Meta:
        model = NaverCloudLogModel
        fields = ('uniq_id', 'user_mobile_no', 'api_type', 'response_json', 'cre_date', 'upt_date')
class UserPurposeInfoSerializer(serializers.ModelSerializer):
    """Serializes a user's stated purpose-of-use records."""
    class Meta:
        model = UserPurposeInfoModel
        fields = ('uniq_id', 'user_mobile_no', 'purpose_code', 'cre_date', 'upt_date')
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetUserAssignedIdentityResult',
'AwaitableGetUserAssignedIdentityResult',
'get_user_assigned_identity',
'get_user_assigned_identity_output',
]
@pulumi.output_type
class GetUserAssignedIdentityResult:
    """
    A collection of values returned by getUserAssignedIdentity.
    """
    # Generated by the Pulumi Terraform Bridge: each constructor argument is
    # type-checked and stored via pulumi.set, then exposed read-only through
    # the @property getters below.
    def __init__(__self__, client_id=None, id=None, location=None, name=None, principal_id=None, resource_group_name=None, tags=None, tenant_id=None):
        if client_id and not isinstance(client_id, str):
            raise TypeError("Expected argument 'client_id' to be a str")
        pulumi.set(__self__, "client_id", client_id)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if principal_id and not isinstance(principal_id, str):
            raise TypeError("Expected argument 'principal_id' to be a str")
        pulumi.set(__self__, "principal_id", principal_id)
        if resource_group_name and not isinstance(resource_group_name, str):
            raise TypeError("Expected argument 'resource_group_name' to be a str")
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if tenant_id and not isinstance(tenant_id, str):
            raise TypeError("Expected argument 'tenant_id' to be a str")
        pulumi.set(__self__, "tenant_id", tenant_id)
    @property
    @pulumi.getter(name="clientId")
    def client_id(self) -> str:
        """
        The Client ID of the User Assigned Identity.
        """
        return pulumi.get(self, "client_id")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The provider-assigned unique ID for this managed resource.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def location(self) -> str:
        """
        The Azure location where the User Assigned Identity exists.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> str:
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="principalId")
    def principal_id(self) -> str:
        """
        The Service Principal ID of the User Assigned Identity.
        """
        return pulumi.get(self, "principal_id")
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> str:
        return pulumi.get(self, "resource_group_name")
    @property
    @pulumi.getter
    def tags(self) -> Mapping[str, str]:
        """
        A mapping of tags assigned to the User Assigned Identity.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter(name="tenantId")
    def tenant_id(self) -> str:
        """
        The Tenant ID of the User Assigned Identity.
        """
        return pulumi.get(self, "tenant_id")
class AwaitableGetUserAssignedIdentityResult(GetUserAssignedIdentityResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # Makes the result awaitable for async callers.  The `if False:
        # yield` turns this into a generator that never actually yields,
        # so awaiting resolves immediately to a plain result copy.
        if False:
            yield self
        return GetUserAssignedIdentityResult(
            client_id=self.client_id,
            id=self.id,
            location=self.location,
            name=self.name,
            principal_id=self.principal_id,
            resource_group_name=self.resource_group_name,
            tags=self.tags,
            tenant_id=self.tenant_id)
def get_user_assigned_identity(name: Optional[str] = None,
                               resource_group_name: Optional[str] = None,
                               opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetUserAssignedIdentityResult:
    """
    Use this data source to access information about an existing User Assigned Identity.
    ## Example Usage
    ### Reference An Existing
    ```python
    import pulumi
    import pulumi_azure as azure
    example = azure.authorization.get_user_assigned_identity(name="name_of_user_assigned_identity",
        resource_group_name="name_of_resource_group")
    pulumi.export("uaiClientId", example.client_id)
    pulumi.export("uaiPrincipalId", example.principal_id)
    pulumi.export("uaiTenantId", example.tenant_id)
    ```
    :param str name: The name of the User Assigned Identity.
    :param str resource_group_name: The name of the Resource Group in which the User Assigned Identity exists.
    """
    # Build the invoke arguments expected by the provider (camelCase keys).
    __args__ = dict()
    __args__['name'] = name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to the provider version this SDK was generated against.
        opts.version = _utilities.get_version()
    # Synchronously invoke the provider function and unwrap the typed result.
    __ret__ = pulumi.runtime.invoke('azure:authorization/getUserAssignedIdentity:getUserAssignedIdentity', __args__, opts=opts, typ=GetUserAssignedIdentityResult).value
    return AwaitableGetUserAssignedIdentityResult(
        client_id=__ret__.client_id,
        id=__ret__.id,
        location=__ret__.location,
        name=__ret__.name,
        principal_id=__ret__.principal_id,
        resource_group_name=__ret__.resource_group_name,
        tags=__ret__.tags,
        tenant_id=__ret__.tenant_id)
@_utilities.lift_output_func(get_user_assigned_identity)
def get_user_assigned_identity_output(name: Optional[pulumi.Input[str]] = None,
                                      resource_group_name: Optional[pulumi.Input[str]] = None,
                                      opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetUserAssignedIdentityResult]:
    """
    Use this data source to access information about an existing User Assigned Identity.
    ## Example Usage
    ### Reference An Existing
    ```python
    import pulumi
    import pulumi_azure as azure
    example = azure.authorization.get_user_assigned_identity(name="name_of_user_assigned_identity",
        resource_group_name="name_of_resource_group")
    pulumi.export("uaiClientId", example.client_id)
    pulumi.export("uaiPrincipalId", example.principal_id)
    pulumi.export("uaiTenantId", example.tenant_id)
    ```
    :param str name: The name of the User Assigned Identity.
    :param str resource_group_name: The name of the Resource Group in which the User Assigned Identity exists.
    """
    # Body intentionally empty: lift_output_func forwards Output-typed
    # arguments to get_user_assigned_identity and wraps the result.
    ...
|
from django.shortcuts import get_object_or_404
from django.core.exceptions import ObjectDoesNotExist
from django.views.decorators.csrf import csrf_exempt
from rest_framework.decorators import api_view
from rest_framework import status
from rest_framework.response import Response
from .models import Deck, Flashcard
from .serializers import DeckSerializer, CardSerializer, RatingSeriallizer
@api_view(['GET', 'POST'])
@csrf_exempt
def decks_list(request):
    """
    List the requesting user's decks (GET) or create a new deck (POST).

    GET accepts an optional ``name`` query parameter to filter decks by
    exact name.  POST requires an authenticated user and returns the
    created deck with 201, or the validation errors with 400.
    """
    if request.method == 'GET':
        if 'name' in request.GET:
            decks = Deck.objects.filter(owner=request.user, name=request.GET['name'])
        else:
            decks = Deck.objects.filter(owner=request.user)
        serializer = DeckSerializer(decks, many=True)
        return Response(serializer.data)
    elif request.method == 'POST':
        # Bug fix: the anonymous check used to run only after validation
        # and returned the (empty) serializer errors with a 401.  Reject
        # anonymous users up front with an explicit message instead.
        if request.user.is_anonymous:
            return Response({'detail': 'Authentication required.'},
                            status=status.HTTP_401_UNAUTHORIZED)
        serializer = DeckSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save(owner=request.user)
            return Response(serializer.data,
                            status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET', 'PUT', 'DELETE'])
def deck_details(request, deck_id):
    """
    Retrieve (GET), update (PUT) or delete (DELETE) a single deck owned
    by the requesting user.  404 if the deck doesn't exist or belongs to
    someone else.
    """
    # All three methods operate on the same owner-scoped deck.
    deck = get_object_or_404(Deck, pk=deck_id, owner=request.user)
    if request.method == 'GET':
        serializer = DeckSerializer(deck)
        return Response(serializer.data)
    elif request.method == 'PUT':
        # Bug fix: PUT was declared in @api_view but never handled, so the
        # view fell through and returned None (a server error).
        serializer = DeckSerializer(deck, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    elif request.method == 'DELETE':
        deck.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
@api_view(['GET', 'POST'])
@csrf_exempt
def cards_list(request, deck_id):
    """
    List the flashcards of a deck (GET) or add a new flashcard (POST).

    GET accepts an optional ``days`` query parameter to restrict the
    result to cards due for study within that many days.
    """
    if request.method == 'GET':
        if 'days' in request.GET:
            cards = Flashcard.objects.get_cards_to_study(deck_id=deck_id,
                user=request.user, days=int(request.GET['days']))
        else:
            cards = Flashcard.objects.filter(deck__id=deck_id, deck__owner=request.user)
        serializer = CardSerializer(cards, many=True)
        return Response(serializer.data)
    elif request.method == 'POST':
        try:
            deck = Deck.objects.get(id=deck_id)
        except ObjectDoesNotExist:
            # Bug fix: this branch used to reference `serializer.errors`
            # before the serializer existed (NameError) and the nonexistent
            # `status.HTTP_401_BAD_REQUEST` attribute (AttributeError).
            return Response({'detail': 'Deck not found.'},
                            status=status.HTTP_404_NOT_FOUND)
        if request.user.is_anonymous:
            return Response({'detail': 'Authentication required.'},
                            status=status.HTTP_401_UNAUTHORIZED)
        serializer = CardSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save(owner=request.user, deck=deck)
            return Response(serializer.data,
                            status=status.HTTP_201_CREATED)
        # Bug fix: was the nonexistent status.HTTP_401_BAD_REQUEST.
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET'])
def card_details(request, deck_id, card_id):
    """
    Retrieve a single flashcard owned by the requesting user (GET only).
    404 if the card doesn't exist in that deck for that owner.
    """
    # Bug fix: the old trailing `return Response(serializer.errors, ...)`
    # was unreachable (@api_view only admits GET) and, had it ever run,
    # referenced an undefined `serializer` and the nonexistent
    # status.HTTP_401_BAD_REQUEST.  Removed.
    card = get_object_or_404(Flashcard, pk=card_id, deck__id=deck_id,
                             owner=request.user)
    serializer = CardSerializer(card)
    return Response(serializer.data)
@api_view(['GET', 'POST'])
def card_ratings(request, deck_id, card_id):
    """
    Retrieve (GET) or update (POST) the rating (study state) of a card
    owned by the requesting user.
    """
    # Both methods operate on the same owner-scoped card; fetch it once.
    card = get_object_or_404(Flashcard, pk=card_id, deck__id=deck_id,
                             owner=request.user)
    if request.method == 'GET':
        serializer = RatingSeriallizer(card)
        return Response(serializer.data)
    elif request.method == 'POST':
        serializer = RatingSeriallizer(card, data=request.data)
        if serializer.is_valid():
            serializer.save(rating=request.data['rating'])
            return Response(serializer.data, status=status.HTTP_202_ACCEPTED)
        # Bug fix: status.HTTP_401_BAD_REQUEST does not exist on the DRF
        # status module (AttributeError at runtime); 400 is the intent.
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
|
from django.contrib.auth import get_user_model
from django.utils.translation import ugettext_lazy as _
from rest_framework import generics, authentication, permissions,\
status
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from rest_framework.response import Response
from rest_framework.parsers import MultiPartParser, JSONParser
from rest_framework.views import APIView
from . import serializer
from core import models
def check_file_size_limit(picture_size, size_limit):
    """
    Return True if the image does not exceed the size limit.

    :param picture_size: size of the uploaded file, in bytes.
    :param size_limit: maximum allowed size, in bytes.
    :return: True when ``picture_size <= size_limit``, else False.
    """
    # Idiom fix: return the comparison directly instead of the
    # `if cond: return False / else: return True` detour.
    return picture_size <= size_limit
class CreateUserView(generics.CreateAPIView):
    """Creates a new user in the system (POST handled by CreateAPIView)."""
    serializer_class = serializer.CreateUserSerializer
class CreateTokenView(ObtainAuthToken):
    """Create a new auth token for user"""
    serializer_class = serializer.AuthTokenSerializer
    # Use the project's default renderers so the browsable API works here too.
    renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
class ManageUserView(generics.RetrieveUpdateAPIView):
    """Manage (retrieve/update) the authenticated user; token auth required."""
    serializer_class = serializer.ManageUserSerializer
    authentication_classes = [authentication.TokenAuthentication, ]
    permission_classes = [permissions.IsAuthenticated, ]
    def get_object(self):
        """Retrieve and return authenticated user"""
        # The managed object is always the requester, so no pk lookup.
        return self.request.user
class UserImageUploadView(APIView):
    """View to upload (POST) or fetch (GET) the authenticated user's
    profile picture.  Accepts JSON and multipart payloads; token auth
    required."""
    serializer_class = serializer.UserImageUploadSerializer
    authentication_classes = [authentication.TokenAuthentication, ]
    permission_classes = [permissions.IsAuthenticated, ]
    parser_classes = [JSONParser, MultiPartParser]
    def get(self, request, format=None):
        """To get user profile picture"""
        # NOTE(review): request.user is looked up by email here — assumes
        # the user model uses email as the USERNAME_FIELD; confirm.
        user = get_user_model().objects.get(email=request.user)
        user_profile = models.UserProfile.objects.get(user=user)
        # Preparing the data manually as per our serializer
        data = {'user': {'username': user.username},
                'image': user_profile.image or None}
        # Serializing our prepared data
        ser = serializer.UserImageUploadSerializer(
            user_profile, data=data, context={"request": request})
        # Returning appropriate response
        if ser.is_valid():
            return_ser_data = {'id': ser.data.get('id'),
                               'image': ser.data.get('image')}
            return Response(return_ser_data, status=status.HTTP_200_OK)
        else:
            return Response(ser.errors, status=status.HTTP_400_BAD_REQUEST)
    def post(self, request, format=None):
        """To save the profile picture"""
        user = get_user_model().objects.get(email=request.user)
        user_profile = models.UserProfile.objects.get(user=user)
        # Formatting the data to as per our defined serializer
        data = {'user': {'username': user.username},
                'image': request.data.get('image')}
        # Serializing our data
        ser = serializer.UserImageUploadSerializer(
            user_profile, data=data, context={"request": request})
        if ser.is_valid():
            if ser.validated_data:
                # Checking the size limit of the uploaded file.
                # The limit is 20 MB: 20 * 1024 * 1024 = 20971520 bytes
                # (an earlier comment said "max 2 Mb"; the constant and the
                # error message both say 20, so 20 MB is correct).
                if not check_file_size_limit(request.data.get('image').size,
                                             size_limit=20971520):
                    msg = _('File size too large. Maximum allowed size: 20 Mb')
                    res = {'image': [msg]}
                    return Response(res, status=status.HTTP_400_BAD_REQUEST)
                # Deleting the old image before uploading new image
                if user_profile.image:
                    user_profile.image.delete()
                # Saving the model
                ser.save(user=user)
            return_ser_data = {'id': ser.data.get('id'),
                               'image': ser.data.get('image')}
            return Response(return_ser_data, status=status.HTTP_200_OK)
        else:
            return Response(ser.errors, status=status.HTTP_400_BAD_REQUEST)
|
# Generated by Django 3.2.5 on 2021-11-30 10:13
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add the optional free-text `archive_description`
    field to ContainersTakeoutRequest."""
    dependencies = [
        ('takeouts_app', '0034_auto_20211013_1344'),
    ]
    operations = [
        migrations.AddField(
            model_name='containerstakeoutrequest',
            name='archive_description',
            # blank=True makes the field optional in forms; verbose_name is
            # Russian for "description for the archive takeout".
            field=models.TextField(blank=True, verbose_name='описание для выноса архива'),
        ),
    ]
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import *
from builtins import object
import urllib.request, urllib.parse, urllib.error, json
import json
import logging
from jwcrypto import jwt, jwk
class OpenIDAuthMethod(object):
    """
    ID-token verification against an OpenID Connect provider.

    Reads the discovery URI and client ID from
    ``conf/net/auth/openid_auth.json``, fetches the provider configuration
    and its JWK set, and verifies tokens' signatures and claims.
    """
    def __init__(self):
        """
        Retrieve auth server config and set up the validator.

        The discovery URI and the audience (client ID) are read from the
        ``conf/net/auth/openid_auth.json`` key file.
        """
        # Read the parameters (discoveryURI, clientID) from the key file.
        # Fix: use a context manager so the file handle is not leaked.
        with open('conf/net/auth/openid_auth.json') as key_file:
            key_data = json.load(key_file)
        discoveryURI = key_data["discoveryURI"]
        audience = key_data["clientID"]
        # Now, use them to retrieve the provider configuration.
        self.config = json.loads(OpenIDAuthMethod.__fetch_content__(discoveryURI))
        self.config['audience'] = audience
        # Fetch signing key/certificate set for token verification.
        jwk_response = OpenIDAuthMethod.__fetch_content__(self.config['jwks_uri'])
        self.jwk_keyset = jwk.JWKSet.from_json(jwk_response)
    @staticmethod
    def __fetch_content__(url):
        """Fetch and return the raw response body of `url`."""
        response = urllib.request.urlopen(url)
        return response.read()
    def __verify_claim__(self, decoded_token_json):
        """Raise if the token's issuer or audience does not match config."""
        if decoded_token_json['iss'] != self.config['issuer']:
            raise Exception('Invalid Issuer')
        if decoded_token_json['aud'] != self.config['audience']:
            raise Exception('Invalid Audience')
    def verifyUserToken(self, token):
        """
        Verify the token with the provided JWK certificate and claims
        :param token: the token to verify
        :return: the email claim from the decoded ID token body
        """
        decoded_token = jwt.JWT(key=self.jwk_keyset, jwt=token)
        decoded_json = json.loads(decoded_token.claims)
        logging.debug("decoded_json = %s" % decoded_json)
        self.__verify_claim__(decoded_json)
        email = decoded_json['email']
        # Bug fix: the format string was missing its %s placeholder, which
        # made the `%` operator raise TypeError instead of logging.
        logging.debug("After verifying claim, returning valid email = %s" % email)
        return email
|
>>> cur.execute('''UPDATE PopByRegion SET Population=100600 WHERE Region = "Japan"''')
<sqlite3.Cursor object at 0x7f7f281921f0>
>>> cur.execute('SELECT * FROM PopByRegion WHERE Region = "Japan"')
<sqlite3.Cursor object at 0x7f7f281921f0>
>>> cur.fetchone()
('Japan', 100600)
>>> cur.execute('DELETE FROM PopByRegion WHERE Region < "L"')
<sqlite3.Cursor object at 0x7f7f281921f0>
>>> cur.execute('SELECT * FROM PopByRegion')
<sqlite3.Cursor object at 0x7f7f281921f0>
>>> cur.fetchall()
[('Southeastern Africa', 743112)]
>>> cur.execute('INSERT INTO PopByRegion VALUES ("Japan", 100562)')
<sqlite3.Cursor object at 0x7f7f281921f0>
>>> cur.execute('DROP TABLE PopByRegion')
<sqlite3.Cursor object at 0x7f7f281921f0> |
"""这个是base64"""
print(__name__)
|
import setuptools
# Packaging metadata for the buildbot-washer utility library.
setuptools.setup(
    name="buildbot-washer",
    version="1.1.0",
    author="Roberto Abdelkader Martínez Pérez",
    author_email="robertomartinezp@gmail.com",
    description="Buildbot Utility Library",
    # Ship every discovered package except the test and docs trees.
    packages=setuptools.find_packages(exclude=["tests", "docs"]),
    license="Apache",
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        # NOTE(review): this package plugs into Buildbot, not Buildout —
        # the classifier below looks like a mix-up; confirm against the
        # PyPI trove classifier list before changing it.
        'Framework :: Buildout :: Extension',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Topic :: Software Development :: Testing'
    ],
    install_requires=[
        "environconfig==1.7.0"
    ],
    # Register the steps and the latent worker as Buildbot plugins.
    entry_points={
        "buildbot.steps": [
            "TriggerFromFile = washer.master.steps.triggerfromfile:TriggerFromFile",
            "WasherTask = washer.master.steps.washertask:WasherTask",
            "ReduceTriggerProperties = washer.master.steps.reducetriggerproperties:ReduceTriggerProperties"
        ],
        "buildbot.worker": [
            "WasherDockerLatentWorker = washer.master.worker.docker:WasherDockerLatentWorker"
        ]
    }
)
|
from __future__ import absolute_import, division, print_function
import sys
import libtbx.utils
import xml.etree.ElementTree as ET
from libtbx import easy_pickle
from libtbx.utils import Sorry
import libtbx.load_env
from libtbx import smart_open
import os
import csv
web_urls = {"rcsb": "http://www.rcsb.org/pdb/rest/customReport.xml?"+\
"pdbids={pdb_list}"+\
"&customReportColumns=structureId,refinementResolution,rWork,rFree"
}
def get_experimental_pdb_info(pdbids, site="rcsb"):
    """
    Query the remote custom-report service for experimental information.

    returns list of tuples (pdb_id, resolution, rwork, rfree) and dict
    pdbid: (resolution, rwork, rfree)
    """
    assert site in ["rcsb"]
    def _value(node):
        # The XML report marks missing values with the literal 'null'.
        return None if node.text == 'null' else float(node.text)
    url = web_urls.get(site).format(pdb_list=",".join(pdbids))
    response = libtbx.utils.urlopen(url)
    root = ET.fromstring(response.read())
    rlist = []
    rdict = {}
    for record in root:
        entry = (record[0].text, _value(record[1]), _value(record[2]),
                 _value(record[3]))
        rlist.append(entry)
        rdict[entry[0]] = entry[1:]
    return rlist, rdict
class pdb_info_local(object):
    def __init__(self):
        """
        Loads pickle with data. Path is temporary in current work dir.
        Should be centralized somewhere else upon going to production.
        """
        db_dict = {}
        pdb_info_file = libtbx.env.find_in_repositories(
            relative_path="cctbx_project/iotbx/bioinformatics/pdb_info.csv.gz",
            test=os.path.isfile)
        csv_file = smart_open.for_reading(file_name=pdb_info_file)
        csv_reader = csv.reader(csv_file,delimiter=";")
        for row in csv_reader:
            # Key by PDB id (column 0); keep the remaining five columns.
            db_dict[row[0]] = (row[1],row[2],row[3],row[4],row[5])
        self.db_dict = db_dict
    def _get_info(self, pdbid, skip_none=True, raise_if_absent=False):
        """
        Look up one pdbid (case-insensitive; the DB keys are upper-case).
        With skip_none=True an entry whose first field (resolution) is None
        is treated as absent.  With raise_if_absent=True a missing entry
        raises Sorry instead of returning None.
        """
        info = self.db_dict.get(pdbid.upper(), None)
        if info is None and raise_if_absent:
            raise Sorry("Not in database")
        if skip_none and info is not None and info[0] is None:
            info = None
        return info
    def get_info_list(self, pdbids, skip_none=True, raise_if_absent=False):
        """
        Get info about pdbids (list of strings) in form of list of tuples
        (pdbid, resolution, rwork, rfree). Easy to sort.
        """
        result = []
        for pdbid in pdbids:
            # Bug fix: skip_none was accepted but never forwarded to
            # _get_info, so passing skip_none=False had no effect.
            info = self._get_info(pdbid, skip_none=skip_none,
                                  raise_if_absent=raise_if_absent)
            if info is not None:
                result.append( (pdbid,) + info)
        return result
    def get_info_dict(self, pdbids, skip_none=True, raise_if_absent=False):
        """
        Get info about pdbids (list of strings) in form of dict
        pdbid: (resolution, rwork, rfree). Easy to lookup.
        """
        result = {}
        for pdbid in pdbids:
            # Bug fix: forward skip_none (see get_info_list).
            info = self._get_info(pdbid, skip_none=skip_none,
                                  raise_if_absent=raise_if_absent)
            if info is not None:
                result[pdbid] = info
        return result
def get_all_experimental_pdb_info_to_pkl():
    """
    Get info (resolution, rwork, rfree) for all PDB from RCSB and dump into
    pickle file:
    pdb_dict 5.1 Mb.
    Takes ~15 seconds from LBL.
    Use only xray diffraction.
    """
    # NOTE(review): this targets the legacy RCSB custom-report REST
    # endpoint, which has since been retired — confirm the URL still
    # resolves before relying on this helper.
    get_all = "http://www.rcsb.org/pdb/rest/customReport.xml?"+\
        "pdbids=*&customReportColumns=" +\
        "structureId,refinementResolution,rWork,rFree,experimentalTechnique,rObserved"
    print(get_all)
    rdict = {}
    data = libtbx.utils.urlopen(get_all)
    str_data = data.read()
    # print str_data
    root = ET.fromstring(str_data)
    n_bad = 0
    for record in root:
        # Keep X-ray structures only; skip EM/NMR/other entries.
        if record[4].text != "X-RAY DIFFRACTION":
            continue
        pdbid = record[0].text
        # The XML report marks missing values with the literal 'null'.
        resolution = None if record[1].text == 'null' else float(record[1].text)
        rwork = None if record[2].text == 'null' else float(record[2].text)
        rfree = None if record[3].text == 'null' else float(record[3].text)
        if rwork is None:
            # put rObserved
            rwork = None if record[5].text == 'null' else float(record[5].text)
        tup = (pdbid, resolution, rwork, rfree)
        rdict[record[0].text] = tup[1:]
        # print tup
        if tup.count(None) > 0:
            # Record still incomplete after the rObserved fallback.
            print(tup)
            n_bad += 1
    print("Total bad records", n_bad)
    easy_pickle.dump(file_name='pdb_dict.pickle', obj=rdict)
def tst_pdb_info_local():
    """Exercise the local DB lookup and the remote query against two
    known entries (1ucs, 1yjp)."""
    # Enable before running.
    # get_all_experimental_pdb_info_to_pkl()
    local_db = pdb_info_local()
    expected_local_dict = {'1yjp': (1.8, 0.181, 0.19), '1ucs': (0.62, 0.133, 0.155)}
    expected_local_list = [('1ucs', 0.62, 0.133, 0.155), ('1yjp', 1.8, 0.181, 0.19)]
    assert local_db.get_info_dict(["1ucs", "1yjp"]) == expected_local_dict
    assert local_db.get_info_list(["1ucs", "1yjp"]) == expected_local_list
    expected_remote_dict = {'1YJP': (1.8, 0.181, 0.19), '1UCS': (0.62, 0.133, 0.155)}
    expected_remote_list = [('1UCS', 0.62, 0.133, 0.155), ('1YJP', 1.8, 0.181, 0.19)]
    remote_list, remote_dict = get_experimental_pdb_info(["1ucs", "1yjp"])
    assert remote_list == expected_remote_list
    assert remote_dict == expected_remote_dict
def run(args):
    # CLI entry point; `args` (sys.argv[1:]) is accepted for interface
    # consistency but currently unused.
    tst_pdb_info_local()
if __name__ == '__main__':
    run(sys.argv[1:])
|
from matplotlib import animation as animation
import numpy as np
import mpl_toolkits.mplot3d.axes3d as p3
import matplotlib.pyplot as plt
import time
import matplotlib
#from IPython.display import HTML
import matplotlib.patches as patches
class visualize2D_anim():
    """
    Render a 2D animation (GIF) of a 21-joint hand skeleton sequence and
    its predicted vs. ground-truth action labels.

    NOTE(review): all data/label paths are hard-coded to one machine
    (/media/data3/duc/F-PHAB/...) — confirm before reuse.
    """
    def __init__(self, dir=None,label=0):
        # dir: skeleton file path under the fphab_data root;
        # label: predicted class index (converted with int() where used).
        self.dir= dir
        self.label = label
        self.data3D= self.readData2()
        self.predict_name=self.cvtName()
    def readData(self):
        """Parse a file with one frame per line: a leading value followed
        by 21*3 joint coordinates.  Returns an array (frames, 21, 3)."""
        lines = open(self.dir,'rt').read().strip().split('\n')
        skeleton_data=[]
        for l in lines:
            frame_data = np.array([float(v) for v in l.strip().split(' ')])
            # Drop the leading value (presumably a frame index — confirm).
            frame_data = frame_data[1:]
            frame_data = np.reshape(frame_data,(21,3))
            skeleton_data.append(frame_data)
        skeleton_data=np.array(skeleton_data)
        #print(skeleton_data.shape)
        return skeleton_data
    def readData2(self):
        """Parse a file with one joint per line; every 21 consecutive
        lines form one frame.  Returns (frames, 21, values_per_line)."""
        lines = open("/media/data3/duc/F-PHAB/FPHAB/fphab_data/"+self.dir,'rt').read().strip().split('\n')
        frame_num = int(len(lines) / 21)
        frameDatas=[]
        fr=[]
        for i,l in enumerate(lines):
            frame_data = np.array([float(v) for v in l.strip().split(' ')])
            fr.append(frame_data)
        for frame in range(frame_num):
            # Regroup the flat per-joint list into frames of 21 joints.
            frameData = fr[(frame*21):(frame+1)*21]
            frameData = np.array(frameData)
            frameDatas.append(frameData)
        frameDatas=np.array(frameDatas)
        print('shape:{}'.format(frameDatas.shape))
        return frameDatas
    def realName(self):
        """Ground-truth class number parsed from the file name prefix
        ("<label>_...")."""
        name=self.dir.split('/')[-1]
        name=name.split('_')[0]
        return int(name)
    def anim_skel(self):
        """Build the skeleton animation and save it as a GIF."""
        seq = self.data3D
        fig = plt.figure(figsize=[10,10])
        ax = fig.add_subplot(111)
        lines = []
        sct=[]
        print(seq.shape)
        N = len(seq)
        data = np.array(list(range(0,N))).transpose()
        #joints order
        # Consecutive pairs are bone endpoints (1-based in the literal,
        # shifted to 0-based); 5 fingers x 4 bones = 40 indices, 20 bones.
        joints_order_org=[v-1 for v in [1,2,2,7,7,8,8,9,1,3,3,10,10,11,11,12,1,4,4,13,13,14,14,15,1,5,5,16,16,17,17,18,1,6,6,19,19,20,20,21]]
        joints_order = joints_order_org[::-1]
        #print(len(joints_order))
        skel = seq [0,:,:]
        #color list
        c=['purple','blue','green','yellow','red']
        c.reverse()
        count_color,count= 0,1
        for id1,id2 in zip(joints_order[::2],joints_order[1::2]):
            xs, ys = [],[]
            # y is flipped (100 - y) so the hand is drawn upright.
            xs=[skel[id1,0],skel[id2,0]]
            ys=[100-skel[id1,1],100-skel[id2,1]]
            line,= plt.plot(xs,ys,color=c[count_color],lw=5)
            scatter=plt.scatter(xs,ys,color=c[count_color],lw=3)
            if(count%4==0):
                # Each finger is 4 bones; switch colour per finger.
                count_color+=1
            count+=1
            lines.append(line)
            sct.append(scatter)
        # Bounding box of the first frame plus the prediction label text.
        minx,miny=min(seq[0,:,0]),min(seq[0,:,1])
        maxx,maxy=max(seq[0,:,0])-minx,max(seq[0,:,1])-miny
        rect = patches.Rectangle((minx,miny),maxx, maxy, linewidth=1, edgecolor='green', facecolor='none',label="change")
        ax.add_patch(rect)
        text=ax.text(minx, maxy, self.predict_name)
        # Green label box when the prediction matches ground truth, red otherwise.
        if int(self.realName()-1)==int(self.label):
            bbox_color='green'
        else:
            bbox_color='red'
        print("label:"+str(self.realName()-1)+' '+str(self.label))
        text.set_bbox(dict(facecolor=bbox_color, alpha=0.4))
        plt.grid(False)
        print(seq[0,0,0],seq[0,0,1])
        # Fix the view to a 400x400 window centred on joint 0 of frame 0.
        plt.xlim(seq[0,0,0]-200,seq[0,0,0]+200)
        plt.ylim(100-seq[0,0,1]-200,100-seq[0,0,1]+200)
        plt.title(self.cvt_r_Name(self.realName()))
        #plt.legend(rect,"hii")
        def update(num,data, lines,sct,rect,text):
            # Per-frame callback: move every bone segment, its scatter
            # points, the bounding box and the label text.
            for i,line in enumerate(lines):
                segment = np.zeros((2,2))
                joint_1 = joints_order[i*2]
                joint_2 = joints_order[i*2+1]
                #print(joint_1,joint_2)
                xs=[seq[num,joint_1,0],seq[num,joint_2,0]]
                ys=[100-seq[num,joint_1,1],100-seq[num,joint_2,1]]
                #print(xs,ys)
                data=np.hstack((xs,ys))
                data=data.reshape(2,2).transpose()
                #print(data)
                line.set_xdata(xs)
                line.set_ydata(ys)
                sct[i].set_offsets(data)
            # Recompute the bounding box with a 5-unit margin.
            minx,miny=min(seq[num,:,0])-5,min(100-seq[num,:,1])-5
            maxx,maxy=max(seq[num,:,0])-minx+5,max(100-seq[num,:,1])-miny+5
            rect.set_width(maxx)
            rect.set_height(maxy)
            rect.set_xy((minx,miny))
            text.set_position((rect.get_x(),rect.get_height()+3+rect.get_y()))
            #return lines, rect, sct
        anim = animation.FuncAnimation(fig, update, frames=N,fargs=(data,lines,sct,rect,text),interval=100,)
        # Output name encodes predicted action plus subject/trial parsed
        # from the input file name ("<act>_<x>_<s>_<th>").
        nametxt=self.dir.split('/')[-1]
        act,_,s,th=nametxt.split('_')
        anim.save('/media/data3/duc/F-PHAB/FPHAB/results/vs/'+self.cvtName()+'____s'+s+'_'+th+'.gif', writer='pillow')
        print('done')
    def cvtName(self):
        """Predicted class name looked up from the label file (0-based)."""
        label= self.label
        dir= "/media/data3/duc/F-PHAB/FPHAB/label_fphab.txt"
        file_names=open(dir, 'rt').read().strip().split('\n')
        return file_names[int(label)]
    def cvt_r_Name(self,name=0):
        """Ground-truth class name from the label file (1-based index)."""
        label= name
        dir= "/media/data3/duc/F-PHAB/FPHAB/label_fphab.txt"
        file_names=open(dir, 'rt').read().strip().split('\n')
        return file_names[int(label)-1]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.