# Copyright (C) 2015 Haruhiko Matsuo <halm.matsuo@gmail.com>
#
# Distributed under the MIT License.
# See accompanying file LICENSE or copy at
# http://opensource.org/licenses/MIT
"""Decorators"""
from functools import wraps
from itertools import product
import numpy as np
def tag_maker(counter_method):
"""This is a decorator function creating a key for
the PA event dictionary.
Parameters
----------
monitor_level : monitor level
'Application' (or 'A'), 'Process' ('P'), 'Thread' ('T')
pid: process id (MPI rank)
tid: thread id
"""
def _tag_expander(*tag_in):
"""If the monitor level is given by its acronym,
it is expand to the long form before it is passed to
the decorator."""
if tag_in[1][0] == 'T':
monitor_level = 'Thread'
pid = tag_in[2]
tid = tag_in[3]
tag = (tag_in[0], monitor_level, pid, tid)
elif tag_in[1][0] == 'P':
monitor_level = 'Process'
pid = tag_in[2]
tag = (tag_in[0], monitor_level, pid)
elif tag_in[1][0] == 'A':
monitor_level = 'Application'
tag = (tag_in[0], monitor_level)
        else:
            raise ValueError("Unknown monitor level: {!r}".format(tag_in[1]))
return tag
@wraps(counter_method)
def _tag_maker(*args):
"""decorator"""
tag = _tag_expander(*args)
res = counter_method(*tag)
return res
return _tag_maker
def element_wise(counter_method):
"""This is a decorator function allowing multi-process/thread input.
Note that this decorator should always follow the decorator 'tag_maker'.
"""
def _make_iterator(*args):
"""Make a compound iterator from a process iterator and
a thread one.
Note that 'Application' case should not execute this
function."""
monitor_level = args[1]
arg_pid = args[2]
if hasattr(arg_pid, '__iter__'):
            pid_itr = (i for i in range(arg_pid[0], arg_pid[1]))
else:
pid_itr = (arg_pid,)
if monitor_level == 'Thread':
arg_tid = args[3]
if hasattr(arg_tid, '__iter__'):
                tid_itr = (i for i in range(arg_tid[0], arg_tid[1]))
else:
tid_itr = (arg_tid,)
if monitor_level == 'Process':
return_itr = pid_itr
elif monitor_level == 'Thread':
return_itr = (pid_itr, tid_itr)
return return_itr
@wraps(counter_method)
def _element_wise(*args):
"""Distribute multi-process/thread input"""
if args[1] == 'Thread':
pid_itr, tid_itr = _make_iterator(*args)
retval = [counter_method(args[0], args[1], pid, tid)
for pid, tid in product(pid_itr, tid_itr)]
return np.array(retval)
elif args[1] == 'Process':
pid_itr = _make_iterator(*args)
retval = [counter_method(args[0], args[1], pid) for pid in pid_itr]
return np.array(retval)
elif args[1] == 'Application':
return np.array(counter_method(*args))
else:
            print('Unknown monitor level')
return _element_wise
def return_val_unit(counter_method):
"""Convert cycle counts to another unit.
The unit can be 'sec' (default), 'rate', or 'cycle'.
"""
@wraps(counter_method)
def _return_val_unit(*args, **kwargs):
if kwargs == {}:
ret_unit = 'sec'
else:
ret_unit = kwargs['unit']
if ret_unit == 'sec':
cycle_counts = args[0].d.cycle_counts1(*args[1:]) # exclude 'self'
cpu_cycle = args[0].e[0][2] * 1.0e6 # Convert MHz to Hz
res = counter_method(*args) / cpu_cycle
elif ret_unit == 'rate':
cycle_counts = args[0].d.cycle_counts1(*args[1:]) # exclude 'self'
if hasattr(cycle_counts, '__getitem__'):
res = counter_method(*args) / cycle_counts.astype(float)
else:
res = counter_method(*args) / float(cycle_counts)
elif ret_unit == 'cycle':
res = counter_method(*args)
return res
return _return_val_unit
def monitor_level_checker(counter_method):
"""This is a decorator function checking monitor level.
"""
def _check_monitor_level(*tag_in):
d = tag_in[0].d # self.RawDat
ml = tag_in[1][0] # monitor level
bad_mon_lev = '[Invalid monitor level] '
if d.is_hybrid and ml != 'T':
raise Exception(bad_mon_lev + 'Use monitor level: Thread.')
elif d.is_thread and ml != 'T':
raise Exception(bad_mon_lev + 'Use monitor level: Thread.')
elif d.is_flatmpi and ml != 'P':
raise Exception(bad_mon_lev + 'Use monitor level: Process.')
elif d.is_single and ml != 'A':
raise Exception(bad_mon_lev + 'Use monitor level: Application.')
@wraps(counter_method)
def _monitor_level_checker(*args, **kwargs):
"""decorator"""
_check_monitor_level(*args)
res = counter_method(*args, **kwargs)
return res
return _monitor_level_checker
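# --- Illustrative usage sketch (added; not part of the original module) ---
# The decorators above are intended to stack on a counter method of a class
# that holds the raw event data; the class name and attributes below are
# hypothetical placeholders, shown only to suggest the call pattern:
#
#     class Counter(object):
#         @monitor_level_checker
#         @return_val_unit
#         @element_wise
#         @tag_maker
#         def cycle_counts(self, *tag):
#             ...
#
#     # e.g. a thread-level query over threads 0..3 of MPI rank 0:
#     # counter.cycle_counts('Thread', 0, (0, 4), unit='sec')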
|
from django.apps import AppConfig
class HTMLBlockConfig(AppConfig):
name = 'glitter.blocks.html'
label = 'glitter_html'
|
"""Discover LG WebOS TV devices."""
from netdisco.util import urlparse
from . import SSDPDiscoverable
# pylint: disable=too-few-public-methods
class Discoverable(SSDPDiscoverable):
"""Add support for discovering LG WebOS TV devices."""
def info_from_entry(self, entry):
"""Return the most important info from a uPnP entry."""
return urlparse(entry.values['location']).hostname
def get_entries(self):
"""Get all the LG WebOS TV device uPnP entries."""
return self.find_by_device_description(
{
"deviceType": "urn:schemas-upnp-org:device:Basic:1",
"modelName": "LG Smart TV"
}
)
|
from __future__ import annotations
from abc import ABCMeta
from collections.abc import Mapping, Set, AsyncGenerator
from contextlib import closing, asynccontextmanager, AsyncExitStack
from typing import Final
from pymap.context import cluster_metadata, connection_exit
from pymap.exceptions import InvalidAuth, ResponseError
from pymap.interfaces.backend import BackendInterface
from pymap.interfaces.login import IdentityInterface
from pymap.interfaces.session import SessionInterface
from pymap.plugin import Plugin
from pymapadmin.grpc.admin_pb2 import Result, SUCCESS, FAILURE
from ..errors import get_unimplemented_error
from ..typing import Handler
__all__ = ['handlers', 'BaseHandler', 'LoginHandler']
#: Registers new admin handler plugins.
handlers: Plugin[type[BaseHandler]] = Plugin('pymap.admin.handlers')
class BaseHandler(Handler, metaclass=ABCMeta):
"""Base class for implementing admin request handlers.
Args:
backend: The backend in use by the system.
"""
def __init__(self, backend: BackendInterface) -> None:
super().__init__()
self.config: Final = backend.config
self.login: Final = backend.login
def _get_admin_keys(self) -> Set[bytes]:
admin_keys: set[bytes] = set()
if self.config.admin_key is not None:
admin_keys.add(self.config.admin_key)
remote_keys = cluster_metadata.get().remote.get('admin')
if remote_keys is not None:
admin_keys.update(remote_keys.values())
return admin_keys
@asynccontextmanager
async def catch_errors(self, command: str) -> AsyncGenerator[Result, None]:
"""Context manager to catch
:class:`~pymap.exceptions.ResponseError` exceptions and include them in
the response.
Args:
command: The admin command name.
"""
response = b'. OK %b completed.' % command.encode('utf-8')
result = Result(code=SUCCESS, response=response)
async with AsyncExitStack() as stack:
connection_exit.set(stack)
try:
yield result
except NotImplementedError as exc:
raise get_unimplemented_error() from exc
except ResponseError as exc:
result.code = FAILURE
result.response = bytes(exc.get_response(b'.'))
result.key = type(exc).__name__
@asynccontextmanager
async def login_as(self, metadata: Mapping[str, str], user: str) \
-> AsyncGenerator[IdentityInterface, None]:
"""Context manager to login and get an identity object.
Args:
            metadata: The grpc request metadata.
user: The user to authorize as.
Raises:
:class:`~pymap.exceptions.InvalidAuth`
"""
admin_keys = self._get_admin_keys()
try:
creds = self.login.tokens.parse(user, metadata['auth-token'],
admin_keys=admin_keys)
except (KeyError, ValueError) as exc:
raise InvalidAuth() from exc
yield await self.login.authenticate(creds)
@asynccontextmanager
async def with_session(self, identity: IdentityInterface) \
-> AsyncGenerator[SessionInterface, None]:
"""Context manager to create a mail session for the identity.
Args:
identity: The authenticated user identity.
Raises:
:class:`~pymap.exceptions.InvalidAuth`
"""
stack = connection_exit.get()
session = await stack.enter_async_context(identity.new_session())
stack.enter_context(closing(session))
yield session
|
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 12 00:01:10 2018
@author: deanecke
"""
import pandas as pd
import numpy as np
import os
from sklearn import tree, ensemble
from sklearn.preprocessing import MinMaxScaler
from sklearn.ensemble import BaggingClassifier, AdaBoostClassifier
from sklearn.utils import shuffle
from sklearn.externals import joblib
from sklearn.metrics import roc_curve, roc_auc_score
from sklearn.feature_selection import chi2, SelectKBest
from sklearn.neighbors import KNeighborsClassifier
from sklearn import metrics
class ExperimentsManager:
def __init__(self, dataset_csv):
use_cols = pd.read_csv(os.path.abspath('data/use_cols.csv'))
cols_to_drop = ['device_category']
y_col = 'device_category'
test = pd.read_csv(os.path.abspath(dataset_csv), usecols=use_cols, low_memory=False)
test = shuffle(test)
self.test = self.clear_missing_data(test)
        self.x_test = self.perform_feature_scaling(self.test.drop(cols_to_drop, axis=1))
self.y_test = self.test[y_col]
self.devices = self.y_test.unique()
def experiment_random_forest(self):
"""
        Runs all the different random forest classifiers for all devices and prints
        their AUC values accordingly
"""
for device in self.devices:
for criterion_name in ['gini','entropy']:
for forest_size in [3,7,11,15,19,21,23]:
clf = self.load_model_from_pkl(r"C:\Users\deanecke\Documents\Project_updated\IoT-device-type-identification-master\models\{0}\{0}_forest_{1}_{2}.pkl".format(device,criterion_name,forest_size))
y_test = np.array(pd.Series(self.get_is_dev_vec(device, self.y_test)))
pred = np.array(clf.predict(self.x_test))
fpr, tpr, thresholds = metrics.roc_curve(y_test, pred)
print("DEVICE:{0},CRITERION:{1},SIZE:{2}".format(device, criterion_name, forest_size))
print(metrics.auc(fpr, tpr))
def experiment_decision_trees(self):
"""
        Runs all the different decision tree classifiers for all devices and prints
        their AUC values accordingly
"""
for device in self.devices:
for criterion_name in ['gini','entropy']:
for samples_size in [50,100,200,400]:
clf = self.load_model_from_pkl(r"C:\Users\deanecke\Documents\Project_updated\IoT-device-type-identification-master\models\{0}\{0}_cart_{1}_{2}_samples_leaf.pkl".format(device,criterion_name,samples_size))
y_test = np.array(pd.Series(self.get_is_dev_vec(device, self.y_test)))
pred = np.array(clf.predict(self.x_test))
fpr, tpr, thresholds = metrics.roc_curve(y_test, pred)
print("DEVICE:{0},CRITERION:{1},SIZE:{2}".format(device, criterion_name, samples_size))
print(metrics.auc(fpr, tpr))
def experiment_knn(self):
"""
Running the KNN (K Nearest Neighbours) classifier for
each device
"""
for device in self.devices:
clf = self.load_model_from_pkl(r"C:\Users\deanecke\Documents\Project_updated\IoT-device-type-identification-master\models\{0}\{0}_knn_5_uniform.pkl".format(device))
y_test = np.array(pd.Series(self.get_is_dev_vec(device, self.y_test)))
pred = np.array(clf.predict(self.x_test))
fpr, tpr, thresholds = metrics.roc_curve(y_test, pred)
print("DEVICE:{0}".format(device))
print(metrics.auc(fpr, tpr))
def experiment_sgd(self):
"""
Running the SGD (Stochastic Gradient Descent) classifier for
each device
"""
for device in self.devices:
clf = self.load_model_from_pkl(r"C:\Users\deanecke\Documents\Project_updated\IoT-device-type-identification-master\models\{0}\{0}_sgd.pkl".format(device))
y_test = np.array(pd.Series(self.get_is_dev_vec(device, self.y_test)))
pred = np.array(clf.predict(self.x_test))
fpr, tpr, thresholds = metrics.roc_curve(y_test, pred)
print("DEVICE:{0}".format(device))
print(metrics.auc(fpr, tpr))
def experiment_naive_bayes(self):
"""
Running the Gaussian Naive Bayes classifier for
each device
"""
for device in self.devices:
clf = self.load_model_from_pkl(r"C:\Users\deanecke\Documents\Project_updated\IoT-device-type-identification-master\models\{0}\{0}_naive_bayes.pkl".format(device))
y_test = np.array(pd.Series(self.get_is_dev_vec(device, self.y_test)))
pred = np.array(clf.predict(self.x_test))
fpr, tpr, thresholds = metrics.roc_curve(y_test, pred)
print("DEVICE:{0}".format(device))
print(metrics.auc(fpr, tpr))
def experiment_MLP(self):
"""
        Runs all the different MLP classifiers for all devices and prints
        their AUC values accordingly
"""
for device in self.devices:
for first_layer_neurons in [1,2,3,4,5]:
for second_layer_neurons in [1,2,3,4,5]:
clf = self.load_model_from_pkl(r"C:\Users\deanecke\Documents\Project_updated\IoT-device-type-identification-master\models\{0}\{0}_MLP_{1}_{2}_sgd.pkl".format(device,first_layer_neurons,second_layer_neurons))
y_test = np.array(pd.Series(self.get_is_dev_vec(device, self.y_test)))
pred = np.array(clf.predict(self.x_test))
fpr, tpr, thresholds = metrics.roc_curve(y_test, pred)
print("DEVICE:{0},FIRST LAYER:{1},SECOND LAYER:{2}".format(device, first_layer_neurons, second_layer_neurons))
print(metrics.auc(fpr, tpr))
def load_model_from_pkl(self, pkl_file_full_path):
return joblib.load(pkl_file_full_path)
def is_dev(self, this_dev_name, dev_name):
return 1 if this_dev_name == dev_name else 0
def clear_missing_data(self, df):
df_with_nan = df.replace("?", np.NaN)
        return df_with_nan.dropna(axis=0)
def perform_feature_scaling(self, x_train):
"""
This method is used in order to perform feature scaling according to the
min-max scaler. The scaler can be replaced with another one, like the
standard scaler
"""
scaler = MinMaxScaler()
scaler.fit(x_train)
return pd.DataFrame(scaler.transform(x_train), columns=x_train.columns)
def get_is_dev_vec(self, this_dev_name, dev_names):
"""
This method generates a list with entries 0 or 1 to indicate which of the
entries in the dev_names list is the device we are currently training/testing
a classifier for.
"""
return [self.is_dev(this_dev_name, dev_name) for dev_name in dev_names]
|
from .Derivative import *
from .Series import *
from .FunctionCall import *
from .UnaryFormatting import *
from .Arithmatic import *
|
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from datetime import datetime
from vggface2_datagen import genid
from model import build_rec_model
np.set_printoptions(threshold=np.inf, linewidth=np.inf)
img_dir_path = '../datasets/vggface2/train_refined_resized/'
test_anno_file_path = 'anno/vggface2_test2_anno.txt'
output_path = 'output'
ishape = [112, 112, 3]
resnet_settings = [[16, 16, 64], [2, [2, 2]], [2, [2, 2]], [2, [2, 2]]]
dense_settings = []
total_identities = 100
total_same_identities = 20
model = build_rec_model(
ishape=ishape,
resnet_settings=resnet_settings,
dense_settings=dense_settings)
# model.summary()
model.load_weights('{}/clz/weights1.h5'.format(output_path), by_name=True)
batchx4d = genid(
anno_file_path=test_anno_file_path,
img_dir_path=img_dir_path,
ishape=ishape,
total_identities=total_identities,
total_same_identities=total_same_identities)
print('Start: {}'.format(datetime.now().time()))
pred_batchy_2dtensor = model.predict_on_batch(batchx4d) # (total_identities*total_same_identities, embedding_dims)
print('End: {}'.format(datetime.now().time()))
pred_batchy_3dtensor = tf.reshape(tensor=pred_batchy_2dtensor, shape=[total_identities, total_same_identities, -1])
pred_batchy_2dtensor = tf.math.reduce_mean(input_tensor=pred_batchy_3dtensor, axis=1) # (total_identities, embedding_dims)
Y = pred_batchy_2dtensor
X = pred_batchy_2dtensor
print('Start: {}'.format(datetime.now().time()))
V = np.zeros((total_identities, total_identities), dtype='float32')
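# V[j, i] holds the L2 distance between the mean embeddings of identities j
# and i, so the diagonal of the plotted matrix below corresponds to
# same-identity comparisons (distance zero).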
for i in range(total_identities):
y = Y[i] # (embedding_dims,)
for j in range(total_identities):
x = X[j] # (embedding_dims,)
d = tf.norm(tensor=y-x, axis=-1)
V[j, i] = d
print('End: {}'.format(datetime.now().time()))
plt.figure(figsize=(7.35, 7.35))
plt.imshow(V)
plt.show()
|
#MenuTitle: Copy Download URL for Current App Version
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
__doc__="""
Puts the download URL of the current Glyphs app version into your clipboard for easy pasting.
"""
from AppKit import NSPasteboard, NSStringPboardType
def setClipboard( myText ):
"""
Sets the contents of the clipboard to myText.
Returns True if successful, False if unsuccessful.
"""
try:
myClipboard = NSPasteboard.generalPasteboard()
myClipboard.declareTypes_owner_( [NSStringPboardType], None )
myClipboard.setString_forType_( myText, NSStringPboardType )
return True
except Exception as e:
return False
appURL = "https://updates.glyphsapp.com/Glyphs%s-%s.zip" % (
Glyphs.versionString,
Glyphs.buildNumber,
)
if not setClipboard(appURL):
print("Warning: could not set clipboard to %s" % ( "clipboard text" ))
Message(title="Clipboard Error", message="Could not set the clipboard for whatever reason, so here is the URL:\n%s"%appURL, OKButton=None)
else:
# Floating notification:
Glyphs.showNotification(
"Download link copied",
"Ready for pasting: %s"%appURL,
)
|
#!/usr/bin/env python3
# coding: utf-8
# PSMN: $Id: get_serial.py 2997 2020-09-23 13:14:12Z ltaulell $
# SPDX-License-Identifier: CECILL-B OR BSD-2-Clause
""" POC: use execo to get chassis serial number from host(s)
quick & dirty
Returns a list of comma-separated serial numbers.
"""
import argparse
import execo
from ClusterShell.NodeSet import NodeSet
def get_args():
"""
read parser and return args (as args namespace)
"""
    parser = argparse.ArgumentParser(description='Ask a host for its serial number')
    parser.add_argument('-d', '--debug', action='store_true', help='Enable debug output')
parser.add_argument('-H', '--host', nargs=1, type=str, help='hostname(s), NodeSet syntax')
return parser.parse_args()
def get_data(host, debug=False):
""" use execo as a better subprocess substitute """
cmd = "cat /sys/class/dmi/id/product_serial"
process = execo.SshProcess(cmd, host, {'user': 'root'}, shell=False).run()
if debug:
print(process.stderr)
return process.stdout
if __name__ == '__main__':
""" """
args = get_args()
if args.debug:
debug = True
print(args)
else:
debug = False
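    # NodeSet expands ClusterShell nodeset syntax, e.g. 'node[01-03]'
    # covers node01, node02 and node03 (illustrative host names).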
nodes = NodeSet(args.host[0])
liste = []
for node in nodes:
raw = get_data(node, debug).split('\r')
liste.append(raw[0])
print(','.join(liste))
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import warnings
import threading
import numpy
import scipy.optimize
import pyFAI
FIT_PARAMETER = ['wavelength',
'distance',
'center_x',
'center_y',
'tilt',
'rotation']
LN_2 = numpy.log(2)
x = []
y = []
def gauss(x_max, y_max, fwhm, x_array):
return y_max * numpy.exp(-LN_2 * ((x_array - x_max) / fwhm * 2) ** 2)
def lorentz(x_max, y_max, fwhm, x_array):
return y_max * 1. / (1 + ((x_array - x_max) / fwhm * 2) ** 2)
def pseudo_voigt(x_max, y_max, fwhm, eta, x_array):
return eta * gauss(x_max, y_max, fwhm, x_array) + \
(1 - eta) * lorentz(x_max, y_max, fwhm, x_array)
def fit_maxima(x_data, y_data):
# print(x_data,y_data)
# error function
err = lambda p, x, y: y - (p[0] + p[1] * x + pseudo_voigt(p[2], p[3], p[4], p[5], x))
# approximate the fitting parameter
y_min = y_data.min()
y_max = y_data.max() - y_min
x_max = x_data[y_data.argmax()]
parameter = [y_min, 1E-6, x_max, y_max, 1, 1]
args = (x_data, y_data)
# fit PSV with linear background to data
with warnings.catch_warnings():
warnings.simplefilter("ignore")
parameter, info = scipy.optimize.leastsq(err, parameter, args)
# return maximum position if valid solution, otherwise nan
maxima = parameter[2]
if info != 5 and maxima > x_data.min() and maxima < x_data.max():
return parameter[2]
else:
return numpy.nan
def _error_function(parameter, arguments):
'''
    The error function returns the distances using the current parameters.
This is a helper function for the fitting process.
'''
geometry, d_spacings, maxima, selected_parameter = arguments
mask = [sel in selected_parameter for sel in FIT_PARAMETER]
param = numpy.array(get_fit2d(geometry))
    print('update:', param)
param[numpy.array(mask)] = parameter[numpy.array(mask)]
set_fit2d(geometry,*param)
return peak_distance(geometry, d_spacings, maxima)
def get_fit2d(geometry):
gdict = geometry.getFit2D()
return [geometry.get_wavelength()*1e-10,gdict['directDist'],gdict['centerX'],gdict['centerY'],gdict['tilt'],gdict['tiltPlanRotation']]
def set_fit2d(geometry, wavelength, distance, center_x, center_y, tilt, rotation):
geometry.set_wavelength(wavelength / 1e-10)
geometry.setFit2D(distance, center_x, center_y, tilt, rotation)
return geometry
def fit_geometry(geometry, maxima, d_spacings, selected_parameter):
args = [geometry, d_spacings, maxima[::-1], selected_parameter]
mask = numpy.array([sel in selected_parameter for sel in FIT_PARAMETER])
start_parameter = numpy.array(get_fit2d(geometry))
if len(start_parameter) >= len(maxima[0]):
        raise Exception('More variables than fit points.')
_, cov_x, info, _, _ = scipy.optimize.leastsq(_error_function,
start_parameter,
args,
full_output=True)
fit_parameter = start_parameter
dof = len(maxima[0]) - len(start_parameter)
chisq = (info['fvec'] ** 2).sum()
info_out = {'calls': info['nfev'],
'dof': dof,
'sum_chi_square': chisq}
info_out['variance_residuals'] = chisq / float(dof)
for i, name in enumerate(selected_parameter):
if cov_x is not None:
ase = numpy.sqrt(cov_x[i, i]) * numpy.sqrt(chisq / float(dof))
else:
ase = numpy.NAN
info_out['ase_%s' % name] = ase
percent_ase = round(ase / (abs(fit_parameter[i]) * 0.01), 3)
info_out['percent_ase_%s' % name] = percent_ase
info_out[name] = fit_parameter[i]
return info_out
def radial_array(beam_center, shape):
'''
    Returns an array of the given shape where every point is the radial
    distance to the beam center in pixels.
'''
pos_x, pos_y = numpy.meshgrid(numpy.arange(shape[1]),
numpy.arange(shape[0]))
return numpy.sqrt((pos_x - beam_center[0]) ** 2 + (pos_y - beam_center[1]) ** 2)
def ring_maxima(geometry, d_spacing, image, radial_pos, step_size):
'''
Returns x and y arrays of maximum positions [pixel] found on the ring
defined by pyFAI geometry and d_spacing on the given image
'''
# calculate circle positions along the ring with step_size
tth = 2 * numpy.arcsin(geometry.get_wavelength() / (2e-10 * d_spacing))
radius = (geometry.get_dist() * numpy.tan(tth)) / geometry.get_pixel1()
center = (geometry.getFit2D()['centerX'], geometry.getFit2D()['centerY'])
alpha = numpy.arange(0, numpy.pi * 2, step_size / float(radius))
circle_x = numpy.round(center[1] + numpy.sin(alpha) * radius)
circle_y = numpy.round(center[0] + numpy.cos(alpha) * radius)
# calculate roi coordinates
half_step = int(numpy.ceil(step_size / 2.))
x_0 = circle_x - half_step
x_1 = circle_x + half_step
y_0 = circle_y - half_step
y_1 = circle_y + half_step
    # mask out rois which are not completely inside the image border
mask = numpy.where((x_0 >= 0) & (y_0 >= 0) &
(y_1 < image.shape[0]) & (x_1 < image.shape[1]))
x_0 = x_0[mask]
x_1 = x_1[mask]
y_0 = y_0[mask]
y_1 = y_1[mask]
maxima_x = []
maxima_y = []
for i in range(len(x_0)):
roi = image[int(y_0[i]):int(y_1[i]), int(x_0[i]):int(x_1[i])]
pos = radial_pos[int(x_0[i]):int(x_1[i]), int(y_0[i]):int(y_1[i])]
if roi.size < half_step ** 2: continue
# calculate roi histogram
try:
x_hist, y_hist, _, _ = pyFAI.ext.histogram.histogram(pos, roi, step_size)
except AssertionError: continue
# fit the radial maximum of the histogram
maximum = fit_maxima(x_hist, y_hist)
if maximum is numpy.nan: continue
# DEBUG
# rect = pylab.Rectangle((x_0[i], y_0[i]),
# abs(x_0[i]-x_1[i]),
# abs(y_0[i]-y_1[i]), fc='none')
#pylab.gca().add_patch(rect)
# calculate the pixel position of the maximum
scale = float(maximum / radius)
maxima_x.append(center[1] + (x_0[i] + half_step - center[1]) * scale)
maxima_y.append(center[0] + (y_0[i] + half_step - center[0]) * scale)
# DEBUG
#pylab.plot(center[0] + (x_0[i] + half_step - center[0]) * scale,
# center[1] + (y_0[i] + half_step - center[1]) * scale, '*')
return numpy.array(maxima_x), numpy.array(maxima_y), radial_pos
def peak_distance(geometry, d_spacings, peaks):
    """
    Returns the minimum distances in 2*theta between the given peaks (in
    pixels) and the d_spacings (in angstrom) using the pyFAI geometry.
    """
    x_peaks, y_peaks = peaks
wavelength = geometry.get_wavelength()
tth_rings = 2 * numpy.arcsin(wavelength / (2.0e-10 * d_spacings))
distance = [numpy.abs(tth_rings - tth).min() \
for tth in geometry.tth(y_peaks, x_peaks)]
return numpy.array(distance)
def circle_center_distance(center, x_array, y_array):
"""
Returns the algebraic distances between the 2D points in x_array, y_array
and the mean circle center.
"""
r_array = numpy.sqrt((x_array - center[0]) ** 2 + (y_array - center[1]) ** 2)
return r_array - r_array.mean()
def fit_circle(x_array, y_array):
"""
Returns the center (x, y) and the radius of the fitted circle defined by
the points in x_array and y_array.
"""
# coordinates of the barycenter
xc_m = numpy.mean(x_array)
yc_m = numpy.mean(y_array)
# fit circle center
center, _ = scipy.optimize.leastsq(circle_center_distance,
(xc_m, yc_m),
(x_array, y_array))
# calculate radius
radius = numpy.sqrt((x_array - center[0]) ** 2 +
(y_array - center[1]) ** 2).mean()
return center, radius
def quick_calibration(wavelength, pixel_size, d_spacing, x_array, y_array):
'''
Returns pyFAI geometry by fitting a circle to a d-spacing ring.
'''
center, radius = fit_circle(x_array, y_array)
tth = 2 * numpy.arcsin(wavelength / (2.0e-10 * d_spacing))
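    # Geometry behind the next line: a ring at Bragg angle 2*theta (tth)
    # appears on the detector at radius r = SDD * tan(tth), so the
    # sample-to-detector distance is SDD = r_pixels * pixel_size / tan(tth).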
sdd = pixel_size[0] * radius * numpy.cos(tth) * 1. / numpy.sin(tth)
return PyFAIGeometry(sdd,
center[1] * pixel_size[1],
center[0] * pixel_size[0],
1e-6,
1e-6,
1e-6,
pixel_size[0],
pixel_size[1],
wavelength=wavelength)
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\
# added at LBL #
def set_PyFAIGeometry(agb):
wave = agb.wavelength_ * 1.0E-10
a = PyFAIGeometry(
agb.sdd_,
agb.center_[1] * agb.pixel_size_[1],
agb.center_[0] * agb.pixel_size_[0],
1e-6,
1e-6,
1e-6,
agb.pixel_size_[0],
agb.pixel_size_[1],
wavelength=wave)
return a
#/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\
class PyFAIGeometry(pyFAI.geometry.Geometry):
def set_fit2d(self,
wavelength,
distance,
center_x,
center_y,
tilt,
rotation):
self.set_wavelength(wavelength * 1e-10)
self.setFit2D(distance, center_x, center_y, tilt, rotation)
def get_fit2d(self):
param_dict = self.getFit2D()
return [self.get_wavelength() * 1e10,
param_dict['directDist'],
param_dict['centerX'],
param_dict['centerY'],
param_dict['tilt'],
param_dict['tiltPlanRotation']]
################################################################################
class FitThread(threading.Thread):
def __init__(self, geometry, d_spacings, image, selected_parameter, step_size):
threading.Thread.__init__(self)
self.geometry = geometry
self.d_spacings = d_spacings
self.image = image
self.selected_paramter = selected_parameter
self.step_size = step_size
self.status = [0, len(d_spacings)]
self.info = {}
self.circle_patches = []
def run(self):
# calculate maxima for every d_spacing
center = (self.geometry.getFit2D()['centerX'],
self.geometry.getFit2D()['centerY'])
radial_pos = radial_array(center, self.image.shape)
x_data, y_data, = [], []
# calculate maxima for every d_spacing
for d_spacing in self.d_spacings:
maxima_x, maxima_y, radial_pos = ring_maxima(self.geometry,
d_spacing,
self.image,
radial_pos,
self.step_size)
x_data.extend(maxima_x)
y_data.extend(maxima_y)
# create circle patch for every found maxima
# for i in range(len(maxima_x)):
# c_p = pylab.Circle((maxima_x[i], maxima_y[i]), 10, ec='red', fc='none')
# self.circle_patches.append(c_p)
self.status[0] += 1
# start fit
try:
info = fit_geometry(self.geometry,
(numpy.array(x_data), numpy.array(y_data)),
self.d_spacings,
self.selected_paramter)
info.update({'error': False, 'error_msg': ''})
self.info = info
except Exception as e:
            self.info = {'error': True, 'error_msg': str(e)}
def get_status(self):
return tuple(self.status)
def get_info(self):
return self.info
def get_circle_patches(self):
return self.circle_patches
#/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\
# added at LBL #
def dpdak_saxs_calibration(img, geometry, d_spacings, fit_param, step_size):
fit_thread = FitThread(geometry, d_spacings, img, fit_param, step_size)
fit_thread.start()
params = geometry.get_fit2d()
return params[1] / 1000., [params[2], params[3]]
def saxs_calibration(img, agb):
max_iter = 10
tol = 10E-04
# d-spacing for Silver Behenate
d_spacings = numpy.array([58.367, 29.1835, 19.45567, 14.59175, \
11.6734, 9.72783, 8.33814, 7.29587])
# step size (same as in dpdak GUI)
step_size = 49
# Set PyFAI Geometry
geometry = set_PyFAIGeometry(agb)
    # select parameters to fit
fit_param = ['distance', 'center_x', 'center_y', 'tilt']
# Run calibration
    for i in range(max_iter):
fit_thread = FitThread(geometry, d_spacings, img, fit_param, step_size)
fit_thread.start()
fit_thread.join()
# plt.plot(x, y)
# plt.show()
# Update calibrated data
params = geometry.get_fit2d()
agb.setSDD(params[1] / 1000.)
agb.setCenter(params[2:4])
return agb
#/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\
#if __name__ == "__main__":
# test_all()
|
class Tuner:
def __init__(self, desc):
self.description = desc
self.frequency = 0
def on(self):
print(f'{self.description} on.')
def off(self):
print(f'{self.description} off.')
def set_frequency(self, frequency):
print(f'{self.description} setting frequency to {frequency}.')
self.frequency = frequency
def set_am(self):
print(f'{self.description} setting AM mode.')
def set_fm(self):
print(f'{self.description} setting FM mode.')
def __str__(self):
return self.description
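# Minimal usage sketch (added; not part of the original listing); the
# description string below is an arbitrary example:
if __name__ == '__main__':
    tuner = Tuner('Living room AM/FM tuner')
    tuner.on()
    tuner.set_fm()
    tuner.set_frequency(101.1)
    tuner.off()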
|
from typing import List
Vector = List[float]
height_weight_age = [70, # inches,
170, # pounds,
40 ] # years
grades = [95, # exam1
80, # exam2
75, # exam3
62 ] # exam4
def add(v: Vector, w: Vector) -> Vector:
"""Adds corresponding elements"""
assert len(v) == len(w), "Vectors must be the same length"
return [v_i + w_i for v_i, w_i in zip(v, w)]
assert add([1, 2, 3], [4, 5, 6]) == [5, 7, 9]
def subtract(v: Vector, w: Vector) -> Vector:
"""subtracts corresponding elements"""
assert len(v) == len(w), "Vectors must be the same length"
return [v_i - w_i for v_i, w_i in zip(v, w)]
assert subtract([5,7,9], [4,5,6]) == [1,2,3]
def vector_sum(vectors: List[Vector]) -> Vector:
"""Sums all corresponding elements"""
# Check the list is not empty
assert vectors, "No vectors provided!"
# Check the vectors are the same size
num_elements = len(vectors[0])
assert all(len(v) == num_elements for v in vectors), "different sizes!"
# the i-th element of the result is the sum of every vector[i]
return [sum(vector[i] for vector in vectors)
for i in range(num_elements)]
assert vector_sum([[1, 2], [3, 4], [5, 6], [7, 8]]) == [16, 20]
# We'll also need to be able to multiply a vector by a scalar, which we do simply by multiplying each element of the
# vector by that number
def scalar_multiply(c: float, v: Vector) -> Vector:
"""Multiplies every element by c"""
return [c * v_i for v_i in v]
assert scalar_multiply(2, [1,2,3]) == [2,4,6]
# This allows us to compute the componentwise means of a list of (same-sized) vectors:
def vector_mean (vectors: List[Vector]) -> Vector:
"""Computes the element-wise average"""
n = len(vectors)
return scalar_multiply(1/n, vector_sum(vectors))
assert vector_mean([[1,2], [3,4], [5,6]]) == [3,4]
# A less obvious tool is the DOT PRODUCT. The dot product of two vectors is the sum of their componentwise product
def dot(v: Vector, w: Vector) -> float:
"""Computes v_1 * w_1 + ... + v_n * w_n"""
assert len(v) == len(w), "Vectors must be the same length"
return sum(v_i * w_i for v_i, w_i in zip(v, w))
assert dot([1,2,3], [4,5,6]) == 32 # 1*4 + 2*5 + 3*6
# If W has magnitude 1, the dot product measures how far the vector V extends in the W direction.
# For example, if w = [1, 0], then dot(v, w) is just the first component of v.
# Another way of saying this is that it's the length of the vector you'd get if you projected v onto w
# Using this, it's easy to compute a vector's sum of squares:
def sum_of_squares(v: Vector) -> float:
"""Returns v_1 * v_1 + ... + v_n * v_n"""
return dot(v,v)
assert sum_of_squares([1,2,3]) == 14 # 1*1 + 2*2 + 3*3
# Which we can use to compute its MAGNITUDE (or length):
import math
def magnitude(v: Vector) -> float:
"""Returns the magnitude or length of v"""
    return math.sqrt(sum_of_squares(v))   # math.sqrt is the square root function
assert magnitude([3,4]) == 5
# We now have all the pieces we need to compute the distance between two vectors.
def squared_distance(v: Vector, w: Vector) -> float:
"""Computes (v_1 - w_1) ** 2 + ... + (v_n - w_n) ** 2"""
return sum_of_squares(subtract(v,w))
def distance(v: Vector, w: Vector) -> float:
"""Computes the distance between v and w"""
return math.sqrt(squared_distance(v, w))
# This is possibly clearer if we write it as (the equivalent):
def distance(v: Vector, w: Vector) -> float:
return magnitude(subtract(v, w))
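# Quick sanity check (added example, not from the original text): the 3-4-5 right triangle.
assert distance([0, 0], [3, 4]) == 5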
#---------------------------------------------------------------------------------------------------------------------
# Matrices
#----------------------------------------------------------------------------------------------------------------------
# A matrix is a two-dimensional collection of numbers. We will represent matrices as lists of lists, with each inner
# list having the same size and representing a ROW of the matrix. If A is a matrix, then A[i][j] is the element in the
# i-th row and the j-th column. Per mathematical convention, we will frequently use capital letters to represent
# matrices. For example:
# Another type alias
Matrix = List[List[float]]
A = [[1,2,3], # A has 2 rows and 3 columns
[4,5,6]]
B = [[1,2], # B has 3 rows and 2 columns
[3,4],
[5,6]]
# Given this list-of-lists representation, the matrix A has len(A) rows and len(A[0]) columns, which we consider its
# SHAPE
from typing import Tuple
def shape(A: Matrix) -> Tuple[int, int]:
"""Returns (# of rows A, # of columns of A)"""
num_rows = len(A)
num_cols = len(A[0]) if A else 0 # number of elements in first row
return num_rows, num_cols
assert shape([[1,2,3], [4,5,6]]) == (2,3) # 2 rows, 3 columns
# If a matrix has n rows and k columns, we will refer to it as an n x k matrix. We can think of each row of an n x k
# matrix as a vector of length k and each column as a vector of length n:
def get_row(A: Matrix, i: int) -> Vector:
"""Returns the i-th row of A (as a vector)"""
return A[i] # A[i] is already the ith row
def get_column(A: Matrix, j: int) -> Vector:
"""Returns the j-th column of A (as a vector)"""
return [A_i[j] # j-th element of row A_i
for A_i in A] # for each row in A_i
# We'll also want to be able to create a matrix given its shape and a function for generating its elements.
# We can do this using a nested list comprehension
from typing import Callable
def make_matrix(num_rows: int,
num_cols: int,
entry_fn: Callable[[int,int], float]) -> Matrix:
"""
Returns a num_rows x num_cols matrix
whose (i, j) -th entry is entry_fn(i, j)
"""
return [[entry_fn(i,j) # given i, create a list
for j in range(num_cols)] # [entry_fn(i, 0), ...]
for i in range(num_rows)] # create one list for each i
# Given this function, you could make a 5x5 identity matrix (with 1s on the diagonal and 0s elsewhere), like so:
def identity_matrix(n: int) -> Matrix:
"""Returns the n x n identity matrix"""
return make_matrix(n, n, lambda i, j: 1 if i == j else 0)
print(identity_matrix(5))
# Matrices will be important for several reasons. First, we can use a matrix to represent a dataset consisting of
# multiple vectors, simply by considering each vector as a row of the matrix. For example, if you had the
# heights, weights and ages of 1,000 people, you could put them in a 1,000 x 3 matrix
data = [[70, 170, 40],
[65, 120, 26],
[77, 250, 19],
# ....
]
# Second, as we'll see later, we can use an n x k matrix to represent a linear function that maps k-dimensional
# vectors to n-dimensional vectors. Several of our techniques and concepts will involve such functions.
# Third, matrices can be used to represent binary relationships. In Chapter 1, we represented the edges of a network
# as a collection of pairs (i, j). An alternative would be to create a matrix A such that A[i][j] is 1 if nodes i and j
# are connected and 0 otherwise
friendships = [(0, 1), (0, 2), (1, 2), (1, 3), (2, 3), (3, 4),
(4, 5), (5, 6), (5, 7), (6, 8), (7, 8), (8, 9)]
# also represented as:
# user 0 1 2 3 4 5 6 7 8 9
#
friend_matrix = [[0, 1, 1, 0, 0, 0, 0, 0, 0, 0], # user 0
[1, 0, 1, 1, 0, 0, 0, 0, 0, 0], # user 1
[1, 1, 0, 1, 0, 0, 0, 0, 0, 0], # user 2
[0, 1, 1, 0, 1, 0, 0, 0, 0, 0], # user 3
[0, 0, 0, 1, 0, 1, 0, 0, 0, 0], # user 4
[0, 0, 0, 0, 1, 0, 1, 1, 0, 0], # user 5
[0, 0, 0, 0, 0, 1, 0, 0, 1, 0], # user 6
[0, 0, 0, 0, 0, 1, 0, 0, 1, 0], # user 7
[0, 0, 0, 0, 0, 0, 1, 1, 0, 1], # user 8
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0]] # user 9
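# As an added illustration (the helper name below is ours, not from the text),
# the same adjacency matrix can be built with make_matrix from the pair list:
def make_friend_matrix(num_users: int, pairs: List[Tuple[int, int]]) -> Matrix:
    """Builds the symmetric 0/1 adjacency matrix for the given friendship pairs"""
    return make_matrix(num_users, num_users,
                       lambda i, j: 1 if (i, j) in pairs or (j, i) in pairs else 0)
assert make_friend_matrix(10, friendships) == friend_matrix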
|
import sys
__all__ = ['__version__', 'version_info']
import pkgutil
__version__ = pkgutil.get_data(__package__, 'VERSION').decode('ascii').strip()
version_info = tuple(int(v) if v.isdigit() else v
for v in __version__.split('.'))
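# e.g. a VERSION file containing "1.2.0" yields version_info == (1, 2, 0),
# while "1.2.0rc1" would yield (1, 2, '0rc1') (illustrative values).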
del pkgutil
if sys.version_info < (2, 7):
print("os-tornado %s requires Python 2.7" % __version__)
sys.exit(1)
del sys
|
from datetime import timedelta
from django.conf import settings
from django_statsd.clients import statsd
from google.cloud import bigquery
from olympia.constants.applications import ANDROID, FIREFOX
# This is the mapping between the AMO usage stats `sources` and the BigQuery
# columns.
AMO_TO_BQ_DAU_COLUMN_MAPPING = {
'apps': 'dau_by_app_version, dau_by_fenix_build',
'countries': 'dau_by_country',
'locales': 'dau_by_locale',
'os': 'dau_by_app_os',
'versions': 'dau_by_addon_version',
}
# This is the mapping between the AMO download stats `sources` and the BigQuery
# columns.
AMO_TO_BQ_DOWNLOAD_COLUMN_MAPPING = {
'campaigns': 'downloads_per_campaign',
'contents': 'downloads_per_content',
'mediums': 'downloads_per_medium',
'sources': 'downloads_per_source',
}
AMO_STATS_DAU_VIEW = 'amo_stats_dau'
AMO_STATS_DOWNLOAD_VIEW = 'amo_stats_installs'
def make_fully_qualified_view_name(view):
return '.'.join([settings.BIGQUERY_PROJECT, settings.BIGQUERY_AMO_DATASET, view])
def get_amo_stats_dau_view_name():
return make_fully_qualified_view_name(AMO_STATS_DAU_VIEW)
def get_amo_stats_download_view_name():
return make_fully_qualified_view_name(AMO_STATS_DOWNLOAD_VIEW)
def create_client():
return bigquery.Client.from_service_account_json(
settings.GOOGLE_APPLICATION_CREDENTIALS
)
def rows_to_series(rows, count_column, filter_by=None):
"""Transforms BigQuery rows into series items suitable for the rest of the
AMO stats logic."""
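    # Each yielded item has the shape (illustrative):
    #   {'count': ..., 'date': ..., 'end': ..., 'data': {key: value, ...}}
    # where 'data' is only present when filter_by is given.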
for row in rows:
item = {
'count': row[count_column],
'date': row['submission_date'],
'end': row['submission_date'],
}
if filter_by:
# This filter is special because we have two columns instead of
# one.
# See: https://github.com/mozilla/addons-server/issues/14411
# See: https://github.com/mozilla/addons-server/issues/14832
if filter_by == AMO_TO_BQ_DAU_COLUMN_MAPPING['apps']:
item['data'] = {
ANDROID.guid: {
d['key']: d['value'] for d in row.get('dau_by_fenix_build', [])
},
FIREFOX.guid: {
d['key']: d['value'] for d in row.get('dau_by_app_version', [])
},
}
else:
item['data'] = {d['key']: d['value'] for d in row.get(filter_by, [])}
yield item
def get_updates_series(addon, start_date, end_date, source=None):
client = create_client()
select_clause = 'SELECT submission_date, dau'
filter_by = AMO_TO_BQ_DAU_COLUMN_MAPPING.get(source)
if filter_by:
select_clause = f'{select_clause}, {filter_by}'
query = f"""
{select_clause}
FROM `{get_amo_stats_dau_view_name()}`
WHERE addon_id = @addon_id
AND submission_date BETWEEN @submission_date_start AND @submission_date_end
ORDER BY submission_date DESC
LIMIT 365"""
statsd_timer = f'stats.get_updates_series.bigquery.{source or "no_source"}'
with statsd.timer(statsd_timer):
rows = client.query(
query,
job_config=bigquery.QueryJobConfig(
query_parameters=[
bigquery.ScalarQueryParameter('addon_id', 'STRING', addon.guid),
bigquery.ScalarQueryParameter(
'submission_date_start', 'DATE', start_date
),
bigquery.ScalarQueryParameter(
'submission_date_end', 'DATE', end_date
),
]
),
).result()
return rows_to_series(rows, count_column='dau', filter_by=filter_by)
def get_download_series(addon, start_date, end_date, source=None):
client = create_client()
select_clause = 'SELECT submission_date, total_downloads'
filter_by = AMO_TO_BQ_DOWNLOAD_COLUMN_MAPPING.get(source)
if filter_by:
select_clause = f'{select_clause}, {filter_by}'
query = f"""
{select_clause}
FROM `{get_amo_stats_download_view_name()}`
WHERE hashed_addon_id = @hashed_addon_id
AND submission_date BETWEEN @submission_date_start AND @submission_date_end
ORDER BY submission_date DESC
LIMIT 365"""
statsd_timer = f'stats.get_download_series.bigquery.{source or "no_source"}'
with statsd.timer(statsd_timer):
rows = client.query(
query,
job_config=bigquery.QueryJobConfig(
query_parameters=[
bigquery.ScalarQueryParameter(
'hashed_addon_id',
'STRING',
addon.addonguid.hashed_guid,
),
bigquery.ScalarQueryParameter(
'submission_date_start', 'DATE', start_date
),
bigquery.ScalarQueryParameter(
'submission_date_end', 'DATE', end_date
),
]
),
).result()
return rows_to_series(rows, count_column='total_downloads', filter_by=filter_by)
def get_addons_and_average_daily_users_from_bigquery():
"""This function is used to compute the 'average_daily_users' value of each
add-on (see `update_addon_average_daily_users()` cron task)."""
client = create_client()
query = f"""
SELECT addon_id, AVG(dau) AS count
FROM `{get_amo_stats_dau_view_name()}`
WHERE submission_date > DATE_SUB(CURRENT_DATE(), INTERVAL 13 DAY)
GROUP BY addon_id"""
rows = client.query(query).result()
return [
(row['addon_id'], row['count'])
for row in rows
if row['addon_id'] and row['count']
]
def get_averages_by_addon_from_bigquery(today, exclude=None):
"""This function is used to compute the 'hotness' score of each add-on (see
also `update_addon_hotness()` cron task). It returns a dict with top-level
keys being add-on GUIDs and values being dicts containing average
values."""
client = create_client()
one_week_date = today - timedelta(days=7)
four_weeks_date = today - timedelta(days=28)
query = f"""
WITH
this_week AS (
SELECT
addon_id,
AVG(dau) AS avg_this_week
FROM
`{get_amo_stats_dau_view_name()}`
WHERE
submission_date >= @one_week_date
GROUP BY
addon_id),
three_weeks_before_this_week AS (
SELECT
addon_id,
AVG(dau) AS avg_three_weeks_before
FROM
`{get_amo_stats_dau_view_name()}`
WHERE
submission_date BETWEEN @four_weeks_date AND @one_week_date
GROUP BY
addon_id)
SELECT
*
FROM
this_week
JOIN
three_weeks_before_this_week
USING
(addon_id)
"""
query_parameters = [
bigquery.ScalarQueryParameter('one_week_date', 'DATE', one_week_date),
bigquery.ScalarQueryParameter('four_weeks_date', 'DATE', four_weeks_date),
]
if exclude and len(exclude) > 0:
query = f'{query} WHERE addon_id NOT IN UNNEST(@excluded_addon_ids)'
query_parameters.append(
bigquery.ArrayQueryParameter('excluded_addon_ids', 'STRING', exclude)
)
rows = client.query(
query,
job_config=bigquery.QueryJobConfig(query_parameters=query_parameters),
).result()
return {
row['addon_id']: {
'avg_this_week': row['avg_this_week'],
'avg_three_weeks_before': row['avg_three_weeks_before'],
}
for row in rows
if row['addon_id']
}
def get_addons_and_weekly_downloads_from_bigquery():
"""This function is used to compute the 'weekly_downloads' value of each
add-on (see `update_addon_weekly_downloads()` cron task)."""
client = create_client()
query = f"""
SELECT hashed_addon_id, SUM(total_downloads) AS count
FROM `{get_amo_stats_download_view_name()}`
WHERE submission_date >= DATE_SUB(CURRENT_DATE(), INTERVAL 7 DAY)
GROUP BY hashed_addon_id"""
rows = client.query(query).result()
return [
(row['hashed_addon_id'], row['count'])
for row in rows
if row['hashed_addon_id'] and row['count']
]
|
import codecs
from typing import Optional, Tuple, Iterable
from unicodedata import normalize
from dedoc.data_structures.paragraph_metadata import ParagraphMetadata
from dedoc.data_structures.unstructured_document import UnstructuredDocument
from dedoc.readers.base_reader import BaseReader
from dedoc.readers.utils.hierarch_level_extractor import HierarchyLevelExtractor
from dedoc.data_structures.line_with_meta import LineWithMeta
class RawTextReader(BaseReader):
def __init__(self):
self.hierarchy_level_extractor = HierarchyLevelExtractor()
def _get_lines(self, path: str) -> Iterable[Tuple[int, str]]:
with codecs.open(path, errors="ignore", encoding="utf-8-sig") as file:
for line_id, line in enumerate(file):
line = normalize('NFC', line).replace("й", "й") # й replace matter
yield line_id, line
def read(self,
path: str,
document_type: Optional[str] = None,
parameters: Optional[dict] = None) -> Tuple[UnstructuredDocument, bool]:
lines = []
for line_id, line in self._get_lines(path):
metadata = ParagraphMetadata(page_id=0,
line_id=line_id,
predicted_classes=None,
paragraph_type="raw_text")
line_with_meta = LineWithMeta(line=line, hierarchy_level=None, metadata=metadata, annotations=[])
lines.append(line_with_meta)
lines = self.hierarchy_level_extractor.get_hierarchy_level(lines)
return UnstructuredDocument(lines=lines, tables=[]), False
def can_read(self, path: str, mime: str, extension: str, document_type: Optional[str]) -> bool:
return extension.endswith(".txt") and not document_type
|
class Solution(object):
def convert(self, s: str, numRows: int) -> str:
"""
The string "PAYPALISHIRING" is written in a zigzag pattern on a given number of rows
like this: (you may want to display this pattern in a fixed font for better legibility)
P A H N
A P L S I I G
Y I R
And then read line by line: "PAHNAPLSIIGYIR"
Write the code that will take a string and make this conversion given a number of rows:
>>> Solution().convert("PAYPALISHIRING", 3)
"PAHNAPLSIIGYIR"
>>> Solution().convert("PAYPALISHIRING", 4)
"PINALSIGYAHRPI"
:type s: str
:type numRows: int
:rtype: str
"""
if numRows == 1:
return s
rows = [''] * numRows
num = (numRows - 1) * 2
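        # Indices repeat with period num = 2 * (numRows - 1): the first numRows
        # positions of each cycle walk down the rows, the remaining positions
        # walk back up through the middle rows, hence the (num - i % num) mapping.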
for i, item in enumerate(s):
if i % num >= numRows:
rows[(num - i % num) % numRows] += item
else:
rows[i % num] += item
return ''.join(rows)
def main():
print(Solution().convert("PAYPALISHIRING", 4))
if __name__ == '__main__':
main()
|
import requests
import json
import pandas as pd
# URL of the authentication endpoint
auth_url = "https://api.hatebase.org/4-4/authenticate"
# initialize authentication payload & headers
# use x-www-form-urlencoded as content-type
# replace this dummy key with your own
auth_payload = "api_key=[API KEY GOES HERE]"
headers = {
'Content-Type': "application/x-www-form-urlencoded",
'cache-control': "no-cache"
}
# authenticate against the API
auth_resp = requests.request("POST", auth_url, data=auth_payload, headers=headers)
print (auth_resp.json())
#store the token from response
token = auth_resp.json()["result"]["token"]
#Set vocab url and params
vocab_url = 'https://api.hatebase.org/4-4/get_vocabulary'
lang = 'eng'
resp_format = 'json'
# assemble the payload for our query
vocab_payload = "token=" + token + "&format=" + resp_format + "&language=" + lang
voc_resp = requests.request("POST", vocab_url, data=vocab_payload, headers=headers)
voc_json = voc_resp.json()
pages = voc_json['number_of_pages']
results = voc_json['number_of_results']
# create the vocabulary dataframe from the first resultset
df_voc = pd.DataFrame(voc_json["result"])
#create empty term list
english_term_list = []
# now get results of all the remaining pages
# append those results to our dataframe "df_voc"
# page 1 is already in df_voc from the first request, so start at page 2
for page in range(2, pages + 1):
vocab_payload = "token=" + token + "&format=json&language=" + lang + "&page=" + str(page)
voc_resp = requests.request("POST", vocab_url, data=vocab_payload, headers=headers)
voc_json = voc_resp.json()
    # DataFrame.append is deprecated/removed in recent pandas; use concat instead
    df_voc = pd.concat([df_voc, pd.DataFrame(voc_json["result"])])
# reset the df_voc index so that all entries are nicely numbered in an ascending way
df_voc.reset_index(drop=True, inplace=True)
#Full Term List
term_list = df_voc['term'].tolist()
#Filter frame to rows where terms are marked unambiguous
unambiguous_df = df_voc[df_voc['is_unambiguous'] == True]
unambiguous_term_list = unambiguous_df['term'].tolist()
print (unambiguous_term_list)
# save the vocabulary in the df_voc dataframe as a csv
df_voc.to_csv("c:/users/nh48/desktop/audit_test/hatebase_vocab.csv")
unambiguous_df.to_csv("c:/users/nh48/desktop/audit_test/hatebase_vocab_unambiguous.csv")
|
import yaml
import numpy as np
class setup:
def __init__(self, setup):
with open(setup,'r') as obj:
input=yaml.safe_load(obj)
print('loaded calculation setups:')
print(input)
self.structure_file=input['structure_file']
self.dyn_file=input['dyn_file']
self.style=input['style']
self.temperature=input['temperature']
self.broadening_factor=input['broadening_factor']
self.using_mean_spacing=input['using_mean_spacing']
self.omega_threshould=input['omega_threshould']
self.broadening_threshould=input['broadening_threshould']
self.two_dim=input['two_dim']
self.symmetrize_fc=input['symmetrize_fc']
if self.two_dim:
self.vdw_thickness=input['vdw_thickness']
def thermal_conductivity(setup_file):
calc_setup=setup(setup_file)
from pyAF.thermal_conductivity_AF import get_thermal_conductivity
results=get_thermal_conductivity(calc_setup)
return results
def resolved_thermal_conductivity(setup_file):
calc_setup=setup(setup_file)
from pyAF.thermal_conductivity_AF import get_resolved_thermal_conductivity
results=get_resolved_thermal_conductivity(calc_setup)
return results
|
"""
校内打卡相关函数
@create:2021/03/10
@filename:campus_check.py
@author:ReaJason
@email_addr:reajason@163.com
@blog_website:https://reajason.top
@last_modify:2021/03/15
"""
import time
import json
import requests
from setting import log
def get_id_list_v2(token, custom_type_id):
"""
    Get the id of every on-campus check-in time slot from the on-campus template id.
    :param token: user token
    :param custom_type_id: on-campus check-in template id
    :return: list of on-campus check-in ids
"""
post_data = {
"customerAppTypeId": custom_type_id,
"longitude": "",
"latitude": "",
"token": token,
}
try:
res = requests.post(
"https://reportedh5.17wanxiao.com/api/clock/school/rules", data=post_data
)
return res.json()["customerAppTypeDto"]["ruleList"]
except:
return None
def get_id_list_v1(token):
"""
    Get the id of every on-campus check-in time slot from the on-campus template id (first version, kept for now).
    :param token: user token
    :return: list of on-campus check-in ids
"""
post_data = {"appClassify": "DK", "token": token}
try:
res = requests.post(
"https://reportedh5.17wanxiao.com/api/clock/school/childApps",
data=post_data,
).json()
        if res["appList"]:
app_list = res["appList"][-1]["customerAppTypeRuleList"] \
if res["appList"][-1]["customerAppTypeRuleList"] \
else res["appList"][0]["customerAppTypeRuleList"]
id_list = sorted(
app_list,
key=lambda x: x["id"],
)
res_dict = [
{"customerAppTypeId": j["id"], "templateid": f"clockSign{i + 1}"}
for i, j in enumerate(id_list)
]
return res_dict
return None
except:
return None
def get_customer_type_id(token):
"""
    Get the on-campus check-in template id (first version, kept for now).
    :param token: user token
    :return: the on-campus check-in template id
"""
post_data = {"appClassify": "DK", "token": token}
try:
res = requests.post(
"https://reportedh5.17wanxiao.com/api/clock/school/childApps",
data=post_data,
).json()
for app in res["appList"]:
if '校内' in app['name']:
return app['id']
except:
return None
def get_campus_check_post(template_id, custom_rule_id, stu_num, token):
"""
    Get the check-in post data
:param template_id:
:param custom_rule_id:
:param stu_num:
:param token:
:return:
"""
campus_check_post_json = {
"businessType": "epmpics",
"jsonData": {
"templateid": template_id,
"customerAppTypeRuleId": custom_rule_id,
"stuNo": stu_num,
"token": token
},
"method": "userComeAppSchool",
"token": token
}
for _ in range(3):
try:
res = requests.post(
url="https://reportedh5.17wanxiao.com/sass/api/epmpics",
json=campus_check_post_json,
timeout=10,
).json()
except:
log.warning("完美校园校内打卡post参数获取失败,正在重试...")
time.sleep(1)
continue
if res["code"] != "10000":
"""
{'msg': '业务异常', 'code': '10007', 'data': '无法找到该机构的投票模板数据!'}
"""
log.warning(res['data'])
return None
data = json.loads(res["data"])
post_dict = {
"areaStr": data['areaStr'],
"deptStr": data['deptStr'],
"deptid": data['deptStr']['deptid'] if data['deptStr'] else None,
"customerid": data['customerid'],
"userid": data['userid'],
"username": data['username'],
"stuNo": data['stuNo'],
"phonenum": data["phonenum"],
"templateid": data["templateid"],
"updatainfo": [
{"propertyname": i["propertyname"], "value": i["value"]}
for i in data["cusTemplateRelations"]
],
"updatainfo_detail": [
{
"propertyname": i["propertyname"],
"checkValues": i["checkValues"],
"description": i["decription"],
"value": i["value"],
}
for i in data["cusTemplateRelations"]
],
"checkbox": [
{"description": i["decription"], "value": i["value"], "propertyname": i["propertyname"]}
for i in data["cusTemplateRelations"]
],
}
log.info("完美校园校内打卡post参数获取成功")
return post_dict
return None
def campus_check_in(phone, token, post_dict, custom_rule_id):
"""
    On-campus check-in
    :param phone: phone number
    :param token: user token
    :param post_dict: on-campus check-in data
    :param custom_rule_id: on-campus check-in rule id
:return:
"""
check_json = {
"businessType": "epmpics",
"method": "submitUpInfoSchool",
"jsonData": {
"deptStr": post_dict["deptStr"],
"areaStr": post_dict["areaStr"],
"reportdate": round(time.time() * 1000),
"customerid": post_dict["customerid"],
"deptid": post_dict["deptid"],
"source": "app",
"templateid": post_dict["templateid"],
"stuNo": post_dict["stuNo"],
"username": post_dict["username"],
"phonenum": phone,
"userid": post_dict["userid"],
"updatainfo": post_dict["updatainfo"],
"customerAppTypeRuleId": custom_rule_id,
"clockState": 0,
"token": token,
},
"token": token,
}
try:
res = requests.post(
"https://reportedh5.17wanxiao.com/sass/api/epmpics", json=check_json
).json()
"""
{'msg': '业务异常', 'code': '10007', 'data': '请在正确的打卡时间打卡'}
"""
if res["code"] == "10000":
log.info(res)
elif res['data'] == "areaStr can not be null":
log.warning("当前用户无法获取校内打卡地址信息,请前往配置文件,campus_checkin 下的 areaStr 设置地址信息")
elif res['data'] == "请在正确的打卡时间打卡":
            log.warning(f'Not within the allowed check-in time window: {res["data"]}')
else:
log.warning(res)
return {
'status': 1,
'res': res,
'post_dict': post_dict,
'check_json': check_json,
'type': post_dict["templateid"]
}
except:
errmsg = f"```校内打卡请求出错```"
log.warning("校内打卡请求出错")
return {'status': 0, 'errmsg': errmsg}
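# Illustrative flow (added; not part of the original module): `token`, `phone`
# and `stu_num` are placeholders, and the rule-id keys follow the shape
# returned by get_id_list_v1 above:
#
#     for rule in get_id_list_v1(token) or []:
#         post_dict = get_campus_check_post(rule["templateid"],
#                                           rule["customerAppTypeId"],
#                                           stu_num, token)
#         if post_dict:
#             campus_check_in(phone, token, post_dict, rule["customerAppTypeId"])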
|
#MIT License
#
#Copyright (c) 2021 Nick Hurt
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import os
import datetime
import logging
import urllib
import pyodbc
import sys
import azure.functions as func
import json
from time import perf_counter
import requests,uuid
from requests.auth import HTTPBasicAuth
import asyncio
from azure.core.exceptions import AzureError
from azure.storage.filedatalake.aio import (
DataLakeServiceClient,
)
from azure.identity.aio import ClientSecretCredential #DefaultAzureCredential
async def main(msg: func.QueueMessage):
logging.info('Python queue trigger function processed a queue item.')
#eventloop = asyncio.get_event_loop()
result = json.dumps({
'id': msg.id,
'body': msg.get_body().decode('utf-8'),
'expiration_time': (msg.expiration_time.isoformat()
if msg.expiration_time else None),
'insertion_time': (msg.insertion_time.isoformat()
if msg.insertion_time else None),
'time_next_visible': (msg.time_next_visible.isoformat()
if msg.time_next_visible else None),
'pop_receipt': msg.pop_receipt,
'dequeue_count': msg.dequeue_count
})
result = json.loads(msg.get_body().decode('utf-8'))
logging.info("..............Dequeued: "+ json.dumps(result))
tenantid=os.environ["tenantID"]
spnid= os.environ["SPNID"]
spnsecret= os.environ["SPNSecret"]
connxstr=os.environ["DatabaseConnxStr"]
dbname = os.environ["dbname"]
dbschema = os.environ["dbschema"]
cnxn = pyodbc.connect(connxstr)
cursor = cnxn.cursor()
now = datetime.datetime.utcnow()
acls_changed = 0
vContinuationToken = ''
vContinuationMsg = ''
captureTime = now.strftime('%Y-%m-%d %H:%M:%S')
queue_itm = "select trans_status, continuation_token from " + dbschema + ".policy_transactions where id = "+str(result["id"])
cursor.execute(queue_itm)
transrow = cursor.fetchone()
if transrow:
logging.info('De-queued transaction ID '+ str(result["id"]) + ' with transaction status ' + transrow[0] + '. Checking for continuation token... ' + transrow[1] )
if transrow[1] != '':
vContinuationToken = transrow[1]
            vContinuationMsg = 'Recovering with continuation token ' + vContinuationToken
else:
logging.error('Could not find transaction record with ID ' + str(result["id"]) + '. Please contact support')
    if transrow and transrow[0] not in ('Abort','Aborted'):
queue_upd = "update " + dbschema + ".policy_transactions set trans_status = 'De-queued', trans_reason = concat(trans_reason, '" + vContinuationMsg + "'), last_updated = '"+ captureTime + "' where trans_status <> 'Aborted' and trans_status <> 'Abort' and id = "+str(result["id"])
logging.info(queue_upd)
cursor.execute(queue_upd)
cnxn.commit()
#if len(recsupdated)>0:
try:
u1_start = perf_counter()
#storagetoken = getBearerToken(tenantid,"storage.azure.com",spnid,spnsecret)
#acls_changed += setADLSBulkPermissions(storagetoken, str(result["storage_url"]), str(result["acentry"]),str(result["trans_action"]),str(result["trans_mode"]))
default_credential = ClientSecretCredential( tenantid, spnid, spnsecret, )
#default_credential = DefaultAzureCredential()
urlparts = str(result["storage_url"]).split('/',4)
service_client = DataLakeServiceClient("https://{}".format(urlparts[2]),
credential=default_credential)
logging.info("Obtained service client")
async with service_client:
filesystem_client = service_client.get_file_system_client(file_system=urlparts[3])
logging.info('Setting ACLs recursively ' + str(result['acentry']))
acls_changed = await set_recursive_access_control(filesystem_client,urlparts[4], str(result["acentry"]),result["id"],u1_start, str(result["trans_mode"]), vContinuationToken)
#logging.info("No ACL changes = "+ str(acls_changed))
await filesystem_client.close()
await service_client.close()
now = datetime.datetime.utcnow()
captureTime = now.strftime('%Y-%m-%d %H:%M:%S')
u1_stop = perf_counter()
if not acls_changed or acls_changed <0: # there were either no files in the folder or some error or aborted due to user error
if not acls_changed or acls_changed == 0:
acls_changed=0
queue_comp = "update " + dbschema + ".policy_transactions set trans_status = 'Warning', acl_count = "+str(acls_changed) + ", last_updated = '"+ captureTime + "', trans_reason = concat(trans_reason,'Completed but did not set any ACLS. This may be due to an empty folder. Finished in " + str(format(u1_stop-u1_start,'.3f')) + " seconds. ') where id = "+str(result["id"])
elif acls_changed == -4:
queue_comp = "update " + dbschema + ".policy_transactions set trans_status = 'Aborted', acl_count = "+str(acls_changed) + ", last_updated = '"+ captureTime + "', trans_reason = concat(trans_reason,'Aborted due to user error correction in " + str(format(u1_stop-u1_start,'.3f')) + " seconds. ') where id = "+str(result["id"])
else:
queue_comp = "update " + dbschema + ".policy_transactions set trans_status = 'Error', acl_count = "+str(acls_changed) + ", last_updated = '"+ captureTime + "', trans_reason = concat(trans_reason,'Aborted in " + str(format(u1_stop-u1_start,'.3f')) + " seconds. ') where id = "+str(result["id"])
else:
queue_comp = "update " + dbschema + ".policy_transactions set trans_status = 'Done', acl_count = "+str(acls_changed) + ", last_updated = '"+ captureTime + "', trans_reason = concat(trans_reason,'Completed in " + str(format(u1_stop-u1_start,'.3f')) + " seconds. ') where id = "+str(result["id"])
logging.info("!!!!! Queue update SQL: "+queue_comp)
cursor.execute(queue_comp)
cnxn.commit()
logging.info("!!!!! Queue update completed: "+queue_comp)
except pyodbc.DatabaseError as err:
cnxn.commit()
sqlstate = err.args[1]
sqlstate = sqlstate.split(".")
            logging.error('Error message: ' + '. '.join(sqlstate))
except Exception as e:
            logging.error('Error in line ' + str(sys.exc_info()[-1].tb_lineno) + ' occurred when trying to process ACL work items: ' + str(e))
queue_upd = "update " + dbschema + ".policy_transactions set trans_status = 'Queued', trans_reason = concat(trans_reason,'Requeuing due to error: " + str(e) + "') where id = "+str(result["id"])
cursor.execute(queue_upd)
cnxn.commit()
#potentially try to add the item back on the queue here
else:
cnxn.commit()
#print('Done')
finally:
cnxn.autocommit = True
else:
logging.error('Transaction ' + str(result["id"]) + ' in Abort state. Ignoring...')
def getBearerToken(tenantid,resourcetype,spnid,spnsecret):
endpoint = 'https://login.microsoftonline.com/' + tenantid + '/oauth2/token'
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
payload = 'grant_type=client_credentials&client_id='+spnid+'&client_secret='+ spnsecret + '&resource=https%3A%2F%2F'+resourcetype+'%2F'
#payload = 'resource=https%3A%2F%2F'+resourcetype+'%2F'
#print(endpoint)
#print(payload)
r = requests.post(endpoint, headers=headers, data=payload)
response = r.json()
print("Obtaining AAD bearer token for resource "+ resourcetype + "...")
try:
bearertoken = response["access_token"]
except KeyError:
print("Error obtaining bearer token: "+ response)
#print(bearertoken)
print("Bearer token obtained.\n")
return bearertoken
# a variation of the function above which accepts a dictionary object of users and groups so that we can set the ACLs in bulk with a comma-separated list of ACEs (access control entries)
def setADLSBulkPermissions(aadtoken, adlpath, acentry, trans_action, trans_mode,lcursor, lcnxn):
# Read documentation here -> https://docs.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/path/update
if aadtoken and acentry and adlpath and trans_action and trans_mode:
puuid = str(uuid.uuid4())
headers = {'x-ms-version': '2019-12-12','Authorization': 'Bearer %s' % aadtoken, 'x-ms-acl':acentry,'x-ms-client-request-id': '%s' % puuid}
request_path = adlpath+"?action="+ trans_action + "&mode=" + trans_mode
print("Setting ACLs " + acentry + " on " +adlpath + "...")
t1_start = perf_counter()
if devstage == 'live':
r = requests.patch(request_path, headers=headers)
response = r.json()
t1_stop = perf_counter()
#print(r.text)
if r.status_code == 200:
logging.info("Response Code: " + str(r.status_code) + "\nDirectories successful:" + str(response["directoriesSuccessful"]) + "\nFiles successful: "+ str(response["filesSuccessful"]) + "\nFailed entries: " + str(response["failedEntries"]) + "\nFailure Count: "+ str(response["failureCount"]) + f"\nCompleted in {t1_stop-t1_start:.3f} seconds\n")
return(int(response["filesSuccessful"]) + int(response["directoriesSuccessful"]))
else:
logging.error("Error: " + str(r.text))
raise Exception("Error while trying to set ACLs " + str(r.text))
return(0)
else:
logging.info("Environment setting was set to non-prod therefore no ACLs have been set. "+ devstage)
return(-2)
else:
logging.warning("Warning: Could not set ACLs as no users/groups were supplied in the ACE entry. This can happen when all users are either in the exclusion list or their IDs could not be found in AAD.")
return(-1)
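# Illustrative ACE string for the acentry parameter above (the object IDs are placeholders, not values
# from this project): "user:<object-id>:r-x,default:user:<object-id>:r-x" grants both the access ACL and
# the default ACL for a principal, and multiple entries are comma-separated, which is the bulk format the
# helper above passes through in the x-ms-acl header.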
#aces = spntype+':'+spn+spnaccsuffix + ':'+permissions+',default:'+spntype+':'+spn+spnaccsuffix + ':'+permissions,'x-ms-client-request-id': '%s' % puuid
async def set_recursive_access_control(filesystem_client,dir_name, acl,transid,proc_start,trans_mode, pContinuationToken):
# the progress callback is invoked each time a batch is completed
l1_start = perf_counter()
user_error_abort = False
async def progress_callback(acl_changes):
        nonlocal user_error_abort  # was: global; the outer function checks its local flag, so nonlocal is required
user_error_abort = False
dbname = "policystore" #os.environ["dbname"]
dbschema = "dbo" #os.environ["dbschema"]
logging.info(("Transaction " + str(transid) + " in progress. In this batch: {} directories and {} files were processed successfully, {} failures were counted. " +
"In total, {} directories and {} files were processed successfully, {} failures were counted.")
.format(acl_changes.batch_counters.directories_successful, acl_changes.batch_counters.files_successful,
acl_changes.batch_counters.failure_count, acl_changes.aggregate_counters.directories_successful,
acl_changes.aggregate_counters.files_successful, acl_changes.aggregate_counters.failure_count))
        connxstr = os.environ["DatabaseConnxStr"]
dbname = os.environ["dbname"]
dbschema = os.environ["dbschema"]
lcnxn = pyodbc.connect(connxstr)
lcursor = lcnxn.cursor()
now = datetime.datetime.utcnow()
captureTime = now.strftime('%Y-%m-%d %H:%M:%S')
l1_stop = perf_counter()
trans_status_check = "select trans_status from policy_transactions where trans_status = 'Abort' and id = "+str(transid)
lcursor.execute(trans_status_check)
aborttrans = lcursor.fetchall()
if len(aborttrans) > 0:
logging.error("!!! Aborting transaction in progress. ID = "+ str(transid))
user_error_abort = True
raise Exception("Abort in progress transaction due to user correction")
else:
# if not in abort status then update with continuation token
queue_comp = "update " + dbschema + ".policy_transactions set trans_status = 'InProgress', continuation_token = '" + str(acl_changes.continuation) + "',acl_count = "+str(acl_changes.aggregate_counters.files_successful) + ", last_updated = '"+ captureTime + "', trans_reason = 'Running for " + str(format(l1_stop-proc_start,'.3f')) + " seconds. ' where id = "+str(transid)
logging.info("!!!!! Queue update un progress SQL: "+queue_comp)
lcursor.execute(queue_comp)
lcnxn.commit()
# keep track of failed entries if there are any
failed_entries.append(acl_changes.batch_failures)
# illustrate the operation by using a small batch_size
#acl_change_result = ""
try:
#acls = 'user::rwx,group::r-x,other::rwx'
#acls = 'default:user:1ad1af70-791f-4d61-8bf1-27ccade3342a:rw-,default:user:9e501fc2-c687-4ba5-bfb9-b8afa948cb83:rw-,default:user:02b60873-3213-46aa-8889-8866e693d559:rw-'
failed_entries = []
#dir_name = "sample"
#logging.info("Raw directory named '{}'.".format(dir_name))
#logging.info("Clean directory named '{}'.".format(urllib.parse.unquote(dir_name)))
#directory_client = await filesystem_client.create_directory(dir_name)
#dir_name = 'base1/nyctaxidata/green'
directory_client = filesystem_client.get_directory_client(dir_name)
if trans_mode == 'modify':
if pContinuationToken != '':
acl_change_result = await directory_client.update_access_control_recursive(acl=acl,
continuation_token = pContinuationToken,
progress_hook=progress_callback,
batch_size=2000)
else:
acl_change_result = await directory_client.update_access_control_recursive(acl=acl,
progress_hook=progress_callback,
batch_size=2000)
elif trans_mode == 'remove':
if pContinuationToken != '':
acl_change_result = await directory_client.remove_access_control_recursive(acl=acl,
continuation_token = pContinuationToken,
progress_hook=progress_callback,
batch_size=2000)
else:
acl_change_result = await directory_client.remove_access_control_recursive(acl=acl,
progress_hook=progress_callback,
batch_size=2000)
else:
logging.error('Error during setting ACLs recursively for transaction '+str(transid) + ' due to unknown transaction mode ' + trans_mode)
return -2
await directory_client.close()
logging.info("Summary: {} directories and {} files were updated successfully, {} failures were counted."
.format(acl_change_result.counters.directories_successful, acl_change_result.counters.files_successful,
acl_change_result.counters.failure_count))
#if an error was encountered, a continuation token would be returned if the operation can be resumed
if acl_change_result.continuation is not None:
logging.info("The operation can be resumed by passing the continuation token {} again into the access control method."
.format(acl_change_result.continuation))
return acl_change_result.counters.files_successful
except AzureError as error:
logging.ERROR("aclWorkers: SDK Error whilst setting ACLs recursively: " + error.message + " at line no " + str(sys.exc_info()[-1].tb_lineno))
# if the error has continuation_token, you can restart the operation using that continuation_token
if error.continuation_token:
if trans_mode == 'modify':
acl_change_result = \
await directory_client.update_access_control_recursive(acl=acl,
continuation_token=error.continuation_token,
progress_hook=progress_callback,
batch_size=2000)
elif trans_mode == 'remove':
await directory_client.remove_access_control_recursive(acl=acl,
continuation_token=error.continuation_token,
progress_hook=progress_callback,
batch_size=2000)
else: None
await directory_client.close()
except Exception as e:
logging.error('Error during setting ACLs recursively for transaction '+str(transid) + ' due to ' + str(e) + ' at line no ' + str(sys.exc_info()[-1].tb_lineno) )
if user_error_abort:
return -4
else:
return -2
# get and display the permissions of the parent directory again
#await directory_client.close()
#acl_props = await directory_client.get_access_control()
#logging.info("New permissions of directory '{}' and its children are {}.".format(dir_name, acl_props['permissions']))
devstage = 'live'
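# --- Illustrative sketch, not part of the original function app ---
# The status updates above build T-SQL by string concatenation. A safer pattern with pyodbc is to pass
# values as query parameters; this hypothetical helper shows the idea for the policy_transactions updates
# (the schema name still has to be interpolated, since identifiers cannot be parameterised).
def _update_transaction_status(cursor, dbschema, trans_id, status, reason_suffix, capture_time):
    """Sketch: update a policy_transactions row using a parameterised query."""
    sql = ("update " + dbschema + ".policy_transactions "
           "set trans_status = ?, trans_reason = concat(trans_reason, ?), last_updated = ? "
           "where id = ?")
    cursor.execute(sql, (status, reason_suffix, capture_time, trans_id))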
|
import pytest
from app.tools.utils import get_domain_name
class TestGetDomainName:
@pytest.fixture
def item_source_domain_pairs(self, scraped_item_source_data):
return [
(itm['url'], src['domain']) for itm, src in scraped_item_source_data
]
def test_domain_getter_util(self, item_source_domain_pairs):
for url, domain_name in item_source_domain_pairs:
result = get_domain_name(url)
assert result == domain_name
@pytest.mark.parametrize('url', [
'www.google.com/incomplete.html',
'blog.stackoverflow.com',
'not-a-url',
])
def test_invalid_urls_return_empty_str(self, url):
assert get_domain_name(url) == ''
def test_exceptions_cause_domain_getter_to_return_none(self, monkeypatch):
        def mocked_urlparse(*args, **kwargs):
            raise ValueError
monkeypatch.setattr('app.tools.utils.urlparse', mocked_urlparse)
result = get_domain_name('https://www.google.com')
assert result is None
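# A minimal sketch of an implementation consistent with the tests above; the real
# app.tools.utils.get_domain_name may differ. It assumes urlparse-based parsing: a
# scheme-qualified URL yields its netloc, a string without a netloc yields '', and
# an unexpected exception yields None. The name is hypothetical and the function is
# only here for illustration; the tests do not use it.
from urllib.parse import urlparse as _urlparse

def _get_domain_name_sketch(url):
    try:
        parsed = _urlparse(url)
        return parsed.netloc if parsed.netloc else ''
    except Exception:
        return None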
|
import threading
from orchestrator.Task import Task
class Orchestrator:
def __init__(self):
self._tasks = []
self._semaphore = threading.Semaphore(2)
def run_task(self, task: Task) -> None:
self._tasks += [task]
task.append_on_finish_event(lambda: self._when_task_is_terminated(task))
task.append_on_error_event(lambda: self._when_task_is_onerror(task))
task.append_on_start_event(lambda: self._when_task_is_started(task))
task.start()
def _when_task_is_started(self, task):
self._semaphore.acquire()
def _when_task_is_terminated(self, task):
## self._tasks.remove(task)
self._semaphore.release()
def _when_task_is_onerror(self, task):
self._semaphore.release()
def remove_task(self, task_id):
tasks = []
for task in self._tasks:
if task["id"] != task_id:
tasks += [task]
else:
task.canceled = True
self._tasks = tasks
def tasks(self):
tasks = []
for task in self._tasks:
tasks += [{
"id": task.id(),
"description": task.description(),
"argument": task.argument(),
"type": task.type(),
"status": task.status,
"message": task.get_message(),
"running": task.running,
"progress": task.get_progress(),
"start_date": task.start_date,
"last_update_date": task.last_update_date,
"end_date": task.end_date
}]
return tasks
default_orchestrator = Orchestrator()
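# Hypothetical usage sketch: only part of the Task interface is visible here (start(),
# id(), status/progress accessors and the append_on_*_event hooks), so this is
# illustrative rather than a verbatim example from the project. The Semaphore(2)
# suggests the intent is to let at most two tasks run concurrently.
#
#     task = Task(...)                        # constructor arguments are not shown here
#     default_orchestrator.run_task(task)     # registers callbacks, then task.start()
#     default_orchestrator.tasks()            # list of dicts describing known tasks
#     default_orchestrator.remove_task(task.id())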
|
from django.conf import settings
from django.utils.six import string_types
from django.utils.translation import ugettext_lazy as _
_DEFAULT_KINDS = {
"other": 0,
"audit": 1,
"edit": 2,
"deletion": 3,
"note": 4,
"email": 5,
"warning": 6,
"error": 7
}
_DEFAULT_KIND_LABELS = {
"other": _("other"),
"audit": _("audit"),
"edit": _("edit"),
"deletion": _("deletion"),
"note": _("note"),
"email": _("email"),
"warning": _("warning"),
"error": _("error")
}
def _get_kind_labels(kinds):
kind_labels = _DEFAULT_KIND_LABELS.copy()
kind_labels.update(getattr(settings, "ANALOG_KIND_LABELS", {}))
kind_labels = dict(
(KINDS.get(mnemonic, mnemonic), label)
for (mnemonic, label) in kind_labels.items()
)
for key in list(kind_labels):
if isinstance(kind_labels[key], string_types):
kind_labels[key] = _(kind_labels[key])
return kind_labels
#: A mapping from kind mnemonic (string) to kind ID (integer)
KINDS = _DEFAULT_KINDS.copy()
KINDS.update(getattr(settings, "ANALOG_KINDS", {}))
#: A mapping from kind ID (integer) to label
KIND_LABELS = _get_kind_labels(KINDS)
KIND_IDS = set(KINDS.values())
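# Illustrative settings override (the "payment" kind and its ID are made-up values):
# a project can extend the defaults from its Django settings module, e.g.
#
#     # settings.py
#     ANALOG_KINDS = {"payment": 8}
#     ANALOG_KIND_LABELS = {"payment": "payment event"}
#
# after which KINDS maps "payment" to 8 and KIND_LABELS maps 8 to the lazily
# translated label "payment event".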
|
# Generated by Django 3.2.13 on 2022-05-10 15:20
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('ask_cfpb', '0039_2022_squash'),
]
operations = [
migrations.RemoveField(
model_name='answerpage',
name='user_feedback',
),
]
|
##
# File: MultiProcUtilTests.py
# Author: jdw
# Date: 17-Nov-2018
#
# Updates:
#
##
"""
"""
__docformat__ = "restructuredtext en"
__author__ = "John Westbrook"
__email__ = "jwest@rcsb.rutgers.edu"
__license__ = "Apache 2.0"
import logging
import random
import re
import unittest
from rcsb.utils.multiproc.MultiProcUtil import MultiProcUtil
logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s]-%(module)s.%(funcName)s: %(message)s")
logger = logging.getLogger()
logger.setLevel(logging.INFO)
class StringTests(object):
"""A skeleton class that implements the interface expected by the multiprocessing
utility module --
"""
def __init__(self, **kwargs):
pass
def reverser(self, dataList, procName, optionsD, workingDir):
"""Lexically reverse the characters in the input strings.
Flag strings with numerals as errors.
Read input list and perform require operation and return list of
inputs with successful outcomes.
"""
_ = optionsD
_ = workingDir
successList = []
retList1 = []
retList2 = []
diagList = []
skipped = 0
for tS in dataList:
if re.search("[8-9]", tS):
# logger.info("skipped %r", tS)
skipped += 1
continue
rS1 = tS[::-1]
rS2 = rS1 + tS
sumC = 0
for s1 in tS:
sumC += ord(s1) - ord(s1)
diag = len(rS2)
successList.append(tS)
retList1.append(rS1)
retList2.append(rS2)
diagList.append(diag)
logger.debug("%s skipped %d dataList length %d successList length %d", procName, skipped, len(dataList), len(successList))
#
return successList, retList1, retList2, diagList
class MultiProcUtilTests(unittest.TestCase):
def setUp(self):
self.__verbose = True
def tearDown(self):
pass
def testMultiProcString(self):
""""""
try:
sCount = 10000
dataList = []
for _ in range(sCount):
sLength = random.randint(100, 30000)
dataList.append("".join(["9"] * sLength))
dataList.append("".join(["b"] * sLength))
#
logger.info("Length starting list is %d", len(dataList))
sTest = StringTests()
mpu = MultiProcUtil(verbose=True)
mpu.set(workerObj=sTest, workerMethod="reverser")
# sCount = 300 000
# 334s 4-proc
# 281s 8-proc
ok, failList, resultList, _ = mpu.runMulti(dataList=dataList, numProc=4, numResults=2, chunkSize=10)
#
logger.info("Multi-proc %r failures %r result length %r %r", ok, len(failList), len(resultList[0]), len(resultList[1]))
self.assertGreaterEqual(len(failList), 1)
#
self.assertEqual(sCount, len(resultList[0]))
self.assertEqual(sCount, len(resultList[1]))
self.assertFalse(ok)
except Exception as e:
logger.exception("Failing with %s", str(e))
self.fail()
def suiteMultiProc():
suiteSelect = unittest.TestSuite()
suiteSelect.addTest(MultiProcUtilTests("testMultiProcString"))
return suiteSelect
if __name__ == "__main__":
mySuite1 = suiteMultiProc()
unittest.TextTestRunner(verbosity=2).run(mySuite1)
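# Note on the expected outcome above (derived from the test itself, not from external
# documentation): every generated "9...9" string matches the reverser's [8-9] skip
# filter, so half of the inputs come back in failList, ok is False, and each of the
# two result lists still holds the sCount successfully reversed "b...b" strings.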
|
"""Logic for comparing DICOM data sets and data sources"""
from copy import deepcopy
from hashlib import sha256
from typing import Optional, List
from pydicom import Dataset, DataElement
from pydicom.tag import BaseTag
def _shorten_bytes(val: bytes) -> bytes:
if len(val) > 16:
return b"*%d bytes, hash = %s*" % (len(val), sha256(val).hexdigest().encode())
return val
class DataDiff(object):
default_elem_fmt = "{elem.tag} {elem.name: <35} {elem.VR}: {value}"
def __init__(
self,
tag: BaseTag,
l_elem: Optional[DataElement],
r_elem: Optional[DataElement],
elem_fmt: str = default_elem_fmt,
):
self.tag = tag
self.l_elem = deepcopy(l_elem)
self.r_elem = deepcopy(r_elem)
self.elem_fmt = elem_fmt
def _format_elem(self, elem: DataElement) -> str:
value = elem.value
if isinstance(value, bytes):
value = _shorten_bytes(value)
return self.elem_fmt.format(elem=elem, value=value)
def __str__(self) -> str:
res = []
if self.l_elem is not None:
res.append("< %s" % self._format_elem(self.l_elem))
if self.r_elem is not None:
res.append("> %s" % self._format_elem(self.r_elem))
return "\n".join(res)
def diff_data_sets(left: Dataset, right: Dataset) -> List[DataDiff]:
"""Get list of all differences between `left` and `right` data sets"""
l_elems = iter(left)
r_elems = iter(right)
l_elem = r_elem = None
l_done = r_done = False
diffs = []
while True:
if l_elem is None and not l_done:
try:
l_elem = next(l_elems)
except StopIteration:
l_done = True
l_elem = None
if r_elem is None and not r_done:
try:
r_elem = next(r_elems)
except StopIteration:
r_done = True
r_elem = None
if l_elem is None and r_elem is None:
break
if l_elem is None:
assert r_elem is not None
diffs.append(DataDiff(r_elem.tag, l_elem, r_elem))
r_elem = None
elif r_elem is None:
assert l_elem is not None
diffs.append(DataDiff(l_elem.tag, l_elem, r_elem))
l_elem = None
elif l_elem.tag < r_elem.tag:
diffs.append(DataDiff(l_elem.tag, l_elem, None))
l_elem = None
elif r_elem.tag < l_elem.tag:
diffs.append(DataDiff(r_elem.tag, None, r_elem))
r_elem = None
else:
if l_elem.value != r_elem.value or l_elem.VR != r_elem.VR:
diffs.append(DataDiff(l_elem.tag, l_elem, r_elem))
l_elem = r_elem = None
return diffs
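# A small usage sketch (illustrative only; the attribute values below are made up):
# build two minimal pydicom data sets, diff them, and print each difference in the
# "<"/">" format produced by DataDiff.__str__. Wrapped in a function so importing
# the module does not execute it.
def _demo_diff_data_sets() -> None:
    left = Dataset()
    left.PatientName = "Doe^Jane"
    left.PatientID = "12345"
    right = Dataset()
    right.PatientName = "Doe^John"
    right.StudyDescription = "CT CHEST"
    for diff in diff_data_sets(left, right):
        # One DataDiff per differing tag: value mismatch, left-only, or right-only.
        print(diff)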
|
# -*- coding: utf-8 -*-
# @Time : DATE:2021/10/7
# @Author : yan
# @Email : 1792659158@qq.com
# @File : 07_supervoxel_clustering.py
import pclpy
from pclpy import pcl
import numpy as np
import sys
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Syntax is: %s <pcd-file> \n" % sys.argv[0],
"--NT Dsables the single cloud transform \n"
"-v <voxel resolution>\n-s <seed resolution>\n"
"-c <color weight> \n-z <spatial weight> \n"
"-n <normal_weight>\n")
exit()
cloud = pcl.PointCloud.PointXYZRGBA()
print('Loading point cloud...')
if pcl.io.loadPCDFile(sys.argv[1], cloud) == -1:
print('"Error loading cloud file!')
exit(-1)
disable_transform = '--NT' in sys.argv
voxel_resolution = 0.008
voxel_res_specified = '-v' in sys.argv
if voxel_res_specified:
index = sys.argv.index('-v') + 1
voxel_resolution = float(sys.argv[index])
seed_resolution = 0.1
seed_res_specified = '-s' in sys.argv
if seed_res_specified:
index = sys.argv.index('-s') + 1
seed_resolution = float(sys.argv[index])
color_importance = 0.2
if '-c' in sys.argv:
index = sys.argv.index('-c') + 1
color_importance = float(sys.argv[index])
spatial_importance = 0.4
if '-z' in sys.argv:
index = sys.argv.index('-z') + 1
spatial_importance = float(sys.argv[index])
normal_importance = 1.0
if '-n' in sys.argv:
index = sys.argv.index('-n') + 1
        normal_importance = float(sys.argv[index])  # '-n' sets the normal weight, not the spatial weight
    # Run the supervoxel clustering
sv = pcl.segmentation.SupervoxelClustering.PointXYZRGBA(voxel_resolution, seed_resolution)
if disable_transform:
sv.setUseSingleCameraTransform(False)
sv.setInputCloud(cloud)
sv.setColorImportance(color_importance)
sv.setSpatialImportance(spatial_importance)
sv.setNormalImportance(normal_importance)
supervoxel_clusters = pcl.vectors.map_uint32t_PointXYZRGBA()
print('Extracting supervoxels!')
sv.extract(supervoxel_clusters)
print("Found %d supervoxels" % len(supervoxel_clusters))
viewer = pcl.visualization.PCLVisualizer('3D Viewer')
viewer.setBackgroundColor(0, 0, 0)
voxel_centroid_cloud = sv.getVoxelCentroidCloud()
viewer.addPointCloud(voxel_centroid_cloud, 'voxel centroids')
viewer.setPointCloudRenderingProperties(0, 2.0, 'voxel centroids')
viewer.setPointCloudRenderingProperties(1, 0.95, 'voxel centroids')
labeled_voxel_cloud = sv.getLabeledVoxelCloud()
viewer.addPointCloud(labeled_voxel_cloud, 'labeled voxels')
viewer.setPointCloudRenderingProperties(1, 0.5, 'labeled voxels')
sv_normal_cloud = sv.makeSupervoxelNormalCloud(supervoxel_clusters)
    # The normal visualization is commented out so the figure stays easy to read; uncomment it to see the supervoxel normals.
    # In fact pclpy currently has a bug when visualizing normals, so we leave it commented out for now.
# viewer.addPointCloudNormals(sv_normal_cloud, 1, 0.05, "supervoxel_normals")
print('Getting supervoxel adjacency')
    # supervoxel_adjacency = 0  # std::multimap<std::uint32_t, std::uint32_t> is not bound in pclpy
# sv.getSupervoxelAdjacency(supervoxel_adjacency)
    # To draw the supervoxel adjacency graph we would need to iterate over the supervoxel adjacency multimap.
while not viewer.wasStopped():
viewer.spinOnce(10)
|
import pybullet as p
from time import sleep
import pybullet_data
physicsClient = p.connect(p.GUI)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
p.resetSimulation(p.RESET_USE_REDUCED_DEFORMABLE_WORLD)
p.resetDebugVisualizerCamera(4,-40,-30,[0, 0, 0])
p.setGravity(0, 0, -10)
tex = p.loadTexture("uvmap.png")
planeId = p.loadURDF("plane.urdf", [0,0,-2])
boxId = p.loadURDF("cube.urdf", [1,1,3],useMaximalCoordinates = True)
# p.startStateLogging(p.STATE_LOGGING_VIDEO_MP4, "reduced_torus.mp4")
cube = p.loadURDF("reduced_torus/reduced_torus.urdf", [1,1,1])
p.changeVisualShape(cube, -1, rgbaColor=[1,1,1,1], textureUniqueId=tex, flags=0)
p.setPhysicsEngineParameter(sparseSdfVoxelSize=0.25)
p.setRealTimeSimulation(0)
while p.isConnected():
p.stepSimulation()
p.getCameraImage(320,200)
p.setGravity(0,0,-10)
|
from nltk import word_tokenize, sent_tokenize
import networkx as nx
import enchant
import numpy as np
from scipy import stats as st
import matplotlib.pyplot as plt
from itertools import *
from tqdm import tqdm_notebook
import re
import RAKE
from ripser import ripser
import spacy
from nltk.corpus import brown
nlp = spacy.load('en', disable=['parser', 'ner'])
"""
Load the list of non-mathematical words from the Brown corpus;
if not saved, generate this list.
"""
try:
bwords = str(np.load('bwords.npy'))
except Exception:  # bwords.npy not present (or unreadable): rebuild the word list below
cats = brown.categories()
cats.remove('learned')
nlp.max_length = 10000000
brn = ' '.join(brown.words(categories=cats))
doc = nlp(brn)
bwords = ' '.join([w.lemma_.lower() for w in doc])
np.save('bwords.npy', bwords)
d = enchant.Dict("en_US") # For spell-checking
# List of stop words
stop_words = [x for x in RAKE.RanksNLLongStopList()
if len(x) > 1 or x == 'a']
stop_words.remove('value')
vowels = {'a', 'e', 'i', 'o', 'u', 'y'}
def freq_wholelabel(ind_lst, txt):
"""
Counts frequencies of the words contained in ``ind_lst``
in the text ``txt``
"""
freqs = []
for ind in ind_lst:
freqs.append(txt.count(ind))
return {k: f for k, f in zip(ind_lst, freqs)}
class Concept_Network:
"""
Class wrapping semantic networks: enables the construction of semantic
networks from text.
"""
def __init__(self):
self.index = None
self.text = None # text should not change
self.windows = None # windows is sliding windows based on text
self.dist_mat = None
self.cooc_mat = None
self.graph = None
self.index_labels = None
self.timeline = None
self.cutoff = None
self.bars = []
self.cooc_mat = None
self.weight_dist = None
def clean(self, textname, n_chapt):
"""
Cleans a text by the name of ``textname``, which has ``n_chapt``
chapters.
Assumes that texts are stored in a Textbooks directory, and each text
has an associated folder containing each separate text chapter as a
.txt file.
"""
alltext = []
for i in range(1, n_chapt+1):
# Read text
with open('Textbooks/{0}/chapter_{1}.txt'.format(textname,
i)) as chapter:
text = chapter.read()
import unicodedata
# Text normalization/cleaning
text = unicodedata.normalize('NFKD',
text).encode('ascii', 'ignore')
text = b' '.join(text.split()).decode()
text = text.replace('-', ' ')
text = nlp(text)
# Lemmatization
text = ' '.join(w.lemma_ for w in text)
sents = sent_tokenize(text)
sents = [word_tokenize(s) for s in sents]
# replace all strings with numbers with #
sents = [[w if not re.search(r"\d+", w) else '#' for w in s]
for s in sents]
# remove non-alphanumeric
sents = [[w for w in s if re.search('[^a-zA-Z-#]+', w) is None]
for s in sents]
sents = [[w.lower() for w in s] for s in sents]
text = sents
def quick_check(word):
"""
Helper function to screen out non-words to cast as variables.
Ensures word contains a vowel, spell checks, etc.
"""
if word == '#':
return True
elif not vowels.intersection(word):
return False
elif len(word) <= 2 and word not in stop_words:
return False
elif 2 < len(word) <= 4 and not d.check(word):
return False
else:
return True
# replace all potential variables with VAR
text = [[w if quick_check(w) else 'VAR' for w in s] for s in text]
alltext += text
self.text = list(filter(lambda x: len(x) > 0, alltext))
self.windows = self.text
return self.text
def capital_lemma(self, spacy_word):
"""
Helper function that returns the lemmatized version of a word via
spacy, but maintains the same capitalization as the original.
"""
out_word = ""
if spacy_word.shape_[0] == "X":
out_word += spacy_word.lemma_[0].upper()
else:
out_word += spacy_word.lemma_[0]
out_word += spacy_word.lemma_[1:]
return out_word
def clean_notlower(self, textname, n_chapt):
"""
Auxiliary text cleaning function (similar to ``self.clean``) that does
not uniformly make words lower-cased; maintains same capitalization as
original text.
"""
alltext = []
for i in range(1, n_chapt+1):
with open('Textbooks/{0}/chapter_{1}.txt'.format(textname,
i)) as chapter:
text = chapter.read()
import unicodedata
text = unicodedata.normalize('NFKD',
text).encode('ascii', 'ignore')
text = b' '.join(text.split()).decode()
text = text.replace('-', ' ')
text = nlp(text)
text = ' '.join(self.capital_lemma(w) for w in text)
sents = sent_tokenize(text)
sents = [word_tokenize(s) for s in sents]
# replace all things with numbers with #
sents = [[w if not re.search(r"\d+", w) else '#' for w in s]
for s in sents]
# remove non-alphanumeric
sents = [[w for w in s if re.search('[^a-zA-Z-#]+', w) is None]
for s in sents]
text = sents
def quick_check(word):
"""
Helper function to screen out non-words to cast as variables.
Ensures word contains a vowel, spell checks, etc.
"""
if word == '#':
return True
elif not vowels.intersection(word):
return False
elif len(word) <= 2 and word not in stop_words:
return False
elif 2 < len(word) <= 4 and not d.check(word):
return False
else:
return True
# replace all potential variables with VAR
text = [[w if quick_check(w) else 'VAR' for w in s] for s in text]
alltext += text
self.text = list(filter(lambda x: len(x) > 0, alltext))
self.windows = self.text
return self.text
def RAKE_keywords(self):
"""
Extracts the RAKE keywords from the text
"""
# Additional, non-mathematically-contentful stop-words to consider
# in the RAKE run
extra_sl = ['example', 'counterexample', 'text', 'texts',
'undergraduate', 'chapter', 'definition',
'notation', 'proof', 'exercise', 'result']
sl = nlp(' '.join(stop_words + extra_sl))
sl = [w.lemma_ for w in sl]
rake = RAKE.Rake(sl + ['VAR', '#', '-pron-', '-pron'])
text = '. '.join([' '.join(x) for x in self.text])
# Run RAKE
kws = rake.run(text, minCharacters=3, maxWords=4, minFrequency=5)
keyphrases = [' '.join([j for j in k[0].split(' ')
if j not in {'var', '#', '-pron-', '-pron'}])
for k in kws]
keyphrases = set([k for k in keyphrases if len(k) > 0])
index = list(map(lambda x: x.split(), keyphrases))
self.index = index
return index
# determines order of first occurrence of index terms.
# prunes out the index elements which do not occur within the text
# (i.e., within a single sentence), and orders them w.r.t. first occurrence
# very very important to run before doing anything else
def occurrence(self, debug=False, n_allowed=None):
"""
Does two things:
(1) Ensures that all the extracted index terms actually appear within
the text (so we don't have any extracted nodes with no data)
(2) Goes through the text to determine the order of introduction of
        concepts, so that the resulting co-occurrence matrices have indices
ordered by introduction.
"""
sentences = self.text
index = list(map(set, self.index))
index_labels = np.array(list(map(lambda i: ' '.join(i), self.index)))
first_occs = [] # sentence number of first term occurrence
counts = []
conn_text = '. '.join([' '.join(s) for s in sentences])
for ind in index_labels:
for sent, n in zip(sentences, range(1, len(sentences)+1)):
if ind in ' '.join(sent):
first_occs.append(n)
break
else:
first_occs.append(0)
# count how many times index term appears in the text
counts.append(conn_text.count(ind))
counts = sorted(list(zip(index_labels, counts)),
key=lambda x: x[1], reverse=True)
ordered = [c[0] for c in counts]
first_occs = np.array(first_occs)
nonzeros = np.nonzero(first_occs)[0]
zeros = np.where(first_occs == 0)[0]
# Yield of how many extracted concepts were actually found
print("Yield percent:", len(nonzeros)/len(first_occs))
if debug:
print(index_labels[zeros])
# Remove terms which do not occur
index = np.array(index)[nonzeros]
index_labels = index_labels[nonzeros]
# Sort remaining into order of introduction
sort = np.argsort(first_occs[nonzeros])
index = index[sort]
index_labels = index_labels[sort]
index_labels = np.array([x for x in index_labels
if x in ordered[:n_allowed]])
index = np.array(list(map(lambda i: set(i.split()), index_labels)))
self.first_occs = np.sort(first_occs[nonzeros])
self.index_labels = index_labels
self.index = index
def sliding_window(self, lst, size):
"""
Generates a list of text sliding windows
of size ``size`` from a sentence, represented as a list of words
``lst``. Unused in analysis, but could be useful if you are interested
        in co-occurrence on a smaller scale than sentence-level.
"""
windows = []
for i in range(0, max(1, len(lst)-size+1)):
b, e = i, min(i+size, len(lst))
windows.append(lst[b:e])
return windows
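    # Illustrative behaviour (not an original doctest):
    #     sliding_window(['a', 'b', 'c', 'd'], 3) -> [['a', 'b', 'c'], ['b', 'c', 'd']]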
def sliding_window_constr(self, size):
"""
Gets all sliding windows of size ``size`` from the text.
"""
windows = []
if self.text is not None:
for sent in self.text:
windows += self.sliding_window(sent, size)
self.windows = windows
def cooc_2d(self):
"""
Calculates the number of co-occurrences of the index phrases in the
        text (output as the matrix ``cooc_mat``), and records the first
        time a co-occurrence occurred in the text (``dist_mat``).
"""
dim = len(self.index_labels)
cooc_mat = np.zeros((dim, dim))
first_cooc = np.zeros((dim, dim))
sentences = self.windows
timeline = {}
for sent, num in tqdm_notebook(list(zip(sentences, range(1, len(sentences)+1)))):
joined_sent = ' '.join(sent)
for i in range(dim):
if self.index_labels[i] in joined_sent:
cooc_mat[i, i] += 1
if first_cooc[i, i] == 0:
first_cooc[i, i] = num
for j in range(i+1, dim):
if self.index_labels[j] in joined_sent:
cooc_mat[(np.array([i, j]), np.array([j, i]))] += 1
if first_cooc[i, j] == 0:
first_cooc[(np.array([i, j]), np.array([j, i]))] = num
timeline[tuple(self.index_labels[np.array([i, j])])] = num
first_cooc[first_cooc == 0] = np.inf
self.cutoff = len(sentences)
self.dist_mat = first_cooc
self.cooc_mat = cooc_mat
self.timeline = timeline
# make graph
G = nx.from_numpy_array(cooc_mat)
G.remove_edges_from(nx.selfloop_edges(G))
name_mapping = {i: label for i, label in enumerate(self.index_labels)}
nx.relabel_nodes(G, name_mapping, copy=False)
self.graph = G
return cooc_mat, first_cooc, timeline
def cooc_for_text(self, text_name, nchapts,
n_allowed=None, groups=None):
"""
Wrapper function that effectively does everything - clean text,
extract the RAKE keyphrases, calculate occurrence within the texts,
rank the keyphrases based off the RAKE score and inverse Brown
frequency, and calculate co-occurrence throughout the text.
"""
# Clean the text
self.clean(text_name, nchapts)
# Extract RAKE keyphrases
self.RAKE_keywords()
# Calculate occurrence of the keyphrases
self.occurrence()
brown_whole = freq_wholelabel(self.index_labels, bwords)
# Do augmented rake-IDF scoring
# first re-run rake, just for convenience
extra_sl = ['example', 'counterexample', 'text', 'texts',
'undergraduate', 'chapter', 'definition', 'notation',
'proof', 'exercise', 'result']
sl = nlp(' '.join(stop_words + extra_sl))
sl = [w.lemma_ for w in sl]
rake = RAKE.Rake(sl + ['VAR', '#', '-pron-'])
text = '. '.join([' '.join(x) for x in self.text])
kws = rake.run(text, minCharacters=3, maxWords=4, minFrequency=5)
keyphrases = [(' '.join([j for j in k[0].split(' ')
if j not in {'var', '#', '-pron-', '-pron'}]),
k[1]) for k in kws]
keyphrases = [k for k in keyphrases if len(k[0]) > 0]
rake_dict = {}
# give each the maximal associated score
for k, v in keyphrases:
if k not in rake_dict.keys() or rake_dict[k] < v:
rake_dict[k] = v
# now calculate scores
scores = []
for ind in self.index_labels:
score = rake_dict[ind]
# Add one to prevent divide by zero errors
brownscore = brown_whole[ind] + 1
score /= brownscore
scores.append((ind, score))
# take the top half of all scored keyphrases
phrases = sorted(scores, key=lambda x: x[1], reverse=True)
phrases = phrases[:int(len(phrases)/2)]
index = list(map(lambda x: x[0].split(), phrases))
self.index = index
self.occurrence()
# then calculate cooccurence
t = self.cooc_2d()
return t
def cooc_for_text_notlower(self, text_name, nchapts,
n_allowed=None, groups=None):
"""
Same thing as ``self.cooc_for_text``, but without forcing lower case.
"""
self.clean_notlower(text_name, nchapts)
self.RAKE_keywords()
self.occurrence()
brown_whole = freq_wholelabel(self.index_labels, bwords)
# Do augmented rake-IDF scoring
# first re-run rake, just for convenience
extra_sl = ['example', 'counterexample', 'text', 'texts',
'undergraduate', 'chapter', 'definition', 'notation',
'proof', 'exercise', 'result']
sl = nlp(' '.join(stop_words + extra_sl))
sl = [w.lemma_ for w in sl]
rake = RAKE.Rake(sl + ['VAR', '#', '-pron-'])
text = '. '.join([' '.join(x) for x in self.text])
kws = rake.run(text, minCharacters=3, maxWords=4, minFrequency=5)
keyphrases = [(' '.join([j for j in k[0].split(' ')
if j not in {'var', '#', '-pron-', '-pron'}]),
k[1]) for k in kws]
keyphrases = [k for k in keyphrases if len(k[0]) > 0]
rake_dict = {}
# give each the maximal associated score
for k, v in keyphrases:
if k not in rake_dict.keys() or rake_dict[k] < v:
rake_dict[k] = v
# now calculate scores
scores = []
for ind in self.index_labels:
score = rake_dict[ind]
brownscore = brown_whole[ind] + 1
score /= brownscore
scores.append((ind, score))
# take the top half
phrases = sorted(scores, key=lambda x: x[1], reverse=True)
phrases = phrases[:int(len(phrases)/2)]
index = list(map(lambda x: x[0].split(), phrases))
self.index = index
self.occurrence()
# then calculate cooccurence
t = self.cooc_2d()
return t
def cont_config_graph(self):
"""
Generates a continuous configuration null graph with same degree
distribution/strength distribution as the extracted semantic network.
See paper for more details.
        Can only be run after running ``self.cooc_for_text``.
"""
nodes = list(self.graph.nodes)
degs = dict(self.graph.degree())
dT = sum(degs.values())
strengths = {n: sum([x[2]['weight']
for x in self.graph.edges(n, data=True)])
for n in nodes}
sT = sum(strengths.values())
# see if we've already computed the best-fit of the normed weights
if self.weight_dist is not None:
dist = self.weight_dist
else:
# calculate normed weights to determine distribution parameters
normedweights = []
for x in self.graph.edges(data=True):
s0, s1 = strengths[x[0]], strengths[x[1]]
d0, d1 = degs[x[0]], degs[x[1]]
s_uv = s0*s1/sT
d_uv = d0*d1/dT
normedweights.append(x[2]['weight']*d_uv/s_uv)
# Candidate distributions
DISTRIBUTIONS = [st.pareto, st.lognorm, st.levy, st.dweibull,
st.burr, st.fisk, st.loggamma, st.loglaplace,
st.powerlaw]
results = []
normedweights = np.array(normedweights)
# find the best fit
for dist in DISTRIBUTIONS:
try:
# attempt fit
pars = dist.fit(normedweights)
mle = dist.nnlf(pars, normedweights)
results.append((mle, dist.name, pars))
except:
pass
best_fit = sorted(results, key=lambda d: d[0])
print(best_fit[0])
dist = getattr(st, best_fit[0][1])(*best_fit[0][2])
self.weight_dist = dist
# construct the null graph
null_graph = np.zeros((len(nodes), len(nodes)))
for i in range(0, len(nodes)):
for j in range(i+1, len(nodes)):
d_uv = degs[nodes[i]]*degs[nodes[j]]/dT
if np.random.rand() < d_uv:
s_uv = strengths[nodes[i]]*strengths[nodes[j]]/sT
xi = self.weight_dist.rvs()
null_graph[i, j] = xi*s_uv/d_uv
null_graph[j, i] = xi*s_uv/d_uv
return null_graph
def random_index_null(self, return_index=False, return_otherstuff=False):
"""
Generates a random index null filtration for the text with a set of
randomly-selected words from the body of the text. See paper for more
details.
"""
tmp_index = self.index.copy()
tmp_index_labels = self.index_labels.copy()
tmp_cutoff = self.cutoff
tmp_first_cooc = self.dist_mat.copy()
tmp_cooc_tensor = self.cooc_mat.copy()
tmp_timeline = self.timeline.copy()
tmp_G = self.graph.copy()
tmp_first_occs = self.first_occs.copy()
extra_sl = ['example', 'counterexample', 'text', 'texts', 'undergraduate', 'chapter', 'definition', 'notation',
'proof', 'exercise', 'result']
sl = nlp(' '.join(stop_words + extra_sl))
sl = [w.lemma_ for w in sl]
textwds = set(sum(self.text, []))
textwds.difference_update(sl + ['VAR', '#', '-pron-', '-pron']) #
random_index = np.random.choice(list(textwds),
size=len(tmp_index),
replace=False)
self.index_labels = random_index.copy()
self.index = list(map(lambda x: x.split(), self.index_labels))
self.occurrence()
self.windows = self.text
t = self.cooc_2d()
new_first_occs = self.first_occs.copy()
new_cutoff = self.cutoff
self.index = tmp_index
self.index_labels = tmp_index_labels
self.first_occs = tmp_first_occs
self.graph = tmp_G
self.cutoff = tmp_cutoff
self.dist_mat = tmp_first_cooc
self.cooc_mat = tmp_cooc_tensor
self.timeline = tmp_timeline
if return_index:
return t, random_index
if return_otherstuff:
return t, new_first_occs, new_cutoff, random_index
else:
return t
def rnd_sent_ord_null(self):
"""
Generates a null filtration matrix for the text with the same index,
but with sentences randomly shuffled. See paper for more details.
"""
tmp_index = self.index.copy()
tmp_index_labels = self.index_labels.copy()
tmp_first_cooc = self.dist_mat.copy()
tmp_cooc_tensor = self.cooc_mat.copy()
tmp_timeline = self.timeline.copy()
tmp_G = self.graph.copy()
tmp_first_occs = self.first_occs.copy()
tmp_text = self.text.copy()
# shuffle the text
np.random.shuffle(self.text)
self.windows = self.text
t = self.cooc_2d()
new_cutoff = self.cutoff
self.index = tmp_index
self.index_labels = tmp_index_labels
self.first_occs = tmp_first_occs
self.graph = tmp_G
self.dist_mat = tmp_first_cooc
self.cooc_mat = tmp_cooc_tensor
self.timeline = tmp_timeline
self.text = tmp_text
return t, new_cutoff
def oaat_filtration(self, dist_mat):
"""
        Takes as input a filtration matrix ``dist_mat`` which does not
        necessarily introduce edges one at a time (i.e., it only has sentence-level
        granularity, so there may be some repeated values in the matrix)
and returns a filtration which does so. Useful for "unfurling"
a filtration. See paper for more details.
"""
oaat_mat = np.full_like(dist_mat, np.inf)
maxval = np.max(dist_mat[np.isfinite(dist_mat)]) # max value
count = 1
rel_timeline = {}
# iterate sentence by sentence
for v in range(1, int(maxval) + 1):
indices = list(zip(*np.where(dist_mat == v)))
# ensure we change (i, j) and (j, i) spots simultaneously
indices = list(set(tuple(sorted(i)) for i in indices))
nodes = [x for x in indices if x[0] == x[1]]
edges = [x for x in indices if x[0] != x[1]]
# randomly shuffle order of introduction for oaat
np.random.shuffle(nodes)
np.random.shuffle(edges)
# introduce all nodes first, in random order
for node in nodes:
oaat_mat[node] = count
rel_timeline[count] = self.index_labels[node[0]]
count += 1
# then introduce edges
for edge in edges:
for ind in [edge, tuple(reversed(edge))]:
oaat_mat[ind] = count
rel_timeline[count] = (self.index_labels[ind[0]],
self.index_labels[ind[1]])
count += 1
return oaat_mat, count, rel_timeline
def node_ordered_filtration(self):
"""
Creates a node-ordered filtration, where nodes and all their edges to
previously-introduced nodes are added in order of node introduction,
and for any given node, edges are added in random order. See paper for
more details.
"""
nd_dist_mat = np.full_like(self.dist_mat, np.inf)
fos = sorted(set(self.first_occs)) # unique values so we can randomize addition
count = 1
introduced_inds = []
# loop through values in first occurrences
for v in fos:
inds = np.where(self.first_occs == v)[0]
# randomly shuffle if there are multiple - if introduced in the same sentence,
# we want a random order
np.random.shuffle(inds)
for i in inds:
introduced_inds.append(i)
nd_dist_mat[i, i] = count
count += 1
# go through all the previously-introduced indices/concepts
allowed_prev = np.array(introduced_inds[:-1])
np.random.shuffle(allowed_prev)
# and introduce connections with those previous ones in a random order
for j in allowed_prev:
if self.dist_mat[i, j] != np.inf:
nd_dist_mat[i, j] = count
nd_dist_mat[j, i] = count
count += 1
return nd_dist_mat, count
def rnd_edge_filtration(self):
"""
Creates a random edge filtration, in which edges from the final network
are added in a random order. See paper for more details.
"""
G = self.graph
nodes = list(G.nodes)
edges = list(G.edges)
np.random.shuffle(edges)
A = np.full((len(nodes), len(nodes)), np.inf)
rel_timeline = {}
count = 1
for edge in edges:
i, j = nodes.index(edge[0]), nodes.index(edge[1])
# make sure corresponding nodes are introduced prior to introducing edge
for ind in [i, j]:
if A[ind, ind] == np.inf:
A[ind, ind] = count
count += 1
A[i, j] = count
A[j, i] = count
rel_timeline[count] = edge
count += 1
return A, count, rel_timeline
def get_barcode(self, filt_mat, maxdim=2):
"""
Calculates the persistent homology for a given filtration matrix
``filt_mat``, default dimensions 0 through 2. Wraps ripser.
"""
b = ripser(filt_mat, distance_matrix=True, maxdim=maxdim)['dgms']
return list(zip(range(maxdim+1), b))
"""
Old unused code regarding pointwise mutual information as a potential
alternative for judging relatedness of keyphrases/index phrases/concepts
# def PPMI(self):
# if self.cooc_mat is None:
# raise ValueError('No cooccurrence matrix')
# pmi = np.zeros((len(self.cooc_mat), len(self.cooc_mat)))
# for i in range(len(pmi)):
# for j in range(i, len(pmi)):
# num_i, num_j = self.cooc_mat[i, i], self.cooc_mat[j, j]
# num_cooc = self.cooc_mat[i, j]
# N = len(self.windows) # number of sentences
# npmi_val = np.log(num_cooc*N/(num_i*num_j))/(-np.log(num_cooc/N))
# pmi[i, j] = npmi_val
# pmi[j, i] = npmi_val
# pmi[np.isnan(pmi)] = 0
# pmi[pmi <= 0] = 0
# return pmi
# def PPMI_graph(self):
# m = self.PPMI()
# graph = nx.from_numpy_array(m)
# mapping = {i: label for i, label in enumerate(self.index_labels)}
# nx.relabel_nodes(graph, mapping, copy=False)
# graph.remove_edges_from(nx.selfloop_edges(graph))
# return graph
# def PPMI_filtration(self):
# m = self.PPMI()
# pmi_dis_mat = self.dist_mat.copy()
# pmi_dist_mat[m <= 0] = np.inf
# return pmi_dist_mat
"""
"""
Old unused code for a topological distance filtration null model
# topological distance filtration; adds nodes in by distance from first-introduced
# concept, where edge distance = 1/weight, since weight denotes strength.
# at each step, adds each node's edges to already-added nodes *only* in order of decreasing
# weight - from strongest to weakest connections
# some element of stochasticity due to ordering for equidistant/equal-weight nodes/edges
def topo_dist_filtration(self):
filt_mat = np.full((len(self.index_labels), len(self.index_labels)), np.inf) # eventual filtration
# edge pool
edges = set(self.graph.edges)
all_nodes = list(self.graph.nodes)
# nodes that have been added already
added_nodes = {self.index_labels[0]}
# get shortest path distances:
dists = nx.single_source_dijkstra_path_length(self.graph,
source=self.index_labels[0],
weight=lambda u, v, d: 1/d['weight']
if d['weight'] != 0 else None)
# all possible distance values - necessary for randomization in case of equality
vals = sorted(set(dists.values()))[1:]
n = 1
for val in vals:
# get nodes with the given distance value, randomly shuffle them
nodes = [n for n in dists.keys() if dists[n] == val]
np.random.shuffle(nodes)
for node in nodes:
# add node, get the edges associated with the added nodes
added_nodes.add(node)
# add node birth to filtration matrix
filt_mat[all_nodes.index(node), all_nodes.index(node)] = n
n += 1
# now look at the allowed edges
allowed_edges = [e for e in edges if e[0] in added_nodes and e[1] in added_nodes]
edges.difference_update(allowed_edges)
# get unique weight values - again important for randomization
weight_vals = sorted(set([self.graph.edges[ae]['weight'] for ae in allowed_edges]), reverse=True)
for wv in weight_vals:
wv_edges = [e for e in allowed_edges if self.graph.edges[e]['weight'] == wv]
np.random.shuffle(wv_edges)
for edge in wv_edges:
i, j = all_nodes.index(edge[0]), all_nodes.index(edge[1])
filt_mat[i, j] = n
filt_mat[j, i] = n
n += 1
return filt_mat, n
"""
"""
Old unused code for a connected filtration (where edges contributed to a
single connected component). May be incorrect.
# creates a filtration matrix where, starting from a random node, edges are added
# that only contribute to the single connected component (until the end, when the
# stragglers are added in)
# old and not used
# def connected_filtration(self):
# G = self.graph
# nodes = list(G.nodes)
# edges = list(G.edges)
# first_node = np.random.choice(nodes)
# allowed_nodes = {first_node}
# A = np.full((len(nodes), len(nodes)), np.inf)
# n = 1 # value we set the edge to; increment after each one
# while edges != []:
# allowed_edges = list(filter(lambda e: not set(e).isdisjoint(allowed_nodes), edges))
# if allowed_edges == []:
# allowed_edges = edges
# edge = random.choice(allowed_edges)
# edges.remove(edge)
# allowed_nodes.update(set(edge))
# i, j = nodes.index(edge[0]), nodes.index(edge[1])
# A[i, j] = n
# A[j, i] = n
# n += 1
# for i in range(len(nodes)):
# A[i, i] = 0
# print("Number of edges: " + str(n) + " (for filtration cutoff)")
# return A, n
"""
"""
Old unused code for a degree filtration, where nodes are added in order of
degree and their edges are added in order of decreasing degree
(so "strongest"/"most important" connections and topics are added earlier)
# creates a filtration matrix based on degree of nodes
# adds nodes in order of decreasing degree, and edges in order of decreasing weight
def degree_filtration(self):
G = self.graph
nodes = list(G.nodes)
edges = set(G.edges(data='weight'))
degs = sorted(list(dict(nx.degree(G)).items()), key=lambda x: x[1], reverse=True)
A = np.full((len(nodes),len(nodes)), np.inf)
n = 1
#print(degs)
while len(edges) > 0:
node = degs.pop(0)[0]
allowed_edges = list(filter(lambda e: node in e, edges))
edges = edges.difference(set(allowed_edges))
# random ordering
allowed_edges = sorted(allowed_edges, key=lambda x: x[2], reverse=True)
#print(allowed_edges)
for edge in allowed_edges:
i, j = nodes.index(edge[0]), nodes.index(edge[1])
A[i, j] = n
A[j, i] = n
n += 1
for i in range(len(nodes)):
A[i, i] = 0
return A, n
"""
"""
Old unused code for plotting barcodes, and annotating with the words/edges
representing the born and dead cycles.
def plot_barcode(self, bars, dims=[1,2], length=None, k=None, labels=None):
plt.figure(figsize=(12/2.54, 4), dpi=300)
colors = ['b', 'c', 'g']
count = 1
if length is not None:
cutoff = length
elif labels is not None:
cutoff = max(labels.keys()) + 1
else:
cutoff = self.cutoff
bars = dict(bars)
has_inf = False # notes whether there are infinitely-persisting cycles
for d in dims:
try:
bn = bars[d]
except KeyError:
print('Dimension not in barcode!')
color = colors.pop(0) #better not have any more than 3 dimensions
bn = sorted(bn, key=lambda x:x[0])
for b, i in zip(bn, range(len(bn))):
if b[1] == np.inf:
has_inf = True
b = (b[0], 1.5*cutoff) # arbitrary, so it overhangs
if i == 0:
plt.plot(b, [count, count], color=color,
label='Dimension {}'.format(d))
else:
plt.plot(b, [count, count], color=color,
label=None)
if labels == 'edge':
if b[1] - b[0] > cutoff/10 or b[1] == 1.5*cutoff:
f = lambda e: e[0] + '->' + e[1]
edge_1 = self.rel_timeline[b[0]]
plt.annotate(f(edge_1), (b[0]-1, count-0.4), horizontalalignment='right',fontsize=8)
if b[1] != 1.5*cutoff:
edge_2 = self.rel_timeline[b[1]]
plt.annotate(f(edge_2), (b[1]+1, count-0.4), horizontalalignment='left',fontsize=8)
elif labels != None: # requires a dict of edges
if b[1] - b[0] > cutoff/10 or b[1] == 1.5*cutoff:
f = lambda e: e[0] + '->' + e[1]
edge_1 = labels[b[0]]
plt.annotate(f(edge_1), (b[0]-1, count-0.4), horizontalalignment='right',fontsize=8)
if b[1] != 1.5*cutoff:
edge_2 = labels[b[1]]
plt.annotate(f(edge_2), (b[1]+1, count-0.4), horizontalalignment='left',fontsize=8)
count += 1
if has_inf:
plt.xlim(0, 1.1*cutoff)
#plt.axvline(x=cutoff, color='r', linestyle='--', label='End of Filtration')
if k is not None:
plt.axvline(x=k, color='m', linestyle='--', label='End of Connected Component')
plt.legend(loc='lower right', fontsize=8)
plt.show()
"""
"""
Old, unused code wrapping barcode generation and plot for the expositional
network
# Plots the barcode for the expositional ordering network
def plot_expos_barcode(self, dims=[1,2], labels='edge'):
# if self.ph_dict exists, it'll use that; otherwise, it'll make it
bars, _ = self.get_barcode(dims=dims)
self.plot_barcode(bars, dims=dims, labels=labels)
"""
|
"""
Contains the views and routes used by the Flask-webserver.
"""
from flask import (render_template, send_from_directory, Response, session,
redirect, request, url_for, flash)
from .app import app
import os
import functools
from playhouse.flask_utils import object_list, get_object_or_404
from .models import Entry
from .logic import get_static_graph_data, get_global_data_config
# pylint: disable=unused-argument
@app.errorhandler(404)
def not_found(exc):
"""
Called when a resource could not be found - a 404 has been raised.
"""
return Response("<h3>Not found</h3>"), 404
# pylint: enable=unused-argument
def login_required(fn):
"""
A decorator to be used on routes that require the user to be logged in.
"""
@functools.wraps(fn)
def inner(*args, **kwargs):
"""
The inner function of the decorator.
"""
if session.get("logged_in"):
return fn(*args, **kwargs)
return redirect(url_for("login", next=request.path))
return inner
@app.context_processor
def dropdown_processor():
"""
Context processor that makes the get_dropdown_data function
available to all templates.
"""
def get_dropdown_data():
"""
Returns dropdown data.
"""
return get_global_data_config()
return dict(get_dropdown_data=get_dropdown_data)
@app.route("/favicon.ico")
def favicon():
"""
Route in charge of finding the favicon.ico.
"""
return send_from_directory(os.path.join(app.root_path, "static", "img"),
"favicon.ico",
mimetype="image/vnd.microsoft.icon")
@app.route("/login", methods=["GET", "POST"])
def login():
"""
Route for logging in.
"""
next_url = request.args.get("next") or request.form.get("next")
if request.method == "POST" and request.form.get("password"):
password = request.form.get("password")
if password == app.config["ADMIN_PASSWORD"]:
session["logged_in"] = True
session.permanent = True
flash("You are now logged in.", "success")
return redirect(next_url or url_for("index"))
else:
flash("Incorrect password.", "danger")
return render_template("login.html", next_url=next_url)
@app.route("/logout", methods=["GET", "POST"])
def logout():
"""
Route for logging out.
"""
if request.method == "POST":
session.clear()
return redirect(url_for("login"))
return render_template("logout.html", page_title="Log out")
@app.route("/about")
def about():
"""
Renders the about page.
"""
return render_template("about.html", page_title="About")
@app.route("/")
@app.route("/index")
def index():
"""
Renders the index page.
"""
return render_template("index.html")
@app.route("/live")
def live():
"""
Renders the live page.
"""
return render_template("live.html", replay_available=False,
page_title="Live")
@app.route("/map")
def map():
"""
Renders a live map page.
"""
return render_template("map.html", page_title="Map")
@app.route("/graph/<data_id>")
def graph(data_id):
"""
Renders the graph page using a ``data_id``.
"""
json_data = get_static_graph_data(data_id, force=True)
return render_template("graph.html", graph_data=json_data, data_id=data_id,
replay_available=True, page_title="Graph")
@app.route("/replay/<data_id>")
def replay(data_id):
"""
Renders the replay page using a ``data_id``.
"""
return render_template("replay.html", data_id=data_id, page_title="Replay")
@app.route("/blog/")
@app.route("/blog/index")
def blog_index():
"""
Renders the index of the blog.
"""
query = Entry.public().order_by(Entry.timestamp.desc())
return object_list("blog_index.html", query, check_bounds=False,
page_title="Blog")
@app.route("/blog/create", methods=["GET", "POST"])
@login_required
def blog_create():
"""
Renders the 'create an entry' page for the blog.
"""
if request.method == "POST":
if request.form.get("title") and request.form.get("content"):
# pylint: disable=no-member
entry = Entry.create(
title=request.form["title"],
content=request.form["content"],
published=request.form.get("published") or False)
# pylint: enable=no-member
flash("Entry created successfully.", "success")
if entry.published:
return redirect(url_for("blog_detail", slug=entry.slug))
else:
return redirect(url_for("blog_edit", slug=entry.slug))
else:
flash("Title and Content are required.", "danger")
return render_template("blog_create.html", page_title="Create blog entry")
@app.route("/blog/drafts")
@login_required
def blog_drafts():
"""
Renders a page with entry drafts.
"""
query = Entry.draft().order_by(Entry.timestamp.desc())
return object_list("blog_index.html", query, check_bounds=False,
page_title="Drafts")
@app.route("/blog/detail/<slug>")
@app.route("/blog/<slug>")
def blog_detail(slug):
"""
Renders a blog entry.
"""
if session.get("logged_in"):
# pylint: disable=no-member
query = Entry.select()
# pylint: enable=no-member
else:
query = Entry.public()
entry = get_object_or_404(query, Entry.slug == slug)
return render_template("blog_detail.html", entry=entry,
page_title=entry.title)
@app.route("/blog/<slug>/edit", methods=["GET", "POST"])
@login_required
def blog_edit(slug):
"""
Renders the edit page of a blog entry.
"""
entry = get_object_or_404(Entry, Entry.slug == slug)
if request.method == "POST":
if request.form.get("title") and request.form.get("content"):
entry.title = request.form["title"]
entry.content = request.form["content"]
entry.published = request.form.get("published") or False
entry.save()
flash("Entry saved successfully.", "success")
if entry.published:
return redirect(url_for("blog_detail", slug=entry.slug))
else:
return redirect(url_for("blog_edit", slug=entry.slug))
else:
flash("Title and Content are required.", "danger")
return render_template("blog_edit.html", entry=entry,
page_title="Edit entry")
|
#!/usr/bin/env python
# coding: utf-8
import os
import sys
import multiprocessing as mp
import threading
import time
import pandas as pd
import numpy as np
import scipy.stats as st
from tqdm import tqdm
#sys.path.append('./../dgonza26/infinity-mirror')
sys.path.append('./../..')
from src.graph_comparison import GraphPairCompare
from src.graph_stats import GraphStats
from src.utils import load_pickle, ColorPrint
def compute_graph_stats(root):
graph_stats = [GraphStats(graph=node.graph, run_id=1) for node in [root] + list(root.descendants)]
return graph_stats
def absolute(graph_stats):
for gs in graph_stats[1:]:
comparator = GraphPairCompare(graph_stats[0], gs)
dist = comparator.js_distance()
yield dist
def sequential(graph_stats):
prev = graph_stats[0]
for curr in graph_stats[1:]:
comparator = GraphPairCompare(prev, curr)
prev = curr
dist = comparator.js_distance()
yield dist
def absolute_js(graph_stats):
abs_js = [x for x in absolute(graph_stats)]
return abs_js
def sequential_js(graph_stats):
seq_js = [x for x in sequential(graph_stats)]
return seq_js
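# absolute_js compares every later generation against the root graph, whereas
# sequential_js compares each generation against its immediate predecessor.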
def length_chain(root):
return len(root.descendants)
def flatten(L):
return [item for sublist in L for item in sublist]
def mkdir_output(path):
if not os.path.isdir(path):
try:
os.mkdir(path)
except OSError:
            print(f'ERROR: could not make directory {path} for some reason')
return
def compute_stats(js):
padding = max(len(l) for l in js)
for idx, l in enumerate(js):
while len(js[idx]) < padding:
js[idx] += [np.NaN]
mean = np.nanmean(js, axis=0)
ci = []
for row in np.asarray(js).T:
ci.append(st.t.interval(0.95, len(row)-1, loc=np.mean(row), scale=st.sem(row)))
return np.asarray(mean), np.asarray(ci)
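# Minimal usage sketch (hypothetical values): shorter chains are NaN-padded before the
# column-wise mean and 95% t-interval are computed.
# mean, ci = compute_stats([[0.1, 0.2, 0.3], [0.2, 0.4]])
# mean.shape == (3,) and ci.shape == (3, 2)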
def construct_table(abs_js, seq_js, model):
if abs_js != []:
abs_mean, abs_ci = compute_stats(abs_js)
abs_lower = abs_ci[:, 0]
abs_upper = abs_ci[:, 1]
else:
abs_mean = []
abs_lower = []
abs_upper = []
if seq_js != []:
seq_mean, seq_ci = compute_stats(seq_js)
seq_lower = seq_ci[:, 0]
seq_upper = seq_ci[:, 1]
else:
seq_mean = []
seq_lower = []
seq_upper = []
gen = [x + 1 for x in range(len(abs_mean))]
rows = {'model': model, 'gen': gen, 'abs_mean': abs_mean, 'abs-95%': abs_lower, 'abs+95%': abs_upper, 'seq_mean': seq_mean, 'seq-95%': seq_lower, 'seq+95%': seq_upper}
df = pd.DataFrame(rows)
return df
def get_filenames(base_path, dataset, models):
filenames = []
print(f'loading {dataset} {models[0]}')
for model in models:
path = os.path.join(base_path, dataset, model)
for subdir, dirs, files in os.walk(path):
for filename in files:
if 'seq' in filename and 'rob' not in filename:
#print(f'loading {filename}')
filenames.append(os.path.join(subdir, filename))
# yield load_pickle(os.path.join(subdir, filename))
ColorPrint.print_bold(f"Found {len(filenames)} graph files to be loaded.")
return filenames
def load_graph(filename):
# todo: ask about the slice
root = load_pickle(filename)
return root
def parallel_thing(root):
graph_stats = compute_graph_stats(root)
local_abs_js = absolute_js(graph_stats)
local_seq_js = sequential_js(graph_stats)
return [local_abs_js, local_seq_js]
def driver():
pass
if __name__ == '__main__':
base_path = '/data/infinity-mirror'
dataset = 'eucore'
models_list = ['BTER']
num = 10
for model in models_list:
models = [model]
output_path = os.path.join(base_path, dataset, models[0], 'jensen-shannon')
mkdir_output(output_path)
filenames = get_filenames(base_path, dataset, models)
graphs_list = []
results_lock = threading.Lock()
# pandas dict variables
abs_js = []
seq_js = []
        read_pbar = tqdm(total=len(filenames), desc="Reading Files", position=0, leave=False)
        work_pbar = tqdm(total=len(filenames), desc="Processing Files", position=1, leave=True)
active_reads_Lock = threading.Lock()
active_reads = 0
pending_work_Lock = threading.Lock()
pending_work = 0
active_work_Lock = threading.Lock()
active_work = 0
def read_update(result):
global active_reads
global pending_work
global graphs_list
with active_reads_Lock:
active_reads -= 1
with pending_work_Lock:
pending_work += 1
graphs_list.append(result)
read_pbar.update()
def work_update(result):
# store results in global lists
with results_lock:
global abs_js
global seq_js
#global M
abs_js.append(result[0])
seq_js.append(result[1])
# update work status variables
global active_work
with active_work_Lock:
active_work -= 1
work_pbar.update()
work_pool = mp.Pool(num)
with mp.Pool(num) as read_pool:
while filenames or graphs_list:
                if active_reads + pending_work + active_work <= num and filenames:
                    filename = filenames.pop(0)  # take the first item
                    with active_reads_Lock:
                        active_reads += 1
                    read_pool.apply_async(load_graph, [filename], callback=read_update)
                # dispatch every graph that has finished loading; pop before submitting
                # so the list is never mutated while it is being iterated
                while graphs_list:
                    graph = graphs_list.pop(0)
                    with active_work_Lock:
                        active_work += 1
                    work_pool.apply_async(parallel_thing, [graph], callback=work_update)
                    with pending_work_Lock:
                        pending_work -= 1
                #ColorPrint.print_blue(f'Sleeping {active_reads}, {pending_work}, {active_work}')
                time.sleep(10)
# wait until everything is off of the queue
while active_work > 0:
time.sleep(num)
work_pool.close()
df = construct_table(abs_js, seq_js, models[0])
df.to_csv(f'{output_path}/{dataset}_{models[0]}_js.csv', float_format='%.7f', sep='\t', index=False)
|
# Time: O(n)
# Space: O(1)
# 995
# In an array A containing only 0s and 1s, a K-bit flip consists of choosing a (contiguous) subarray
# of length K and simultaneously changing every 0 in the subarray to 1, and every 1 in the subarray to 0.
#
# Return the minimum number of K-bit flips required so that there is no 0 in the array. If it is not possible,
# return -1.
class Solution(object):
# When we flip a subarray like A[i], A[i+1], ..., A[i+K-1]
# we can instead flip our target. This is critical to avoid O(n*K) time complexity,
# (swap K items for every eligible i).
# And at position i+K, flip back our target.
def minKBitFlips(self, A, K): # USE THIS
"""
:type A: List[int]
:type K: int
:rtype: int
"""
n = len(A)
flip, ans = [False]*n, 0 # flip array stores whether this elem will flip
        target = 0  # when target is encountered, we must flip
        for i in range(n):
if i>=K and flip[i-K]:
target = 1 - target # compensate and change target back
if A[i] == target: # or A[i] ^ target == 0
if i > n-K:
return -1
flip[i] = True
ans += 1
target = 1 - target # for next K-1 elems, will flip 1 to 0,
# so next 1 is bad and needs flip
return ans
    def minKBitFlips_kamyu(self, A, K):  # not fully understood
        result, curr = 0, 0
        for i in range(len(A)):
if i >= K:
curr -= A[i-K]//2
if curr & 1 ^ A[i] == 0: # bit and > bit xor > bit or
if i+K > len(A):
return -1
A[i] += 2
curr, result = curr+1, result+1
return result
print(Solution().minKBitFlips([0,0,0,1,0,1,1,0], 3)) # 3
print(Solution().minKBitFlips([0, 1, 0], 1)) # 2
print(Solution().minKBitFlips([1, 1, 0], 2)) # -1
|
from PIL import Image
def cat(im1: Image.Image, im2: Image.Image, dim: tuple, append_on: tuple):
'''
Concatenates two images together
im1 : PIL.Image of the first image
im2 : PIL.Image of the second image
dim : Dimensions of the output image
append_on : Dimensions on where to paste the second image to the first image
'''
dst = Image.new('RGB', dim)
dst.paste(im1, (0, 0))
dst.paste(im2, append_on)
return dst
def merge(imagefp1, imagefp2, outputfp, attach='h'):
'''
Merges two images together
imagefp1: File path to the first image
imagefp2: File path to the second image
outputfp: File path to the output image
attach: Attach images horizontally (landscape) or vertically (portrait) mode
'''
im1 = Image.open(imagefp1)
im2 = Image.open(imagefp2)
    match attach:
        case 'h':
            dim = (im1.width + im2.width, im1.height)
            append_on = (im1.width, 0)
        case 'v':
            dim = (im1.width, im1.height + im2.height)
            append_on = (0, im1.height)
        case _:
            raise ValueError("attach must be 'h' (horizontal) or 'v' (vertical)")
    dst = cat(im1, im2, dim, append_on)
    dst.save(outputfp)
    return dst
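# Minimal usage sketch (hypothetical file names):
# merge("left.png", "right.png", "combined.png", attach="h")   # side by side
# merge("top.png", "bottom.png", "stacked.png", attach="v")    # stacked vertically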
|
# -*- coding: utf-8 -*-
"""
Copyright 2022 Mitchell Isaac Parker
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib_venn import venn2
from .color import change_hex_alpha, get_lst_colors, gray_hex, black_hex
from .path import append_file_path
from .table import (
format_val,
make_dict,
lst_col,
build_label_dict,
mask_equal,
get_ncols,
)
from .col import pdb_id_col
grid_hex = change_hex_alpha(gray_hex, 0.25)
def prep_plot_col(
df,
col,
color_palette=None,
rename_vals=None,
order_lst=None,
label_count=False,
count_chain=True,
count_pdb=False,
count_cf=False,
return_palette=True,
):
df[col] = df[col].map(str)
for index in list(df.index.values):
df.at[index, col] = str(df.at[index, col]).split(" (N=")[0]
if rename_vals is not None:
if type(rename_vals) == dict:
rename_dict = rename_vals
elif type(rename_vals) == list:
val_lst = lst_col(df, col, unique=True, return_str=True)
rename_lst = format_val(rename_vals, return_str=True)
rename_dict = make_dict(val_lst, rename_lst)
df[col] = df[col].map(rename_dict)
if order_lst is not None:
order_lst = format_val(order_lst, return_str=True)
df = mask_equal(df, col, order_lst)
return_lst = order_lst.copy()
else:
return_lst = lst_col(df, col, unique=True, return_str=True)
if label_count:
col_dict = build_label_dict(
df,
col,
return_str=True,
count_chain=count_chain,
count_pdb=count_pdb,
count_cf=count_cf,
)
df[col] = df[col].map(col_dict)
for i, row in enumerate(return_lst):
return_lst[i] = col_dict[row]
if type(color_palette) == dict:
return_dict = dict()
for val in return_lst:
return_dict[val] = color_palette[val.split(" (")[0]]
else:
return_dict = color_palette
if return_palette:
return df, return_lst, return_dict
else:
return (
df,
return_lst,
)
def make_legend_plot(
plot_path,
legend_dict,
plot_width=2,
plot_height=2,
font_size=7,
marker_shape="s",
marker_size=3,
legend_cols=None,
legend_title=None,
color_text=False,
):
fig, ax = plt.subplots()
fig.set_size_inches(plot_width, plot_height)
handles = [
plt.plot(
[],
[],
marker=marker_shape,
ls="",
markersize=marker_size,
markeredgewidth=0,
markerfacecolor=color,
label=label,
color=color,
)[0]
for label, color in legend_dict.items()
]
if legend_cols is None:
legend_cols = get_ncols(list(legend_dict.keys()))
label_color = None
if color_text:
label_color = "linecolor"
legend = ax.legend(
handles=handles,
fontsize=font_size,
ncol=legend_cols,
frameon=False,
loc="center",
bbox_to_anchor=(0.5, 0.5),
title=legend_title,
labelcolor=label_color,
)
if legend_title is not None:
plt.setp(legend.get_title(), fontsize=font_size)
bbox_extra_artists = (legend,)
plt.axis("off")
append_file_path(plot_path)
plt.savefig(
plot_path,
format="pdf",
bbox_extra_artists=bbox_extra_artists,
bbox_inches="tight",
pad_inches=0.0,
dpi=600,
)
plt.close()
def make_venn_plot(
lst_1,
lst_2,
plot_path=None,
label_1=None,
label_2=None,
color_1=None,
color_2=None,
color_inter=None,
count_color="black",
plot_title=None,
plot_height=2,
plot_width=2,
font_size=7,
alpha=0.75,
):
if color_1 is None:
color_1 = gray_hex
if color_2 is None:
color_2 = gray_hex
if color_inter is None:
color_inter = black_hex
fig, ax = plt.subplots()
fig.set_size_inches(plot_width, plot_height)
total = len(set(lst_1 + lst_2))
v = venn2(
[set(lst_1), set(lst_2)],
set_labels=(label_1, label_2),
set_colors=(color_1, color_2),
alpha=alpha,
subset_label_formatter=lambda x: f"{x}\n({(x/total):1.0%})",
ax=ax,
)
bbox_extra_artists = tuple()
for text in v.set_labels:
text.set_fontsize(font_size)
bbox_extra_artists += (text,)
for text in v.subset_labels:
if text is not None:
if text.get_text() == "0\n(0%)":
text.set_text("")
else:
text.set_fontsize(font_size * 0.75)
text.set_color(count_color)
bbox_extra_artists += (text,)
if plot_title is not None:
title = fig.suptitle(plot_title, fontsize=font_size)
bbox_extra_artists += (title,)
if plot_path is None:
return fig
else:
append_file_path(plot_path)
plt.savefig(
plot_path,
format="pdf",
bbox_extra_artists=bbox_extra_artists,
bbox_inches="tight",
pad_inches=0.0,
dpi=600,
)
plt.close()
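# Minimal usage sketch (hypothetical lists and output path):
# make_venn_plot([1, 2, 3], [2, 3, 4], plot_path="venn.pdf", label_1="Set A", label_2="Set B")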
def make_stacked_barplot(
plot_df,
col_col,
hue_col,
plot_path,
col_order=None,
rename_col=None,
col_count=False,
hue_order=None,
rename_hue=None,
hue_count=False,
hue_palette=None,
x_str=None,
y_str=None,
font_size=7,
plot_height=2,
plot_width=2,
line_width=0.5,
show_legend=True,
legend_pad=10,
legend_cols=None,
bar_width=0.5,
bar_alpha=1,
count_chain=True,
count_pdb=False,
count_cf=False,
id_col=None,
show_barh=False,
):
df = plot_df.copy(deep=True)
    if id_col is None:
        id_column = pdb_id_col
    else:
        id_column = id_col
df, col_lst = prep_plot_col(
df,
col_col,
rename_vals=rename_col,
order_lst=col_order,
label_count=col_count,
count_chain=count_chain,
count_pdb=count_pdb,
count_cf=count_cf,
return_palette=False,
)
df, hue_lst, hue_palette = prep_plot_col(
df,
hue_col,
color_palette=hue_palette,
rename_vals=rename_hue,
order_lst=hue_order,
label_count=hue_count,
count_chain=count_chain,
count_pdb=count_pdb,
count_cf=count_cf,
)
hue_color_dict = get_lst_colors(hue_lst, palette=hue_palette, return_dict=True)
sns.set_context("paper")
sns.set_style("whitegrid")
sns.set_palette(list(hue_color_dict.values()))
df = pd.pivot_table(
df,
index=col_col,
columns=hue_col,
values=id_column,
aggfunc="nunique",
).fillna(0)
df = df.reindex(index=col_lst)
df = df.reindex(columns=hue_lst)
if show_barh:
plot_kind = "barh"
grid_axis = "y"
if x_str is None:
x_str = "% Structures"
if y_str is None:
y_str = col_col
else:
plot_kind = "bar"
grid_axis = "x"
if x_str is None:
x_str = col_col
if y_str is None:
y_str = "% Structures"
df.apply(lambda x: x / sum(x) * 100, axis=1).plot(
kind=plot_kind,
stacked=True,
linewidth=0,
width=bar_width,
alpha=bar_alpha,
figsize=(plot_width, plot_height),
legend=show_legend,
)
if show_barh:
plt.xticks(fontsize=font_size * 0.75)
else:
plt.xticks(fontsize=font_size * 0.75, rotation=45, ha="right")
plt.yticks(fontsize=font_size * 0.75)
plt.grid(axis=grid_axis, color=grid_hex, linewidth=line_width)
x_label = plt.xlabel(x_str, fontsize=font_size)
y_label = plt.ylabel(y_str, fontsize=font_size)
bbox_extra_artists = (x_label, y_label)
if show_legend or type(show_legend) == dict:
if legend_cols is None:
legend_cols = get_ncols(hue_lst)
if type(show_legend) == dict:
handles = [
plt.plot(
[],
[],
marker="o",
ls="",
markeredgewidth=0,
markersize=3,
markerfacecolor=show_legend[hue],
label=hue,
color=hue,
)[0]
for hue in list(show_legend.keys())
]
else:
handles = [
plt.plot(
[],
[],
marker="o",
ls="",
markeredgewidth=0,
markersize=3,
markerfacecolor=hue_color_dict[hue],
label=hue,
)[0]
for hue in hue_lst
]
legend = plt.legend(
handles=handles,
fontsize=font_size * 0.75,
ncol=legend_cols,
loc="upper center",
frameon=False,
bbox_to_anchor=(0.5, 0),
borderaxespad=legend_pad,
)
bbox_extra_artists += (legend,)
sns.despine(left=True)
plt.savefig(
plot_path,
format="pdf",
bbox_inches="tight",
bbox_extra_artists=bbox_extra_artists,
dpi=600,
)
plt.close()
|
import pytest
from dask.distributed import Client
from dask_kubernetes.experimental import KubeCluster
@pytest.fixture
def cluster(kopf_runner, docker_image):
with kopf_runner:
with KubeCluster(name="foo", image=docker_image) as cluster:
yield cluster
def test_kubecluster(cluster):
with Client(cluster) as client:
client.scheduler_info()
cluster.scale(1)
assert client.submit(lambda x: x + 1, 10).result() == 11
def test_multiple_clusters(kopf_runner, docker_image):
with kopf_runner:
with KubeCluster(name="bar", image=docker_image) as cluster1:
with Client(cluster1) as client1:
assert client1.submit(lambda x: x + 1, 10).result() == 11
with KubeCluster(name="baz", image=docker_image) as cluster2:
with Client(cluster2) as client2:
assert client2.submit(lambda x: x + 1, 10).result() == 11
def test_multiple_clusters_simultaneously(kopf_runner, docker_image):
with kopf_runner:
with KubeCluster(name="fizz", image=docker_image) as cluster1, KubeCluster(
name="buzz", image=docker_image
) as cluster2:
with Client(cluster1) as client1, Client(cluster2) as client2:
assert client1.submit(lambda x: x + 1, 10).result() == 11
assert client2.submit(lambda x: x + 1, 10).result() == 11
def test_multiple_clusters_simultaneously_same_loop(kopf_runner, docker_image):
with kopf_runner:
with KubeCluster(name="fizz", image=docker_image) as cluster1, KubeCluster(
name="buzz", image=docker_image, loop=cluster1.loop
) as cluster2:
with Client(cluster1) as client1, Client(cluster2) as client2:
assert cluster1.loop is cluster2.loop is client1.loop is client2.loop
assert client1.submit(lambda x: x + 1, 10).result() == 11
assert client2.submit(lambda x: x + 1, 10).result() == 11
def test_cluster_from_name(kopf_runner, docker_image):
with kopf_runner:
with KubeCluster(name="abc", image=docker_image) as firstcluster:
with KubeCluster.from_name("abc") as secondcluster:
assert firstcluster == secondcluster
def test_additional_worker_groups(kopf_runner, docker_image):
with kopf_runner:
with KubeCluster(
name="additionalgroups", n_workers=1, image=docker_image
) as cluster:
cluster.add_worker_group(name="more", n_workers=1)
with Client(cluster) as client:
client.wait_for_workers(2)
assert client.submit(lambda x: x + 1, 10).result() == 11
cluster.delete_worker_group(name="more")
|
# Top k (soft criteria)
k = 10
# Max top (hard criteria)
maxtop = 3
# Number of examples per image
g = 8
# Run through the adjacency matrix
softcorrect = 0
hardcorrect = 0
totalnum = 0
for j, i in enumerate(F):
    if ((j - 1) % g) == 0:
        continue
    topk = i.argsort()[-k:]
    # Soft criteria
    if j // g in topk // g:
        softcorrect += 1
    totalnum += 1
    # Hard criteria
    hardcriteria = sum(1 for jj in (j // g == topk[-maxtop:] // g) if jj)
    if hardcriteria == maxtop:
        hardcorrect += 1
# Print out results
print("Not considering disguised handwriting")
print("Top %d (soft criteria) = %f" % (k, (softcorrect + 0.0) / totalnum))
print("Top %d (hard criteria) = %f" % (k, (hardcorrect + 0.0) / totalnum / maxtop))
|
# -*- coding: utf-8 -*-
import os.path
import shutil
from django.db.models import Q
from django.urls import reverse_lazy
from django.http import HttpResponseRedirect
from django.views.generic import CreateView, ListView, DeleteView
from django.core.paginator import Paginator
from tool.forms.xray_from import XrayTaskForm
from ..models import CheckTask
from util.loggers import logger
from util.loginmixin import LoginMixin
class XrayTaskListView(ListView):
"""
扫描信息信息列表 视图
"""
model = CheckTask
context_object_name = 'check'
template_name = "tool/xray_injection/xray_list.html"
search_value = ""
order_field = "-createtime"
created_by = ''
    pagenum = 5  # number of items per page
def get_queryset(self):
search = self.request.GET.get("search")
order_by = self.request.GET.get("orderby")
filter_state = self.request.GET.get("created_by")
if order_by:
# check_pro = CheckTask.objects.all().order_by(order_by)
check_pro = CheckTask.objects.exclude(scan_type='check').order_by(order_by)
self.order_field = order_by
else:
# check_pro = CheckTask.objects.all().order_by(self.order_field)
check_pro = CheckTask.objects.exclude(scan_type='check').order_by(self.order_field)
        if filter_state:
            if filter_state == '有注入':
                # tasks whose task_report is neither empty nor 'NO' (an injection was found)
                check_pro = CheckTask.objects.exclude(scan_type='check', ).filter(
                    ~Q(task_report='') & ~Q(task_report='NO'))
            else:
                check_pro = CheckTask.objects.exclude(scan_type='check', ).filter(
                    Q(task_report='NO') | Q(task_report=''))
            self.created_by = filter_state
if search:
            # filter by task name or creator
check_pro = check_pro.filter(
Q(check_name__icontains=search) | Q(creator__icontains=search))
self.search_value = search
self.count_total = check_pro.count()
paginator = Paginator(check_pro, self.pagenum)
page = self.request.GET.get('page')
project_dev = paginator.get_page(page)
return project_dev
def get_context_data(self, *args, **kwargs):
context = super(XrayTaskListView, self).get_context_data(*args, **kwargs)
context['count_total'] = self.count_total
context['search'] = self.search_value
context['orderby'] = self.order_field
context['objects'] = self.get_queryset()
context['created_by'] = self.created_by
return context
class XrayTaskCreateView(LoginMixin, CreateView):
"""
添加扫描任务 视图
"""
model = CheckTask
form_class = XrayTaskForm
template_name = "tool/xray_injection/xray_add.html"
def get_form_kwargs(self):
        # Ensure the current `request` is provided to XrayTaskForm.
kwargs = super(XrayTaskCreateView, self).get_form_kwargs()
kwargs.update({'request': self.request})
return kwargs
class XrayTaskDeleteView(LoginMixin, DeleteView):
"""
删除扫描任务
"""
# template_name_suffix='_delete'
template_name = "tool/xray_injection/xray_delete.html"
model = CheckTask
success_url = reverse_lazy('xraylist')
def delete(self, request, *args, **kwargs):
"""
Call the delete() method on the fetched object and then redirect to the
success URL.
"""
self.object = self.get_object()
success_url = self.get_success_url()
        file_dir = self.object.task_report
        self.object.delete()
        # remove the report file from disk
        try:
            if os.path.exists(file_dir):
                # shutil.rmtree(file_dir)  # use this to remove a whole report directory
                os.remove(file_dir)
                logger.info(f'{file_dir} report file deleted successfully')
        except Exception as e:
            logger.error(e)
        return HttpResponseRedirect(success_url)
|
from django.conf.urls import url
from django.conf import settings
from django.conf.urls.static import static
from . import views
from django.conf.urls import include
from django.contrib import admin
from django.contrib.auth import views as auth_views
urlpatterns = [
url(r'^$', views.recipe_list, name='recipe_list'),
url(r'^recipe/(?P<pk>\d+)/$', views.recipe_detail, name='recipe_detail'),
url(r'^recipe/new/$', views.recipe_new, name='recipe_new'),
url(r'^recipe/(?P<pk>\d+)/edit/$', views.recipe_edit, name='recipe_edit'),
url(r'^login/$', auth_views.login, name='login_user'),
url(r'^recipe/search/$', views.search, name='search'),
url(r'^logout/$', views.logout_view, name='logout_user')
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
import argparse
import os
import sys
import warnings
from comet_ml import Experiment, ExistingExperiment
import torch
from torch.utils.data import DataLoader
from data.dataloader import DatasetFactory
from runner import ImprovedVideoGAN
def get_parser():
global parser
parser = argparse.ArgumentParser(description='VideoGAN')
run_args = parser.add_argument_group('Run', "Run options")
run_args.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on test set')
run_args.add_argument('--root-dir', default='/', type=str, metavar='PATH',
help='directory containing videos and index file (default: /)')
run_args.add_argument('--index-file', default='mjpeg-index.txt', type=str, metavar='FILENAME',
help='index file referencing all videos relative to root_dir (default: mjpeg-index.txt)')
run_args.add_argument('--save-dir', default='extra/', type=str, metavar='PATH',
help='path to directory for saved outputs (default: extra/)')
run_args.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
run_args.add_argument('--epochs', default=40, type=int, metavar='N',
help='number of total epochs to run')
learner_args = parser.add_argument_group("Learner")
learner_args.add_argument('--spec-norm', default=False, action='store_true',
help='set to True to use spectral normalization')
learner_args.add_argument('--no-gp', default=False, action='store_true',
help='set to True to stop using Gradient Penalty')
learner_args.add_argument('--drift-penalty', default=False, action='store_true',
help='set to True to use small drift penalty (prevents discriminator loss drifting)')
learner_args.add_argument('--zero-centered', default=False, action='store_true',
help='set to True to use zero-centered GP')
learner_args.add_argument('--one-sided', default=False, action='store_true',
help='set to True to use one-sided GP')
compute_args = parser.add_argument_group("Compute")
compute_args.add_argument('-j', '--workers', default=0, type=int, metavar='N',
help='number of data loading workers (default: 0)')
compute_args.add_argument('--num-gpu', default=1, type=int, help='number of GPUs to use')
compute_args.add_argument('--cache-dataset', default=False, action='store_true',
help='Keep all data in memory (default: False if switch is absent)')
model_args = parser.add_argument_group("Model")
    model_args.add_argument('--arch', metavar='ARCH', default='basic_fcn', type=str,
                            help='model architecture (default: basic_fcn)')
model_args.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
model_args.add_argument('--num-frozen', default=0, type=int, metavar='N',
help='# frozen cnv2 layers')
model_args.add_argument('--zdim', default=100, type=int,
help='Dimensionality of hidden features (default: 100)')
optimizer_args = parser.add_argument_group("Optimizer")
optimizer_args.add_argument('-b', '--batch-size', default=64, type=int,
metavar='N', help='mini-batch size (default: 64)')
optimizer_args.add_argument('--lr', '--learning-rate', default=0.0001, type=float,
                                metavar='LR', help='initial learning rate (default: 1e-4)')
optimizer_args.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
optimizer_args.add_argument('--weight-decay', '--wd', default=0.0, type=float,
metavar='W', help='weight decay (default: 0.0)')
optimizer_args.add_argument('--lr_step', default='40,60', help='decreasing strategy')
optimizer_args.add_argument('--beta1', default=0.5, type=float,
help='Beta parameter for ADAM (default: 0.5)')
pre_process_args = parser.add_argument_group("Pre-process")
pre_process_args.add_argument('--color-sat', default=0.0, type=float,
help='factor for saturation jitter transform (default: 0.0)')
pre_process_args.add_argument('--color-hue', default=0.0, type=float,
help='factor for hue jitter transform (default: 0.0)')
log_args = parser.add_argument_group("Log")
log_args.add_argument('--print-freq', '-p', default=100, type=int,
                          metavar='N', help='print frequency (default: 100)')
log_args.add_argument('--exp-name', default='dev', type=str, nargs='+',
                          help='The experiment name (default: dev)')
log_args.add_argument('--exp-key', default='', type=str,
help='The key to an existing experiment to be continued (default: None)')
log_args.add_argument('--exp-disable', default=False, action='store_true',
help='Disable CometML (default: False if switch is absent)')
return parser
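# Minimal usage sketch (hypothetical arguments):
# parser = get_parser()
# args = parser.parse_args(['--root-dir', '/data/videos', '--epochs', '10'])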
def get_experiment(args):
if args.resume and args.exp_key:
experiment = ExistingExperiment(
disabled=args.exp_disable,
previous_experiment=args.exp_key,
log_code=True,
log_env_gpu=True,
log_env_cpu=True,
log_env_details=True,
log_env_host=True,
log_git_metadata=True,
log_git_patch=True,
)
else:
experiment = Experiment(disabled=args.exp_disable)
if isinstance(args.exp_name, list):
for tag in args.exp_name:
experiment.add_tag(tag)
else:
experiment.add_tag(args.exp_name)
experiment.log_text(' '.join(sys.argv))
return experiment
def main(args):
experiment = get_experiment(args)
assert os.path.exists(args.save_dir), f'save-dir {args.save_dir} does not exist'
    assert torch.cuda.device_count() >= args.num_gpu, f'Requested {args.num_gpu} GPUs but only {torch.cuda.device_count()} are available'
if isinstance(args.exp_name, list):
exp_name = '_'.join(args.exp_name)
else:
exp_name = args.exp_name
out_dir = os.path.join(args.save_dir, exp_name)
if not os.path.isdir(args.save_dir):
os.makedirs(args.save_dir)
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
experiment.log_text(f"{sys.executable} {' '.join(sys.argv)} --resume {os.path.join(out_dir, 'checkpoint.model')} "
f"--exp-key {experiment.id}")
    if args.cache_dataset and args.workers > 0:
        warnings.warn("You are using multiple workers and keeping data in memory; this will "
                      "multiply memory usage by the number of workers.", ResourceWarning)
dataset = DatasetFactory.get_dataset(os.path.join(args.root_dir, args.index_file), cache_dataset=args.cache_dataset)
dataloader = DataLoader(
dataset,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.workers,
drop_last=True,
)
GAN = ImprovedVideoGAN(
dataloader=dataloader,
experiment=experiment,
device=DEVICE,
num_gpu=args.num_gpu,
n_epochs=args.epochs,
batch_size=args.batch_size,
learning_rate=args.lr,
weight_decay=args.weight_decay,
z_dim=args.zdim,
beta1=args.beta1,
critic_iterations=5,
out_dir=out_dir,
spec_norm=args.spec_norm,
no_gp=args.no_gp,
drift_penalty=args.drift_penalty,
zero_centered=args.zero_centered,
one_sided=args.one_sided,
)
if args.resume != '':
GAN.load(args.resume)
if not args.evaluate:
train(GAN, exp_name, experiment)
else:
evaluate(GAN)
def evaluate(GAN: ImprovedVideoGAN):
fvd = []
for _ in range(100):
fvd.append(GAN.evaluate())
print(f"avg FVD: {sum(fvd) / len(fvd)}")
def train(GAN, exp_name, experiment):
try:
GAN.train()
except BaseException as e:
GAN.save()
raise e
finally:
experiment.send_notification(f"Experiment {exp_name} is complete")
if __name__ == '__main__':
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu") # sets device for model and PyTorch tensors
parser = get_parser()
main(parser.parse_args())
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 10 12:52:42 2021
@author: jasperhajonides
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from tools.sliding_window import sliding_window
from custom_classifier import sample_data
from custom_classifier.custom_classification import CustomEstimator
# define parameters of simulated data + window size for decoding
n_classes = 10
n_trials = 100
num_samples = 100
window_size = 10
# simulate
X, y = sample_data.sample_data_time(n_classes = n_classes, n_trials = n_trials,
num_samples = num_samples)
# Compute sliding window. Every time point includes the data from the past
# n time points, where n is the window_size.
X_time = sliding_window(X,size_window = window_size,demean=False)
# Simple split in train and test set
X_train, X_test, y_train, y_test = train_test_split(X_time, y, test_size = 0.2,
random_state = 42)
#%% Run LDA
# initiate arrays for classifier output
out_cos = np.zeros((int((n_trials*n_classes)*0.2),n_classes,num_samples))
out_cos_conv = np.zeros((int((n_trials*n_classes)*0.2),num_samples))
out_predict = np.zeros((num_samples))
# r
for tp in range(window_size,num_samples):
#
clf = CustomEstimator(est_class=LinearDiscriminantAnalysis).fit(X_train[:,:,tp],y_train)
# out = clf.predict_proba(X_test[:,:,10])
out_cos[:,:,tp] = clf.predict_conv_proba(X_test[:,:,tp],y_test)
out_cos_conv[:,tp] = clf.predict_conv(X_test[:,:,tp],y_test)
out_predict[tp] = clf.score(X_test[:,:,tp],y_test)
fig = plt.figure(figsize=(8,8))
ax = plt.subplot(3,1,1)
# plot data
ax.plot(X[0::n_trials,0,:].T)
ax.set_title('Data')
# plot class predictions, centered around correct class
ax = plt.subplot(3,1,2)
ax.imshow(out_cos.mean(0),aspect='auto')
ax.set_title('Predictions')
ax.set_yticks([np.floor(n_classes/2)])
ax.set_yticklabels(['Correct class'])
#
ax = plt.subplot(3,1,3)
ax.plot(out_cos_conv.mean(0),label='cosine convolved')
ax.plot(out_predict,label='class prediction acc (%)')
ax.set_title('Accuracy predictions')
ax.legend()
plt.tight_layout()
|
# coding: utf-8
"""A SRU client.
:copyright: Copyright 2020 Andreas Lüschow
"""
import inspect
import logging
import time
import requests
from srupy.iterator import BaseSRUIterator, SRUResponseIterator
from srupy.response import SRUResponse
from .models import Explain
logger = logging.getLogger(__name__)
class SRUpy(object):
"""Client for sending SRU requests.
Use it like this::
>>> py_sru = SRUpy('http://elis.da.ulcc.ac.uk/cgi/oai2')
>>> records = py_sru.get_records()
>>> records.next()
:param endpoint: The endpoint of the SRU server.
:type endpoint: str
:param http_method: Method used for requests (GET or POST, default: GET).
:type http_method: str
:param protocol_version: The SRU protocol version.
:type protocol_version: str
:param iterator: The type of the returned iterator
(default: :class:`srupy.iterator.SRUResponseIterator`)
:param max_retries: Number of retry attempts if an HTTP
request fails (default: 0 = request
only once). SRUpy will use the value
from the retry-after header (if present)
and will wait the specified number of
seconds between retries.
:type max_retries: int
:param retry_status_codes: HTTP status codes to
retry (default will only retry on 503)
:type retry_status_codes: iterable
:param default_retry_after: default number of seconds to wait
between retries in case no retry-after
header is found on the response
(defaults to 60 seconds)
:type default_retry_after: int
:param encoding: Can be used to override the encoding used
when decoding the server response. If not
specified, `requests` will use the encoding
returned by the server in the `content-type`
header. However, if the `charset`
information is missing, `requests` will fallback to
`'ISO-8859-1'`.
:type encoding: str
:param default_namespace: default namespace of the responded xml
:type default_namespace: str
:param request_args: Arguments to be passed to requests when issuing HTTP
requests. See the `documentation of requests
<http://docs.python-requests.org/en/master/api/#main-interface>`_
for all available parameters.
"""
def __init__(self, endpoint,
http_method='GET',
protocol_version='2.0',
iterator=SRUResponseIterator,
max_retries=0,
retry_status_codes=None,
default_retry_after=60,
encoding=None,
default_namespace='{http://docs.oasis-open.org/ns/search-ws/sruResponse}',
**request_args):
"""Docstring."""
self.endpoint = endpoint
        if http_method not in ['GET', 'POST']:
            raise ValueError(
                "Invalid HTTP method: %s! Must be GET or POST." % http_method)
        if protocol_version not in ['2.0', '1.2', '1.1']:
            raise ValueError(
                "Invalid protocol version: %s! Must be 2.0, 1.2 or 1.1"
                % protocol_version)
self.http_method = http_method
self.protocol_version = protocol_version
if inspect.isclass(iterator) and issubclass(iterator, BaseSRUIterator):
self.iterator = iterator
else:
raise TypeError(
"Argument 'iterator' must be subclass of %s"
% BaseSRUIterator.__name__)
self.max_retries = max_retries
self.retry_status_codes = retry_status_codes or [503]
self.default_retry_after = default_retry_after
self.sru_namespace = default_namespace
self.encoding = encoding
self.request_args = request_args
def harvest(self, **kwargs):
"""Make HTTP requests to the SRU server.
:param kwargs: SRU HTTP parameters.
:rtype: :class:`srupy.SRUResponse`
"""
http_response = self._request(kwargs)
for _ in range(self.max_retries):
if self._is_error_code(http_response.status_code) \
and http_response.status_code in self.retry_status_codes:
retry_after = self.get_retry_after(http_response)
logger.warning(
"HTTP %d! Retrying after %d seconds..."
% (http_response.status_code, retry_after))
time.sleep(retry_after)
http_response = self._request(kwargs)
http_response.raise_for_status()
if self.encoding:
http_response.encoding = self.encoding
return SRUResponse(http_response, params=kwargs)
def _request(self, kwargs):
"""Docstring."""
if self.http_method == 'GET':
return requests.get(self.endpoint,
params=kwargs, **self.request_args)
return requests.post(self.endpoint,
data=kwargs, **self.request_args)
def get_records(self, **kwargs):
"""Issue a SRU request to fetch records.
:rtype: :class:`srupy.iterator.BaseSRUIterator`
"""
        # TODO: add default parameters here.
        # The default value for e.g. recordSchema is set by the
        # respective server, so check the Explain file first!
params = {
# 'query': 'dog and cat and mouse',
# 'queryType': 'cql',
# default value is 1
'startRecord': 1,
# default value is determined by the server
# 'maximumRecords': 10,
# 'recordSchema': 'mods',
# 'record_XML_escaping' = True
# resultSetTTL = True
# Stylesheet = True
# extension_parameters
# sortKeys = True
# facet_parameters
# renderedBy = True
# httpAccept = True
# responseType = True
# recordPacking = True
}
params.update(kwargs)
if 'query' not in params.keys():
raise KeyError("Request parameter 'query' must be set")
return self.iterator(self, params)
def explain(self):
"""Issue an Explain request to a SRU server.
:rtype: :class:`srupy.models.Explain`
"""
return Explain(self.harvest())
def get_retry_after(self, http_response):
"""Docstring."""
if http_response.status_code == 503:
try:
return int(http_response.headers.get('retry-after'))
except TypeError:
return self.default_retry_after
return self.default_retry_after
@staticmethod
def _is_error_code(status_code):
"""Docstring."""
return status_code >= 400
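# Minimal usage sketch (hypothetical endpoint and query; requires a reachable SRU server):
# client = SRUpy("https://example.org/sru")
# for record in client.get_records(query="dog and cat"):
#     print(record)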
|
from django.shortcuts import render
from .models import *
from .tables import *
from .tables import Rank_awpTable, Rank_retakeTable
from .filters import Rank_awpFilter, Rank_retakeFilter
from django.views.generic import TemplateView
from django_filters.views import FilterView
from django_tables2.views import SingleTableMixin
class Rank_awpListView(SingleTableMixin, FilterView):
model = Rank_awp
table_class = Rank_awpTable
template_name = 'gamestatistics/stats-awp.html'
filterset_class = Rank_awpFilter
class Rank_retakeListView(SingleTableMixin, FilterView):
model = Rank_retake
table_class = Rank_retakeTable
template_name = 'gamestatistics/stats-retake.html'
filterset_class = Rank_retakeFilter
class SurfStatsView(TemplateView):
template_name = 'gamestatistics/stats_surf.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
times = []
myresult = CkPlayertimes.objects.raw(
"""
SELECT t.mapname, t.runtimepro, t.name, t.steamid
FROM (
SELECT mapname, MIN(runtimepro) as minruntimepro FROM `ck_playertimes` GROUP BY mapname)
AS x
INNER JOIN ck_playertimes AS t
ON t.mapname=x.mapname AND t.runtimepro=x.minruntimepro
"""
)
for result in myresult:
            time = result.runtimepro
            minute = int(time // 60)
            second = int(time - minute * 60)
            ms = int((time - minute * 60 - second) * 1000)
            time = "{}:{}:{}".format(minute, second, ms)
times.append(
{
"mapname": result.mapname,
"time": time,
"name": result.name,
"steamid": result.steamid
}
)
context["times"] = times
return context
def AwpStatsView(request, steamid):
user = Rank_awp.objects.get(steam=steamid)
playtime = user.time()
if user.deaths == 0:
KD = str(round(user.kills, 2))
else:
KD = str(round(user.kills/user.deaths, 2))
if user.rounds_ct + user.rounds_tr == 0:
ADR = str(round(user.damage, 2))
else:
ADR = str(round(user.damage/(user.rounds_ct + user.rounds_tr), 2))
    if user.kills == 0:
        HS = "0 %"
    else:
        HS = "{:.0f} %".format(round(user.headshots / user.kills, 2) * 100)
stats = {
"name": user.name,
"score": user.score,
"kills": user.kills,
"deaths": user.deaths,
"KD": KD,
"ADR": ADR,
"HS": HS,
"hits": user.hits,
"shots": user.shots,
"head": user.headshots,
"chest": user.chest,
"stomach": user.stomach,
"left_arm": user.left_arm,
"right_arm": user.right_arm,
"left_leg": user.left_leg,
"right_leg": user.right_leg,
"playtime": playtime
}
return render(request, 'gamestatistics/stats-awp-player.html', stats)
def RetakesStatsView(request, steamid):
user = Rank_retake.objects.get(steam=steamid)
playtime = user.time()
if user.deaths == 0:
KD = str(round(user.kills, 2))
else:
KD = str(round(user.kills/user.deaths, 2))
if user.rounds_ct + user.rounds_tr == 0:
ADR = str(round(user.damage, 2))
else:
ADR = str(round(user.damage/(user.rounds_ct + user.rounds_tr), 2))
    if user.kills == 0:
        HS = "0 %"
    else:
        HS = "{:.0f} %".format(round(user.headshots / user.kills, 2) * 100)
stats = {
"name": user.name,
"score": user.score,
"kills": user.kills,
"deaths": user.deaths,
"KD": KD,
"ADR": ADR,
"HS": HS,
"hits": user.hits,
"shots": user.shots,
"head": user.headshots,
"chest": user.chest,
"stomach": user.stomach,
"left_arm": user.left_arm,
"right_arm": user.right_arm,
"left_leg": user.left_leg,
"right_leg": user.right_leg,
"knife": user.knife,
"glock": user.glock,
"hkp2000": user.hkp2000,
"usp_silencer": user.usp_silencer,
"p250": user.p250,
"deagle": user.deagle,
"elite": user.elite,
"fiveseven": user.fiveseven,
"tec9": user.tec9,
"cz75a": user.cz75a,
"revolver": user.revolver,
"nova": user.nova,
"xm1014": user.xm1014,
"mag7": user.mag7,
"sawedoff": user.sawedoff,
"bizon": user.bizon,
"mac10": user.mac10,
"mp9": user.mp9,
"mp7": user.mp7,
"ump45": user.ump45,
"p90": user.p90,
"galilar": user.galilar,
"ak47": user.ak47,
"scar20": user.scar20,
"famas": user.famas,
"m4a1": user.m4a1,
"m4a1_silencer": user.m4a1_silencer,
"aug": user.aug,
"ssg08": user.ssg08,
"sg556": user.sg556,
"awp": user.awp,
"g3sg1": user.g3sg1,
"m249": user.m249,
"negev": user.negev,
"hegrenade": user.hegrenade,
"inferno": user.inferno,
"playtime": playtime
}
return render(request, 'gamestatistics/stats-retake-player.html', stats)
|
BBS_V1 = {
"@context": {
"@version": 1.1,
"id": "@id",
"type": "@type",
"BbsBlsSignature2020": {
"@id": "https://w3id.org/security#BbsBlsSignature2020",
"@context": {
"@version": 1.1,
"@protected": True,
"id": "@id",
"type": "@type",
"challenge": "https://w3id.org/security#challenge",
"created": {
"@id": "http://purl.org/dc/terms/created",
"@type": "http://www.w3.org/2001/XMLSchema#dateTime",
},
"domain": "https://w3id.org/security#domain",
"proofValue": "https://w3id.org/security#proofValue",
"nonce": "https://w3id.org/security#nonce",
"proofPurpose": {
"@id": "https://w3id.org/security#proofPurpose",
"@type": "@vocab",
"@context": {
"@version": 1.1,
"@protected": True,
"id": "@id",
"type": "@type",
"assertionMethod": {
"@id": "https://w3id.org/security#assertionMethod",
"@type": "@id",
"@container": "@set",
},
"authentication": {
"@id": "https://w3id.org/security#authenticationMethod",
"@type": "@id",
"@container": "@set",
},
},
},
"verificationMethod": {
"@id": "https://w3id.org/security#verificationMethod",
"@type": "@id",
},
},
},
"BbsBlsSignatureProof2020": {
"@id": "https://w3id.org/security#BbsBlsSignatureProof2020",
"@context": {
"@version": 1.1,
"@protected": True,
"id": "@id",
"type": "@type",
"challenge": "https://w3id.org/security#challenge",
"created": {
"@id": "http://purl.org/dc/terms/created",
"@type": "http://www.w3.org/2001/XMLSchema#dateTime",
},
"domain": "https://w3id.org/security#domain",
"nonce": "https://w3id.org/security#nonce",
"proofPurpose": {
"@id": "https://w3id.org/security#proofPurpose",
"@type": "@vocab",
"@context": {
"@version": 1.1,
"@protected": True,
"id": "@id",
"type": "@type",
"sec": "https://w3id.org/security#",
"assertionMethod": {
"@id": "https://w3id.org/security#assertionMethod",
"@type": "@id",
"@container": "@set",
},
"authentication": {
"@id": "https://w3id.org/security#authenticationMethod",
"@type": "@id",
"@container": "@set",
},
},
},
"proofValue": "https://w3id.org/security#proofValue",
"verificationMethod": {
"@id": "https://w3id.org/security#verificationMethod",
"@type": "@id",
},
},
},
"Bls12381G1Key2020": "https://w3id.org/security#Bls12381G1Key2020",
"Bls12381G2Key2020": "https://w3id.org/security#Bls12381G2Key2020",
}
}
|
#!/usr/bin/env python3
import gemmi
import copy
import numpy as np
from scipy.spatial import distance
import string
from rdkit import Chem
from rdkit.Geometry import Point3D
import warnings
from . import xyz2mol
import argparse
def parse_arguments():
"""
Parse command line arguments.
"""
parser = argparse.ArgumentParser(
description="Python script for building supercell that can be used with PBC in MD simulation."
)
parser.add_argument(
'--input',
"-i",
type=str,
help="Input cif file",
required=True
)
parser.add_argument(
'--prefix',
"-pre",
type=str,
help="Prefix for pdb and csv file.",
required=True
)
parser.add_argument(
'--a_min_max',
"-a",
type=float,
help="If this is two arguments: Minimum and maximum unit cell replicates along direction `a`. \
If this is a single argument: Minimum length of axis `a` in the supercell. Units [nm]",
required=False,
default=[-1,1],
nargs='+',
)
parser.add_argument(
'--b_min_max',
"-b",
type=float,
help="If this is two arguments: Minimum and maximum unit cell replicates along direction `b`. \
If this is a single argument: Minimum length of axis `b` in the supercell. Units [nm]",
required=False,
default=[-1,1],
nargs='+',
)
parser.add_argument(
'--c_min_max',
"-c",
type=float,
help="If this is two arguments: Minimum and maximum unit cell replicates along direction `c`. \
If this is a single argument: Minimum length of axis `c` in the supercell. Units [nm]",
required=False,
default=[-1,1],
nargs='+',
)
parser.add_argument(
'--addhs',
"-ah",
action='store_true',
help="Remove any existing hydrogen and add protonate molecule. Requires OpenEye Toolkits.",
required=False,
default=False,
)
parser.add_argument(
'--addwater',
"-aw",
type=int,
help="Number of water molecules to add",
required=False,
default=0,
)
parser.add_argument(
'--use_symmetry_operations',
"-op",
action='store_true',
help="Use symmetry operations in cif file instead of space group.",
required=False,
default=False,
)
parser.add_argument(
'--n_protonation_attempts',
"-np",
type=int,
help="Number of attempts to compute protonatation states in unit cell.",
required=False,
default=0,
)
parser.add_argument(
'--use_openeye',
"-oe",
action='store_true',
help="Use openeye-toolkit for topology building. Otherwise use xyz2mol.",
required=False,
default=False,
)
return parser.parse_args()
def combine_mols(mol_list):
mol_list = copy.deepcopy(mol_list)
N_mol_per_unitcell = len(mol_list)
mol_combo = Chem.Mol()
for mol in mol_list:
mol_combo = Chem.CombineMols(mol_combo, mol)
return mol_combo
class FfWrapper(object):
def __init__(self, mol, only_hydrogen=True):
from rdkit.Chem import AllChem as Chem
self._mol = copy.deepcopy(mol)
mp = Chem.MMFFGetMoleculeProperties(mol)
self._ffm = Chem.MMFFGetMoleculeForceField(mol, mp)
self._H_atom_list = list()
self._all_atom_list = list()
for atom in mol.GetAtoms():
if atom.GetAtomicNum() == 1:
self._H_atom_list.append(atom.GetIdx())
self._all_atom_list.append(atom.GetIdx())
self._H_atom_list = np.array(self._H_atom_list)
self._H_idxs = np.arange(mol.GetNumAtoms()*3, dtype=int)
self._H_idxs = self._H_idxs.reshape((mol.GetNumAtoms(), 3))
self._H_idxs = self._H_idxs[self._H_atom_list]
self._H_idxs_flat = self._H_idxs.flatten()
self._all_atom_list = np.array(self._all_atom_list)
self._all_idxs = np.arange(mol.GetNumAtoms()*3, dtype=int)
self._all_idxs = self._all_idxs.reshape((mol.GetNumAtoms(), 3))
        self._all_idxs_flat = self._all_idxs.flatten()
self._num_all_atoms = len(self._all_atom_list)
self._num_H_atoms = len(self._H_atom_list)
self.only_hydrogen = only_hydrogen
@property
def num_all_atoms(self):
return self._num_all_atoms
@property
def num_H_atoms(self):
return self._num_H_atoms
@property
def H_atom_list(self):
return self._H_atom_list
@property
def H_idxs(self):
return self._H_idxs
@property
def H_idxs_flat(self):
return self._H_idxs_flat
@property
def all_atom_list(self):
return self._all_atom_list
@property
def all_idxs(self):
return self._all_idxs
@property
def all_idxs_flat(self):
return self._all_idxs_flat
@property
def ffm(self):
return self._ffm
@property
def mol(self):
return self._mol
@property
def pos(self):
pos = np.array(self.ffm.Positions())
if self.only_hydrogen:
pos = pos[self.H_idxs_flat].tolist()
return pos
@pos.setter
def pos(self, pos):
from rdkit.Chem import AllChem as Chem
conformer = self._mol.GetConformer()
if self.only_hydrogen:
pos_stack = np.array(pos).reshape((self.num_H_atoms, 3))
for H_atm_idx, atm_idx in enumerate(self.H_atom_list):
conformer.SetAtomPosition(
int(atm_idx),
Point3D(
*pos_stack[H_atm_idx]
)
)
else:
pos_stack = np.array(pos).reshape((self.num_all_atoms, 3))
for atm_idx in range(self._mol.GetNumAtoms()):
conformer.SetAtomPosition(
int(atm_idx),
Point3D(
*pos_stack[atm_idx]
)
)
mp = Chem.MMFFGetMoleculeProperties(self._mol)
self._ffm = Chem.MMFFGetMoleculeForceField(self._mol, mp)
def ene(self,x):
pos = self.ffm.Positions()
pos = np.array(pos)
if self.only_hydrogen:
pos[self.H_idxs_flat] = x
else:
pos[:] = x
pos = pos.tolist()
return self.ffm.CalcEnergy(pos)
def grad(self,x):
pos = self.ffm.Positions()
pos = np.array(pos)
if self.only_hydrogen:
pos[self.H_idxs_flat] = x
else:
pos[:] = x
pos = pos.tolist()
        grad_vals = self.ffm.CalcGrad(pos)
        grad_vals = np.array(grad_vals)
        if self.only_hydrogen:
            # only return the gradient components of the hydrogen coordinates
            grad_vals = grad_vals[self.H_idxs_flat]
        return grad_vals.tolist()
def apply_delta_hydrogen(
mol_list,
delta_hydrogen_dict
):
import numpy as np
from rdkit.Chem import AllChem as Chem
import copy
mol_combo = combine_mols(mol_list)
emol = Chem.EditableMol(mol_combo)
offset_list = np.zeros(mol_combo.GetNumAtoms(), dtype=int)
for atomidx, switch in delta_hydrogen_dict.items():
atom = mol_combo.GetAtomWithIdx(atomidx)
charge = atom.GetFormalCharge()
if switch == 1:
Hatom = Chem.Atom(1)
idx1 = emol.AddAtom(Hatom)
Patom = Chem.AtomFromSmarts(atom.GetSmarts())
Patom.SetFormalCharge(charge+1)
emol.ReplaceAtom(
atomidx+int(offset_list[atomidx]),
Patom
)
emol.AddBond(
idx1,
atomidx+int(offset_list[atomidx]),
Chem.BondType.SINGLE
)
elif switch == -1:
for n_atom in atom.GetNeighbors():
if n_atom.GetAtomicNum() == 1:
idx1 = n_atom.GetIdx()
Patom = Chem.AtomFromSmarts(atom.GetSmarts())
Patom.SetFormalCharge(charge-1)
emol.ReplaceAtom(
atomidx+int(offset_list[atomidx]),
Patom
)
emol.RemoveAtom(
idx1+int(offset_list[idx1])
)
offset_list[idx1:] -= 1
break
else:
            ### No change.
pass
mol_combo_prot = emol.GetMol()
Chem.SanitizeMol(mol_combo_prot)
return Chem.GetMolFrags(mol_combo_prot, asMols=True)
def minimize_H(
mol_list,
):
import numpy as np
from rdkit.Chem import AllChem as Chem
from scipy import optimize
import copy
mol_combo = combine_mols(mol_list)
ff = FfWrapper(mol_combo, only_hydrogen=True)
x0 = ff.pos
result = optimize.minimize(
fun=ff.ene,
x0=x0,
jac=ff.grad,
method="L-BFGS-B"
)
ff.pos = result.x
energy = ff.ene(result.x)
return energy, Chem.GetMolFrags(ff.mol, asMols=True)
def assign_protonation_states(
cell,
mol_list,
N_iterations=10):
"""
Find the energetically best protonation state of the unitcell.
"""
import numpy as np
from rdkit.Chem import AllChem as Chem
import copy
mol_combo = combine_mols(mol_list)
polar_heavy_atom = Chem.MolFromSmarts("[#7,#8,#16]")
matches = mol_combo.GetSubstructMatches(
polar_heavy_atom,
uniquify=False
)
N_matches = len(matches)
delta_hydrogen_dict_best = dict()
for atm_idx in matches:
atm_idx = int(atm_idx[0])
delta_hydrogen_dict_best[atm_idx] = 0
    energy_best = np.inf
mol_list_best = copy.deepcopy(mol_list)
N_mol_per_unitcell = len(mol_list)
charge0 = Chem.GetFormalCharge(mol_combo)
#with open(f"./best_init.pdb", "w") as fopen:
# fopen.write(
# Chem.MolToPDBBlock(
# combine_mols(
# mol_list_best
# )
# )
# )
#count = 0
for _ in range(N_iterations):
delta_hydrogen_dict = copy.deepcopy(
delta_hydrogen_dict_best
)
for i in range(N_matches):
atm_idx_i = int(matches[i][0])
delta0_i = delta_hydrogen_dict[atm_idx_i]
for j in range(i+1, N_matches):
atm_idx_j = int(matches[j][0])
delta0_j = delta_hydrogen_dict[atm_idx_j]
for dij in [[-1,1],[1,-1]]:
### 0: Do nothing
### +1: Add hydrogen
### -1: Remove hydrogen
delta_hydrogen_dict[atm_idx_i] = dij[0]
delta_hydrogen_dict[atm_idx_j] = dij[1]
mol_list_prot = apply_delta_hydrogen(
copy.deepcopy(mol_list),
delta_hydrogen_dict
)
charge = Chem.GetFormalCharge(
combine_mols(mol_list_prot)
)
if charge == charge0:
mol_list_prot, _, _ = make_supercell(
cell,
mol_list_prot,
0, 2,
0, 2,
0, 2)
energy, mol_list_prot = minimize_H(mol_list_prot)
if energy < energy_best:
energy_best = energy
delta_hydrogen_dict_best = copy.deepcopy(
delta_hydrogen_dict
)
mol_list_best = copy.deepcopy(
mol_list_prot[:N_mol_per_unitcell]
)
delta_hydrogen_dict[atm_idx_j] = delta0_j
delta_hydrogen_dict[atm_idx_i] = delta0_i
#print(f"Best energy {energy_best:4.2f}")
#with open(f"./best_{count}.pdb", "w") as fopen:
# fopen.write(
# Chem.MolToPDBBlock(
# combine_mols(
# mol_list_best
# )
# )
# )
#count += 1
return mol_list_best
def get_nonoverlapping_atoms(atom_crds_ortho, filter_overlapping=False):
"""
Retrieve indices of rows in `atom_crds_ortho` (N,3) list
that correspond to non-overlapping atoms.
    If `filter_overlapping` is True, one (arbitrary) atom of each
    overlapping pair is retained instead of both being discarded.
"""
from scipy.spatial import distance
atom_crds_ortho_cp = np.copy(atom_crds_ortho)
dists = distance.cdist(atom_crds_ortho_cp, atom_crds_ortho_cp)
np.fill_diagonal(dists, np.inf)
if filter_overlapping:
        tril_idxs = np.tril_indices(atom_crds_ortho_cp.shape[0])
dists[tril_idxs] = np.inf
invalids = np.where(dists < 0.01)[0]
invalids = np.unique(invalids)
valids = np.arange(atom_crds_ortho_cp.shape[0], dtype=int)
valids = np.delete(valids, invalids)
return valids
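# Illustrative sketch: with filter_overlapping=True, one of two coincident atoms is kept.
# get_nonoverlapping_atoms(np.array([[0., 0., 0.], [0., 0., 0.], [1., 0., 0.]]),
#                          filter_overlapping=True)   # -> array([1, 2])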
def random_fill(
cell,
mol_list,
N_per_unitcell,
radius=1.4,
smiles="[H]O[H]"):
"""
Randomly fill unit cell with molecules.
"""
from rdkit.Chem import AllChem as Chem
from rdkit.Geometry import Point3D
import numpy as np
from scipy.spatial import distance
import gemmi
import copy
vdw_dict = {
1 : 1.09, #H
6 : 1.7, #C
7 : 1.55, #N
8 : 1.52, #O
9 : 1.47, #F
15 : 1.8, #P
16 : 1.8, #S
17 : 1.75 #Cl
}
mol = Chem.MolFromSmiles(smiles)
mol = Chem.AddHs(mol)
Chem.EmbedMolecule(mol)
Chem.UFFOptimizeMolecule(mol)
conformer = mol.GetConformer()
atom_crds_mol = conformer.GetPositions()
atom_crds_mol = np.array(atom_crds_mol)
replicated_mol_list, _, _ = make_supercell(
cell,
mol_list,
-1, 1,
-1, 1,
-1, 1)
atom_crds_xtal = list()
atom_radii_xtal = list()
for mol_r in replicated_mol_list:
conf = mol_r.GetConformer(0)
conf_pos = conf.GetPositions()
for atom in mol_r.GetAtoms():
if atom.GetAtomicNum() == 1:
continue
atom_radii_xtal.append(
vdw_dict[atom.GetAtomicNum()]
)
atom_crds_xtal.append(
conf_pos[atom.GetIdx()]
)
atom_crds_xtal = np.array(atom_crds_xtal)
atom_radii_xtal = np.array(atom_radii_xtal)
atom_radii_xtal += radius
grid = list()
for a in np.linspace(0., 1., 50, True):
for b in np.linspace(0., 1., 50, True):
for c in np.linspace(0., 1., 50, True):
frac = np.array([a,b,c], dtype=float)
ortho = cell.orthogonalize(
gemmi.Fractional(*frac)
).tolist()
dists = distance.cdist(atom_crds_xtal, [ortho])
is_outside = np.all(dists[:,0] > atom_radii_xtal)
if is_outside:
grid.append(ortho)
overlap = True
grid = np.array(grid)
print(f"Found {grid.shape[0]} / {50**3} valid grid points.")
print("Scanning overlap...")
while overlap:
mask = np.arange(grid.shape[0], dtype=int)
mask_selection = np.random.choice(
mask,
size=N_per_unitcell,
replace=False
)
grid_selection = grid[mask_selection]
grid_selection_query = np.copy(grid_selection).tolist()
for crd in grid_selection:
frac = cell.fractionalize(
gemmi.Position(
*crd
)
).tolist()
frac = np.array(frac)
for a in [-1.,0.,1.]:
for b in [-1.,0.,1.]:
for c in [-1.,0.,1.]:
if (a==0) & (b==0) & (c==0):
continue
frac += [a,b,c]
ortho = cell.orthogonalize(
gemmi.Fractional(*frac)
).tolist()
grid_selection_query.append(ortho)
frac -= [a,b,c]
grid_selection_query = np.array(grid_selection_query)
dists = distance.cdist(grid_selection_query, grid_selection_query)
np.fill_diagonal(dists, np.inf)
min_dist = np.min(dists)
if min_dist > 2.*radius:
overlap = False
import copy
mol_list_new = copy.deepcopy(mol_list)
for crds in grid_selection:
mol_cp = copy.deepcopy(mol)
conformer = mol_cp.GetConformer()
atom_crds_mol = conformer.GetPositions()
trans = crds - np.mean(atom_crds_mol, axis=0)
atom_crds_mol += trans
for atm_idx in range(mol_cp.GetNumAtoms()):
conformer.SetAtomPosition(
atm_idx,
Point3D(*atom_crds_mol[atm_idx])
)
mol_list_new.append(mol_cp)
return mol_list_new
def make_P1(
cell,
atom_crds_ortho,
atom_num,
addhs=False,
use_openeye=False):
"""
Generate the P1 cell. Return tuple with atomic coordinates (in Ang) and
atomic numbers of all atoms in P1 cell.
"""
import networkx as nx
from rdkit.Chem import AllChem as Chem
from rdkit.Geometry import Point3D
import copy
_atom_crds_ortho = list()
_atom_num = list()
N_atoms = len(atom_num)
for i in range(N_atoms):
if addhs:
if atom_num[i] != 1:
_atom_crds_ortho.append(copy.copy(atom_crds_ortho[i]))
_atom_num.append(copy.copy(atom_num[i]))
else:
_atom_crds_ortho.append(copy.copy(atom_crds_ortho[i]))
_atom_num.append(copy.copy(atom_num[i]))
atom_crds_ortho = _atom_crds_ortho
atom_num = _atom_num
atom_crds_ortho = np.array(atom_crds_ortho, dtype=float)
atom_num = np.array(atom_num, dtype=int)
nonoverlapping_idxs = get_nonoverlapping_atoms(atom_crds_ortho, filter_overlapping=True)
atom_crds_ortho = atom_crds_ortho[nonoverlapping_idxs].tolist()
atom_num = atom_num[nonoverlapping_idxs].tolist()
N_atoms = len(atom_num)
found_bond = True
### Terminate if we haven't found any bonds.
while found_bond:
### Update the frac coordinates
atom_crds_frac = list()
for atm_idx in range(N_atoms):
frac = cell.fractionalize(
gemmi.Position(
*atom_crds_ortho[atm_idx]
)
)
atom_crds_frac.append(frac.tolist())
### Get the "pre-molecules" (adjacency matrix with not much chemistry)
acmatrix_new, _ = xyz2mol.xyz2AC(
atom_num,
atom_crds_ortho,
0,
)
acmatrix_best = acmatrix_new
### Find the disconnected graphs from the adjacency matrix
G = nx.convert_matrix.from_numpy_matrix(acmatrix_new)
G_node_list = list(nx.connected_components(G))
        ### Translate molecules into neighboring unit cells (0/-1 shifts along each axis)
        ### and check if we can form new bonds. If yes, update `atom_crds_ortho`
found_bond = False
for g in G_node_list:
for a in [0,-1]:
for b in [0,-1]:
for c in [0,-1]:
atom_crds_ortho_cp = copy.deepcopy(atom_crds_ortho)
for atm_idx in g:
frac = copy.deepcopy(atom_crds_frac[atm_idx])
frac[0] += a
frac[1] += b
frac[2] += c
ortho = cell.orthogonalize(
gemmi.Fractional(*frac)
).tolist()
if not ortho in atom_crds_ortho_cp:
atom_crds_ortho_cp[atm_idx] = ortho
acmatrix_new, _ = xyz2mol.xyz2AC(
atom_num,
                            atom_crds_ortho_cp, # use the translated copy so newly formed bonds can be detected
0,
)
if not np.all(acmatrix_new == acmatrix_best) and np.sum(acmatrix_new) >= np.sum(acmatrix_best):
nonoverlapping_idxs = get_nonoverlapping_atoms(atom_crds_ortho_cp)
if nonoverlapping_idxs.size == N_atoms:
atom_crds_ortho = copy.deepcopy(atom_crds_ortho_cp)
acmatrix_best = acmatrix_new
found_bond = True
acmatrix, _ = xyz2mol.xyz2AC(
atom_num,
atom_crds_ortho,
0,
)
G = nx.convert_matrix.from_numpy_matrix(acmatrix)
G_node_list = list(nx.connected_components(G))
atom_num = np.array(atom_num, dtype=int)
atom_crds_ortho = np.array(atom_crds_ortho)
mol_list = list()
for g in G_node_list:
g = list(g)
_, mol = xyz2mol.xyz2AC(
atom_num[g].tolist(),
atom_crds_ortho[g].tolist(),
0)
mol_list.append(mol)
N_mol = len(mol_list)
atom_crds_ortho = list()
atom_num = list()
mol_list_new = list()
for mol_idx in range(N_mol):
mol = mol_list[mol_idx]
conf = mol.GetConformer()
conf_pos = conf.GetPositions()
frac_crds = list()
for pos in conf_pos:
frac_crds.append(
cell.fractionalize(
gemmi.Position(
*pos
)
).tolist()
)
frac_crds = np.array(frac_crds)
valid_atoms = np.where(
(frac_crds[:,0] > 0.) * (frac_crds[:,0] < 1.) *\
(frac_crds[:,1] > 0.) * (frac_crds[:,1] < 1.) *\
(frac_crds[:,2] > 0.) * (frac_crds[:,2] < 1.)
)[0]
### If no atom is in uc, bring the molecule into unit cell
if valid_atoms.size == 0:
is_inside = False
for a in [0,-1,1]:
for b in [0,-1,1]:
for c in [0,-1,1]:
frac_crds += [a,b,c]
valid_atoms = np.where(
(frac_crds[:,0] > 0.) * (frac_crds[:,0] < 1.) *\
(frac_crds[:,1] > 0.) * (frac_crds[:,1] < 1.) *\
(frac_crds[:,2] > 0.) * (frac_crds[:,2] < 1.)
)[0]
if valid_atoms.size != 0:
is_inside = True
else:
frac_crds -= [a,b,c]
if is_inside:
break
if is_inside:
break
if is_inside:
break
if not is_inside:
continue
frac_crds_query = np.copy(frac_crds[valid_atoms[0]])
### Check for overlap
overlap = False
for a in [0,-1,1]:
for b in [0,-1,1]:
for c in [0,-1,1]:
frac_crds_query += [a,b,c]
ortho = cell.orthogonalize(
gemmi.Fractional(*frac_crds_query)
).tolist()
if len(atom_crds_ortho) > 0:
dists = distance.cdist([ortho], atom_crds_ortho)
valids = np.where(dists < 0.01)[0]
if valids.size > 0:
overlap = True
frac_crds_query -= [a,b,c]
if not overlap:
for atm_idx, frac in enumerate(frac_crds):
ortho = cell.orthogonalize(
gemmi.Fractional(*frac)
).tolist()
atom_crds_ortho.append(ortho)
rd_atom = mol.GetAtomWithIdx(atm_idx)
atom_num.append(rd_atom.GetAtomicNum())
conf.SetAtomPosition(
atm_idx,
Point3D(*ortho)
)
mol_list_new.append(mol)
mol_list = list()
if addhs:
        if not use_openeye:
import warnings
warnings.warn("With addhs=True, we automatically set use_openeye=True.")
from openeye import oechem
from openeye import oequacpac
from xtalmdscripts.supercellbuilding.oe_utils import rdmol_from_oemol
from xtalmdscripts.supercellbuilding.oe_utils import oemol_from_rdmol
count = 0
for mol in mol_list_new:
oemol = oechem.OEMol()
oemol.SetDimension(3)
conf_pos = mol.GetConformer(0).GetPositions()
crds = list()
for atm_idx in range(mol.GetNumAtoms()):
atom = mol.GetAtomWithIdx(atm_idx)
oemol.NewAtom(int(atom.GetAtomicNum()))
crds.extend(conf_pos[atm_idx])
oemol.SetCoords(crds)
oechem.OEAssignAromaticFlags(oemol)
oechem.OEDetermineConnectivity(oemol)
oechem.OEFindRingAtomsAndBonds(oemol)
oechem.OEPerceiveBondOrders(oemol)
oechem.OE3DToInternalStereo(oemol)
oechem.OEPerceiveChiral(oemol)
oechem.OEAssignImplicitHydrogens(oemol)
oechem.OEAssignFormalCharges(oemol)
oequacpac.OERemoveFormalCharge(oemol)
oechem.OEAddExplicitHydrogens(oemol)
oechem.OEAssignAromaticFlags(oemol)
mol = rdmol_from_oemol(oemol)
Chem.AssignStereochemistryFrom3D(mol)
mol_list.append(mol)
#with open(f"./test_{count}.pdb", "w") as fopen:
# fopen.write(Chem.MolToPDBBlock(mol))
count += 1
else:
if use_openeye:
from openeye import oechem
from openeye import oequacpac
from xtalmdscripts.supercellbuilding.oe_utils import rdmol_from_oemol
from xtalmdscripts.supercellbuilding.oe_utils import oemol_from_rdmol
count = 0
for mol in mol_list_new:
oemol = oechem.OEMol()
oemol.SetDimension(3)
conf_pos = mol.GetConformer(0).GetPositions()
crds = list()
for atm_idx in range(mol.GetNumAtoms()):
atom = mol.GetAtomWithIdx(atm_idx)
oemol.NewAtom(int(atom.GetAtomicNum()))
crds.extend(conf_pos[atm_idx])
oemol.SetCoords(crds)
oechem.OEDetermineConnectivity(oemol)
oechem.OEFindRingAtomsAndBonds(oemol)
oechem.OEPerceiveBondOrders(oemol)
oechem.OE3DToInternalStereo(oemol)
oechem.OEPerceiveChiral(oemol)
oechem.OEAssignFormalCharges(oemol)
oechem.OEAssignAromaticFlags(oemol)
mol = rdmol_from_oemol(oemol)
Chem.AssignStereochemistryFrom3D(mol)
mol_list.append(mol)
#with open(f"./test_{count}.pdb", "w") as fopen:
# fopen.write(Chem.MolToPDBBlock(mol))
count += 1
else:
acmatrix, _ = xyz2mol.xyz2AC(
atom_num,
atom_crds_ortho,
0,
)
G = nx.convert_matrix.from_numpy_matrix(acmatrix)
G_node_list = list(nx.connected_components(G))
atom_num = np.array(atom_num, dtype=int)
atom_crds_ortho = np.array(atom_crds_ortho)
for g in G_node_list:
g = list(g)
mol = Chem.GetMolFrags(
xyz2mol.xyz2mol(
atom_num[g].tolist(),
atom_crds_ortho[g].tolist(),
charge=0)[0],
asMols=True
)[0]
mol_list.append(mol)
#strc_write = gemmi.Structure()
#strc_write.spacegroup_hm = "P1"
#strc_write.cell = cell
#with open("./make_p1_test.pdb", "w") as fopen:
# fopen.write(get_pdb_block(mol_list, strc_write))
return mol_list
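### Illustrative sketch (not executed): building the P1 cell from a parsed cif file.
### `parse_cif` is defined further below; "crystal.cif" is a placeholder path.
###
###     strc, atom_crds_ortho, atom_num = parse_cif("crystal.cif")
###     mol_list = make_P1(strc.cell, atom_crds_ortho, atom_num, addhs=False)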
def make_supercell(
cell,
mol_list,
a_min, a_max,
b_min, b_max,
c_min, c_max):
"""
    Generate a supercell based on the specified parameters. `mol_list` should contain
    the molecules of the P1 cell (see method `make_P1`). Returns a list of rdkit mol
    objects with all molecules in the supercell, a list containing an int that is
    unique among the molecules in a unit cell, and a list containing the frac
    coordinates of the unit cell origins in the basis of the supercell.
"""
a_replicate = np.arange(a_min,a_max+1, dtype=int)
b_replicate = np.arange(b_min,b_max+1, dtype=int)
c_replicate = np.arange(c_min,c_max+1, dtype=int)
N_mol = len(mol_list)
replicated_mol_list = list()
mol_identifies = list()
unitcell_in_supercell_fracs = list()
for a in a_replicate:
for b in b_replicate:
for c in c_replicate:
for mol_idx in range(N_mol):
mol = copy.deepcopy(mol_list[mol_idx])
conf = mol.GetConformer(0)
conf_pos = conf.GetPositions()
N_atoms = mol.GetNumAtoms()
for atom_idx in range(N_atoms):
pos = conf_pos[atom_idx]
frac_pos = cell.fractionalize(
gemmi.Position(*pos)
)
frac_pos.x += a
frac_pos.y += b
frac_pos.z += c
conf_pos_abc = cell.orthogonalize(frac_pos).tolist()
conf.SetAtomPosition(
atom_idx,
Point3D(*conf_pos_abc)
)
replicated_mol_list.append(mol)
conf = mol.GetConformer(0)
mol_identifies.append(mol_idx)
unitcell_in_supercell_fracs.append([a,b,c])
return replicated_mol_list, mol_identifies, unitcell_in_supercell_fracs
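### Illustrative sketch (not executed): a 3x3x3 supercell centered on the original
### unit cell, mirroring the call made inside `random_fill` above.
###
###     replicated_mol_list, mol_identifies, uc_fracs = make_supercell(
###         strc.cell,
###         mol_list,
###         -1, 1,
###         -1, 1,
###         -1, 1,
###     )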
def clean_names(mol_list):
"""
    Uniquify atom names within each molecule and assign per-molecule residue
    names and numbers (chemically equal residues are equalized later in `equalize_rdmols`).
"""
import copy
mol_list_new = list()
N_mol = len(mol_list)
for mol_idx in range(N_mol):
mol = copy.deepcopy(mol_list[mol_idx])
atom_counts_dict = dict()
for atom in mol.GetAtoms():
mi = Chem.AtomPDBResidueInfo()
mi.SetIsHeteroAtom(True)
mi.SetResidueName(f'M{mol_idx}'.ljust(3))
mi.SetResidueNumber(mol_idx + 1)
mi.SetOccupancy(1.0)
mi.SetTempFactor(0.0)
atomic_num = atom.GetAtomicNum()
atomic_ele = atom.GetSymbol()
if not atomic_num in atom_counts_dict:
atom_counts_dict[atomic_num] = 1
else:
atom_counts_dict[atomic_num] += 1
mi.SetName(
f"{atomic_ele}{atom_counts_dict[atomic_num]}".ljust(4)
)
atom.SetMonomerInfo(mi)
mol_list_new.append(mol)
return mol_list_new
def get_unique_mapping(
mol_list,
stereochemistry=True
):
"""
    Get a unique-mapping dict and the list of unique rdkit mol objects found in a list
    of rdkit mol objects. If `stereochemistry=True`, the mapping will honor stereochemistry.
"""
N_mol = len(mol_list)
smiles_list = [Chem.MolToSmiles(mol, isomericSmiles=stereochemistry) for mol in mol_list]
smiles_list_unique = set(smiles_list)
smiles_list_unique = list(smiles_list_unique)
rdmol_list_unique = list()
unique_mapping = dict()
for smiles_unique_idx, smiles_unique in enumerate(smiles_list_unique):
found_unique = False
for mol_idx in range(N_mol):
mol = mol_list[mol_idx]
smiles = smiles_list[mol_idx]
if smiles == smiles_unique:
if not found_unique:
rdmol_list_unique.append(mol)
found_unique = True
unique_mapping[mol_idx] = smiles_unique_idx
else:
unique_mapping[mol_idx] = smiles_unique_idx
assert len(unique_mapping) == N_mol
return unique_mapping, rdmol_list_unique
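### Example of the returned data (illustrative): for a `mol_list` holding
### [ethanol, water, ethanol], `rdmol_list_unique` would contain one ethanol and one
### water molecule, and `unique_mapping` would map every index in `mol_list` to the
### index of its representative in `rdmol_list_unique`, e.g. {0: 0, 1: 1, 2: 0}.
### The concrete index values depend on the ordering produced by set() above.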
def equalize_rdmols(
mol_list,
stereochemistry=True
):
"""
    Get a list of rdkit mol objects in which all chemically identical mol objects
    have identical topology and pdb monomer info. The only differences are the coordinates.
    If `stereochemistry=True`, it will honor stereochemistry.
"""
import copy
unique_mapping, rdmol_list_unique = get_unique_mapping(mol_list, stereochemistry)
mol_list_new = copy.deepcopy(mol_list)
for mol_idx in unique_mapping:
if mol_idx == unique_mapping[mol_idx]:
mol_info = copy.deepcopy(rdmol_list_unique[unique_mapping[mol_idx]])
for mol_info_atm_idx in range(mol_info.GetNumAtoms()):
mi = mol_info.GetAtomWithIdx(mol_info_atm_idx).GetMonomerInfo()
mi.SetResidueName(f'M{unique_mapping[mol_idx]}'.ljust(3))
mi.SetResidueNumber(mol_idx + 1)
mol_info.GetAtomWithIdx(mol_info_atm_idx).SetMonomerInfo(mi)
else:
### This is the molecule that holds the correct coordinates
### and pdb monomer info.
mol_crds = copy.deepcopy(mol_list[mol_idx])
### This is the molecule that holds the correct names, ordering, etc...
mol_info = copy.deepcopy(rdmol_list_unique[unique_mapping[mol_idx]])
match = mol_crds.GetSubstructMatch(mol_info, useChirality=stereochemistry)
conf_pos_crds = mol_crds.GetConformer(0).GetPositions()
conf_info = mol_info.GetConformer(0)
for mol_info_atm_idx, mol_crds_atm_idx in enumerate(match):
pos = conf_pos_crds[mol_crds_atm_idx]
conf_info.SetAtomPosition(
mol_info_atm_idx,
Point3D(*pos)
)
### Note, we cannot `copy.copy(mi_original)`
### or `copy.copy(mol_target.GetAtomWithIdx(atm_idx))`
mi = mol_info.GetAtomWithIdx(mol_info_atm_idx).GetMonomerInfo()
mi.SetResidueName(f'M{unique_mapping[mol_idx]}'.ljust(3))
mi.SetResidueNumber(mol_idx + 1)
mol_info.GetAtomWithIdx(mol_info_atm_idx).SetMonomerInfo(mi)
mol_list_new[mol_idx] = mol_info
return mol_list_new
def generate_replicated_mol_list(
cell,
atom_crds_ortho,
atom_num,
a_min_max,
b_min_max,
c_min_max,
addhs=False,
protonate_unitcell=True,
addwater=0,
N_iterations_protonation=0,
use_openeye=False,
):
"""
    Generate a list of rdkit mol objects for the molecules in the supercell. The
    supercell is generated according to the input parameters.
"""
mol_list = make_P1(cell, atom_crds_ortho, atom_num, addhs, use_openeye)
if N_iterations_protonation > 0:
mol_list = assign_protonation_states(
cell=cell,
mol_list=mol_list,
N_iterations=N_iterations_protonation
)
if addwater > 0:
mol_list = random_fill(
cell,
mol_list,
N_per_unitcell=addwater,
radius=0.5,
smiles="O"
)
mol_list = clean_names(mol_list)
replicated_mol_list, mol_identifies, unitcell_in_supercell_fracs = make_supercell(
cell,
mol_list,
a_min_max[0], a_min_max[1],
b_min_max[0], b_min_max[1],
c_min_max[0], c_min_max[1],
)
replicated_mol_list = equalize_rdmols(replicated_mol_list)
return replicated_mol_list, mol_identifies, unitcell_in_supercell_fracs
def get_pdb_block(
replicated_mol_list,
strc_write):
"""
    Get the pdb block as str. `strc_write` is a gemmi Structure object and must reflect
    the dimensions of the supercell.
"""
### Combine all rdmols in a single big rdmol
N_mol = len(replicated_mol_list)
mol_new = Chem.Mol()
for mol_idx in range(N_mol):
mol = copy.deepcopy(replicated_mol_list[mol_idx])
mol_new = Chem.CombineMols(mol_new, mol)
header = strc_write.make_pdb_headers()
### With the flavor options, one can control what is written
### to the pdb block.
###
### flavor: (optional)
### flavor & 1 : Write MODEL/ENDMDL lines around each record
### flavor & 2 : Don’t write any CONECT records
### flavor & 4 : Write CONECT records in both directions
### flavor & 8 : Don’t use multiple CONECTs to encode bond order
### flavor & 16 : Write MASTER record
### flavor & 32 : Write TER record
crds_block = Chem.MolToPDBBlock(mol_new, flavor=8|32)
pdb_block = header + crds_block
return pdb_block
def get_pdb_str(
replicated_mol_list,
strc,
a_min_max,
b_min_max,
c_min_max):
"""
Get full pdb file as str.
"""
import gemmi
### Write pdb file
### ==============
a_len = np.max(a_min_max) - np.min(a_min_max) + 1.
b_len = np.max(b_min_max) - np.min(b_min_max) + 1.
c_len = np.max(c_min_max) - np.min(c_min_max) + 1.
strc_write = gemmi.Structure()
strc_write.spacegroup_hm = strc.spacegroup_hm
strc_write.cell = gemmi.UnitCell(
strc.cell.a * a_len,
strc.cell.b * b_len,
strc.cell.c * c_len,
strc.cell.alpha,
strc.cell.beta,
strc.cell.gamma
)
pdb_block = get_pdb_block(replicated_mol_list, strc_write)
return pdb_block
def parse_cif(
cif_path,
use_symmetry_operations=False
):
"""
    Parse a cif file into a gemmi structure object.
"""
import gemmi
doc = gemmi.cif.read(cif_path)[0]
strc = gemmi.make_small_structure_from_block(doc)
### finding it by number is much better
### Sometimes the HM name cannot be found by gemmi.
table_number = -1
for item in doc:
        if item.pair is None:
continue
key, value = item.pair
if "_symmetry_Int_Tables_number".lower() == key.lower():
table_number=int(value)
break
if table_number > -1:
strc.spacegroup_hm = gemmi.find_spacegroup_by_number(table_number).hm
atom_crds_ortho = list()
atom_num = list()
if use_symmetry_operations:
op_list = doc.find_values('_symmetry_equiv_pos_as_xyz')
gops = gemmi.GroupOps([gemmi.Op(o) for o in op_list])
for site in strc.sites:
for op in gops:
pos_frac = op.apply_to_xyz(site.fract.tolist())
pos = strc.cell.orthogonalize(
gemmi.Fractional(
*pos_frac
)
).tolist()
atom_crds_ortho.append(pos)
atom_num.append(site.element.atomic_number)
else:
for site in strc.get_all_unit_cell_sites():
pos = strc.cell.orthogonalize(site.fract)
atom_crds_ortho.append([pos.x, pos.y, pos.z])
atom_num.append(site.element.atomic_number)
return strc, atom_crds_ortho, atom_num
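### Illustrative end-to-end sketch (not executed) tying the pieces above together:
### parse a cif file, build a 2x2x2 supercell and write it out as pdb. The file
### names and supercell ranges are placeholders.
###
###     strc, atom_crds_ortho, atom_num = parse_cif("crystal.cif")
###     replicated_mol_list, mol_identifies, uc_fracs = generate_replicated_mol_list(
###         cell=strc.cell,
###         atom_crds_ortho=atom_crds_ortho,
###         atom_num=atom_num,
###         a_min_max=(0, 1),
###         b_min_max=(0, 1),
###         c_min_max=(0, 1),
###     )
###     with open("supercell.pdb", "w") as fopen:
###         fopen.write(get_pdb_str(replicated_mol_list, strc, (0, 1), (0, 1), (0, 1)))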
def get_supercell_info_str(
mol_identifies,
unitcell_in_supercell_fracs
):
"""
    Takes a list of in-unit-cell molecule identifiers and the unit-cell-in-supercell
    frac coordinates (see the output generated by method `generate_replicated_mol_list`).
    Returns a csv-formatted info string.
"""
info_str = "#Mol_idx,"
info_str += "mol_in_unitcell,"
info_str += "unitcell_in_supercell_a,"
info_str += "unitcell_in_supercell_b,"
info_str += "unitcell_in_supercell_c\n"
N_mols = len(mol_identifies)
for mol_idx in range(N_mols):
info_str += f"{mol_idx:d},"
info_str += f"{mol_identifies[mol_idx]:d},"
info_str += f"{unitcell_in_supercell_fracs[mol_idx][0]:d},"
info_str += f"{unitcell_in_supercell_fracs[mol_idx][1]:d},"
info_str += f"{unitcell_in_supercell_fracs[mol_idx][2]:d}\n"
return info_str
def get_replicated_mol_list_json(replicated_mol_list):
"""
Returns rdkit json string of collapsed replicated mol_list.
"""
from rdkit import Chem
mol_combo = Chem.Mol()
for mol in replicated_mol_list:
mol_combo = Chem.CombineMols(mol_combo, mol)
return Chem.MolToJSON(mol_combo)
def main():
"""
Run the workflow.
"""
import gemmi
args = parse_arguments()
strc, atom_crds_ortho, atom_num = parse_cif(args.input, args.use_symmetry_operations)
if len(args.a_min_max) == 2:
a_min_max = [int(args.a_min_max[0]), int(args.a_min_max[1])]
elif len(args.a_min_max) == 1:
uc_length_a = strc.cell.a * 0.1
a_min_max = [0, np.ceil(args.a_min_max[0] / uc_length_a)]
else:
raise ValueError(
"Argument a_min_max must be either pair (x,y) or single value (z)"
)
if len(args.b_min_max) == 2:
b_min_max = [int(args.b_min_max[0]), int(args.b_min_max[1])]
elif len(args.b_min_max) == 1:
uc_length_b = strc.cell.b * 0.1
b_min_max = [0, np.ceil(args.b_min_max[0] / uc_length_b)]
else:
raise ValueError(
"Argument b_min_max must be either pair (x,y) or single value (z)"
)
if len(args.c_min_max) == 2:
c_min_max = [int(args.c_min_max[0]), int(args.c_min_max[1])]
elif len(args.c_min_max) == 1:
uc_length_c = strc.cell.c * 0.1
c_min_max = [0, np.ceil(args.c_min_max[0] / uc_length_c)]
else:
raise ValueError(
"Argument c_min_max must be either pair (x,y) or single value (z)"
)
### Build the supercell as a set of rdkit molecule objects
### ======================================================
replicated_mol_list, mol_identifies, unitcell_in_supercell_fracs = generate_replicated_mol_list(
cell=strc.cell,
atom_crds_ortho=atom_crds_ortho,
atom_num=atom_num,
a_min_max=a_min_max,
b_min_max=b_min_max,
c_min_max=c_min_max,
addhs=args.addhs,
addwater=args.addwater,
N_iterations_protonation=args.n_protonation_attempts,
use_openeye=args.use_openeye
)
### Write pdb file
### ==============
strc_write = gemmi.Structure()
strc_write.spacegroup_hm = "P1"
strc_write.cell = strc.cell
pdb_str = get_pdb_str(
replicated_mol_list,
strc_write,
a_min_max,
b_min_max,
c_min_max
)
with open(f"{args.prefix}.pdb", "w") as fopen:
fopen.write(pdb_str)
with open(f"{args.prefix}.csv", "w") as fopen:
info_str = get_supercell_info_str(
mol_identifies,
unitcell_in_supercell_fracs
)
fopen.write(info_str)
with open(f"{args.prefix}.json", "w") as fopen:
json_str = get_replicated_mol_list_json(replicated_mol_list)
fopen.write(json_str)
### Generate list of unique smiles for unique
### molecules in UC
### =========================================
mol_list = make_P1(strc.cell, atom_crds_ortho, atom_num, args.addhs, args.use_openeye)
if args.addwater > 0:
        ### random_fill returns a new mol list; keep it so added waters are
        ### reflected in the summary below
        mol_list = random_fill(
strc.cell,
mol_list,
N_per_unitcell=args.addwater,
radius=0.5,
smiles="O"
)
unitcell_mol_list, _, _ = make_supercell(
strc.cell,
mol_list,
0,0,
0,0,
0,0,
)
from rdkit.Chem import Descriptors
unitcell_weight = 0.
smiles_list = list()
for mol in unitcell_mol_list:
unitcell_weight += Descriptors.MolWt(mol)
smiles_list.append(Chem.MolToSmiles(mol, isomericSmiles=True))
smiles_list = set(smiles_list)
smiles_list = list(smiles_list)
### Output final summary
### ====================
a_len = np.max(a_min_max) - np.min(a_min_max) + 1.
b_len = np.max(b_min_max) - np.min(b_min_max) + 1.
c_len = np.max(c_min_max) - np.min(c_min_max) + 1.
import gemmi
doc = gemmi.cif.read(args.input)[0]
cif_info_dict = {
"temperature" : "Not found",
"cell_setting" : "Not found",
"space_group" : "Not found",
"density" : "Not found"
}
for item in doc:
        if item.pair is None:
continue
key, value = item.pair
if "_diffrn_ambient_temperature".lower() == key.lower():
cif_info_dict["temperature"] = value
elif "_symmetry_cell_setting".lower() == key.lower():
cif_info_dict["cell_setting"] = value
elif "_symmetry_space_group_name_H-M".lower() == key.lower():
cif_info_dict["space_group"] = value
elif "_exptl_crystal_density_diffrn".lower() == key.lower():
cif_info_dict["density"] = value
print(f"""
Summary:
========
Expt:
-----
Temperature [K] : {cif_info_dict['temperature']},
Cell Setting : {cif_info_dict['cell_setting']},
Space Group H-M : {cif_info_dict['space_group']},
Density [g/cm3] : {cif_info_dict['density']},
Supercell:
----------
Total number of molecules : {len(replicated_mol_list)},
Total Length edge a [Ang] : {strc.cell.a * a_len:4.2f},
Total Length edge b [Ang] : {strc.cell.b * b_len:4.2f},
Total Length edge c [Ang] : {strc.cell.c * c_len:4.2f},
Cell angle alpha [deg] : {strc.cell.alpha:4.2f},
Cell angle beta [deg] : {strc.cell.beta:4.2f},
Cell angle gamma [deg] : {strc.cell.gamma:4.2f},
Total Volume supercell [Ang3] : {strc.cell.volume * a_len * b_len * c_len:4.2f}
Density [g/cm3] : {unitcell_weight / strc.cell.volume * 1.6605:4.2f}
SMILES for molecules in UC : {" ".join(smiles_list)}
"""
### 1.6605 conversion g/mol/Ang^3 to g/cm^3
)
    try:
        import warnings  # local import in case it is not available at module level
        diff = (unitcell_weight / strc.cell.volume * 1.6605) - float(cif_info_dict["density"])
if abs(diff) > 0.1:
warnings.warn(f"Density difference {diff}. Check structure.")
except:
pass
def entry_point():
main()
if __name__ == "__main__":
entry_point()
|
from django.shortcuts import render, redirect
from rest_framework import serializers, viewsets, status
from rest_framework.response import Response  # Response/status are needed by IsAuthenticated below
from .serializers import AttributeSerializer
from .models import *
from PIL.Image import new
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import View, FormView, TemplateView, DetailView, ListView
from django.contrib.auth.forms import UserCreationForm, UserChangeForm, PasswordChangeForm, AdminPasswordChangeForm
from django.views.generic.edit import CreateView, UpdateView, FormMixin
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.template.loader import render_to_string
from django.urls import reverse_lazy
from django.core.files import File
from django.core.files.base import ContentFile
from django.core.files.temp import NamedTemporaryFile
from django.conf import settings
import datetime
import base64
import json
import random
from io import BytesIO
from .openai_api import *
from .build_image import *
from .save_to_png import *
import boto3
import urllib.request
import requests
from os.path import basename
from .tasks import *
from requests_oauthlib import OAuth1, OAuth1Session
from urllib.parse import urlencode
from rest_framework.views import APIView
from .utils import is_twitter_authenticated, update_or_create_user_token, get_user_tokens
from web3 import Web3
from .gemsContract import *
from .forms import *
w3 = Web3(Web3.HTTPProvider('https://eth-mainnet.alchemyapi.io/v2/nFzmfPoiMWnPgTBNZWU9JGmkWSAeoVIt'))
# Create your views here.
#Homepage Landing Page
def Homepage(request):
first_gem = None
example_gems = []
example_tweeters = ["JoshuaOgundu", "chriscantino", "ArlanWasHere", "0xOddrey", "OneBandwagonFan", "CPGclub"]
for person in example_tweeters:
p_index = example_tweeters.index(person)
if p_index == 0:
ci = random.randint(0, 4)
final = getTopic(person)
base_code, svg_code = get_image(final, ci)
first_gem = str("@" + person), base_code
else:
ci = random.randint(0, 4)
final = getTopic(person)
base_code, svg_code = get_image(final, ci)
result = str("@" + person), base_code
example_gems.append(result)
return render(request, "homepage.html", {"example_gems": example_gems, 'first_gem': first_gem})
def Mint(request):
current_gas = w3.eth.gas_price
gem_contract = get_gem_contract()
current_count = gem_contract.functions.totalSupply().call()
return render(request, "mint.html", {"current_count": int(current_count), "current_gas":current_gas})
def finalMint(request, twitterId):
return render(request, "mint.html", {})
def MyGems(request, wallet, twitterId, twitterRef):
gems = []
new_twitter_connection = []
assigned = True
if "none" not in twitterId:
new_twitter_connection = twitterConnection.objects.filter(id=twitterId, twitter_ref=twitterRef).first()
if new_twitter_connection:
if new_twitter_connection.meta_data:
pass
else:
assigned = False
if "none" not in wallet:
gem_contract = get_gem_contract()
addy = Web3.toChecksumAddress(wallet)
ownership = gem_contract.functions.tokensOfOwner(addy).call()
for tokenId in ownership:
meta_match, created = gemsMeta.objects.get_or_create(metaID=tokenId)
if created:
meta_match.name = "Gem #%s" % (tokenId)
meta_match.description = "A unique gem based on your tweets"
meta_match.background = random.randint(0, 4)
meta_match.save()
twitter_connection = twitterConnection.objects.filter(meta_data=meta_match).first()
if twitter_connection:
final = getTopic(twitter_connection.twitter)
base_code, _ = get_image(final, int(meta_match.background))
else:
if new_twitter_connection and assigned == False:
new_twitter_connection.meta_data = meta_match
new_twitter_connection.save()
meta_match.name = "@%s Gem" % (new_twitter_connection.twitter)
meta_match.is_anon = False
twitter_connection = new_twitter_connection
final = getTopic(new_twitter_connection.twitter)
base_code, _ = get_image(final, int(meta_match.background))
assigned = True
else:
final = getTopic('_none_')
base_code, _ = get_image(final, int(meta_match.background))
meta_match.image = base_code
meta_match.save()
result = meta_match, twitter_connection
gems.append(result)
return render(request, "mygems.html", {"gems": gems, "twitterRef": twitterRef, "wallet_addy": wallet, 'new_twitter_connection': new_twitter_connection, 'twitterId': twitterId})
def MakeAnon(request, reveal, tokenId, wallet, twitterId, twitterRef):
reveal = int(reveal)
if reveal == 1:
meta_match = gemsMeta.objects.filter(metaID=tokenId).first()
meta_match.name = "Gem #%s" % (tokenId)
meta_match.is_anon = True
meta_match.save()
else:
meta_match = gemsMeta.objects.filter(metaID=tokenId).first()
twitt_conn = twitterConnection.objects.filter(meta_data=meta_match).first()
meta_match.name = "@%s Gem" % (twitt_conn.twitter)
meta_match.is_anon = False
meta_match.save()
return redirect('my_gems', wallet=wallet, twitterId=twitterId, twitterRef=twitterRef)
def GemReset(request, tokenId, wallet, twitterId, twitterRef):
meta_match = gemsMeta.objects.filter(metaID=tokenId).first()
twitt_conn = twitterConnection.objects.filter(meta_data=meta_match).first()
twitt_conn.delete()
meta_match.name = "Gem #%s" % (tokenId)
meta_match.is_anon = True
meta_match.save()
return redirect('my_gems', wallet=wallet, twitterId='none', twitterRef='none')
def LearnMore(request):
return render(request, "learn-more.html", {})
def GemSearch(request):
if request.method == "POST":
# get the form data
form = gemSearchForm(request.POST)
q = 1
        # validate the form and pull out the requested token id
        if form.is_valid():
q = form.cleaned_data.get('tokenId')
return redirect('gem_preview', tokenId=int(q))
else:
return redirect('gem_preview', tokenId=1)
def GemPreview(request, tokenId):
meta_match = gemsMeta.objects.filter(metaID=tokenId).first()
if meta_match:
twitter_connection = twitterConnection.objects.filter(meta_data=meta_match).first()
if twitter_connection:
final, tn, subject, polar = getFullTopic(twitter_connection.twitter)
base_code, svg_code = get_image(final, int(meta_match.background))
results = save_to_png(svg_code, twitter_connection.twitter)
            with open(results, 'rb') as f:
                meta_match.image_file = File(f)
                meta_match.save()
else:
final, tn, subject, polar = getFullTopic('_none_')
base_code, _ = get_image(final, int(meta_match.background))
try:
meta_match.image = base_code
meta_match.save()
url = "https://api.opensea.io/api/v1/asset/0x0B5DdEFf4D54C1B1eE635657C17cF54135e5DB30/%s/?force_update=true" % (meta_match.metaID)
headers = {
"Accept": "application/json",
"X-API-KEY": "Connection: keep-alive"
}
response = requests.request("GET", url, headers=headers)
except:
pass
return render(request, "gem-preview.html", {"tokenId": tokenId, "meta_match": meta_match, "final": final, "tn": tn, "subject": int(subject), "polar": int(polar)})
else:
return render(request, "404.html")
def AboutUs(request):
example_gems = []
example_tweeters = ["0xoddrey", "AnnaShimuni", "Vii__Tat", "zahna_montana"]
jobs = ["Developer", "Product Design", "Art Director", "Community Manager"]
for person in example_tweeters:
index = example_tweeters.index(person)
final = getTopic(person)
ci = random.randint(0, 4)
base_code, svg_code = get_image(final, ci)
result = str("@" + person), jobs[index], base_code
example_gems.append(result)
return render(request, "about-us.html", {'example_gems': example_gems})
def MetaDataApi(request, tokenId):
if request.method == "GET":
meta_match = gemsMeta.objects.filter(metaID=tokenId).first()
if meta_match is None:
meta_match, created = gemsMeta.objects.get_or_create(metaID=tokenId)
if created:
meta_match.name = "Gem #%s" % (tokenId)
meta_match.description = "A unique gem based on your tweets"
meta_match.background = random.randint(0, 4)
final = getTopic('_none_')
base_code, _ = get_image(final, int(meta_match.background))
meta_match.image = base_code
meta_match.save()
twitter_connection = twitterConnection.objects.filter(meta_data=meta_match).first()
try:
if twitter_connection:
final = getTopic(twitter_connection.twitter)
base_code, _ = get_image(final, int(meta_match.background))
else:
final = getTopic('_none_')
base_code, _ = get_image(final, int(meta_match.background))
meta_match.image = base_code
meta_match.save()
except:
pass
return JsonResponse(AttributeSerializer(meta_match).data)
class TwitterAuthRedirectEndpoint(APIView):
def get(self, request, *args, **kwargs):
try:
oauth = OAuth1(
client_key= config('consumer_key2'),
client_secret= config('consumer_secret2'),
)
#Step one: obtaining request token
request_token_url = "https://api.twitter.com/oauth/request_token"
data = urlencode({
"oauth_callback": config('REDIRECT_URI')
})
response = requests.post(request_token_url, auth=oauth, data=data)
response.raise_for_status()
response_split = response.text.split("&")
oauth_token = response_split[0].split("=")[1]
oauth_token_secret = response_split[1].split("=")[1]
#Step two: redirecting user to Twitter
twitter_redirect_url = (
f"https://api.twitter.com/oauth/authenticate?oauth_token={oauth_token}"
)
return HttpResponseRedirect(twitter_redirect_url)
except ConnectionError:
html = "<html><body>You have no internet connection</body></html>"
return HttpResponse(html, status=403)
except Exception as ex:
html="<html><body>Something went wrong. Try Again</body></html>"
print(ex)
return HttpResponse(html, status=403)
class TwitterCallbackEndpoint(APIView):
def get(self, request, *args, **kwargs):
try:
oauth_token = request.query_params.get("oauth_token")
oauth_verifier = request.query_params.get("oauth_verifier")
oauth = OAuth1(
client_key= config('consumer_key2'),
client_secret = config('consumer_secret2'),
resource_owner_key=oauth_token,
verifier=oauth_verifier,
)
res = requests.post(
f"https://api.twitter.com/oauth/access_token", auth=oauth
)
res_split = res.text.split("&")
oauth_token=res_split[0].split("=")[1]
oauth_secret=res_split[1].split("=")[1]
user_id = res_split[2].split("=")[1] if len(res_split) > 2 else None
user_name = res_split[3].split("=")[1] if len(res_split) > 3 else None
if not request.session.exists(request.session.session_key):
request.session.create()
update_or_create_user_token(request.session.session_key, oauth_token,
oauth_secret, user_id, user_name)
twitter_match, _ = twitterConnection.objects.get_or_create(user_id=user_id, twitter=user_name)
##
twitt_ref = twitter_match.twitter_ref
redirect_url="https://www.gemsnft.co/my-gems/none/%s/%s/" % (twitter_match.id, twitt_ref)
return HttpResponseRedirect(redirect_url)
except ConnectionError:
return HttpResponse(
"<html><body>You have no internet connection</body></html>", status=403
)
except Exception as ex:
print(ex)
return HttpResponse(
"<html><body>Something went wrong.Try again.</body></html>", status=403
)
class IsAuthenticated(APIView):
def get(self, request, *args, **kwargs):
is_authenticated = is_twitter_authenticated(
self.request.session.session_key)
return Response({'status': is_authenticated}, status=status.HTTP_200_OK)
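# Illustrative sketch (assumption, not part of this module): the Twitter OAuth views
# above are typically wired up in the project's urls.py roughly as below; the paths
# shown here are placeholders and the callback route must match config('REDIRECT_URI').
#
#     from django.urls import path
#     from . import views
#
#     urlpatterns = [
#         path("twitter/login/", views.TwitterAuthRedirectEndpoint.as_view()),
#         path("twitter/callback/", views.TwitterCallbackEndpoint.as_view()),
#     ]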
|
# coding: utf-8
from __future__ import absolute_import
import unittest
from flask import json
from six import BytesIO
from openapi_server.models.feedback import Feedback # noqa: E501
from openapi_server.models.feedback_response import FeedbackResponse # noqa: E501
from openapi_server.models.result import Result # noqa: E501
from openapi_server.models.result_feedback import ResultFeedback # noqa: E501
from openapi_server.test import BaseTestCase
class TestResultController(BaseTestCase):
"""ResultController integration test stubs"""
def test_get_result(self):
"""Test case for get_result
Request stored result
"""
headers = {
'Accept': 'application/json',
}
response = self.client.open(
'/api/rtx/v1/result/{result_id}'.format(result_id=56),
method='GET',
headers=headers)
self.assert200(response,
'Response body is : ' + response.data.decode('utf-8'))
def test_get_result_feedback(self):
"""Test case for get_result_feedback
Request stored feedback for this result
"""
headers = {
'Accept': 'application/json',
}
response = self.client.open(
'/api/rtx/v1/result/{result_id}/feedback'.format(result_id=56),
method='GET',
headers=headers)
self.assert200(response,
'Response body is : ' + response.data.decode('utf-8'))
def test_post_result_feedback(self):
"""Test case for post_result_feedback
Store feedback for a particular result
"""
feedback = {
"commenter_id" : 1,
"datetime" : "2018-05-08 12:00",
"rating_id" : 1,
"result_id" : "https://rtx.ncats.io/api/rtx/v1/result/234",
"expertise_level_id" : 1,
"comment" : "This is a great result because...",
"id" : "https://rtx.ncats.io/api/rtx/v1/result/234/feedback/56",
"commenter_full_name" : "John Smith"
}
headers = {
'Accept': 'application/json',
'Content-Type': 'application/json',
}
response = self.client.open(
'/api/rtx/v1/result/{result_id}/feedback'.format(result_id=56),
method='POST',
headers=headers,
data=json.dumps(feedback),
content_type='application/json')
self.assert200(response,
'Response body is : ' + response.data.decode('utf-8'))
if __name__ == '__main__':
unittest.main()
|
"""
********************************************************************************
main file to execute your program
********************************************************************************
"""
import time
import numpy as np
import tensorflow as tf
from pinn import PINN
from config_gpu import config_gpu
from prp_dat import func_u0, func_ub, prp_grd, prp_dataset
from params import params
from make_fig import plot_sol0, plot_sol1
from plot_hist import *
def main():
config_gpu(gpu_flg = 1)
tmin, tmax = 0., 1.
xmin, xmax = -1., 1.
in_dim, out_dim, width, depth, \
w_init, b_init, act, \
lr, opt, \
f_scl, laaf, \
rho, nu, \
w_dat, w_pde, \
f_mntr, r_seed, \
n_epch, n_btch, c_tol, \
N_0, N_b, N_r = params()
t_0, x_0, t_b, x_b, t_r, x_r = prp_dataset(tmin, tmax, xmin, xmax, N_0, N_b, N_r)
u_0 = func_u0(x_0)
u_b = func_ub(x_b)
pinn = PINN(t_0, x_0, u_0,
t_b, x_b, u_b,
t_r, x_r,
Rm = in_dim, Rn = out_dim, Rl = width, depth = depth, activ = "tanh", BN = False,
w_init = "glorot_normal", b_init = "zeros",
lr = lr, opt = opt, w_0 = 1., w_b = 1., w_r = 1.,
f_mntr = 10, r_seed = 1234)
with tf.device("/device:GPU:0"):
pinn.train(n_epch, n_btch, c_tol)
plt.figure(figsize=(8,4))
plt.plot(pinn.ep_log, pinn.loss_log, alpha=.7)
plt.grid(alpha=.5)
plt.show()
# PINN inference
nt = int(1e3) + 1
nx = int(1e2) + 1
t, x, TX = prp_grd(
tmin, tmax, nt,
xmin, xmax, nx
)
t0 = time.time()
u_hat, gv_hat = pinn.predict(t, x)
t1 = time.time()
elps = t1 - t0
print("elapsed time for PINN inference (sec):", elps)
print("elapsed time for PINN inference (min):", elps / 60.)
    plot_sol1(TX, u_hat.numpy(), -1, 1, .25)
    plot_sol1(TX, gv_hat.numpy(), -1, 1, .25)
# FDM approximation
factor = 20
nt = int(factor * (nt - 1)) + 1
nx = int(factor * (nx - 1)) + 1
t, x, TX = prp_grd(
tmin, tmax, nt,
xmin, xmax, nx
)
t, x = np.linspace(tmin, tmax, nt), np.linspace(xmin, xmax, nx)
dt, dx = t[1] - t[0], x[1] - x[0]
nu = pinn.nu.numpy()
u = np.zeros([nx, nt])
# impose IC
    u[:, 0] = -np.sin(np.pi * x)
# explicit time integration
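    # First-order upwind convection plus central diffusion for Burgers' equation:
    #   u[i, n+1] = u[i, n] - dt/dx * u[i, n] * (u[i, n] - u[i-1, n])
    #                       + nu * dt/dx**2 * (u[i+1, n] - 2*u[i, n] + u[i-1, n])
    # dt has to be small enough for the chosen dx and nu to keep the scheme stable.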
t0 = time.time()
for n in range(nt - 1):
for i in range(1, nx - 1):
u[i, n + 1] = u[i, n] \
- dt / dx * u[i, n] * (u[i, n] - u[i - 1, n]) \
+ nu * dt / dx ** 2 * (u[i + 1, n] - 2 * u[i, n] + u[i - 1, n])
t1 = time.time()
elps = t1 - t0
print("elapsed time for FDM simulation (sec):", elps)
    print("elapsed time for FDM simulation (min):", elps / 60.)
plot_sol1(TX, u.reshape(-1, 1), -1, 1, .25)
if __name__ == "__main__":
main()
|
from functools import partial
from wai.annotations.core.component import ProcessorComponent
from wai.annotations.core.stream import ThenFunction, DoneFunction
from wai.annotations.core.stream.util import RequiresNoFinalisation
from wai.annotations.domain.image.object_detection import ImageObjectDetectionInstance
from wai.annotations.domain.image.object_detection.util import set_object_label
from wai.common.adams.imaging.locateobjects import LocatedObjects, LocatedObject
from wai.common.geometry import Polygon, Point
from .._format import OPEXODFormat, OPEXObject
class FromOPEXOD(
RequiresNoFinalisation,
ProcessorComponent[OPEXODFormat, ImageObjectDetectionInstance]
):
"""
Converter from OPEX annotations to internal format.
"""
def process_element(
self,
element: OPEXODFormat,
then: ThenFunction[ImageObjectDetectionInstance],
done: DoneFunction
):
# Unpack the external format
image_info, opex_objects = element
# Convert OPEX objects to located objects
located_objects = None
if len(opex_objects) > 0:
to_located_object = partial(self.to_located_object)
located_objects = LocatedObjects(map(to_located_object, opex_objects))
then(
ImageObjectDetectionInstance(
image_info,
located_objects
)
)
def to_located_object(self, object: OPEXObject) -> LocatedObject:
"""
Converts the OPEX object to a located object.
:param object:
The OPEX object.
:return:
The located object.
"""
        # Get the object label from the OPEX object
label: str = object.label
bbox = object.prediction.bbox
poly = object.prediction.polygon
points = []
for x, y in poly.points:
points.append(Point(x=x, y=y))
# Create the located object
located_object = LocatedObject(bbox.left, bbox.top, bbox.right - bbox.left + 1, bbox.bottom - bbox.top + 1)
located_object.set_polygon(Polygon(*points))
set_object_label(located_object, label)
return located_object
|
import re
# Utility class with useful functions that do not belong to a specific component of the Enigma Machine.
class Util:
alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
alphabetPos = []
for i in range(0, len(alphabet)):
alphabetPos.append(alphabet[i])
# Converts a letter of the alphabet to its numerical equivalent, i.e. A=1 ... Z=26.
# @param char The character to convert.
@staticmethod
def alphabetToNum(char):
return Util.alphabetPos.index(char) + 1
# Converts a number to its alphabet equivalent.
# @param num The number (in [1, 26]) to convert.
@staticmethod
def numToAlphabet(num):
return Util.alphabetPos[num - 1]
# Converts decrypted text (typically in 5-letter segments with various annotations) into legible form.
# @param text The text to parse.
# @return Legible text, with converted annotations.
@staticmethod
def parseText(text):
insideBracket = False
# Inner method to switch between brackets.
# @param matchObj Unused parameter for re.sub(..)
# @return The bracket to be used.
def bracketChooser(matchObj):
nonlocal insideBracket
if (not insideBracket):
insideBracket = True
return '('
insideBracket = False
return ')'
# Annotations here are based on the historical standards used by Germany in WW2
# See http://users.telenet.be/d.rijmenants/Enigma%20Sim%20Manual.pdf for more details.
# Merge 5-letter blocks, separate words by X
text = text.replace(' ', '')
text = re.sub(r'(?<!X)X(?!X)', ' ', text)
# Conversion of numbers
text = text.replace('NULL', '0')
text = text.replace('CENTA', '00')
text = text.replace('MILLE', '000')
text = text.replace('MYRIA', '0000')
text = text.replace('EINZ', '1')
text = text.replace('EINS', '1')
text = text.replace('ZWO', '2')
text = text.replace('DREI', '3')
text = text.replace('VIER', '4')
text = text.replace('FUNF', '5')
text = text.replace('SEQS', '6')
text = text.replace('SIEBEN', '7')
text = text.replace('AQT', '8')
text = text.replace('NEUN', '9')
# Conversion of abbreviations/code
text = text.replace('Q', 'CH')
text = text.replace('UD', '?')
text = text.replace('XX', ':')
text = text.replace('YY', '-')
text = text.replace('J', '*')
text = re.sub(r'KK', bracketChooser, text) # Convert KK...KK into (...)
text = re.sub(r'\bK\b|\bK^(\d|\D|\W|\w)', '.', text) # Convert single K into a fullstop.
text = re.sub(r'\s[.]|\s[.]', '.', text) # Remove incorrect spaces before full stops
text = re.sub(r'\b(\D+)\1\b', r'\1', text) # Remove duplicate words
return text
# Convert numbers into German enigma format.
# @param num The number to convert. Can either be in number format, or string format.
# @return A string containing the number, formatted for transmission by Enigma.
@staticmethod
def formatNum(num):
if (not isinstance(num, str)):
num = str(num) # We're largely using regex to convert it.
num = num.replace('0000', 'MYRIA')
num = num.replace('000', 'MILLE')
num = num.replace('00', 'CENTA')
num = num.replace('0', 'NULL')
num = num.replace('9', 'NEUN')
num = num.replace('8', 'AQT')
num = num.replace('7', 'SIEBEN')
num = num.replace('6', 'SEQS')
num = num.replace('5', 'FUNF')
num = num.replace('4', 'VIER')
num = num.replace('3', 'DREI')
num = num.replace('2', 'ZWO')
num = num.replace('1', 'EINS')
# Convert separators
num = num.replace(':', 'XX')
return num
# Formats a string into 5-letter segments.
# @param string The string to format.
# @return A formatted string, in 5 letter segments. If the last segment is less than 5 letters, it will be padded with spaces.
@staticmethod
def formatStr(string):
if (not isinstance(string, str)):
string = str(string)
string = re.sub(r'\s', '', string)
string = re.sub(r'(.{5})', r'\1 ', string)
if (not (len(string) % 6 == 0)):
string += ' ' * (5 - (len(string) % 6))
return string
# Formats a string by converting special characters to their German Enigma annotation equivalent.
# @param text The string to encode.
# @return An encoded string.
@staticmethod
def convertSpecialChar(text):
if (not isinstance(text, str)):
text = str(text)
text = text.replace('CH', 'Q')
text = text.replace('?', 'UD')
text = text.replace(':', 'XX')
text = text.replace('-', 'YY')
text = text.replace('*', 'J')
text = text.replace('.', 'K')
text = text.replace(' ', 'X')
return text
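
# Minimal usage sketch (added for illustration; not part of the original module):
if __name__ == "__main__":
    print(Util.alphabetToNum('A'))        # -> 1
    print(Util.numToAlphabet(26))         # -> Z
    print(Util.formatNum(1939))           # -> EINSNEUNDREINEUN
    print(Util.formatStr("HELLOXWORLD"))  # -> 5-letter blocks separated by spaces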
|
import sys
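# Closest-to-zero triple (two-pointer search): sort the numbers, fix one element,
# then move the le/ri pointers over the remainder looking for the pair whose sum is
# nearest to the negated fixed element; the best triple found is printed sorted.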
ifunc, g = lambda: [*map(int, sys.stdin.readline().rstrip().split())], range
n = ifunc()
iList = ifunc()
iList = sorted(iList)
ret = [-1, -1, -1]
minMinDiff = 10**10
for idx in g(len(iList)-2):
tar = -iList[idx]
le = idx+1
ri = len(iList)-1
tarLe, tarRi = -1, -1
minDiff = 10**10
while le < ri:
aSum = iList[le] + iList[ri]
if abs(aSum-tar) < minDiff:
minDiff = abs(aSum-tar)
tarLe, tarRi = le, ri
if aSum < tar:
le += 1
elif tar < aSum:
ri -= 1
else:
break
if minDiff == 0:
ret = [iList[idx], iList[tarLe], iList[tarRi]]
break
if minDiff < minMinDiff:
ret = [iList[idx], iList[tarLe], iList[tarRi]]
minMinDiff = minDiff
ret = sorted(ret)
print(*ret)
|
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2015, Anima Istanbul
#
# This module is part of anima-tools and is released under the BSD 2
# License: http://www.opensource.org/licenses/BSD-2-Clause
import sys
import maya.OpenMaya as OpenMaya
import maya.OpenMayaMPx as OpenMayaMPx
kPluginNodeTypeName = "spClosestPointOnCurve"
cpocPluginId = OpenMaya.MTypeId(0x00349)
# Node definition
class closestPointOnCurve(OpenMayaMPx.MPxNode):
# the plugs
aInCurve = OpenMaya.MObject()
aInPosition = OpenMaya.MObject()
aOutPosition = OpenMaya.MObject()
aOutPositionX = OpenMaya.MObject()
aOutPositionY = OpenMaya.MObject()
aOutPositionZ = OpenMaya.MObject()
aOutNormal = OpenMaya.MObject()
aOutNormalX = OpenMaya.MObject()
aOutNormalY = OpenMaya.MObject()
aOutNormalZ = OpenMaya.MObject()
aOutParam = OpenMaya.MObject()
def __init__(self):
OpenMayaMPx.MPxNode.__init__(self)
def compute(self, plug, dataBlock):
if plug == closestPointOnCurve.aOutPosition or plug == closestPointOnCurve.aOutParam:
dataHandle = dataBlock.inputValue(closestPointOnCurve.aInCurve)
inputAsCurve = dataHandle.asNurbsCurve()
#if not inputAsCurve.hasFn(OpenMaya.MFn.kNurbsCurve):
# return OpenMaya.kUnknownParameter
dataHandle = dataBlock.inputValue(closestPointOnCurve.aInPosition)
inPositionAsFloat3 = dataHandle.asFloat3()
inPosition = OpenMaya.MPoint(
inPositionAsFloat3[0],
inPositionAsFloat3[1],
inPositionAsFloat3[2]
)
# connect the MFnNurbsCurve
# and ask the closest point
nurbsCurveFn = OpenMaya.MFnNurbsCurve(inputAsCurve)
# get and set outPosition
outParam = OpenMaya.MScriptUtil()
outParam.createFromDouble(0)
outParamPtr = outParam.asDoublePtr()
# get position and paramater
outPosition = nurbsCurveFn.closestPoint(
inPosition, True, outParamPtr, 0.001, OpenMaya.MSpace.kWorld
)
outputHandle = dataBlock.outputValue(
closestPointOnCurve.aOutPosition
)
outputHandle.set3Float(outPosition.x, outPosition.y, outPosition.z)
# get and set outNormal
#outNormal = nurbsCurveFn.normal(parameter, OpenMaya.MSpace.kWorld)
#outputHandle = dataBlock.outputValue(closestPointOnCurve.aOutNormal)
#outputHandle.set3Float(outNormal.x, outNormal.y, outNormal.z)
#outputHandle.set3Float(0, 1, 0 )
# get and set the uvs
outputHandle = dataBlock.outputValue(closestPointOnCurve.aOutParam)
#outputHandle.setFloat(OpenMaya.MScriptUtil(outParamPtr).asDouble())
outputHandle.setFloat(OpenMaya.MScriptUtil.getDouble(outParamPtr))
dataBlock.setClean(plug)
else:
return OpenMaya.kUnknownParameter
# creator
def nodeCreator():
return OpenMayaMPx.asMPxPtr(closestPointOnCurve())
# initializer
def nodeInitializer():
tAttr = OpenMaya.MFnTypedAttribute()
nAttr = OpenMaya.MFnNumericAttribute()
# input curve
closestPointOnCurve.aInCurve = tAttr.create(
"inCurve", "ic", OpenMaya.MFnData.kNurbsCurve
)
tAttr.setStorable(0)
closestPointOnCurve.addAttribute(closestPointOnCurve.aInCurve)
# input position
closestPointOnCurve.aInPositionX = nAttr.create(
"inPositionX", "ipx", OpenMaya.MFnNumericData.kFloat, 0.0
)
nAttr.setStorable(1)
nAttr.setWritable(1)
closestPointOnCurve.addAttribute(closestPointOnCurve.aInPositionX)
closestPointOnCurve.aInPositionY = nAttr.create(
"inPositionY", "ipy", OpenMaya.MFnNumericData.kFloat, 0.0
)
nAttr.setStorable(1)
nAttr.setWritable(1)
closestPointOnCurve.addAttribute(closestPointOnCurve.aInPositionY)
closestPointOnCurve.aInPositionZ = nAttr.create(
"inPositionZ", "ipz", OpenMaya.MFnNumericData.kFloat, 0.0
)
nAttr.setStorable(1)
nAttr.setWritable(1)
closestPointOnCurve.addAttribute(closestPointOnCurve.aInPositionZ)
closestPointOnCurve.aInPosition = nAttr.create(
"inPosition", "ip",
closestPointOnCurve.aInPositionX,
closestPointOnCurve.aInPositionY,
closestPointOnCurve.aInPositionZ
)
nAttr.setStorable(1)
nAttr.setKeyable(1)
nAttr.setWritable(1)
closestPointOnCurve.addAttribute(closestPointOnCurve.aInPosition)
# output position
closestPointOnCurve.aOutPositionX = nAttr.create(
"outPositionX", "opx", OpenMaya.MFnNumericData.kFloat, 0.0
)
nAttr.setStorable(0)
nAttr.setWritable(0)
closestPointOnCurve.addAttribute(closestPointOnCurve.aOutPositionX)
closestPointOnCurve.aOutPositionY = nAttr.create(
"outPositionY", "opy", OpenMaya.MFnNumericData.kFloat, 0.0
)
nAttr.setStorable(0)
nAttr.setWritable(0)
closestPointOnCurve.addAttribute(closestPointOnCurve.aOutPositionY)
closestPointOnCurve.aOutPositionZ = nAttr.create(
"outPositionZ", "opz", OpenMaya.MFnNumericData.kFloat, 0.0
)
nAttr.setStorable(0)
nAttr.setWritable(0)
closestPointOnCurve.addAttribute(closestPointOnCurve.aOutPositionZ)
closestPointOnCurve.aOutPosition = nAttr.create(
"outPosition", "op",
closestPointOnCurve.aOutPositionX,
closestPointOnCurve.aOutPositionY,
closestPointOnCurve.aOutPositionZ
)
nAttr.setStorable(0)
nAttr.setKeyable(0)
nAttr.setWritable(1)
closestPointOnCurve.addAttribute(closestPointOnCurve.aOutPosition)
# output normal
closestPointOnCurve.aOutNormalX = nAttr.create(
"outNormalX", "onx", OpenMaya.MFnNumericData.kFloat, 0.0
)
nAttr.setStorable(0)
nAttr.setWritable(0)
closestPointOnCurve.addAttribute(closestPointOnCurve.aOutNormalX)
closestPointOnCurve.aOutNormalY = nAttr.create(
"outNormalY", "ony", OpenMaya.MFnNumericData.kFloat, 0.0
)
nAttr.setStorable(0)
nAttr.setWritable(0)
closestPointOnCurve.addAttribute(closestPointOnCurve.aOutNormalY)
closestPointOnCurve.aOutNormalZ = nAttr.create(
"outNormalZ", "onz", OpenMaya.MFnNumericData.kFloat, 0.0
)
nAttr.setStorable(0)
nAttr.setWritable(0)
closestPointOnCurve.addAttribute(closestPointOnCurve.aOutNormalZ)
closestPointOnCurve.aOutNormal = nAttr.create(
"outNormal", "on",
closestPointOnCurve.aOutNormalX,
closestPointOnCurve.aOutNormalY,
closestPointOnCurve.aOutNormalZ
)
nAttr.setStorable(0)
nAttr.setKeyable(0)
nAttr.setWritable(1)
closestPointOnCurve.addAttribute(closestPointOnCurve.aOutNormal)
closestPointOnCurve.aOutParam = nAttr.create(
"outParam", "opa", OpenMaya.MFnNumericData.kFloat, 0.0
)
nAttr.setStorable(0)
nAttr.setKeyable(0)
nAttr.setWritable(1)
closestPointOnCurve.addAttribute(closestPointOnCurve.aOutParam)
closestPointOnCurve.attributeAffects(
closestPointOnCurve.aInCurve,
closestPointOnCurve.aOutPosition
)
closestPointOnCurve.attributeAffects(
closestPointOnCurve.aInPosition,
closestPointOnCurve.aOutPosition
)
closestPointOnCurve.attributeAffects(
closestPointOnCurve.aInCurve,
closestPointOnCurve.aOutParam
)
closestPointOnCurve.attributeAffects(
closestPointOnCurve.aInPosition,
closestPointOnCurve.aOutParam
)
closestPointOnCurve.attributeAffects(
closestPointOnCurve.aInCurve,
closestPointOnCurve.aOutNormal
)
closestPointOnCurve.attributeAffects(
closestPointOnCurve.aInPosition,
closestPointOnCurve.aOutNormal
)
closestPointOnCurve.attributeAffects(
closestPointOnCurve.aOutParam,
closestPointOnCurve.aOutPosition
)
# initialize the script plug-in
def initializePlugin(mobject):
mplugin = OpenMayaMPx.MFnPlugin(mobject, "Erkan Ozgur Yilmaz","1.0.2")
try:
mplugin.registerNode(
kPluginNodeTypeName,
cpocPluginId,
nodeCreator,
nodeInitializer
)
except:
sys.stderr.write("Failed to register node: %s" % kPluginNodeTypeName)
raise
# uninitialize the script plug-in
def uninitializePlugin(mobject):
mplugin = OpenMayaMPx.MFnPlugin(mobject)
try:
mplugin.deregisterNode(cpocPluginId)
except:
sys.stderr.write("Failed to deregister node: %s" % kPluginNodeTypeName)
raise
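
# Illustrative usage sketch (assumptions: the plug-in is saved as
# "closestPointOnCurve.py" and sits on Maya's plug-in path; "curveShape1" is a
# placeholder for an existing NURBS curve shape):
#
#   import maya.cmds as cmds
#   cmds.loadPlugin("closestPointOnCurve.py")
#   node = cmds.createNode("spClosestPointOnCurve")
#   cmds.connectAttr("curveShape1.worldSpace[0]", node + ".inCurve")
#   cmds.setAttr(node + ".inPositionX", 1.0)
#   cmds.setAttr(node + ".inPositionY", 2.0)
#   cmds.setAttr(node + ".inPositionZ", 3.0)
#   print(cmds.getAttr(node + ".outPosition"), cmds.getAttr(node + ".outParam"))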
|
import sys
sys.path.append(r"C:\Program Files\Python 3.5\lib\site-packages")
import matplotlib.pyplot as plt
print(sys.path)
plt.plot([1, 2, 3, 7, 9, 23, 32, 12], [2, 1, 3, 32, 11, 5, 65, 43])
plt.show()
|
# -*- coding: utf8 -*-
# Copyright (C) 2013 Daniel Lombraña González
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pbclient
from mock import patch
from base import TestPyBossaClient
from nose.tools import assert_raises
class TestPybossaClientCategory(TestPyBossaClient):
@patch('pbclient.requests.get')
def test_get_category_not_found(self, Mock):
"""Test get category not found works"""
# App does not exist should return 404 error object
not_found = self.create_error_output(action='GET', status_code=404,
target='category', exception_cls='NotFound')
Mock.return_value = self.create_fake_request(not_found)
err = self.client.get_category(1)
self.check_error_output(err, not_found)
@patch('pbclient.requests.get')
def test_get_category_found(self, Mock):
"""Test get category found works"""
Mock.return_value = self.create_fake_request(self.category, 200)
category = self.client.get_category(1)
assert category.id == self.category['id'], category
assert category.short_name == self.category['short_name'], category
@patch('pbclient.requests.get')
def test_get_category_errors(self, Mock):
"""Test get category errors works"""
targets = ['category']
errors = {'Unauthorized': 401, 'NotFound': 404, 'Forbidden': 401,
'TypeError': 415}
for target in targets:
for error in errors.keys():
err_output = self.create_error_output(action='GET',
status_code=errors[error],
target=target,
exception_cls=error)
Mock.return_value = self.create_fake_request(err_output,
errors[error])
err = self.client.get_category(1)
self.check_error_output(err_output, err)
@patch('pbclient.requests.get')
def test_get_categories(self, Mock):
"""Test get_categories works"""
Mock.return_value = self.create_fake_request([self.category], 200)
categories = self.client.get_categories()
assert len(categories) == 1, categories
category = categories[0]
assert category.id == self.category['id'], category
assert category.short_name == self.category['short_name'], category
# Without categories
Mock.return_value = self.create_fake_request([], 200)
categories = self.client.get_categories()
assert len(categories) == 0, categories
@patch('pbclient.requests.get')
def test_get_categories_with_keyset_pagination(self, Mock):
"""Test get_categories uses keyset pagination if a last_id argument is
provided"""
Mock.return_value = self.create_fake_request([], 200)
self.client.get_categories(last_id=1, limit=3)
Mock.assert_called_once_with('http://localhost:5000/api/category',
params={'limit': 3,
'last_id': 1,
'api_key': 'tester'})
@patch('pbclient.requests.get')
def test_get_categories_error(self, Mock):
"""Test get_categories error works"""
Mock.return_value = self.create_fake_request(self.category, 200)
assert_raises(TypeError, self.client.get_categories)
@patch('pbclient.requests.get')
def test_find_category(self, Mock):
"""Test find_category works"""
Mock.return_value = self.create_fake_request([self.category], 200)
categories = self.client.find_category(short_name=self.category['short_name'])
# Only one category is found
assert len(categories) == 1, categories
category = categories[0]
assert category.id == self.category['id'], category
assert category.short_name == self.category['short_name'], category
@patch('pbclient.requests.get')
def test_find_category_errors(self, Mock):
"""Test find category errors works"""
targets = ['category']
errors = {'Unauthorized': 401, 'NotFound': 404, 'Forbidden': 401,
'TypeError': 415}
for target in targets:
for error in errors.keys():
err_output = self.create_error_output(action='GET',
status_code=errors[error],
target=target,
exception_cls=error)
Mock.return_value = self.create_fake_request(err_output,
errors[error])
err = self.client.find_category(short_name=self.category['short_name'])
self.check_error_output(err_output, err)
@patch('pbclient.requests.get')
def test_find_category_not_found(self, Mock):
"""Test find_category not found works"""
Mock.return_value = self.create_fake_request([], 200)
categories = self.client.find_category(short_name="foobar")
assert len(categories) == 0, categories
@patch('pbclient.requests.post')
def test_create_category(self, Mock):
"""Test create_category works"""
Mock.return_value = self.create_fake_request(self.category, 200)
category = self.client.create_category(
name=self.category['name'],
description=self.category['description'])
assert category.id == self.category['id']
assert category.short_name == self.category['short_name']
@patch('pbclient.requests.post')
def test_create_category_exists(self, Mock):
"""Test create_category duplicate entry works"""
already_exists = self.create_error_output(action='POST', status_code=415,
target='category', exception_cls='IntegrityError')
Mock.return_value = self.create_fake_request(already_exists, 415)
category = self.client.create_category(
name=self.category['name'],
description=self.category['description'])
self.check_error_output(category, already_exists)
@patch('pbclient.requests.post')
def test_create_category_not_allowed(self, Mock):
"""Test create_category not authorized works"""
not_authorized = self.create_error_output(action='POST', status_code=401,
target='category', exception_cls='Unauthorized')
Mock.return_value = self.create_fake_request(not_authorized, 401)
category = self.client.create_category(
name=self.category['name'],
description=self.category['description'])
self.check_error_output(category, not_authorized)
@patch('pbclient.requests.post')
def test_create_category_forbidden(self, Mock):
"""Test create_category not forbidden works"""
forbidden = self.create_error_output(action='POST', status_code=403,
target='category', exception_cls='Forbidden')
Mock.return_value = self.create_fake_request(forbidden, 403)
category = self.client.create_category(
name=self.category['name'],
description=self.category['description'])
self.check_error_output(category, forbidden)
@patch('pbclient.requests.put')
def test_update_category(self, Mock):
"""Test update_category works"""
Mock.return_value = self.create_fake_request(self.category, 200)
        category = self.client.update_category(pbclient.Category(self.category))
assert category.id == self.category['id'], category
assert category.short_name == self.category['short_name'], category
@patch('pbclient.requests.put')
def test_update_category_not_found(self, Mock):
"""Test update_category not found works"""
not_found = self.create_error_output(action='PUT', status_code=404,
target='category', exception_cls='NotFound')
Mock.return_value = self.create_fake_request(not_found, 404)
        err = self.client.update_category(pbclient.Category(self.category))
self.check_error_output(not_found, err)
@patch('pbclient.requests.put')
def test_update_category_forbidden(self, Mock):
"""Test update_category forbidden works"""
forbidden = self.create_error_output(action='PUT', status_code=403,
target='category', exception_cls='Forbidden')
Mock.return_value = self.create_fake_request(forbidden, 403)
        err = self.client.update_category(pbclient.Category(self.category))
self.check_error_output(forbidden, err)
@patch('pbclient.requests.put')
def test_update_category_unauthorized(self, Mock):
"""Test update_category unauthorized works"""
unauthorized = self.create_error_output(action='PUT', status_code=401,
target='category', exception_cls='Unauthorized')
Mock.return_value = self.create_fake_request(unauthorized, 401)
        err = self.client.update_category(pbclient.Category(self.category))
self.check_error_output(unauthorized, err)
@patch('pbclient.requests.delete')
def test_delete_category(self, Mock):
"""Test delete_category works"""
Mock.return_value = self.create_fake_request('', 204, 'text/html')
res = self.client.delete_category(1)
assert res is True, res
@patch('pbclient.requests.delete')
    def test_delete_category_error(self, Mock):
"""Test delete_category error works"""
Mock.return_value = self.create_fake_request('404', 404, 'text/html')
res = self.client.delete_category(1)
assert res == '404', res
|
#!/usr/bin/env python3
import sys
import autopipe
if __name__ == "__main__":
    sys.exit(autopipe.main())
|
from django.contrib.sitemaps import Sitemap
from django.core.urlresolvers import reverse
from .models import Product, Category
class HomeSitemap(Sitemap):
changefreq = "weekly"
priority = 0.7
def items(self):
return ['product:home']
def location(self, obj):
return reverse(obj)
class CategorySitemap(Sitemap):
changefreq = "weekly"
priority = 0.6
def items(self):
return Category.objects.all()
def lastmod(self, obj):
return obj.modified_at
class ProductSitemap(Sitemap):
changefreq = "weekly"
priority = 0.5
def items(self):
return Product.objects.all()
def lastmod(self, obj):
return obj.modified_at
|
# Generated by Django 3.2.5 on 2021-08-12 07:51
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('school', '0007_alter_studentlist_child'),
]
operations = [
migrations.AlterField(
model_name='studentlist',
name='school_class',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='student_list_class', to='school.class', verbose_name='Sınıf'),
),
]
|
from opendc.models.model import Model
from opendc.models.user import User
from opendc.util.exceptions import ClientError
from opendc.util.rest import Response
class Prefab(Model):
"""Model representing a Project."""
collection_name = 'prefabs'
def check_user_access(self, google_id):
"""Raises an error if the user with given [google_id] has insufficient access to view this prefab.
:param google_id: The Google ID of the user.
"""
user = User.from_google_id(google_id)
# TODO(Jacob) add special handling for OpenDC-provided prefabs
#try:
print(self.obj)
if self.obj['authorId'] != user.get_id() and self.obj['visibility'] == "private":
raise ClientError(Response(403, "Forbidden from retrieving prefab."))
#except KeyError:
# OpenDC-authored objects don't necessarily have an authorId
# return
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import time
import argparse
import numpy as np
from scipy.sparse import csr_matrix
import pandas as pd
import logging
from sklearn.decomposition import TruncatedSVD
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
import scanpy as sc
import anndata as ad
import matplotlib.pyplot as plt
from tqdm import tqdm
from copy import deepcopy
from utils import *
from model import Generator, Discriminator
parser = argparse.ArgumentParser()
parser.add_argument('--num_classes', type=int, default=1 , help='Number of classes for discriminator.')
parser.add_argument('--lr_gen', type=float, default=0.0001 , help='Learning rate for generator.')
parser.add_argument('--lr_dis', type=float, default=0.0001 , help='Learning rate for discriminator.')
parser.add_argument('--weight_decay', type=float, default=1e-3 , help='Weight decay.')
parser.add_argument('--gener_batch_size', type=int, default=64 , help='Batch size for generator.')
parser.add_argument('--dis_batch_size', type=int, default=32 , help='Batch size for discriminator.')
parser.add_argument('--epoch', type=int, default=200 , help='Number of epoch.')
parser.add_argument('--optim', type=str, default="Adam" , help='Choose your optimizer')
parser.add_argument('--loss', type=str, default="wgangp_eps" , help='Loss function')
parser.add_argument('--phi', type=int, default="1" , help='phi')
parser.add_argument('--beta1', type=int, default="0" , help='beta1')
parser.add_argument('--n_critic', type=int, default=5 , help='n_critic.')
parser.add_argument('--max_iter', type=int, default=500000 , help='max_iter.')
parser.add_argument('--beta2', type=float, default="0.99" , help='beta2')
parser.add_argument('--lr_decay', type=str, default=True , help='lr_decay')
parser.add_argument('--g_in_feat', type=int, default=15496 , help='in_feat.')
parser.add_argument('--g_hid_feat', type=int, default=5456 , help='hid_feat.')
parser.add_argument('--g_out_feat', type=int, default=50 , help='out_feat.')
parser.add_argument('--d_in_feat', type=int, default=15496 , help='in_feat.')
parser.add_argument('--d_hid_feat', type=int, default=7748 , help='hid_feat.')
parser.add_argument('--d_out_feat', type=int, default=50 , help='out_feat.')
parser.add_argument('--dropout', type=float, default=0.5 , help='dropout.')
parser.add_argument('--pca', type=str, default=True , help='PCA')
if torch.cuda.is_available():
dev = "cuda:0"
else:
dev = "cpu"
device = torch.device(dev)
print("Device:",device)
args = parser.parse_args()
adata_gex = ad.read_h5ad("/home/ubuntu/Documents/ahmetr/single-cell/data/explore/multiome/multiome_gex_processed_training.h5ad")
adata_atac = ad.read_h5ad("/home/ubuntu/Documents/ahmetr/single-cell/data/explore/multiome/multiome_atac_processed_training.h5ad")
"""
The GEX data has 20952 observations and 15037 features.
The ATAC data has 20952 observations and 119014 features.
"""
#print(f"The GEX data has {adata_gex.n_obs} observations and {adata_gex.n_vars} features.")
#print(f"The ATAC data has {adata_atac.n_obs} observations and {adata_atac.n_vars} features.")
"""
adata_gex.var
15037 rows × 15 columns
adata_atac.var
119014 rows × 7 columns
adata_gex.obs
20952 rows × 9 columns
adata_atac.obs
20952 rows × 8 columns
"""
"""Visualizing Gene Expression"""
#sc.pl.umap(adata_gex, color='cell_type')
#sc.pl.umap(adata_gex, color='batch')
"""Visualizing ATAC"""
#sc.pl.umap(adata_atac, color='cell_type')
#sc.pl.umap(adata_atac, color='batch')
train_cells = adata_gex.obs_names[adata_gex.obs["batch"] != "s2d4"]
test_cells = adata_gex.obs_names[adata_gex.obs["batch"] == "s2d4"]
# This will get passed to the method
input_train_mod1 = adata_atac[train_cells]
input_train_mod2 = adata_gex[train_cells]
input_test_mod1 = adata_atac[test_cells]
# This will get passed to the metric
true_test_mod2 = adata_gex[test_cells]
input_mod1 = ad.concat(
{"train": input_train_mod1, "test": input_test_mod1},
axis=0,
join="outer",
label="group",
fill_value=0,
index_unique="-",
)
# Binarize ATAC
if input_train_mod1.var["feature_types"][0] == "ATAC":
input_mod1.X[input_mod1.X > 1] = 1
elif input_train_mod2.var["feature_types"][0] == "ATAC":
input_train_mod2.X[input_mod1.X > 1] = 1
"""
if args.pca == "True":
# Do PCA on the input data
logging.info('Performing dimensionality reduction on modality 1 values...')
embedder_mod1 = TruncatedSVD(n_components=50)
print("input_mod1.X:",input_mod1.X.shape)
mod1_pca = embedder_mod1.fit_transform(input_mod1.X)
logging.info('Performing dimensionality reduction on modality 2 values...')
embedder_mod2 = TruncatedSVD(n_components=50)
mod2_pca = embedder_mod2.fit_transform(input_train_mod2.layers["log_norm"])
# split dimred mod 1 back up for training
X_train = mod1_pca[input_mod1.obs['group'] == 'train']
X_train = torch.from_numpy(X_train)
X_test = mod1_pca[input_mod1.obs['group'] == 'test']
X_test = torch.from_numpy(X_test)
y_train = mod2_pca
y_train = torch.from_numpy(y_train)
else:
#X_train = input_mod1.obs['group'] == 'train'
X_train = csr_matrix(input_mod1.X).toarray()
X_train = torch.from_numpy(X_train)
#X_test = input_mod1.obs['group'] == 'test'
X_test = csr_matrix(input_test_mod1.obs).toarray()
X_test = torch.from_numpy(X_test)
#y_train = input_train_mod2.layers["log_norm"]
y_train = input_train_mod2.layers["log_norm"]
y_train = csr_matrix(input_train_mod2.layers["log_norm"]).toarray()
y_train = torch.from_numpy(y_train)
"""
# Do PCA on the input data
logging.info('Performing dimensionality reduction on modality 1 values...')
embedder_mod1 = TruncatedSVD(n_components=50)
mod1_pca = embedder_mod1.fit_transform(input_mod1.X)
logging.info('Performing dimensionality reduction on modality 2 values...')
embedder_mod2 = TruncatedSVD(n_components=50)
mod2_pca = embedder_mod2.fit_transform(input_train_mod2.layers["log_norm"])
# split dimred mod 1 back up for training
X_train = mod1_pca[input_mod1.obs['group'] == 'train']
X_train = torch.from_numpy(X_train)
X_test = mod1_pca[input_mod1.obs['group'] == 'test']
X_test = torch.from_numpy(X_test)
y_train = mod2_pca
y_train = torch.from_numpy(y_train)
assert len(X_train) + len(X_test) == len(mod1_pca)
logging.info('Running CellGAN...')
#reg = LinearRegression()
# Train the model on the PCA reduced modality 1 and 2 data
#reg.fit(X_train, y_train) ### for train
#y_pred = reg.predict(X_test) ## for validation
# Project the predictions back to the modality 2 feature space
generator= Generator(in_feat=args.g_in_feat, hid_feat=args.g_hid_feat, out_feat=args.g_out_feat, dropout=args.dropout)#,device = device)
generator.to(device)
discriminator = Discriminator(in_feat=args.d_in_feat, hid_feat=args.d_hid_feat, out_feat=args.d_out_feat, num_classes=1, dropout=args.dropout)
discriminator.to(device)
generator.apply(inits_weight)
discriminator.apply(inits_weight)
if args.optim == 'Adam':
optim_gen = optim.Adam(filter(lambda p: p.requires_grad, generator.parameters()), lr=args.lr_gen, betas=(args.beta1, args.beta2))
optim_dis = optim.Adam(filter(lambda p: p.requires_grad, discriminator.parameters()),lr=args.lr_dis, betas=(args.beta1, args.beta2))
elif args.optim == 'SGD':
optim_gen = optim.SGD(filter(lambda p: p.requires_grad, generator.parameters()),
lr=args.lr_gen, momentum=0.9)
optim_dis = optim.SGD(filter(lambda p: p.requires_grad, discriminator.parameters()),
lr=args.lr_dis, momentum=0.9)
elif args.optim == 'RMSprop':
    optim_gen = optim.RMSprop(filter(lambda p: p.requires_grad, generator.parameters()), lr=args.lr_gen, eps=1e-08, weight_decay=args.weight_decay, momentum=0, centered=False)
optim_dis = optim.RMSprop(filter(lambda p: p.requires_grad, discriminator.parameters()), lr=args.lr_dis, eps=1e-08, weight_decay=args.weight_decay, momentum=0, centered=False)
gen_scheduler = LinearLrDecay(optim_gen, args.lr_gen, 0.0, 0, args.max_iter * args.n_critic)
dis_scheduler = LinearLrDecay(optim_dis, args.lr_dis, 0.0, 0, args.max_iter * args.n_critic)
def compute_gradient_penalty(D, real_samples, fake_samples, phi):
"""Calculates the gradient penalty loss for WGAN GP"""
    # Random weight term for interpolation between real and fake samples (samples here are 2-D: [batch, features])
    alpha = torch.Tensor(np.random.random((real_samples.size(0), 1))).to(real_samples.device)
# Get random interpolation between real and fake samples
interpolates = (alpha * real_samples + ((1 - alpha) * fake_samples)).requires_grad_(True)
d_interpolates = D(interpolates)
    fake = torch.ones([real_samples.shape[0], 1], requires_grad=False).to(real_samples.device)
# Get gradient w.r.t. interpolates
gradients = torch.autograd.grad(
outputs=d_interpolates,
inputs=interpolates,
grad_outputs=fake,
create_graph=True,
retain_graph=True,
only_inputs=True,
)[0]
gradients = gradients.contiguous().view(gradients.size(0), -1)
gradient_penalty = ((gradients.norm(2, dim=1) - phi) ** 2).mean()
return gradient_penalty
def train(generator, discriminator, optim_gen, optim_dis,
epoch, schedulers, n_critic = args.n_critic, device="cuda:0"):
gen_step = 0
generator = generator.train()
discriminator = discriminator.train()
#train_loader= ##training dataset
global_steps = 0
real_data = y_train.type(torch.cuda.FloatTensor)
print("real_data.shape:", real_data.shape)
optim_dis.zero_grad()
real_valid = discriminator(real_data)
    fake_data = generator(X_train.type(torch.cuda.FloatTensor)).detach()
fake_valid = discriminator(fake_data)
if args.loss == 'hinge':
loss_dis = torch.mean(nn.ReLU(inplace=True)(1.0 - real_valid)).to(device) + torch.mean(nn.ReLU(inplace=True)(1 + fake_valid)).to(device)
elif args.loss == 'wgangp':
gradient_penalty = compute_gradient_penalty(discriminator, real_data, fake_data.detach(), args.phi)
loss_dis = -torch.mean(real_valid) + torch.mean(fake_valid) + gradient_penalty * 10 / (args.phi ** 2)
elif args.loss == 'wgangp_eps':
gradient_penalty = compute_gradient_penalty(discriminator, real_data, fake_data.detach(), args.phi)
loss_dis = -torch.mean(real_valid) + torch.mean(fake_valid) + gradient_penalty * 10 / (args.phi ** 2)
loss_dis += (torch.mean(real_valid) ** 2) * 1e-3
loss_dis.backward()
optim_dis.step()
if global_steps % n_critic == 0:
optim_gen.zero_grad()
if schedulers:
gen_scheduler, dis_scheduler = schedulers
g_lr = gen_scheduler.step(global_steps)
d_lr = dis_scheduler.step(global_steps)
        generated_samples = generator(X_train.type(torch.cuda.FloatTensor))
fake_valid = discriminator(generated_samples)
gener_loss = -torch.mean(fake_valid).to(device)
gener_loss.backward()
optim_gen.step()
gen_step += 1
"""
for index, (data, _) in enumerate(y_train): #### X_train ?????
real_data = data.type(torch.cuda.FloatTensor)
optim_dis.zero_gard()
real_valid = discriminator(real_data)
fake_data = generator(X_train).detach()
fake_valid = discriminator(fake_data)
if args.loss == 'hinge':
loss_dis = torch.mean(nn.ReLU(inplace=True)(1.0 - real_valid)).to(device) + torch.mean(nn.ReLU(inplace=True)(1 + fake_valid)).to(device)
elif args.loss == 'wgangp':
gradient_penalty = compute_gradient_penalty(discriminator, real_data, fake_data.detach(), args.phi)
loss_dis = -torch.mean(real_valid) + torch.mean(fake_valid) + gradient_penalty * 10 / (args.phi ** 2)
elif args.loss == 'wgangp_eps':
gradient_penalty = compute_gradient_penalty(discriminator, real_data, fake_data.detach(), args.phi)
loss_dis = -torch.mean(real_valid) + torch.mean(fake_valid) + gradient_penalty * 10 / (args.phi ** 2)
loss_dis += (torch.mean(real_valid) ** 2) * 1e-3
loss_dis.backward()
optim_dis.step()
if global_steps % n_critic == 0:
optim_gen.zero_grad()
if schedulers:
gen_scheduler, dis_scheduler = schedulers
g_lr = gen_scheduler.step(global_steps)
d_lr = dis_scheduler.step(global_steps)
generated_samples= generator(X_train)
fake_valid = discriminator(generated_samples)
gener_loss = -torch.mean(fake_valid).to(device)
gener_loss.backward()
optim_gen.step()
gen_step += 1
"""
def validate(generator, calculate_rmse): #writer_dict,
#writer = writer_dict['writer']
#global_steps = writer_dict['valid_global_steps']
    generator.eval()
    with torch.no_grad():
        y_pred = generator(X_test.type(torch.cuda.FloatTensor)).cpu().numpy()
    # Project the predictions back to the modality-2 feature space
    y_pred = y_pred @ embedder_mod2.components_
    pred_test_mod2 = ad.AnnData(X=y_pred, obs=input_test_mod1.obs, var=input_train_mod2.var)
rmse = calculate_rmse(true_test_mod2, pred_test_mod2)
#fid_score = get_fid(fid_stat, epoch, generator, num_img=5000, val_batch_size=60*2, latent_dim=1024, writer_dict=None, cls_idx=None)
print("RMSE score:", rmse)
#print(f"FID score: {fid_score}")
#writer.add_scalar('FID_score', fid_score, global_steps)
#writer_dict['valid_global_steps'] = global_steps + 1
return rmse
for epoch in range(args.epoch):
    t = time.time()
    lr_schedulers = (gen_scheduler, dis_scheduler) if args.lr_decay else None
    train(generator, discriminator, optim_gen, optim_dis,
          epoch, lr_schedulers, n_critic=args.n_critic, device="cuda:0")
    score = validate(generator, calculate_rmse)
    print('Epoch: {:04d}'.format(epoch+1),
          'RMSE: {:.4f}'.format(score),
          'time: {:.4f}s'.format(time.time() - t))
score = validate(generator, calculate_rmse)
|
# Allow slice-creator to change their slice even after other users voted for it
# Slices are un-modifiable
# If user changes their slice, create new slice record
# Deduplicate slice-records by content
# Remove the slice-record if votes=0 and fromEditPage=false
# Also allows user to vote for slice that was changed after display
# Also enables search-indexing without changing slice-content
#
# Uses title-only records to help retrieve top titles for budget-results
# Risks transaction failure because title-records are somewhat contended
# Title-record also stores slice-size distribution for slice-votes with that title, to get median size for title
# Import external modules
from collections import Counter, namedtuple
import datetime
from google.appengine.ext import ndb
from google.appengine.api import search
import hashlib
import logging
import re
import time
# Import app modules
from configBudget import const as conf
import autocomplete.stats as stats
import text
# Constants
conf.MAX_RETRY = 3
conf.MAX_VOTE_RETRY = 3
conf.CHAR_LENGTH_UNIT = 100
conf.MAX_SLICE_SUGGESTIONS = 3
conf.NUM_FREQ_SLICE_SUGGESTIONS = min( 1, conf.MAX_SLICE_SUGGESTIONS - 1 ) # Should be less than MAX_SLICE_SUGGESTIONS
def standardizeContent( content ):
content = text.formTextToStored( content ) if content else None
content = content.strip(' \n\r\x0b\x0c') if content else None # For now keep TAB to delimit slice from reason
return content if content else None
def voteCountToScore( voteCount, title, reason ):
contentLen = ( len(title) if title else 0 ) + ( len(reason) if reason else 0 )
# score = votes per CHAR_LENGTH_UNITs used
unitsUsed = ( float(contentLen) / float(conf.CHAR_LENGTH_UNIT) ) if contentLen >= conf.CHAR_LENGTH_UNIT else 1.0
return float(voteCount) / float(unitsUsed)
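# Worked example of the scoring above (illustrative only, not used in production):
# 3 votes on a slice whose title + reason total 250 characters use 250/100 = 2.5
# length-units, so the score is 3 / 2.5 = 1.2 votes per length-unit.
def _exampleScore():
    return voteCountToScore( 3, 'x' * 100, 'y' * 150 )  # == 1.2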
# Persistent-record for unique slice-title & reason
class Slice( ndb.Model ):
budgetId = ndb.StringProperty() # To verify slice belongs to budget, and indexed to retrieve popular slices
title = ndb.StringProperty() # Indexed to retrieve popular reasons per title
reason = ndb.StringProperty()
creator = ndb.StringProperty() # Indexed to check slice sum is valid
fromEditPage = ndb.BooleanProperty() # To keep slices from budget-creator only from edit-page
voteCount = ndb.IntegerProperty( default=0 )
sizeToCount = ndb.JsonProperty( default={} ) # map[ size -> vote-count ]
score = ndb.FloatProperty( default=0 )
# For matching input-words to make suggestions
words = ndb.StringProperty( repeated=True )
# Key slices by budgetId+hash(content), to prevent duplicates
# Prevents problem of voting for slice by ID that was deleted (down-voted) between display & vote
@staticmethod
def toKeyId( budgetId, title, reason ):
hasher = hashlib.md5()
if title is None: title = ''
if reason is None: reason = ''
hasher.update( text.utf8(title + '\t' + reason) )
return '{}-{}'.format( budgetId, hasher.hexdigest() )
@staticmethod
def create( budgetId, title, reason, creator=None, fromEditPage=False ):
slice = Slice( id=Slice.toKeyId(budgetId, title, reason),
budgetId=budgetId, title=title, reason=reason, creator=creator, fromEditPage=fromEditPage )
# Index content words
content = ' '.join( [ w for w in [title, reason] if w ] )
words = text.uniqueInOrder( text.removeStopWords( text.tokenize(content) ) )
words = words[ 0 : conf.MAX_WORDS_INDEXED ] # Limit number of words indexed
slice.words = text.tuples( words, maxSize=2 )
return slice
@staticmethod
def get( budgetId, title, reason ):
return Slice.get_by_id( Slice.toKeyId( budgetId, title, reason ) )
def hasTitle( self ):
return self.title and self.title.strip()
def hasTitleAndReason( self ):
return self.title and self.title.strip() and self.reason and self.reason.strip()
def incrementSizeCount( self, size, increment ):
size = str( size ) # JSON-field stores keys as strings
countOld = self.sizeToCount.get( size, 0 )
self.sizeToCount[ size ] = max( 0, countOld + increment ) # Do not allow negative counts
self.sizeToCount = { s:c for s,c in self.sizeToCount.iteritems() if 0 < c } # Filter zeros
def medianSize( self ): return stats.medianKey( self.sizeToCount )
def sumScoreBelowSize( self, size ):
return voteCountToScore( self.countVotesBelowSize(size), self.title, self.reason )
def sumScoreAboveSize( self, size ):
return voteCountToScore( self.countVotesAboveSize(size), self.title, self.reason )
def countVotesBelowSize( self, size ):
resultSum = sum( [ c for s, c in self.sizeToCount.iteritems() if int(s) < size ] )
logging.debug( 'countVotesBelowSize() resultSum=' + str(resultSum) + ' size=' + str(size) + ' sizeToCount=' + str(self.sizeToCount) )
return resultSum
def countVotesAboveSize( self, size ):
resultSum = sum( [ c for s, c in self.sizeToCount.iteritems() if size < int(s) ] )
logging.debug( 'countVotesAboveSize() resultSum=' + str(resultSum) + ' size=' + str(size) + ' sizeToCount=' + str(self.sizeToCount) )
return resultSum
# Persistent-record for unique slice-title
class SliceTitle( ndb.Model ):
budgetId = ndb.StringProperty() # To verify title belongs to budget, and indexed to retrieve popular titles
title = ndb.StringProperty() # Indexed to retrieve popular reasons per title
voteCount = ndb.IntegerProperty( default=0 ) # Equal to sum of sizeToCount, indexed to retrieve top titles
sizeToCount = ndb.JsonProperty( default={} ) # map[ size -> vote-count ]
# Key slices by budgetId+hash(title)
@staticmethod
def toKeyId( budgetId, title ):
hasher = hashlib.md5()
hasher.update( text.utf8(title) )
return '{}:{}'.format( budgetId, hasher.hexdigest() )
@staticmethod
def create( budgetId, title ):
return SliceTitle( id=SliceTitle.toKeyId(budgetId, title), budgetId=budgetId, title=title )
@staticmethod
def get( budgetId, title ):
return SliceTitle.get_by_id( SliceTitle.toKeyId( budgetId, title ) )
def incrementSizeCount( self, size, increment ):
logging.debug( 'SliceTitle.incrementSizeCount() size=' + str(size) + ' increment=' + str(increment) )
size = str( size ) # JSON-field stores keys as strings
countOld = self.sizeToCount.get( size, 0 )
self.sizeToCount[ size ] = max( 0, countOld + increment ) # Do not allow negative counts
self.sizeToCount = { s:c for s,c in self.sizeToCount.iteritems() if 0 < c } # Filter zeros
logging.debug( 'SliceTitle.incrementSizeCount() sizeToCount=' + str(self.sizeToCount) )
def medianSize( self ): return stats.medianKey( self.sizeToCount )
def toDisplay( self, userId ):
return {
'id': str(self.key.id()) ,
'title': self.title ,
'votes': self.voteCount ,
'medianSize': self.medianSize() ,
}
# Record-class for storing budget x user -> all slice-votes for this user
class SliceVotes( ndb.Model ):
# Indexed fields for querying
userId = ndb.StringProperty()
budgetId = ndb.StringProperty()
slices = ndb.JsonProperty( default={} ) # map[ sliceId -> size ]
def slicesTotalSize( self ):
return sum( [size for sliceId, size in self.slices.iteritems()] )
@staticmethod
def toKeyId( budgetId, userId ):
return '{}-{}'.format( budgetId, userId )
@staticmethod
def create( budgetId, userId ):
voteId = SliceVotes.toKeyId( budgetId, userId )
return SliceVotes( id=voteId, userId=userId, budgetId=budgetId )
@staticmethod
def get( budgetId, userId ):
voteId = SliceVotes.toKeyId( budgetId, userId )
return SliceVotes.get_by_id( voteId )
# Returns series[ SliceTitle ]
def retrieveTopSliceTitlesByVotes( budgetId ):
logging.debug( 'retrieveTopSliceTitlesByVotes() budgetId=' + str(budgetId) )
# Retrieve top title-records by vote-count
sliceBatchSize = 20
    titleRecords = SliceTitle.query( SliceTitle.budgetId==budgetId ).order( -SliceTitle.voteCount ).fetch( sliceBatchSize )
# Limit titles to amount summing to size=100%
titleRecordsCapped = [ ]
sumSizes = 0
for t in titleRecords:
if t is None: continue
sumSizes += t.medianSize()
if sumSizes > 100: break
titleRecordsCapped.append( t )
return sorted( titleRecordsCapped , key=lambda t:-t.voteCount )
# Returns series[ slice-record ] with top vote-counts
def retrieveTopSliceReasonsByVotes( budgetId, title, maxSlices=10 ):
logging.debug( 'retrieveTopSliceReasonsByVotes() budgetId=' + str(budgetId) )
sliceRecords = Slice.query( Slice.budgetId==budgetId, Slice.title==title ).order( -Slice.voteCount ).fetch( maxSlices )
return sorted( sliceRecords , key=lambda s:-s.voteCount )
# Returns series[ slice-record ] with top scores
def retrieveTopSliceReasonsByScore( budgetId, title, maxSlices=10 ):
logging.debug( 'retrieveTopSlicesByScore() budgetId=' + str(budgetId) + ' maxSlices=' + str(maxSlices) )
sliceRecords = Slice.query( Slice.budgetId==budgetId, Slice.title==title ).order( -Slice.score ).fetch( maxSlices )
return sorted( sliceRecords , key=lambda s:-s.score )
# Returns series[ slice-record ]
def retrieveTopSlicesByScoreForStart( budgetId, sliceStart, hideReasons=False ):
logging.debug(('retrieveTopSlicesByScoreForStart()', 'sliceStart=', sliceStart))
    # sliceStart is always present: since slice-suggestions are not pre-populated, there must always be slice input
sliceRecords = []
inputWords = text.uniqueInOrder( text.removeStopWords( text.tokenize(sliceStart) ) )
logging.debug( 'retrieveTopSlices() inputWords=' + str(inputWords) )
if inputWords and (0 < len(inputWords)):
# Retrieve top-voted slice-records matching last input-word. Results will be collected and match-scored in client.
# Only one inequality filter per query is supported, so cannot require both title and reason are non-null
sliceRecords = Slice.query( Slice.budgetId==budgetId, Slice.words==inputWords[-1] ).order( -Slice.score ).fetch( 1 )
# Retrieve for last input-word-pair
if ( 2 <= len(inputWords) ):
            wordPair = ' '.join( inputWords[-2:] )
            sliceRecords += Slice.query( Slice.budgetId==budgetId, Slice.words==wordPair ).order( -Slice.score ).fetch( 1 )
logging.debug( 'retrieveTopSlices() sliceRecords=' + str(sliceRecords) )
# Filter out empty title/reason
# There should be no records missing title & reason, since these should not be saveable, and should not word-match
if hideReasons: sliceRecords = filter( lambda s: s.hasTitle() , sliceRecords )
else: sliceRecords = filter( lambda s: s.hasTitleAndReason() , sliceRecords )
return sliceRecords
# sliceContent may be null
# Returns Slice, SliceVote
# If any slice vote increment fails... then undo sliceVote._setVote() and all slice vote increments via transaction
@ndb.transactional(xg=True, retries=conf.MAX_VOTE_RETRY) # Cross-table transaction is ok because vote record (user x slice) is not contended, and slice vote-count record is locking anyway
def vote( budgetId, title, reason, sizeNew, userId ):
logging.debug(('vote()', 'sizeNew=', sizeNew, 'title=', title, 'reason=', reason))
# Store vote for slice
sliceId = Slice.toKeyId( budgetId, title, reason )
logging.debug(('vote()', 'sliceId=', sliceId))
voteRecord = SliceVotes.get( budgetId, userId )
existingVote = (voteRecord is not None) and (voteRecord.slices is not None) and (sliceId in voteRecord.slices)
sizeOld = voteRecord.slices.get(sliceId, 0) if (voteRecord is not None) and (voteRecord.slices is not None) else 0
logging.debug(('vote()', 'sizeOld=', sizeOld, 'existingVote=', existingVote))
logging.debug(('vote()', 'voteRecord=', voteRecord))
if (0 < sizeNew):
if voteRecord is None: voteRecord = SliceVotes.create( budgetId, userId )
voteRecord.slices[ sliceId ] = sizeNew
if ( conf.SLICE_SIZE_SUM_MAX < voteRecord.slicesTotalSize() ):
logging.debug(('vote()', 'voteRecord.slicesTotalSize()=', voteRecord.slicesTotalSize()))
return None, None, False
voteRecord.put()
elif existingVote: # and sizeNew <= 0...
del voteRecord.slices[ sliceId ]
voteRecord.put()
logging.debug(('vote()', 'voteRecord=', voteRecord))
# Increment and store vote aggregates in slice & title-records, during the same transaction
sliceRecord = __incrementSliceVoteCount( sizeOld, sizeNew, budgetId, title, reason )
__incrementTitleVoteCount( sizeOld, sizeNew, budgetId, title )
logging.debug(('vote()', 'sliceRecord=', sliceRecord))
return sliceRecord, voteRecord, True
# Increment vote count, inside another transaction
# Updates or deletes title-record
# Returns updated SliceTitle record, or throws transaction-conflict exception
# Title-record update is more contended than reason-record update
def __incrementTitleVoteCount( sizeOld, sizeNew, budgetId, title ):
logging.debug(('__incrementTitleVoteCount()', 'sizeOld=', sizeOld, 'sizeNew=', sizeNew, 'budgetId=', budgetId, 'title=', title))
voteIncrement = 0
if ( sizeOld == sizeNew ): return
elif ( sizeOld <= 0 ) and ( 0 < sizeNew ): voteIncrement = 1
elif ( 0 < sizeOld ) and ( sizeNew <= 0 ): voteIncrement = -1
logging.debug(('__incrementTitleVoteCount()', 'voteIncrement=', voteIncrement))
# If title-record does not exist...
titleRecord = SliceTitle.get( budgetId, title )
if titleRecord is None:
# Decrement is redundant
if ( sizeNew <= 0 ): return
# Create record to increment
else: titleRecord = SliceTitle.create( budgetId, title )
if budgetId != titleRecord.budgetId: raise ValueError('budgetId != titleRecord.budgetId')
# Update title-record voteCount field
titleRecord.voteCount = max( 0, titleRecord.voteCount + voteIncrement )
logging.debug(('__incrementTitleVoteCount()', 'titleRecord=', titleRecord))
# If title has votes... update title-record
if (0 < titleRecord.voteCount):
# Update title-record fields
titleRecord.incrementSizeCount( sizeOld, -1 )
titleRecord.incrementSizeCount( sizeNew, 1 )
logging.debug(('__incrementTitleVoteCount()', 'overwriting titleRecord=', titleRecord))
# Store title-record
titleRecord.put()
# If title has no votes... delete title-record
else:
logging.debug(('__incrementTitleVoteCount()', 'deleting titleRecord=', titleRecord))
titleRecord.key.delete()
# Increment vote count, inside another transaction
# Updates or deletes slice-record
# Returns updated Slice record, or throws transaction Conflict exception
def __incrementSliceVoteCount( sizeOld, sizeNew, budgetId, title, reason ):
logging.debug(('__incrementSliceVoteCount()', 'sizeOld=', sizeOld, 'sizeNew=', sizeNew, 'budgetId=', budgetId, 'title=', title, 'reason=', reason))
voteIncrement = 0
if ( sizeOld == sizeNew ):
sliceRecord = Slice.get( budgetId, title, reason )
logging.debug(('__incrementSliceVoteCount()', 'sizeOld=sizeNew sliceRecord=', sliceRecord))
elif ( sizeOld <= 0 ) and ( 0 < sizeNew ): voteIncrement = 1
elif ( 0 < sizeOld ) and ( sizeNew <= 0 ): voteIncrement = -1
logging.debug(('__incrementSliceVoteCount()', 'voteIncrement=', voteIncrement))
# If slice-record does not exist...
sliceRecord = Slice.get( budgetId, title, reason )
if sliceRecord is None:
# Decrement is redundant
if ( sizeNew <= 0 ): return None
# Create record to increment
else: sliceRecord = Slice.create( budgetId, title, reason )
if budgetId != sliceRecord.budgetId: raise ValueError('budgetId != sliceRecord.budgetId')
# Update slice-record vote-count field
sliceRecord.voteCount = max( 0, sliceRecord.voteCount + voteIncrement )
logging.debug(('__incrementSliceVoteCount()', 'sliceRecord=', sliceRecord))
# If slice has votes or comes from survey-creator... keep slice record
if (0 < sliceRecord.voteCount) or sliceRecord.fromEditPage:
sliceRecord.incrementSizeCount( sizeOld, -1 )
sliceRecord.incrementSizeCount( sizeNew, 1 )
# Update score field
sliceRecord.score = voteCountToScore( sliceRecord.voteCount, sliceRecord.title, sliceRecord.reason )
logging.debug(('__incrementSliceVoteCount()', 'overwriting sliceRecord=', sliceRecord))
# Store slice-record
sliceRecord.put()
return sliceRecord
# If slice has no votes and not from survey-creator... delete slice-record
else:
logging.debug(('__incrementSliceVoteCount()', 'deleting sliceRecord=', sliceRecord))
sliceRecord.key.delete()
return None
|
# -*- coding: utf-8 -*-
import os
from datetime import date, datetime, timedelta
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.core.management import call_command
from django.core.management.base import CommandError
import mock
from nose.tools import eq_
import amo
import amo.tests
from addons.models import Addon
from lib.es.management.commands.reindex import flag_database, unflag_database
from users.models import UserProfile
import mkt
from mkt.site.fixtures import fixture
from mkt.webapps.cron import (clean_old_signed, update_app_trending,
update_weekly_downloads)
from mkt.webapps.tasks import _get_trending
from mkt.webapps.models import Installed, Webapp
class TestWeeklyDownloads(amo.tests.TestCase):
fixtures = ['base/users']
def setUp(self):
self.addon = Webapp.objects.create(type=amo.ADDON_WEBAPP)
self.user = UserProfile.objects.get(pk=999)
def get_webapp(self):
return Webapp.objects.get(pk=self.addon.pk)
def add_install(self, addon=None, user=None, created=None):
install = Installed.objects.create(addon=addon or self.addon,
user=user or self.user)
if created:
install.update(created=created)
return install
def test_weekly_downloads(self):
eq_(self.get_webapp().weekly_downloads, 0)
self.add_install()
self.add_install(user=UserProfile.objects.get(pk=10482),
created=datetime.today() - timedelta(days=2))
update_weekly_downloads()
eq_(self.get_webapp().weekly_downloads, 2)
def test_weekly_downloads_flagged(self):
eq_(self.get_webapp().weekly_downloads, 0)
self.add_install()
self.add_install(user=UserProfile.objects.get(pk=10482),
created=datetime.today() - timedelta(days=2))
flag_database('new', 'old', 'alias')
try:
# Should fail.
self.assertRaises(CommandError, update_weekly_downloads)
eq_(self.get_webapp().weekly_downloads, 0)
# Should work with the environ flag.
os.environ['FORCE_INDEXING'] = '1'
update_weekly_downloads()
finally:
unflag_database()
del os.environ['FORCE_INDEXING']
eq_(self.get_webapp().weekly_downloads, 2)
def test_recently(self):
self.add_install(created=datetime.today() - timedelta(days=6))
update_weekly_downloads()
eq_(self.get_webapp().weekly_downloads, 1)
def test_long_ago(self):
self.add_install(created=datetime.today() - timedelta(days=8))
update_weekly_downloads()
eq_(self.get_webapp().weekly_downloads, 0)
def test_addon(self):
self.addon.update(type=amo.ADDON_EXTENSION)
self.add_install()
update_weekly_downloads()
eq_(Addon.objects.get(pk=self.addon.pk).weekly_downloads, 0)
class TestCleanup(amo.tests.TestCase):
def setUp(self):
self.file = os.path.join(settings.SIGNED_APPS_REVIEWER_PATH,
'1', 'x.z')
def test_not_cleaned(self):
storage.open(self.file, 'w')
clean_old_signed()
assert storage.exists(self.file)
def test_cleaned(self):
storage.open(self.file, 'w')
clean_old_signed(-60)
assert not storage.exists(self.file)
@mock.patch('lib.crypto.packaged.sign_app')
class TestSignApps(amo.tests.TestCase):
fixtures = fixture('webapp_337141')
def setUp(self):
self.app = Addon.objects.get(id=337141)
self.app.update(is_packaged=True)
self.app2 = amo.tests.app_factory(
name='Mozillaball ょ', app_slug='test',
is_packaged=True, version_kw={'version': '1.0',
'created': None})
self.app3 = amo.tests.app_factory(
name='Test app 3', app_slug='test3', status=amo.STATUS_REJECTED,
is_packaged=True, version_kw={'version': '1.0',
'created': None})
def test_by_webapp(self, sign_mock):
v1 = self.app.get_version()
call_command('sign_apps', webapps=str(v1.pk))
file1 = v1.all_files[0]
assert sign_mock.called_with(((file1.file_path,
file1.signed_file_path),))
def test_all(self, sign_mock):
v1 = self.app.get_version()
v2 = self.app2.get_version()
call_command('sign_apps')
file1 = v1.all_files[0]
file2 = v2.all_files[0]
eq_(len(sign_mock.mock_calls), 2)
eq_(sign_mock.mock_calls[0][1][:2],
(file1.file_path, file1.signed_file_path))
eq_(sign_mock.mock_calls[1][1][:2],
(file2.file_path, file2.signed_file_path))
class TestUpdateTrending(amo.tests.TestCase):
def setUp(self):
self.app = Webapp.objects.create(type=amo.ADDON_WEBAPP)
@mock.patch('mkt.webapps.tasks._get_trending')
def test_trending_saved(self, _mock):
_mock.return_value = 12.0
update_app_trending()
eq_(self.app.get_trending(), 12.0)
for region in mkt.regions.REGIONS_DICT.values():
eq_(self.app.get_trending(region=region), 12.0)
# Test running again updates the values as we'd expect.
_mock.return_value = 2.0
update_app_trending()
eq_(self.app.get_trending(), 2.0)
for region in mkt.regions.REGIONS_DICT.values():
eq_(self.app.get_trending(region=region), 2.0)
@mock.patch('mkt.webapps.tasks.get_monolith_client')
def test_get_trending(self, _mock):
client = mock.Mock()
client.return_value = [
{'count': 133.0, 'date': date(2013, 8, 26)},
{'count': 122.0, 'date': date(2013, 9, 2)},
]
_mock.return_value = client
# 1st week count: 133 + 122 = 255
# Prior 3 weeks get averaged: (133 + 122) / 3 = 85
# (255 - 85) / 85 = 2.0
eq_(_get_trending(self.app.id), 2.0)
@mock.patch('mkt.webapps.tasks.get_monolith_client')
def test_get_trending_threshold(self, _mock):
client = mock.Mock()
client.return_value = [
{'count': 49.0, 'date': date(2013, 8, 26)},
{'count': 50.0, 'date': date(2013, 9, 2)},
]
_mock.return_value = client
# 1st week count: 49 + 50 = 99
# 99 is less than 100 so we return 0.0.
eq_(_get_trending(self.app.id), 0.0)
@mock.patch('mkt.webapps.tasks.get_monolith_client')
def test_get_trending_monolith_error(self, _mock):
client = mock.Mock()
client.side_effect = ValueError
_mock.return_value = client
eq_(_get_trending(self.app.id), 0.0)
|
# Copyright 2020 Maintainers of OarphPy
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import pathlib
from contextlib import contextmanager
from oarphpy.util.misc import GPUS_UNRESTRICTED
# `os` and `pathlib` are needed by the summary/TFRecords helpers below; a
# standard module logger stands in for the logging helper used via `log.info`.
log = logging.getLogger(__name__)
def tf_create_session_config(restrict_gpus=GPUS_UNRESTRICTED, extra_opts=None):
extra_opts = extra_opts or {}
import tensorflow as tf
config = tf.compat.v1.ConfigProto()
tf_session_config_restrict_gpus(config, restrict_gpus=restrict_gpus)
config.log_device_placement = False
# # Enable CPU XLA!
# config.graph_options.optimizer_options.global_jit_level = \
# tf.OptimizerOptions.ON_1
for k, v in extra_opts.items():
setattr(config, k, v)
return config
def tf_session_config_restrict_gpus(config, restrict_gpus=GPUS_UNRESTRICTED):
if restrict_gpus is GPUS_UNRESTRICTED:
config.allow_soft_placement = True
else:
config.device_count['GPU'] = len(restrict_gpus)
config.gpu_options.visible_device_list = (
','.join(str(g) for g in restrict_gpus))
config.gpu_options.allow_growth = True
def tf_create_session(config=None):
config = config or tf_create_session_config()
import tensorflow as tf
sess = tf.compat.v1.Session(config=config)
return sess
def tf_cpu_session_config():
return tf_create_session_config(restrict_gpus=[])
def tf_cpu_session(config=None):
if not config:
config = tf_cpu_session_config()
else:
tf_session_config_restrict_gpus(config, restrict_gpus=[])
return tf_create_session(config=config)
@contextmanager
def tf_data_session(dataset, sess=None, config=None):
import tensorflow as tf
# Must declare these before the graph gets finalized below
iterator = tf.compat.v1.data.make_one_shot_iterator(dataset)
next_element = iterator.get_next()
# Silly way to iterate over a tf.Dataset
# https://stackoverflow.com/a/47917849
sess = sess or tf_cpu_session()
with sess as sess:
def iter_dataset():
# see MonitoredTrainingSession.StepContext
while True:
try:
# with loop_until_data_exausted():
yield sess.run(next_element)
except (tf.errors.OutOfRangeError, StopIteration):
break
yield sess, iter_dataset
# TPUS don't support strings :P but they do support lists of integers
def to_padded_codepoint_list(s, max_len=5000):
s_enc = s.encode('utf-8')
return list(s_enc[:max_len]) + ([0] * (max_len - len(s_enc)))
def from_padded_codepoint_list(l, null_value=0):
return ''.join(
chr(el) for el in l # May only work in python3?
if (null_value is None or el != null_value))
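# Minimal round-trip sketch for the two helpers above (illustrative only; the
# expected values are what the helpers produce for this input, max_len=8 is
# chosen just for brevity).
def _demo_padded_codepoints():
  codes = to_padded_codepoint_list('hi', max_len=8)
  # codes == [104, 105, 0, 0, 0, 0, 0, 0]
  assert from_padded_codepoint_list(codes) == 'hi'
  return codes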
def give_me_frozen_graph(
checkpoint,
nodes=None,
blacklist=None,
base_graph=None,
sess=None,
saver=None):
"""
Tensorflow has several ways to load checkpoints / graph artifacts.
It's impossible to know if some API is stable or if tomorrow somebody
  will invent something new and break everything because PyTorch is shiny
(e.g. TF Eager). Sam Abrahams wrote a book on Tensorflow
( https://www.amazon.com/TensorFlow-Machine-Intelligence-hands--introduction-ebook/dp/B01IZ43JV4/ )
and one time couldn't tell me definitively which API to use. What's more is
that freeze_graph.py is an optional script instead of a library module in
Tensorflow. Chaos!!
So, based upon spark-dl's `strip_and_freeze_until()`
( https://github.com/databricks/spark-deep-learning/blob/4daa1179f498df4627310afea291133539ce7001/python/sparkdl/graph/utils.py#L199 ),
here's a utility for getting a frozen, serializable, pyspark-friendly
graph from a checkpoint artifact metagraph thingy I have no idea.
"""
def op_name(v):
name = v
if hasattr(v, 'name'):
name = v.name
if ':' not in name:
return name
toks = name.split(':')
assert len(toks) <= 2, (toks, v, name)
return toks[0]
import tensorflow as tf
graph = base_graph or tf.Graph()
if nodes:
ops = [graph.get_operation_by_name(op_name(n)) for n in nodes]
else:
ops = graph.get_operations()
# if blacklist:
# for n in blacklist:
# ops.remove(graph.get_operation_by_name(op_name(n)))
with graph.as_default():
with (sess or tf_cpu_session()) as sess:
saver = saver or tf.train.Saver()
log.info("Reading from checkpoint %s ..." % checkpoint)
saver.restore(sess, checkpoint)
log.info("... done.")
gdef_frozen = tf.graph_util.convert_variables_to_constants(
sess,
graph.as_graph_def(add_shapes=True),
[op.name for op in ops])
# variable_names_blacklist=blacklist)
return gdef_frozen
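# A usage sketch for give_me_frozen_graph() (illustrative only): the checkpoint
# path and the 'logits' node name below are hypothetical placeholders. The graph
# is rebuilt from the checkpoint's MetaGraph so that the saver and ops exist.
def _demo_freeze_checkpoint(checkpoint='/tmp/model.ckpt-1234'):
  import tensorflow as tf
  graph = tf.Graph()
  with graph.as_default():
    saver = tf.compat.v1.train.import_meta_graph(checkpoint + '.meta')
  gdef_frozen = give_me_frozen_graph(
    checkpoint, nodes=['logits'], base_graph=graph, saver=saver)
  # The frozen GraphDef is a plain protobuf and can be serialized for later use
  return gdef_frozen.SerializeToString()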
def tf_variable_summaries(var, prefix=''):
"""Create Tensorboard summaries showing basic stats of the
variable `var`."""
import tensorflow as tf
if prefix:
prefix = prefix + '/'
else:
def var_name(v):
"""Magic: get the name of the variable that the caller passed to
`tf_variable_summaries()`"""
import inspect
lcls = inspect.stack()[2][0].f_locals
for name in lcls:
if id(v) == id(lcls[name]):
return name
return None
prefix = var_name(var)
if not prefix:
prefix = str(var.name)
idx = prefix.find('/')
if idx >= 0:
prefix = prefix[:prefix.find('/')] # Exclude slashes in var name
idx = prefix.find(':')
if idx >= 0:
prefix = prefix[:prefix.find(':')] # Exclude : too
with tf.variable_scope(prefix):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.histogram('histogram', var)
class TFSummaryRow(object):
__slots__ = (
'path',
'split',
'step',
'wall_time',
'tag',
'simple_value',
'image',
'tensor',
)
def __init__(self):
self.path = ''
self.split = ''
self.step = -1
self.wall_time = 0
self.tag = ''
self.simple_value = float('nan')
self.image = None
self.tensor = None
@staticmethod
def fill_simple_value(row, summary):
if summary.HasField('simple_value'):
row.simple_value = summary.simple_value
@staticmethod
def fill_image(row, summary):
if summary.HasField('image'):
import imageio
row.image = imageio.imread(summary.image.encoded_image_string)
@staticmethod
def fill_tensor(row, summary):
if summary.HasField('tensor'):
import tensorflow as tf
row.tensor = tf.make_ndarray(summary.tensor)
def as_dict(self):
return dict((k, getattr(self, k)) for k in self.__slots__)
def as_row(self, extra=None):
from pyspark.sql import Row
from au.spark import NumpyArray
d = self.as_dict()
d['image'] = NumpyArray(d['image'])
d['tensor'] = NumpyArray(d['tensor'])
d.update(**(extra or {}))
return Row(**d)
class TFSummaryReader(object):
# Subclass and use this attribute to elide / ignore some summary messages
FILLERS = (
TFSummaryRow.fill_simple_value,
TFSummaryRow.fill_image,
TFSummaryRow.fill_tensor,
)
def __init__(self, paths=None, glob_events_from_dir=None):
self._paths = paths or []
if glob_events_from_dir and os.path.exists(glob_events_from_dir):
self._paths.extend(
pathlib.Path(glob_events_from_dir).rglob('**/events.out*'))
def __iter__(self):
import tensorflow as tf
for path in self._paths:
path = str(path)
log.info("Reading summaries from path %s ..." % path)
split = ''
# TF estimators puts eval summaries in the 'eval' subdir
      eval_str = os.sep + 'eval' + os.sep
if eval_str in path:
split = 'eval'
def iter_events_verbose(path):
# When there's an error in the file, e.g. truncated record, Tensorflow
# doesn't print the path :(
try:
for tf_event in tf.train.summary_iterator(path):
yield tf_event
except Exception as e:
raise Exception(("Error reading file %s" % path, e))
for tf_event in iter_events_verbose(path):
for tf_summary in tf_event.summary.value:
row = TFSummaryRow()
row.path = path
row.split = split
row.wall_time = tf_event.wall_time
row.step = tf_event.step
row.tag = tf_summary.tag
for filler in self.FILLERS:
filler(row, tf_summary)
yield row
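# Minimal usage sketch for TFSummaryReader (illustrative only): the log
# directory below is a hypothetical placeholder. Every event file found under
# it is parsed and each summary value is printed as a plain dict.
def _demo_read_summaries(logdir='/tmp/my_tf_logdir'):
  reader = TFSummaryReader(glob_events_from_dir=logdir)
  for row in reader:
    print(row.as_dict())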
class TFRecordsFileAsListOfStrings(object):
"""
Friends Don't Let Friends Use TFRecords.
This utility provides a Tensorflow-free, minimal-dependency solution
for reading TFRecords from a *file stream* (e.g. a buffered reader) and
exposes random access over the stream's records.
Based upon:
* https://github.com/apache/beam/blob/master/sdks/python/apache_beam/io/tfrecordio.py#L67
* https://www.tensorflow.org/versions/r1.11/api_guides/python/python_io#TFRecords_Format_Details
* https://github.com/gdlg/simple-waymo-open-dataset-reader/blob/master/simple_waymo_open_dataset_reader/__init__.py#L19
* https://gist.github.com/ed-alertedh/9f49bfc6216585f520c7c7723d20d951
"""
## Public API
def __init__(self, fileobj):
self.fileobj = fileobj
self._offset_length = None
def __len__(self):
self._maybe_build_index()
return len(self._offset_length)
def __getitem__(self, idx):
if idx >= len(self):
raise IndexError
else:
start, length = self._offset_length[idx]
return self._get_data(start, length)
def __iter__(self):
self.fileobj.seek(0)
for offset, length in self._iter_offset_length():
yield self._get_data(offset, length)
## Utils
@classmethod
def _masked_crc32c(cls, value):
"""Compute a masked crc32c checksum for a value. FMI see
https://www.tensorflow.org/versions/r1.11/api_guides/python/python_io#TFRecords_Format_Details
"""
if not hasattr(cls, '_crc32c_fn'):
import crcmod
cls._crc32c_fn = crcmod.predefined.mkPredefinedCrcFun('crc-32c')
crc = cls._crc32c_fn(value)
return (((crc >> 15) | (crc << 17)) + 0xa282ead8) & 0xffffffff
@classmethod
def _check_crc(cls, value, expected_crc):
crc_actual = cls._masked_crc32c(value)
if crc_actual != expected_crc:
import codecs
raise ValueError(
          'Invalid TFRecord, mismatch: %s %s %s' % (
codecs.encode(value[:10], 'hex'), crc_actual, expected_crc))
def _iter_offset_length(self):
self.fileobj.seek(0)
while True:
header = self.fileobj.read(12)
if header == b'':
break
assert len(header) == 12
import struct
length, lengthcrc = struct.unpack("<QI", header)
self._check_crc(header[:8], lengthcrc)
base = self.fileobj.tell()
yield (base, length)
# Skip over payload and payload CRC
self.fileobj.seek(base + length + 4)
def _maybe_build_index(self):
if self._offset_length is None:
self._offset_length = list(self._iter_offset_length())
self.fileobj.seek(0)
def _get_data(self, start, length):
self.fileobj.seek(start)
payload = self.fileobj.read(length + 4)
assert len(payload) == (length + 4)
import struct
data, datacrc = struct.unpack("<%dsI" % length, payload)
self._check_crc(data, datacrc)
return data
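# Minimal usage sketch for TFRecordsFileAsListOfStrings (illustrative only):
# the path below is a hypothetical placeholder for an existing TFRecords file.
def _demo_read_tfrecords(path='/tmp/data.tfrecord'):
  with open(path, 'rb') as f:
    records = TFRecordsFileAsListOfStrings(f)
    print('num records:', len(records))
    first = records[0]   # random access by index
    for raw in records:  # or stream all raw record byte-strings
      pass
    return first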
|
"""
The intention of this script is to provide a demonstration of how ConFlowGen is supposed to be used as a library.
It is, by design, a stateful library that persists all input in an SQL database format to enable reproducibility.
The intention of this demo is further explained in the logs it generated.
"""
import datetime
import sys
try:
import conflowgen
print(f"Importing ConFlowGen version {conflowgen.__version__}")
except ImportError:
print("Please first install conflowgen as a library")
sys.exit()
# Start logging
logger = conflowgen.setup_logger()
logger.info("""
####
## Demo Proof of Concept
####
This demo is based on some example data and is meant to show the basic functionality. For a slightly more realistic
example, please check out `demo_DEHAM_CTA.py`. However, computing those numbers also takes longer than quickly running
this small example.
""")
# Pick database
database_chooser = conflowgen.DatabaseChooser()
demo_file_name = "demo_poc.sqlite"
if demo_file_name in database_chooser.list_all_sqlite_databases():
database_chooser.load_existing_sqlite_database(demo_file_name)
else:
database_chooser.create_new_sqlite_database(demo_file_name)
# Set settings
container_flow_generation_manager = conflowgen.ContainerFlowGenerationManager()
container_flow_generation_manager.set_properties(
name="Demo file",
start_date=datetime.datetime.now().date(),
end_date=datetime.datetime.now().date() + datetime.timedelta(days=21)
)
# Add vehicles that frequently visit the terminal.
port_call_manager = conflowgen.PortCallManager()
feeder_service_name = "LX050"
if not port_call_manager.has_schedule(feeder_service_name, vehicle_type=conflowgen.ModeOfTransport.feeder):
logger.info(f"Add feeder service '{feeder_service_name}' to database")
port_call_manager.add_large_scheduled_vehicle(
vehicle_type=conflowgen.ModeOfTransport.feeder,
service_name=feeder_service_name,
vehicle_arrives_at=datetime.date(2021, 7, 9),
vehicle_arrives_at_time=datetime.time(11),
average_vehicle_capacity=800,
average_moved_capacity=100,
next_destinations=[
("DEBRV", 0.4), # 50% of the containers (in boxes) go here...
("RULED", 0.6) # and the other 50% of the containers (in boxes) go here.
]
)
else:
logger.info(f"Feeder service '{feeder_service_name}' already exists")
train_service_name = "JR03A"
if not port_call_manager.has_schedule(train_service_name, vehicle_type=conflowgen.ModeOfTransport.train):
logger.info(f"Add train service '{train_service_name}' to database")
port_call_manager.add_large_scheduled_vehicle(
vehicle_type=conflowgen.ModeOfTransport.train,
service_name=train_service_name,
vehicle_arrives_at=datetime.date(2021, 7, 12),
vehicle_arrives_at_time=datetime.time(17),
average_vehicle_capacity=90,
average_moved_capacity=90,
next_destinations=None # Here we don't have containers that need to be grouped by destination
)
else:
logger.info(f"Train service '{train_service_name}' already exists")
deep_sea_service_name = "LX050"
if not port_call_manager.has_schedule(deep_sea_service_name, vehicle_type=conflowgen.ModeOfTransport.deep_sea_vessel):
logger.info(f"Add deep sea vessel service '{deep_sea_service_name}' to database")
port_call_manager.add_large_scheduled_vehicle(
vehicle_type=conflowgen.ModeOfTransport.deep_sea_vessel,
service_name=deep_sea_service_name,
vehicle_arrives_at=datetime.date(2021, 7, 10),
vehicle_arrives_at_time=datetime.time(19),
average_vehicle_capacity=16000,
average_moved_capacity=150, # for faster demo
next_destinations=[
("ZADUR", 0.3), # 30% of the containers (in boxes) go here...
("CNSHG", 0.7) # and the other 70% of the containers (in boxes) go here.
]
)
else:
logger.info(f"Deep sea service '{deep_sea_service_name}' already exists")
###
# Now, all schedules and input distributions are set up - no further inputs are required
###
logger.info("Preview the results with some light-weight approaches.")
conflowgen.run_all_previews()
logger.info("Generate all fleets with all vehicles. This is the core of the whole project.")
container_flow_generation_manager.generate()
logger.info("The container flow data have been generated, run post-hoc analyses on them")
conflowgen.run_all_analyses()
logger.info("For a better understanding of the data, it is advised to study the logs and compare the preview with the "
"post-hoc analysis results")
logger.info("Start data export...")
# Export important entries from SQL to CSV so that it can be further processed, e.g. by a simulation software
export_container_flow_manager = conflowgen.ExportContainerFlowManager()
export_folder_name = "demo-poc--" + str(datetime.datetime.now()).replace(":", "-").replace(" ", "--").split(".")[0]
export_container_flow_manager.export(
folder_name=export_folder_name + "-csv",
file_format=conflowgen.ExportFileFormat.csv
)
export_container_flow_manager.export(
folder_name=export_folder_name + "-xlsx",
file_format=conflowgen.ExportFileFormat.xlsx
)
# Gracefully close everything
database_chooser.close_current_connection()
logger.info("Demo 'demo_poc' finished successfully.")
|
import pickle
import numpy as np
import pandas as pd
import ssms
import argparse
def none_or_str(value):
print('none_or_str')
print(value)
print(type(value))
if value == 'None':
return None
return value
def none_or_int(value):
print('none_or_int')
print(value)
print(type(value))
#print('printing type of value supplied to non_or_int ', type(int(value)))
if value == 'None':
return None
return int(value)
if __name__ == "__main__":
# Interface ----
CLI = argparse.ArgumentParser()
CLI.add_argument("--my_list",
type = list,
default = None)
# CLI.add_argument('--config_dict_key',
# type = none_or_int,
# default = None)
args = CLI.parse_args()
print(args)
print(args.my_list)
|
from . import base
class Error(base.Event):
pass
class ConnectNotAvailable(Error):
def __str__(self):
return "connect not available."
class VPNClientStateNotConnected(Error):
def __str__(self):
return "vpn client state is not connected"
class ConnectAlreadyInProgress(Error):
def __str__(self):
return "connect already in progress"
|
"""
This problem was asked by Amazon.
Implement a stack that has the following methods:
push(val), which pushes an element onto the stack
pop(), which pops off and returns the topmost element of the stack. If there are no elements in the stack, then it should throw an error or return null.
max(), which returns the maximum value in the stack currently. If there are no elements in the stack, then it should throw an error or return null.
Each method should run in constant time.
"""
class Stack:
def __init__(self):
self._top = -1
self._max = []
self._vals = []
def max(self):
if self._top == -1:
return None
return self._max[self._top]
    def push(self, val):
        # Invariant: _max[i] holds the maximum of _vals[0..i], so max() stays O(1)
        self._vals.append(val)
if self.max() is None or val > self.max():
self._max.append(val)
else:
self._max.append(self.max())
self._top += 1
def pop(self):
if self._top == -1:
return None
del self._max[self._top]
ret = self._vals[self._top]
del self._vals[self._top]
self._top -= 1
return ret
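# A quick, self-contained demonstration of the constant-time max-stack above;
# the expected return values are noted in the comments.
if __name__ == "__main__":
    s = Stack()
    s.push(3)
    s.push(5)
    s.push(2)
    print(s.max())  # 5
    print(s.pop())  # 2
    print(s.max())  # 5
    print(s.pop())  # 5
    print(s.max())  # 3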
|
from flask import Flask
from flask_bootstrap import Bootstrap
from flask_login import LoginManager
from .config import Config
from .auth import auth
from .models import UserModel
login_manager = LoginManager()
login_manager.login_view = 'auth.login'
# Called whenever flask_login needs to load a user
@login_manager.user_loader
def load_user(username):
return UserModel.query(username)
def create_app():
    app = Flask(__name__)  # Create the Flask application instance
    bootstrap = Bootstrap(app)  # Initialize Flask-Bootstrap
app.config.from_object(Config)
login_manager.init_app(app)
app.register_blueprint(auth)
return app
|
"""
Wrapper for `git diff` command.
"""
from textwrap import dedent
from diff_cover.command_runner import CommandError, execute
class GitDiffError(Exception):
"""
`git diff` command produced an error.
"""
class GitDiffTool:
"""
Thin wrapper for a subset of the `git diff` command.
"""
def __init__(self, range_notation, ignore_whitespace):
"""
:param str range_notation:
which range notation to use when producing the diff for committed
files against another branch.
            Traditionally diff-cover has used the symmetric-difference (three-dot, "A...M")
            notation: it includes commits reachable from either A or M since their merge-base,
            but not from both, taking history into account.
            This covers cherry-picks between A and M, which are harmless and do not produce
            changes, but it can lead to misleading coverage false negatives.
            Two-dot range notation ("A..M") compares the tips of both trees and produces a diff.
            This more accurately describes the actual patch that will be applied by merging A
            into M, even if commits have been cherry-picked between branches, and therefore
            yields a more accurate diff for coverage comparison when complex merges and
            cherry-picks are involved.
:param bool ignore_whitespace:
Perform a diff but ignore any and all whitespace.
"""
self._range_notation = range_notation
self._default_git_args = [
"git",
"-c",
"diff.mnemonicprefix=no",
"-c",
"diff.noprefix=no",
]
self._default_diff_args = ["diff", "--no-color", "--no-ext-diff", "-U0"]
if ignore_whitespace:
self._default_diff_args.append("--ignore-all-space")
self._default_diff_args.append("--ignore-blank-lines")
def diff_committed(self, compare_branch="origin/main"):
"""
Returns the output of `git diff` for committed
changes not yet in origin/main.
Raises a `GitDiffError` if `git diff` outputs anything
to stderr.
"""
diff_range = "{branch}{notation}HEAD".format(
branch=compare_branch, notation=self._range_notation
)
try:
return execute(
self._default_git_args + self._default_diff_args + [diff_range]
)[0]
except CommandError as e:
if "unknown revision" in str(e):
raise ValueError(
dedent(
f"""
Could not find the branch to compare to. Does '{compare_branch}' exist?
the `--compare-branch` argument allows you to set a different branch.
"""
)
)
raise
def diff_unstaged(self):
"""
Returns the output of `git diff` with no arguments, which
is the diff for unstaged changes.
Raises a `GitDiffError` if `git diff` outputs anything
to stderr.
"""
return execute(self._default_git_args + self._default_diff_args)[0]
def diff_staged(self):
"""
Returns the output of `git diff --cached`, which
is the diff for staged changes.
Raises a `GitDiffError` if `git diff` outputs anything
to stderr.
"""
return execute(self._default_git_args + self._default_diff_args + ["--cached"])[
0
]
def untracked(self):
"""Return the untracked files."""
output = execute(["git", "ls-files", "--exclude-standard", "--others"])[0]
output = output.strip()
return output.split("\n") if output else []
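# Hedged usage sketch (not part of diff-cover itself), showing how the range notation
# described in __init__ ends up in the git invocation:
if __name__ == "__main__":
    tool = GitDiffTool(range_notation="...", ignore_whitespace=False)
    # With the three-dot notation, diff_committed() runs roughly:
    #   git -c diff.mnemonicprefix=no -c diff.noprefix=no \
    #       diff --no-color --no-ext-diff -U0 origin/main...HEAD
    print(tool.diff_committed(compare_branch="origin/main")[:500])
    print(tool.untracked())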
|
from pathlib import Path
import pytest
@pytest.fixture(scope="session")
def tests_dir():
return Path(__file__).parent.resolve()
|
##########################################################################
#
# Copyright (c) 2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import unittest
import imath
import IECore
import Gaffer
import GafferTest
import GafferImage
import GafferImageTest
class OffsetTest( GafferImageTest.ImageTestCase ) :
def testPassThrough( self ) :
c = GafferImage.ImageReader()
c["fileName"].setValue( os.path.dirname( __file__ ) + "/images/checker2x2.exr" )
o = GafferImage.Offset()
o["in"].setInput( c["out"] )
self.assertEqual( o["offset"].getValue(), imath.V2i( 0 ) )
self.assertImageHashesEqual( o["out"], c["out"] )
self.assertImagesEqual( o["out"], c["out"] )
def testDataWindow( self ) :
c = GafferImage.ImageReader()
c["fileName"].setValue( os.path.dirname( __file__ ) + "/images/checker2x2.exr" )
self.assertEqual(
c["out"]["dataWindow"].getValue(),
imath.Box2i( imath.V2i( 0 ), imath.V2i( 2 ) )
)
o = GafferImage.Offset()
o["in"].setInput( c["out"] )
o["offset"].setValue( imath.V2i( 1 ) )
self.assertEqual(
o["out"]["dataWindow"].getValue(),
imath.Box2i( imath.V2i( 1 ), imath.V2i( 3 ) )
)
def testChannelData( self ) :
c = GafferImage.ImageReader()
c["fileName"].setValue( os.path.dirname( __file__ ) + "/images/checker2x2.exr" )
o = GafferImage.Offset()
o["in"].setInput( c["out"] )
o["offset"].setValue( imath.V2i( 1 ) )
def sample( image, channelName, pos ) :
sampler = GafferImage.Sampler( image, channelName, image["dataWindow"].getValue() )
return sampler.sample( pos.x, pos.y )
for yOffset in range( -10, 10 ) :
for xOffset in range( -10, 10 ) :
o["offset"].setValue( imath.V2i( xOffset, yOffset ) )
for y in range( 0, 2 ) :
for x in range( 0, 2 ) :
for channelName in ( "R", "G", "B", "A" ) :
self.assertEqual(
sample( o["out"], channelName, imath.V2i( x + xOffset, y + yOffset ) ),
sample( c["out"], channelName, imath.V2i( x, y ) ),
)
def testMultipleOfTileSize( self ) :
c = GafferImage.ImageReader()
c["fileName"].setValue( os.path.dirname( __file__ ) + "/images/checker.exr" )
o = GafferImage.Offset()
o["in"].setInput( c["out"] )
for offset in [
imath.V2i( 0, 1 ),
imath.V2i( 1, 0 ),
imath.V2i( 2, 0 ),
imath.V2i( 2, 1 ),
imath.V2i( 2, -3 ),
imath.V2i( -2, 3 ),
imath.V2i( -1, -1 ),
] :
offset *= GafferImage.ImagePlug.tileSize()
o["offset"].setValue( offset )
self.assertEqual(
o["out"].channelDataHash( "R", offset ),
c["out"].channelDataHash( "R", imath.V2i( 0 ) ),
)
self.assertEqual(
o["out"].channelData( "R", offset ),
c["out"].channelData( "R", imath.V2i( 0 ) ),
)
def testOffsetBack( self ) :
c = GafferImage.ImageReader()
c["fileName"].setValue( os.path.dirname( __file__ ) + "/images/checker.exr" )
o1 = GafferImage.Offset()
o1["in"].setInput( c["out"] )
o1["offset"].setValue( imath.V2i( 101, -45 ) )
o2 = GafferImage.Offset()
o2["in"].setInput( o1["out"] )
o2["offset"].setValue( imath.V2i( -101, 45 ) )
self.assertImagesEqual( c["out"], o2["out"] )
def testChannelDataDirtyPropagation( self ) :
c = GafferImage.Constant()
o = GafferImage.Offset()
o["in"].setInput( c["out"] )
cs = GafferTest.CapturingSlot( o.plugDirtiedSignal() )
c["color"]["r"].setValue( 0.5 )
self.assertTrue( o["out"]["channelData"] in { x[0] for x in cs } )
def testDataWindowDirtyPropagation( self ) :
c = GafferImage.Constant()
o = GafferImage.Offset()
o["in"].setInput( c["out"] )
cs = GafferTest.CapturingSlot( o.plugDirtiedSignal() )
c["format"].setValue( GafferImage.Format( 100, 100 ) )
self.assertTrue( o["out"]["dataWindow"] in { x[0] for x in cs } )
def testOffsetEmpty( self ) :
c = GafferImage.Text()
c["text"].setValue( "" )
self.assertTrue( c["out"]["dataWindow"].getValue().isEmpty() )
o = GafferImage.Offset()
o["in"].setInput( c["out"] )
o["offset"].setValue( imath.V2i( 100, 100 ) )
self.assertTrue( o["out"]["dataWindow"].getValue().isEmpty() )
o["offset"].setValue( imath.V2i( -100, -100 ) )
self.assertTrue( o["out"]["dataWindow"].getValue().isEmpty() )
if __name__ == "__main__":
unittest.main()
|
from typing import Optional
from anyio import to_thread
from github import Github
from github.Label import Label
from github.Repository import Repository
from Shared.functions.readSettingsFile import get_setting
_REPO: Optional[Repository] = None
_LABELS: Optional[list[Label]] = None
async def get_github_repo() -> Optional[Repository]:
"""Returns the GitHub api repo object"""
global _REPO
if not _REPO:
if get_setting("GITHUB_APPLICATION_API_KEY") and get_setting("GITHUB_REPOSITORY_ID"):
github_api = Github(get_setting("GITHUB_APPLICATION_API_KEY"))
# run those in a thread with anyio since they are blocking
_REPO = await to_thread.run_sync(github_api.get_repo, int(get_setting("GITHUB_REPOSITORY_ID")))
return _REPO
async def get_github_labels() -> Optional[list[Label]]:
"""Returns the GitHub labels that should be used on the issue"""
global _LABELS
if not _LABELS and get_setting("GITHUB_ISSUE_LABEL_NAMES"):
repo = await get_github_repo()
if repo:
_LABELS = []
for label_name in get_setting("GITHUB_ISSUE_LABEL_NAMES"):
# run those in a thread with anyio since they are blocking
label = await to_thread.run_sync(repo.get_label, label_name)
_LABELS.append(label)
return _LABELS
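# Hedged usage sketch: both helpers are coroutines, so outside an existing event loop they
# can be driven with anyio.run(). Nothing is returned unless the settings keys referenced
# above (GITHUB_APPLICATION_API_KEY, GITHUB_REPOSITORY_ID, GITHUB_ISSUE_LABEL_NAMES) are set.
if __name__ == "__main__":
    import anyio

    async def _demo() -> None:
        repo = await get_github_repo()
        labels = await get_github_labels()
        print(repo.full_name if repo else "no repo configured")
        print([label.name for label in labels] if labels else "no labels configured")

    anyio.run(_demo)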
|
import json
import pandas as pd
import sqlite3
conn = sqlite3.connect('hackernews.sqlite3')
curs = conn.cursor()
# prolific authors (>= 50 comments) ranked by average sentiment
query = '''SELECT author, COUNT(author), AVG(sentiment)
           FROM comments
           GROUP BY author
           HAVING COUNT(author) >= 50
           ORDER BY AVG(sentiment)
           LIMIT 10'''
curs.execute(query)
results = curs.fetchall()
df = pd.DataFrame(results, columns=["author", "post count", "sentiment"])
d = df.to_dict()
with open('topsalties.json', 'w') as file:
json.dump(d, file)
|
import os
import numpy as np
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # for ignoring the some of tf warnings
import tensorflow as tf
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard
from sklearn.model_selection import train_test_split
from src.CNN_classification import network
from src.CNN_classification import utils
def train(X, y,
random_state=42,
test_size=0.20,
stage_train_dir='.',
patience=10,
epochs=10,
batch_size=None,
dropout_rate=0.2,
dump_model_summary=True,
set_lr_scheduler=True,
set_checkpoint=True,
set_earlystopping=True,
set_tensorboard=False,
dump_history=True,
save_model=True,
**kwargs
):
N, L = X.shape[0], X.shape[1]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size,
random_state=random_state,
) #stratify=y
K = len(np.unique(y_train))
# design the architecture of model
model = network.create_model((L, L, 1), K, dropout_rate=dropout_rate)
# compile the model
opt = tf.keras.optimizers.Adam(learning_rate=1e-4)
model.compile(optimizer=opt,
loss='sparse_categorical_crossentropy',
metrics=['sparse_categorical_accuracy'])
# Callbacks
callbacks = []
if set_lr_scheduler:
lr_scheduler = tf.keras.callbacks.ReduceLROnPlateau(factor=0.5, patience=5)
callbacks += [lr_scheduler]
if set_checkpoint:
checkpoint_file = utils.make_path(stage_train_dir, "ckpt-best.h5")
checkpoint_cb = ModelCheckpoint(checkpoint_file,
save_best_only=True,
monitor='val_loss',
save_weights_only=False)
callbacks += [checkpoint_cb]
if set_earlystopping:
early_stopping_cb = EarlyStopping(patience=patience, restore_best_weights=True)
callbacks += [early_stopping_cb]
if set_tensorboard:
tensor_board = TensorBoard(log_dir=stage_train_dir)
callbacks += [tensor_board]
# print model info
if dump_model_summary:
utils.print_model_summary(model, stage_train_dir)
# training the model
history = model.fit(X_train, y_train,
validation_data=(X_test, y_test),
callbacks=callbacks,
epochs=epochs,
batch_size=batch_size)
if save_model:
model.save(utils.make_path(stage_train_dir, 'saved-model.h5'))
if dump_history:
utils.write_numpy_dic_to_json(history.history,
utils.make_path(stage_train_dir, 'history.json'))
loss_test, accuracy_test = model.evaluate(X_test, y_test, verbose=0)
print('loss_test={:.3f}, accuracy_test={:.3f}'.format(loss_test, accuracy_test))
"""loaded_model = tf.keras.models.load_model(make_path(stage_train_dir, 'saved-model.h5'))
loss_test, accuracy_test = loaded_model.evaluate(X_test, y_test, verbose=0)
print('loaded_model_loss_test={:.3f}, loaded_model_accuracy_test={:.3f}'.format(loss_test, accuracy_test))"""
return model, history
#====================================================================
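# Hedged usage sketch with synthetic data (shapes are illustrative): `train` builds the
# network with input shape (L, L, 1) where L = X.shape[1], so it expects square,
# single-channel samples.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    X_demo = rng.random((200, 32, 32, 1))    # 200 synthetic 32x32 single-channel samples
    y_demo = rng.integers(0, 2, size=200)    # two synthetic classes
    train(X_demo, y_demo, stage_train_dir='.', epochs=2, batch_size=32)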
|
from setuptools import setup, find_packages
setup(
name = 'port_dashboard',
version = '1.0',
packages = find_packages(),
package_data = {
'': ['LICENSE', 'README.md'],
},
# PyPI metadata
description = 'generate a port to-do list from C preprocessor macros',
author = 'Michael Labbe',
author_email = 'mike@frogtoss.com',
url = 'https://github.com/mlabbe/port_dashboard',
# keywords = ['logging', 'debug', 'release'],
)
|
import scipy
def sparseCheck(features, labels):
    """
    Takes features and labels as input and checks if either of them is a sparse
    csr_matrix; sparse inputs are converted to dense arrays before being returned.
    """
    try:
        print('Checking for Sparse Matrix [*]\n')
        converted = False
        if scipy.sparse.issparse(features[()]):
            print('Converting Sparse Features to array []\n')
            features = features[()].toarray()
            print(
                'Conversion of Sparse Features to array Done [', u'\u2713', ']\n')
            converted = True
        if scipy.sparse.issparse(labels[()]):
            print('Converting Sparse Labels to array []\n')
            labels = labels[()].toarray()
            print(
                'Conversion of Sparse Labels to array Done [', u'\u2713', ']\n')
            converted = True
        if not converted:
            print('No Sparse Matrix Found')
    except Exception as error:
        # the [()] indexing assumes 0-d object arrays (e.g. loaded with allow_pickle);
        # anything else falls through here without modifying the inputs
        print('Sparse matrix check failed with: ', error)
    return (features, labels)
|
{%- if cookiecutter.add_graphiql_route == "yes" -%}
from .graphiql import handle_graphiql
{% endif -%}
from .graphql import handle_graphql
{%- if cookiecutter.add_health_routes == "yes" %}
from .health import (
handle_live as handle_health_live,
handle_ready as handle_health_ready,
)
{%- endif %}
__all__ = (
"handle_graphql",
{%- if cookiecutter.add_graphiql_route == "yes" %}
"handle_graphiql",
{%- endif %}
{%- if cookiecutter.add_health_routes == "yes" %}
"handle_health_live",
"handle_health_ready",
{%- endif %}
)
|
from datetime import datetime
from cred.database import db
class Event(db.Model):
__tablename__ = 'event'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(240))
location = db.Column(db.String(240))
action = db.Column(db.String(240))
value = db.Column(db.String(240))
time = db.Column(db.DateTime, default=datetime.utcnow)
client_id = db.Column(db.Integer, db.ForeignKey('client.id'))
client = db.relationship(
'Client',
backref=db.backref('events', lazy='dynamic'))
def __init__(self, client, name, location, action, value, time=None):
self.client = client
self.name = name
self.location = location
self.action = action
self.value = value
if time is not None:
self.time = time
def __repr__(self):
return '<ID %r, Name %r, Location %r, Action %r, Value %r, Time %r>' % (
self.id,
self.name,
self.location,
self.action,
self.value,
self.time
)
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
##################################################
# GNU Radio Python Flow Graph
# Title: Top Block
# Generated: Wed Feb 14 21:28:23 2018
##################################################
if __name__ == '__main__':
import ctypes
import sys
if sys.platform.startswith('linux'):
try:
x11 = ctypes.cdll.LoadLibrary('libX11.so')
x11.XInitThreads()
except:
print "Warning: failed to XInitThreads()"
from PyQt4 import Qt
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio import gr
from gnuradio import qtgui
from gnuradio.eng_option import eng_option
from gnuradio.filter import firdes
from optparse import OptionParser
import sip
import sys
from gnuradio import qtgui
class top_block(gr.top_block, Qt.QWidget):
def __init__(self):
gr.top_block.__init__(self, "Top Block")
Qt.QWidget.__init__(self)
self.setWindowTitle("Top Block")
qtgui.util.check_set_qss()
try:
self.setWindowIcon(Qt.QIcon.fromTheme('gnuradio-grc'))
except:
pass
self.top_scroll_layout = Qt.QVBoxLayout()
self.setLayout(self.top_scroll_layout)
self.top_scroll = Qt.QScrollArea()
self.top_scroll.setFrameStyle(Qt.QFrame.NoFrame)
self.top_scroll_layout.addWidget(self.top_scroll)
self.top_scroll.setWidgetResizable(True)
self.top_widget = Qt.QWidget()
self.top_scroll.setWidget(self.top_widget)
self.top_layout = Qt.QVBoxLayout(self.top_widget)
self.top_grid_layout = Qt.QGridLayout()
self.top_layout.addLayout(self.top_grid_layout)
self.settings = Qt.QSettings("GNU Radio", "top_block")
self.restoreGeometry(self.settings.value("geometry").toByteArray())
##################################################
# Variables
##################################################
self.samp_rate = samp_rate = 32000
self.c4 = c4 = (1,1,1,1,-1,-1,-1,1,-1,-1,-1,-1,1,1,-1,1,1,-1,-1)
self.c3 = c3 = (1,1,1,-1,-1,-1,1,-1,1,1,-1)
self.c2 = c2 = (1,1,1,-1,-1,1,-1)
self.c1 = c1 = (1,-1)
##################################################
# Blocks
##################################################
self.qtgui_time_sink_x_0 = qtgui.time_sink_f(
1024, #size
samp_rate, #samp_rate
"", #name
1 #number of inputs
)
self.qtgui_time_sink_x_0.set_update_time(0.10)
self.qtgui_time_sink_x_0.set_y_axis(-2, 2)
self.qtgui_time_sink_x_0.set_y_label('Amplitude', "")
self.qtgui_time_sink_x_0.enable_tags(-1, True)
self.qtgui_time_sink_x_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, qtgui.TRIG_SLOPE_POS, 0.0, 0, 0, "")
self.qtgui_time_sink_x_0.enable_autoscale(False)
self.qtgui_time_sink_x_0.enable_grid(False)
self.qtgui_time_sink_x_0.enable_axis_labels(True)
self.qtgui_time_sink_x_0.enable_control_panel(False)
if not True:
self.qtgui_time_sink_x_0.disable_legend()
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "blue"]
styles = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
markers = [-1, -1, -1, -1, -1,
-1, -1, -1, -1, -1]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_time_sink_x_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_time_sink_x_0.set_line_label(i, labels[i])
self.qtgui_time_sink_x_0.set_line_width(i, widths[i])
self.qtgui_time_sink_x_0.set_line_color(i, colors[i])
self.qtgui_time_sink_x_0.set_line_style(i, styles[i])
self.qtgui_time_sink_x_0.set_line_marker(i, markers[i])
self.qtgui_time_sink_x_0.set_line_alpha(i, alphas[i])
self._qtgui_time_sink_x_0_win = sip.wrapinstance(self.qtgui_time_sink_x_0.pyqwidget(), Qt.QWidget)
self.top_layout.addWidget(self._qtgui_time_sink_x_0_win)
self.blocks_vector_source_x_0_0_0_0 = blocks.vector_source_s(c4, True, 1, [])
self.blocks_vector_source_x_0_0_0 = blocks.vector_source_s(c3, True, 1, [])
self.blocks_vector_source_x_0_0 = blocks.vector_source_s(c2, True, 1, [])
self.blocks_vector_source_x_0 = blocks.vector_source_s(c1, True, 1, [])
self.blocks_short_to_float_0 = blocks.short_to_float(1, 1)
self.blocks_and_xx_0 = blocks.and_ss()
##################################################
# Connections
##################################################
self.connect((self.blocks_and_xx_0, 0), (self.blocks_short_to_float_0, 0))
self.connect((self.blocks_short_to_float_0, 0), (self.qtgui_time_sink_x_0, 0))
self.connect((self.blocks_vector_source_x_0, 0), (self.blocks_and_xx_0, 0))
self.connect((self.blocks_vector_source_x_0_0, 0), (self.blocks_and_xx_0, 1))
self.connect((self.blocks_vector_source_x_0_0_0, 0), (self.blocks_and_xx_0, 2))
self.connect((self.blocks_vector_source_x_0_0_0_0, 0), (self.blocks_and_xx_0, 3))
def closeEvent(self, event):
self.settings = Qt.QSettings("GNU Radio", "top_block")
self.settings.setValue("geometry", self.saveGeometry())
event.accept()
def get_samp_rate(self):
return self.samp_rate
def set_samp_rate(self, samp_rate):
self.samp_rate = samp_rate
self.qtgui_time_sink_x_0.set_samp_rate(self.samp_rate)
def get_c4(self):
return self.c4
def set_c4(self, c4):
self.c4 = c4
self.blocks_vector_source_x_0_0_0_0.set_data(self.c4, [])
def get_c3(self):
return self.c3
def set_c3(self, c3):
self.c3 = c3
self.blocks_vector_source_x_0_0_0.set_data(self.c3, [])
def get_c2(self):
return self.c2
def set_c2(self, c2):
self.c2 = c2
self.blocks_vector_source_x_0_0.set_data(self.c2, [])
def get_c1(self):
return self.c1
def set_c1(self, c1):
self.c1 = c1
self.blocks_vector_source_x_0.set_data(self.c1, [])
def main(top_block_cls=top_block, options=None):
from distutils.version import StrictVersion
if StrictVersion(Qt.qVersion()) >= StrictVersion("4.5.0"):
style = gr.prefs().get_string('qtgui', 'style', 'raster')
Qt.QApplication.setGraphicsSystem(style)
qapp = Qt.QApplication(sys.argv)
tb = top_block_cls()
tb.start()
tb.show()
def quitting():
tb.stop()
tb.wait()
qapp.connect(qapp, Qt.SIGNAL("aboutToQuit()"), quitting)
qapp.exec_()
if __name__ == '__main__':
main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 11 10:31:47 2021
@author: nd
"""
import sys
import re
from .build_struts import *
# add file names here
## origin file
#file_in = sys.argv[1]
## output file
file_out = "_output.txt"
# read in file
def file_read(file_in):
lines = open(file_in, "r").readlines()
return lines
# outputs file
def file_output(file_out_list):
    # sec_strut_output, helices, b_sheets, loops, gaps
    out_type = ['secStrut', 'helix', 'sheet', 'loop', 'gap']
    for i, j in zip(out_type, file_out_list):
        convert_list = [str(x) for x in j]
        with open(i + file_out, "w") as wrt:
            wrt.write(','.join(convert_list))
# extracts needed residue information
def extract_info(in_file):
new_list = []
for line in in_file:
line_s = line.split()
line_seg = line[16:35].split()
try:
line_s[1] = int(line_s[1])
new_line_s = [line_s[0], line_s[2], line_s[1], line[16:25], line_seg[-2], line_seg[-1][:-1], float(line_s[-3]), float(line_s[-2]), float(line_s[-1])]
new_list.append(new_line_s)
except:
continue
return new_list
# Extracts helix residue information and builds a list of helices
# lists info for each alpha carbon in each helix
def helix(in_list):
helix_list, temp_helix, scraps, temp = [], [], [], []
for res in (in_list):
try:
int(res[1]) == False
except:
if "H" in res[3] or "I" in res[3] or "G" in res[3]:
temp_helix.append(res)
else:
scraps.append(res)
for res in temp_helix:
if temp == []:
temp.append([res[1], res[2], res[-3], res[-2], res[-1]])
elif res[2] == temp[-1][1] + 1:
temp.append([res[1], res[2], res[-3], res[-2], res[-1]])
else:
helix_list.append(temp)
temp = []
temp.append([res[1], res[2], res[-3], res[-2], res[-1]])
helix_list.append(temp)
return helix_list, scraps
# Extracts strand residue information
def sheet_loop(in_list):
temp_sheet, new_sheet = [], []
# builds a list of strand residues
# and directly hydrogen bonded strand residues
for strand in in_list:
temp_strand = []
for res in strand:
if res[3] not in temp_strand and bool(re.search("0[a-zA-Z]+", res[3])) == False:
temp_strand.append(res[3])
if res[4] not in temp_strand and bool(re.search("0[a-zA-Z]+", res[4])) == False:
temp_strand.append(res[4])
if res[5] not in temp_strand and bool(re.search("0[a-zA-Z]+", res[5])) == False:
temp_strand.append(res[5])
temp_sheet.append(temp_strand)
# groups strands into sheets
new_sheet.append(temp_sheet[0])
for strand in temp_sheet:
if strand in new_sheet:
continue
elif strand not in new_sheet:
for strand_l in new_sheet:
if len(set(strand).difference(strand_l)) < len(strand):
a = list(dict.fromkeys(strand + strand_l))
new_sheet.append(a)
new_sheet.remove(strand_l)
break
elif len(set(strand).difference(strand_l)) == len(strand) and strand_l == new_sheet[-1]:
new_sheet.append(strand)
else:
continue
return new_sheet
# Extracts strand residue information
# Builds a list of sheets
def sheets(in_list):
strand_list, temp_sheet, scraps, temp = [], [], [], []
for res in (in_list):
try:
int(res[1]) == False
except:
if "E" in res[3]:
temp_sheet.append(res)
else:
scraps.append(res)
for res in temp_sheet:
if temp == []:
temp.append([res[0], res[1], res[2], str(res[0])+res[1], str(res[4])+res[1], res[5]+res[1], res[-3], res[-2], res[-1]])
elif res[2] == temp[-1][2] + 1:
temp.append([res[0], res[1], res[2], str(res[0])+res[1], str(res[4])+res[1], res[5]+res[1], res[-3], res[-2], res[-1]])
else:
strand_list.append(temp)
temp = []
            temp.append([res[0], res[1], res[2], str(res[0])+res[1], str(res[4])+res[1], res[5]+res[1], res[-3], res[-2], res[-1]])
strand_list.append(temp)
# converts list of strands to a list of sheets
sheet_list = sheet_loop(strand_list)
# extracts positional info for sheet residues
out_list = []
for sheet in sheet_list:
out_sheet_t = []
for res in sheet:
for item in temp_sheet:
if int(res[:-1]) == int(item[0]):
out_sheet_t.append(item[1:3]+item[-3:])
out_list.append(out_sheet_t)
return out_list, scraps
# extracts loops and gaps
def disordered(full_list, scrap_list):
gaps, loops, loop = [], [], []
# finds gaps
i = full_list[0][2]
item_k = False
for item in full_list:
item_l = [item[1], item[2], item[-3], item[-2], item[-1]]
if item_l[1] == i:
continue
elif item_l[1] != i+1 and item_l[0] == item_k[0]:
gaps.append([item_k, item_l])
i = item_l[1]
item_k = item_l
#finds loops
j = scrap_list[0][2]
for item in scrap_list:
item_l = [item[1], item[2], item[-3], item[-2], item[-1]]
if item_l[1] == j:
loop.append(item_l)
j = item_l[1]
elif j == scrap_list[-1] and len(loop) > 0:
loops.append(loop)
j = item_l[1]
elif item_l[1] == j+1:
loop.append(item_l)
j = item_l[1]
elif item_l[1] > j+1:
loops.append(loop)
loop = []
loop.append(item_l)
j = item_l[1]
elif item_l[1] < j+1 and len(loop) > 0:
loops.append(loop)
loop = []
loop.append(item_l)
j = item_l[1]
else:
loop = []
loop.append(item_l)
j = item_l[1]
# finds loop ends
loop_ends = []
for i in loops:
loop_ends.append([i[0], i[-1]])
return loop_ends, gaps
# Main fxn
def main_sec_strut(file_in):
file_list_1 = file_read(file_in)
extract = extract_info(file_list_1)
helices, scraps = helix(extract)
b_sheets, leftovers = sheets(scraps)
loops, gaps = disordered(extract, leftovers)
sec_strut_output = helices + b_sheets
file_output([sec_strut_output, helices, b_sheets, loops, gaps])
secondary_struts(helices,b_sheets)
disorganized_struts(helices, b_sheets, loops)
gap_struts(gaps)
|
#
# Test solvers give the same output
#
import pybamm
import numpy as np
import unittest
import liionpack as lp
class TestSolvers(unittest.TestCase):
def test_consistent_results_1_step(self):
Rsmall = 1e-6
netlist = lp.setup_circuit(
Np=1, Ns=1, Rb=Rsmall, Rc=Rsmall, Ri=5e-2, V=4.0, I=1.0
)
# PyBaMM parameters
chemistry = pybamm.parameter_sets.Chen2020
parameter_values = pybamm.ParameterValues(chemistry=chemistry)
# Cycling experiment
experiment = pybamm.Experiment(
[("Discharge at 1 A for 100 s or until 3.3 V",)] * 1, period="10 s"
)
# Solve pack with casadi
a = lp.solve(
netlist=netlist,
parameter_values=parameter_values,
experiment=experiment,
inputs=None,
nproc=1,
manager="casadi"
)
# Solve pack with dask
b = lp.solve(
netlist=netlist,
parameter_values=parameter_values,
experiment=experiment,
inputs=None,
nproc=1,
manager="dask"
)
# Solve pack with ray
c = lp.solve(
netlist=netlist,
parameter_values=parameter_values,
experiment=experiment,
inputs=None,
nproc=1,
manager="ray"
)
v_a = a["Terminal voltage [V]"]
v_b = b["Terminal voltage [V]"]
v_c = c["Terminal voltage [V]"]
assert np.allclose(v_a, v_b)
assert np.allclose(v_b, v_c)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python2.7
# -*- coding: UTF-8 -*-
from sys import stdin
from collections import defaultdict
import re
def main():
    rows = []
    for l in stdin:  # every line in the input
        text = l.decode('utf8')  # decode from utf-8 encoded string
        text = text.rstrip('\n')  # strip newline from the end of the line
        words = text.split("\t")
        rows.append(words)
    # To return a new list, use the sorted() built-in function...
    #orderedList = sorted(rows, key=lambda x: x[1], reverse=True)
    for word, stat in sorted(rows,
                             key=lambda x: x[1], reverse=True):
        print ("{0}\t{1}".format(word.encode('utf-8'), stat))
# if the script is called directly (as opposed to being imported)
# call the main function.
# This prevents it from being run when this module is imported
if __name__ == '__main__':
main()
|
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def isBalanced(self, root: TreeNode) -> bool:
return self.depth(root) != -1
def depth(self, root):
if not root:
return 0
left = self.depth(root.left)
if left == -1:
return -1
right = self.depth(root.right)
if right == -1:
return -1
return max(left, right) + 1 if abs(left - right) < 2 else -1
|
from datetime import (
datetime
)
from pydantic import (
BaseModel
)
from typing import (
Optional,
List,
)
__all__ = [
"Company",
]
class Value(BaseModel):
value: str
enum_id: Optional[int] = None
enum_code: Optional[str] = None
class CustomFieldValue(BaseModel):
field_id: int
field_name: str
field_code: Optional[str] = None
field_type: str
values: List[Value]
class Company(BaseModel):
id: int
name: str
responsible_user_id: int
created_at: datetime
updated_at: datetime
group_id: int
created_by: int
updated_by: int
is_deleted: bool
custom_fields_values: Optional[List[CustomFieldValue]]
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
Topic:
    Use a generator pipeline to mimic `tail -f www.log | grep "python"`,
    i.e. continuously watch a growing log file for lines containing "python".
Desc :
"""
import time
__author__ = 'Xiong Neng'
def tail(f):
    f.seek(0, 2)  # move to EOF
while True:
line = f.readline()
if not line:
time.sleep(0.2)
continue
yield line
def grep(lines, search_text):
for line in lines:
if search_text in line: yield line
def my_tail_search(filename, keyword):
wwwlog = tail(open(filename))
pylines = grep(wwwlog, keyword)
for line in pylines:
print(line)
def main():
pass
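# Usage sketch (blocking; file name and keyword taken from the docstring above):
if __name__ == '__main__':
    my_tail_search('www.log', 'python')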
|
from ibis.tests.expr.mocks import BaseMockConnection
class MockImpalaConnection(BaseMockConnection):
@property
def dialect(self):
from ibis.backends.impala.compiler import ImpalaDialect
return ImpalaDialect
def _build_ast(self, expr, context):
from ibis.backends.impala.compiler import build_ast
return build_ast(expr, context)
|
## Here we plot distributions of the proportion of individuals that were correct for each state.
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import gaussian_kde
pal = sns.diverging_palette(10, 220, sep=80, n=5,l=40,center='light')
pal2 = sns.diverging_palette(10, 220, sep=80, n=5,l=40,center='dark')
pal[2] = pal2[2]
def ilogit(x):
return 1/(1+np.exp(-x))
def plot_figure1a(true_data, false_data):
#Basic figure paramters
sns.set_context('paper', font_scale=1.5)
#Plot distributions, adjust legend etc...
sns.distplot(true_data.groupby(['states']).mean()['correct_start'],hist_kws=dict(histtype='stepfilled',alpha=.9,ec="k"),
color='white',bins=np.linspace(0,1,10),label='True',kde=False)
sns.distplot(false_data.groupby(['states']).mean()['correct_start'],hist_kws=dict(histtype='stepfilled',alpha=.8,ec="k"),
color='grey',bins=np.linspace(0,1,10),label='False',kde=False)
plt.yticks(np.linspace(0,25,6))
plt.xlim(0,1)
plt.xlabel('Proportion correct')
plt.ylabel('Count')
#Save figure
plt.tight_layout()
def joint_hpdi(samples_extracted):
for idx in range(5):
x = samples_extracted['alpha_p'][:,idx]
y = samples_extracted['beta_p'][:,idx]
k = gaussian_kde(np.vstack([x, y]))
xi, yi = np.mgrid[x.min():x.max():x.size**0.5*1j,y.min():y.max():y.size**0.5*1j]
zi = k(np.vstack([xi.flatten(), yi.flatten()]))
#set zi to 0-1 scale
zi = (zi-zi.min())/(zi.max() - zi.min())
zi =zi.reshape(xi.shape)
#set up plot
origin = 'lower'
levels = [.11,1]
CS = plt.contourf(xi, yi, zi,levels = levels,
shade=True,
linewidths=(1,),
alpha=.5,
colors=[pal[idx], pal[idx]],
origin=origin)
plt.xlabel('Intercept')
plt.ylabel('Effect of \nconfidence')
plt.ylim(-1.5,1.5)
plt.xlim(-1,1)
plt.xticks(np.linspace(-1.5,1.5,5))
    plt.yticks(np.linspace(-1.5,1.5,5))
def plot_figure1b(samples_extracted,stan_data_logistic):
x = np.linspace(np.min(stan_data_logistic['x']),np.max(stan_data_logistic['x']),10)
for idx in range(5):
y = np.array([samples_extracted['alpha_p'][:,idx] + samples_extracted['beta_p'][:,idx] * item for item in x])
y = ilogit(y)
cis = np.percentile(y, q=[5.5,94.5],axis=1)
plt.plot(50*(x/2+.5)+50, np.mean(y, axis=1),color=pal[idx])
plt.fill_between(50*(x/2+.5)+50, cis[0,:], cis[1,:],alpha=.3,color=pal[idx])
plt.ylim(.2,.8)
plt.xlim(50,100)
plt.ylabel('Accuracy')
plt.xlabel('Reported confidence')
def plot_fig1cd(stan_model_data, df, samples, correct=True):
x = np.linspace(-.5, .5, 100)
x_transformed = (x+.5)*100
for idx in range(5):
avg_conf = np.mean(stan_model_data['confidence'][df['pol_recode']==idx+1])
y = np.array([ilogit(samples['alpha_p'][:,idx] + \
samples['b_conf_p'][:,idx] * avg_conf +\
samples['b_socConf_p'][:,idx] * item) for item in x])
if correct:
plt.plot(x_transformed, np.mean(y,axis=1),color=pal[idx])
ci = np.percentile(y, axis=1, q=[5.5,94.5])
plt.fill_between(x_transformed, ci[0], ci[1], color=pal[idx],alpha=.3)
else:
plt.plot(x_transformed[::-1], np.mean(y,axis=1),color=pal[idx])
ci = np.percentile(y, axis=1, q=[5.5,94.5])
plt.fill_between(x_transformed[::-1], ci[0], ci[1], color=pal[idx],alpha=.3)
plt.ylabel('Probability of switching')
plt.ylim(0,1)
plt.xlim(0,100)
plt.xlabel('Social disagreement')
def plot_switch_predicted_acuracy(data, switch_samples, correct=True):
extracted_switch_samples_correct = switch_samples.extract(['alpha_p',
'b_conf_p',
'b_socConf_p',
'yhat'])
correct_data = data[data['correct_start']==correct]
pal[2] = pal2[2]
sns.set_context('paper', font_scale=1.5)
correct_data['yhat'] = np.mean(extracted_switch_samples_correct['yhat'],axis=0)
grouped = correct_data.groupby(['pol_recode']).mean().reset_index()
plt.scatter(grouped['yhat'], grouped['switched'],color=pal,s=100)
plt.plot([0,1], [0,1], ls='--', color='black')
plt.ylim(0.15, 0.4)
plt.xlim(0.15, 0.4)
plt.yticks(np.linspace(.15,.4,6))
    plt.xticks(np.linspace(.15,.4,6))
plt.xlabel('Predicted \nswitching')
plt.ylabel('Observed \nswitching')
np.percentile(extracted_switch_samples_correct['yhat'],axis=1, q=[5.5, 94.5])
|
class Solution(object):
def XXX(self, root):
if not root:
return 0
return max(self.XXX(root.left), self.XXX(root.right)) + 1
|
from bs4 import BeautifulSoup
import requests
import time
def get_movie_budgets( page_index):
r = requests.get('https://www.the-numbers.com/movie/budgets/all/%s'% (page_index,))
html_doc = r.content
soup = BeautifulSoup(html_doc, 'html.parser')
rows = [tr for tr in soup.find_all('tr') if len(tr) == 13 ]
return [(row.contents[4].a.string.encode('utf-8').strip(), row.contents[6].string.encode('utf-8').strip(), row.contents[10].string.encode('utf-8').strip()) for row in rows]
def write_movie_data( filename="gross.dat", max_page=120):
    page_index_list = [(100*i)+1 for i in range(max_page)]
    for index in page_index_list:
        time.sleep(4)
        movies = get_movie_budgets( str(index))
        with open(filename, 'a') as out:
            for m in movies:
                out.write('{}|{}|{}\n'.format(*m))
if __name__ == '__main__':
write_movie_data()
|
class Transformer(object):
def __init__(self, pc=None):
self.pc = pc
def transform_jump(self, cond, true_path, false_path, **kwargs):
return None
def transform_ret(self, ret_value, **kwargs):
return None
def transform_branch(self, cond, true_path, false_path, **kwargs):
return None
def we_are_not_transformed(kind):
return True
|
# Generated by Django 3.2 on 2021-05-01 08:58
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('store', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='category',
old_name='website_category',
new_name='store_category',
),
]
|
# coding=UTF-8
#------------------------------------------------------------------------------
# Copyright (c) 2007-2016, Acoular Development Team.
#------------------------------------------------------------------------------
# separate file to find out about version without importing the acoular lib
__author__ = "Acoular developers"
__date__ = "11 May 2016"
__version__ = "16.5"
|
# Copyright 2000 - 2013 NeuStar, Inc. All rights reserved.
# NeuStar, the Neustar logo and related names and logos are registered
# trademarks, service marks or tradenames of NeuStar, Inc. All other
# product names, company names, marks, logos and symbols may be trademarks
# of their respective owners.
__author__ = 'Jon Bodner'
import connection
import json
import urllib
class RestApiClient:
def __init__(self, username, password, use_http=False, host="restapi.ultradns.com"):
"""Initialize a Rest API Client.
Arguments:
username -- The username of the user
password -- The password of the user
Keyword Arguments:
use_http -- For internal testing purposes only, lets developers use http instead of https.
host -- Allows you to point to a server other than the production server.
"""
self.rest_api_connection = connection.RestApiConnection(use_http, host)
self.rest_api_connection.auth(username, password)
# Zones
# create a primary zone
def create_primary_zone(self, account_name, zone_name):
"""Creates a new primary zone.
Arguments:
account_name -- The name of the account that will contain this zone.
zone_name -- The name of the zone. It must be unique.
"""
zone_properties = {"name": zone_name, "accountName": account_name, "type": "PRIMARY"}
primary_zone_info = {"forceImport": True, "createType": "NEW"}
zone_data = {"properties": zone_properties, "primaryCreateInfo": primary_zone_info}
return self.rest_api_connection.post("/v1/zones", json.dumps(zone_data))
# list zones for account
def get_zones_of_account(self, account_name, q=None, **kwargs):
"""Returns a list of zones for the specified account.
Arguments:
account_name -- The name of the account.
Keyword Arguments:
q -- The search parameters, in a dict. Valid keys are:
name - substring match of the zone name
zone_type - one of:
PRIMARY
SECONDARY
ALIAS
sort -- The sort column used to order the list. Valid values for the sort field are:
NAME
ACCOUNT_NAME
RECORD_COUNT
ZONE_TYPE
reverse -- Whether the list is ascending(False) or descending(True)
offset -- The position in the list for the first returned element(0 based)
limit -- The maximum number of rows to be returned.
"""
uri = "/v1/accounts/" + account_name + "/zones"
params = build_params(q, kwargs)
return self.rest_api_connection.get(uri, params)
# get zone metadata
def get_zone_metadata(self, zone_name):
"""Returns the metadata for the specified zone.
Arguments:
zone_name -- The name of the zone being returned.
"""
return self.rest_api_connection.get("/v1/zones/" + zone_name)
# delete a zone
def delete_zone(self, zone_name):
"""Deletes the specified zone.
Arguments:
zone_name -- The name of the zone being deleted.
"""
return self.rest_api_connection.delete("/v1/zones/"+zone_name)
# RRSets
# list rrsets for a zone
def get_rrsets(self, zone_name, q=None, **kwargs):
"""Returns the list of RRSets in the specified zone.
Arguments:
zone_name -- The name of the zone.
Keyword Arguments:
q -- The search parameters, in a dict. Valid keys are:
ttl - must match the TTL for the rrset
owner - substring match of the owner name
value - substring match of the first BIND field value
sort -- The sort column used to order the list. Valid values for the sort field are:
OWNER
TTL
TYPE
reverse -- Whether the list is ascending(False) or descending(True)
offset -- The position in the list for the first returned element(0 based)
limit -- The maximum number of rows to be returned.
"""
uri = "/v1/zones/" + zone_name + "/rrsets"
params = build_params(q, kwargs)
return self.rest_api_connection.get(uri, params)
# list rrsets by type for a zone
# q The query used to construct the list. Query operators are ttl, owner, and value
def get_rrsets_by_type(self, zone_name, rtype, q=None, **kwargs):
"""Returns the list of RRSets in the specified zone of the specified type.
Arguments:
zone_name -- The name of the zone.
rtype -- The type of the RRSets. This can be numeric (1) or
if a well-known name is defined for the type (A), you can use it instead.
Keyword Arguments:
q -- The search parameters, in a dict. Valid keys are:
ttl - must match the TTL for the rrset
owner - substring match of the owner name
value - substring match of the first BIND field value
sort -- The sort column used to order the list. Valid values for the sort field are:
OWNER
TTL
TYPE
reverse -- Whether the list is ascending(False) or descending(True)
offset -- The position in the list for the first returned element(0 based)
limit -- The maximum number of rows to be returned.
"""
uri = "/v1/zones/" + zone_name + "/rrsets/" + rtype
params = build_params(q, kwargs)
return self.rest_api_connection.get(uri,params)
# create an rrset
def create_rrset(self, zone_name, rtype, owner_name, ttl, rdata):
"""Creates a new RRSet in the specified zone.
Arguments:
zone_name -- The zone that will contain the new RRSet. The trailing dot is optional.
rtype -- The type of the RRSet. This can be numeric (1) or
if a well-known name is defined for the type (A), you can use it instead.
owner_name -- The owner name for the RRSet.
If no trailing dot is supplied, the owner_name is assumed to be relative (foo).
If a trailing dot is supplied, the owner name is assumed to be absolute (foo.zonename.com.)
ttl -- The TTL value for the RRSet.
rdata -- The BIND data for the RRSet as a string.
If there is a single resource record in the RRSet, you can pass in the single string.
If there are multiple resource records in this RRSet, pass in a list of strings.
"""
if type(rdata) is not list:
rdata = [rdata]
rrset = {"ttl": ttl, "rdata": rdata}
return self.rest_api_connection.post("/v1/zones/"+zone_name+"/rrsets/"+rtype+"/"+owner_name, json.dumps(rrset))
# edit an rrset(PUT)
def edit_rrset(self, zone_name, rtype, owner_name, ttl, rdata):
"""Updates an existing RRSet in the specified zone.
Arguments:
zone_name -- The zone that contains the RRSet. The trailing dot is optional.
rtype -- The type of the RRSet. This can be numeric (1) or
if a well-known name is defined for the type (A), you can use it instead.
owner_name -- The owner name for the RRSet.
If no trailing dot is supplied, the owner_name is assumed to be relative (foo).
If a trailing dot is supplied, the owner name is assumed to be absolute (foo.zonename.com.)
ttl -- The updated TTL value for the RRSet.
rdata -- The updated BIND data for the RRSet as a string.
If there is a single resource record in the RRSet, you can pass in the single string.
If there are multiple resource records in this RRSet, pass in a list of strings.
"""
if type(rdata) is not list:
rdata = [rdata]
rrset = {"ttl": ttl, "rdata": rdata}
uri = "/v1/zones/" + zone_name + "/rrsets/" + rtype + "/" + owner_name
return self.rest_api_connection.put(uri,json.dumps(rrset))
# delete an rrset
def delete_rrset(self, zone_name, rtype, owner_name):
"""Deletes an RRSet.
Arguments:
zone_name -- The zone containing the RRSet to be deleted. The trailing dot is optional.
rtype -- The type of the RRSet. This can be numeric (1) or
if a well-known name is defined for the type (A), you can use it instead.
owner_name -- The owner name for the RRSet.
If no trailing dot is supplied, the owner_name is assumed to be relative (foo).
If a trailing dot is supplied, the owner name is assumed to be absolute (foo.zonename.com.)
"""
return self.rest_api_connection.delete("/v1/zones/" + zone_name + "/rrsets/" + rtype + "/" + owner_name)
# Accounts
# get account details for user
def get_account_details(self):
"""Returns a list of all accounts of which the current user is a member."""
return self.rest_api_connection.get("/v1/accounts")
# Version
# get version
def version(self):
"""Returns the version of the REST API server."""
return self.rest_api_connection.get("/v1/version")
# Status
# get status
def status(self):
"""Returns the status of the REST API server."""
return self.rest_api_connection.get("/v1/status")
def build_params(q, args):
params = {}
params.update(args)
if q is not None:
params.update(q)
return params
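# Hedged usage sketch (credentials, account and zone names are placeholders only),
# exercising the zone and RRSet helpers defined above:
if __name__ == "__main__":
    client = RestApiClient("my_username", "my_password")
    print(client.version())
    print(client.create_primary_zone("my_account", "example.invalid."))
    print(client.create_rrset("example.invalid.", "A", "www", 300, "192.0.2.10"))
    print(client.get_rrsets("example.invalid."))
    print(client.delete_zone("example.invalid."))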
|
import visitors.visitor as visitor
from pipeline import State
from cl_ast import *
class FormatVisitor(State):
def __init__(self, sname, fname):
super().__init__(sname)
self.fname = fname
def run(self, ast):
printed_ast = self.visit(ast)
f = open(self.fname, 'w')
f.write(printed_ast)
f.close()
return ast
# Visitor for each node
@visitor.on('node')
def visit(self, node, tabs):
ans = '\t' * tabs + f'\\__{node.__class__.__name__}'
return ans
@visitor.when(ProgramNode)
def visit(self, node, tabs=0):
ans = '\t' * tabs + f'\\__ProgramNode ({node.row},{node.col}) [<class> ... <class>]'
statements = '\n'.join(self.visit(child, tabs + 1) for child in node.declarations)
return f'{ans}\n{statements}'
@visitor.when(ClassDeclarationNode)
def visit(self, node, tabs=0):
parent = '' if node.parent is None else f": {node.parent}"
ans = '\t' * tabs + f'\\__ClassDeclarationNode ({node.row},{node.col}): class {node.id} {parent} {{ <feature> ... <feature> }}'
features = '\n'.join(self.visit(child, tabs + 1) for child in node.features)
return f'{ans}\n{features}'
@visitor.when(AttrDeclarationNode)
def visit(self, node, tabs=0):
ans = '\t' * tabs + f'\\__AttrDeclarationNode ({node.row},{node.col}): {node.id} : {node.type}'
return f'{ans}'
@visitor.when(VarDeclarationNode)
def visit(self, node, tabs=0):
ans = '\t' * tabs + f'\\__VarDeclarationNode ({node.row},{node.col}): {node.id} : {node.type} = <expr>'
expr = self.visit(node.expr, tabs + 1)
return f'{ans}\n{expr}'
@visitor.when(FuncDeclarationNode)
def visit(self, node, tabs=0):
params = ', '.join(':'.join(param) for param in node.params)
ans = '\t' * tabs + f'\\__FuncDeclarationNode ({node.row},{node.col}): {node.id}({params}) : {node.type} -> <body>'
body = f'{self.visit(node.body, tabs + 1)}'
# body = f'\n{self.visit(node.body, tabs + 1)}'.join(self.visit(child, tabs + 1) for child in node.body)
return f'{ans}\n{body}'
@visitor.when(ConstantNode)
def visit(self, node, tabs=0):
return '\t' * tabs + f'\\__ ({node.row},{node.col}){node.__class__.__name__}: {node.lex}'
@visitor.when(BinaryOperationNode)
def visit(self, node, tabs=0):
ans = '\t' * tabs + f'\\__<expr> ({node.row},{node.col}){node.__class__.__name__} <expr>'
left = self.visit(node.left, tabs + 1)
right = self.visit(node.right, tabs + 1)
return f'{ans}\n{left}\n{right}'
@visitor.when(UnaryOperationNode)
def visit(self, node, tabs=0):
ans = '\t' * tabs + f'\\__ ({node.row},{node.col}){node.__class__.__name__} <expr>'
expr = self.visit(node.expr, tabs + 1)
return f'{ans}\n{expr}'
@visitor.when(AssignNode)
def visit(self, node, tabs=0):
ans = '\t' * tabs + f'\\__AssignNode ({node.row},{node.col}): {node.id} <- <expr>'
expr = self.visit(node.expr, tabs + 1)
return f'{ans}\n{expr}'
@visitor.when(WhileNode)
def visit(self, node, tabs=0):
ans = '\t' * tabs + f'\\__{node.__class__.__name__}: while ({node.row},{node.col}) <cond> loop <expr> pool'
cond = self.visit(node.cond, tabs + 1)
expr = self.visit(node.expr, tabs + 1)
return f'{ans}\n{cond}\n{expr}'
@visitor.when(ConditionalNode)
def visit(self, node, tabs=0):
ans = '\t' * tabs + f'\\__ {node.__class__.__name__}: if ({node.row},{node.col}) <cond> then <expr> else <expr> fi'
cond = self.visit(node.cond, tabs + 1)
stm = self.visit(node.stm, tabs + 1)
else_stm = self.visit(node.else_stm, tabs + 1)
return f'{ans}\n{cond}\n{stm}\n{else_stm}'
@visitor.when(CaseNode)
def visit(self, node, tabs=0):
ans = '\t' * tabs + f'\\__ {node.__class__.__name__}: case ({node.row},{node.col}) <expr> of <case-list> esac'
expr = self.visit(node.expr, tabs + 1)
case_list = '\n'.join(self.visit(child, tabs + 1) for child in node.case_list)
return f'{ans}\n{expr}\n{case_list}'
@visitor.when(OptionNode)
def visit(self, node, tabs=0):
ans = '\t' * tabs + f'\\__ {node.__class__.__name__}: {node.id} : {node.typex} ({node.row},{node.col}) -> <expr>'
expr = self.visit(node.expr, tabs + 1)
return f'{ans}\n{expr}'
@visitor.when(BlockNode)
def visit(self, node, tabs=0):
ans = '\t' * tabs + f'\\__ {node.__class__.__name__} ({node.row},{node.col})' + '{ <expr_list> }'
expr = '\n'.join(self.visit(child, tabs + 1) for child in node.expr_list)
return f'{ans}\n{expr}'
@visitor.when(NewNode)
def visit(self, node, tabs=0):
return '\t' * tabs + f'\\__ NewNode ({node.row},{node.col}): new {node.type}()'
@visitor.when(VariableNode)
def visit(self, node, tabs=0):
return '\t' * tabs + f'\\__ VarAccessNode ({node.row},{node.col}): {node.id}'
@visitor.when(LetNode)
def visit(self, node, tabs=0):
ans = '\t' * tabs + f'\\__ {node.__class__.__name__} let ({node.row},{node.col}) <init_list> in <expr>'
init_list = '\n'.join(self.visit(arg, tabs + 1) for arg in node.init_list)
expr = self.visit(node.expr, tabs + 1)
return f'{ans}\n{init_list}\n{expr}'
@visitor.when(ParentCallNode)
def visit(self, node, tabs=0):
obj = self.visit(node.obj, tabs + 1)
ans = '\t' * tabs + f'\\__ParentCallNode ({node.row},{node.col}) : <obj>@{node.type}.{node.id}(<expr>, ..., <expr>)'
args = '\n'.join(self.visit(arg, tabs + 1) for arg in node.args)
return f'{ans}\n{obj}\n{args}'
@visitor.when(ExprCallNode)
def visit(self, node, tabs=0):
obj = self.visit(node.obj, tabs + 1)
ans = '\t' * tabs + f'\\__ExprCallNode ({node.row},{node.col}) : <obj>.{node.id}(<expr>, ..., <expr>)'
args = '\n'.join(self.visit(arg, tabs + 1) for arg in node.args)
return f'{ans}\n{obj}\n{args}'
@visitor.when(SelfCallNode)
def visit(self, node, tabs=0):
ans = '\t' * tabs + f'\\__SelfCallNode ({node.row},{node.col}) : {node.id}(<expr>, ..., <expr>)'
args = '\n'.join(self.visit(arg, tabs + 1) for arg in node.args)
return f'{ans}\n{args}'
|
import requests
import math
import os
import time
import lxml.html
PROXY_TXT_API = 'https://www.proxyscan.io/api/proxy?type=https&format=txt&uptime=100'
PLATFORM = os.name
def get_proxy():
'''
Get proxy (str) from API.
'''
proxy = requests.get(PROXY_TXT_API).text
return proxy.rstrip()
def convert_size(size_bytes):
'''
Convert from bytes to human readable sizes (str).
'''
# https://stackoverflow.com/a/14822210
if size_bytes == 0:
return '0 B'
size_name = ('B', 'KB', 'MB', 'GB', 'TB')
i = int(math.floor(math.log(size_bytes, 1024)))
p = math.pow(1024, i)
s = round(size_bytes / p, 2)
return '%s %s' % (s, size_name[i])
def download_speed(bytes_read, start_time):
'''
Convert speed to human readable speed (str).
'''
if bytes_read == 0:
return '0 B/s'
elif time.time()-start_time == 0:
return '- B/s'
size_name = ('B/s', 'KB/s', 'MB/s', 'GB/s', 'TB/s')
bps = bytes_read/(time.time()-start_time)
i = int(math.floor(math.log(bps, 1024)))
p = math.pow(1024, i)
s = round(bps / p, 2)
return '%s %s' % (s, size_name[i])
def get_link_info(url):
'''
Get file name and size.
Returns list: [File Name, Downloaded Size]
'''
try:
r = requests.get(url)
html = lxml.html.fromstring(r.content)
if html.xpath('//*[@id="pass"]'):
return ['Private File', '- MB']
name = html.xpath('//td[@class=\'normal\']')[0].text
size = html.xpath('//td[@class=\'normal\']')[2].text
return [name, size]
    except Exception:
return None
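# Hedged usage sketch of the helpers above (the proxy API endpoint and the file-host
# page layout are whatever PROXY_TXT_API and the XPaths in get_link_info assume):
if __name__ == '__main__':
    print(convert_size(1536))                      # '1.5 KB'
    start = time.time() - 2                        # pretend two seconds have elapsed
    print(download_speed(4 * 1024 * 1024, start))  # roughly '2.0 MB/s'
    print(get_proxy())                             # one 'host:port' line from the API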
|
import time
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
digitBitmap = {0: 0b00111111, 1: 0b00000110, 2: 0b01011011, 3: 0b01001111, 4: 0b01100110,
5: 0b01101101, 6: 0b01111101, 7: 0b00000111, 8: 0b01111111, 9: 0b01100111}
masks = {'a': 0b00000001, 'b': 0b00000010, 'c': 0b00000100,
'd': 0b00001000, 'e': 0b00010000, 'f': 0b00100000, 'g': 0b01000000}
pins = {'a': 17, 'b': 22, 'c': 6, 'd': 13, 'e': 19, 'f': 27, 'g': 5}
def renderChar(c):
val = digitBitmap[c]
GPIO.output(list(pins.values()), GPIO.LOW)
for k, v in masks.items():
if val & v == v:
GPIO.output(pins[k], GPIO.HIGH)
try:
GPIO.setup(list(pins.values()), GPIO.OUT)
GPIO.output(list(pins.values()), GPIO.LOW)
val = 0
while True:
print("Printing " + str(val))
renderChar(val)
val = 0 if val == 9 else (val + 1)
time.sleep(1)
except KeyboardInterrupt:
print("Goodbye")
finally:
GPIO.cleanup()
|
import tkinter as tk
class SimpleTableInput(tk.Frame):
def __init__(self, parent, rows, columns):
tk.Frame.__init__(self, parent)
self._entry = {}
self.rows = rows
self.columns = columns
# register a command to use for validation
vcmd = (self.register(self._validate), "%P")
# create the table of widgets
for row in range(self.rows):
for column in range(self.columns):
index = (row, column)
e = tk.Entry(self, validate="key", validatecommand=vcmd)
e.grid(row=row, column=column, stick="nsew")
self._entry[index] = e
# adjust column weights so they all expand equally
for column in range(self.columns):
self.grid_columnconfigure(column, weight=1)
# designate a final, empty row to fill up any extra space
self.grid_rowconfigure(rows, weight=1)
def get(self):
'''Return a list of lists, containing the data in the table'''
result = []
for row in range(self.rows):
current_row = []
for column in range(self.columns):
index = (row, column)
current_row.append(self._entry[index].get())
result.append(current_row)
return result
def _validate(self, P):
'''Perform input validation.
Allow only an empty value, or a value that can be converted to a float
'''
if P.strip() == "":
return True
try:
f = float(P)
except ValueError:
self.bell()
return False
return True
class Example(tk.Frame):
def __init__(self, parent):
tk.Frame.__init__(self, parent)
self.table = SimpleTableInput(self, 3, 4)
self.submit = tk.Button(self, text="Submit", command=self.on_submit)
self.table.pack(side="top", fill="both", expand=True)
self.submit.pack(side="bottom")
def on_submit(self):
print(self.table.get())
root = tk.Tk()
Example(root).pack(side="top", fill="both", expand=True)
root.mainloop()
|
from __future__ import absolute_import
from __future__ import unicode_literals
from couchexport.export import get_writer
class WorkBook(object):
_undefined = '---'
@property
def undefined(self):
return self._undefined
def __init__(self, file, format):
self._headers = {}
self.writer = get_writer(format.slug)
self.file = file
self.writer.open((), file)
def open(self, table_name, headers):
self.writer.add_table(table_name, headers)
self._headers[table_name] = headers
def write_row(self, table_name, row):
headers = self._headers[table_name]
for key in row:
if key not in headers:
raise AttributeError()
self.writer.write_row(table_name, [row.get(h) for h in headers])
def close(self):
self.writer.close()
|
from django_elasticsearch_dsl_drf.serializers import DocumentSerializer
from rest_framework import serializers
from radical_translations.core.documents import ResourceDocument
class ResourceDocumentSerializer(DocumentSerializer):
highlight = serializers.SerializerMethodField()
class Meta:
document = ResourceDocument
def get_highlight(self, obj):
if hasattr(obj.meta, "highlight"):
return obj.meta.highlight.__dict__["_d_"]
return {}
class SimpleResourceDocumentSerializer(DocumentSerializer):
class Meta:
document = ResourceDocument
fields = [
"id",
"title",
"date_display",
"year",
"places",
"is_original",
"is_translation",
"form_genre",
]
|
from afterpay.exceptions.afterpay_error import AfterpayError
class ServerError(AfterpayError):
"""
A common cause of this response from PUT/POST endpoints is that the request body is missing or empty.
"""
pass
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Yuki Furuta <furushchev@jsk.imi.i.u-tokyo.ac.jp>
import chainer
import chainer.links as L
import chainer.functions as F
from ... import links
class DeepEpisodicMemoryDecoder(chainer.Chain):
"""Deep Episodic Memory Decoder"""
def __init__(self, num_episodes, in_channels=None, dropout=None):
if in_channels is None:
in_channels = 1000
if dropout is None:
dropout = 0.1
super(DeepEpisodicMemoryDecoder, self).__init__(
fc_lstm=links.ConvolutionLSTM2D(
in_channels, in_channels, 1, pad=0),
fc_deconv=L.Deconvolution2D(
in_channels, 64, 4, stride=1, pad=0),
#
lstm1=links.ConvolutionLSTM2D(64, 64, 3),
lstm_norm1=links.LayerNormalization(),
deconv1=L.Deconvolution2D(
64, 64, 3, stride=2, pad=1, outsize=(8, 8)),
deconv_norm1=links.LayerNormalization(),
#
lstm2=links.ConvolutionLSTM2D(64, 64, 3),
lstm_norm2=links.LayerNormalization(),
deconv2=L.Deconvolution2D(
64, 64, 3, stride=2, pad=1, outsize=(16, 16)),
deconv_norm2=links.LayerNormalization(),
#
lstm3=links.ConvolutionLSTM2D(64, 32, 3),
lstm_norm3=links.LayerNormalization(),
deconv3=L.Deconvolution2D(
32, 32, 5, stride=2, pad=2, outsize=(32, 32)),
deconv_norm3=links.LayerNormalization(),
            # block 4: 32x32 -> 64x64
lstm4=links.ConvolutionLSTM2D(32, 32, 5),
lstm_norm4=links.LayerNormalization(),
deconv4=L.Deconvolution2D(
32, 32, 5, stride=2, pad=2, outsize=(64, 64)),
deconv_norm4=links.LayerNormalization(),
            # block 5: 64x64 -> 128x128 (32 -> 3 RGB channels)
lstm5=links.ConvolutionLSTM2D(32, 32, 5),
lstm_norm5=links.LayerNormalization(),
deconv5=L.Deconvolution2D(
32, 3, 5, stride=2, pad=2, outsize=(128, 128)),
)
self.in_channels = in_channels
self.dropout = dropout
self.num_episodes = num_episodes
def reset_state(self):
for link in self.links():
if link != self and hasattr(link, "reset_state"):
link.reset_state()
def __call__(self, x):
xp = self.xp
assert x.ndim == 4, "%s != 4" % (x.ndim) # B,2C,1,1
assert x.shape[1] == self.in_channels * 2
assert x.shape[2] == x.shape[3] == 1
c0, h0 = F.split_axis(x, 2, axis=1)
self.fc_lstm.set_state(c0, h0)
l0 = h0
outputs = []
for i in range(self.num_episodes):
l0 = self.fc_lstm(l0)
#
d0 = self.fc_deconv(F.dropout(l0, ratio=self.dropout))
d0 = F.relu(d0)
#
l1 = self.lstm1(F.dropout(d0, ratio=self.dropout))
l1 = self.lstm_norm1(l1)
#
d1 = self.deconv1(F.dropout(l1, ratio=self.dropout))
d1 = F.relu(self.deconv_norm1(d1))
#
l2 = self.lstm2(F.dropout(d1, ratio=self.dropout))
l2 = self.lstm_norm2(l2)
#
d2 = self.deconv2(F.dropout(l2, ratio=self.dropout))
d2 = F.relu(self.deconv_norm2(d2))
#
l3 = self.lstm3(F.dropout(d2, ratio=self.dropout))
l3 = self.lstm_norm3(l3)
#
d3 = self.deconv3(F.dropout(l3, ratio=self.dropout))
d3 = F.relu(self.deconv_norm3(d3))
#
l4 = self.lstm4(F.dropout(d3, ratio=self.dropout))
l4 = self.lstm_norm4(l4)
#
d4 = self.deconv4(F.dropout(l4, ratio=self.dropout))
d4 = F.relu(self.deconv_norm4(d4))
#
l5 = self.lstm5(F.dropout(d4, ratio=self.dropout))
l5 = self.lstm_norm5(l5)
#
o = self.deconv5(F.dropout(l5, ratio=self.dropout))
o = F.relu(o)
#
o = o[:, None, :, :, :]
outputs.append(o) # <- B1CHW
outputs = F.concat(outputs, axis=1) # BNCHW
return outputs
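# Forward-pass sketch (assumption, not part of the original module): the encoder is
# expected to provide a (B, 2 * in_channels, 1, 1) latent with the cell and hidden
# states stacked along the channel axis; each episode is decoded up through the
# deconvolution stack to a 128x128 RGB frame, giving (B, num_episodes, 3, 128, 128).
#
#   import numpy as np
#   decoder = DeepEpisodicMemoryDecoder(num_episodes=5)  # in_channels defaults to 1000
#   decoder.reset_state()
#   latent = np.zeros((2, 2000, 1, 1), dtype=np.float32)
#   with chainer.using_config("train", False):
#       frames = decoder(latent)
#   assert frames.shape == (2, 5, 3, 128, 128)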
|
import shutil
import pytest
from jupytext import read
from jupytext.cli import jupytext
from .utils import requires_ir_kernel, requires_nbconvert, skip_on_windows
@requires_nbconvert
@skip_on_windows
def test_pipe_nbconvert_execute(tmpdir):
tmp_ipynb = str(tmpdir.join("notebook.ipynb"))
tmp_py = str(tmpdir.join("notebook.py"))
with open(tmp_py, "w") as fp:
fp.write(
"""1 + 2
"""
)
jupytext(
args=[
tmp_py,
"--to",
"ipynb",
"--pipe-fmt",
"ipynb",
"--pipe",
"jupyter nbconvert --stdin --stdout --to notebook --execute",
]
)
nb = read(tmp_ipynb)
assert len(nb.cells) == 1
assert nb.cells[0].outputs[0]["data"] == {"text/plain": "3"}
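# For reference, the jupytext() call above mirrors the command line (sketch, same
# flags and order as in the args list):
#   jupytext notebook.py --to ipynb --pipe-fmt ipynb \
#       --pipe "jupyter nbconvert --stdin --stdout --to notebook --execute"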
@requires_nbconvert
@skip_on_windows
def test_pipe_nbconvert_execute_sync(tmpdir):
tmp_ipynb = str(tmpdir.join("notebook.ipynb"))
tmp_py = str(tmpdir.join("notebook.py"))
with open(tmp_py, "w") as fp:
fp.write(
"""1 + 2
"""
)
jupytext(
args=[
tmp_py,
"--set-formats",
"py,ipynb",
"--sync",
"--pipe-fmt",
"ipynb",
"--pipe",
"jupyter nbconvert --stdin --stdout --to notebook --execute",
]
)
nb = read(tmp_ipynb)
assert len(nb.cells) == 1
assert nb.cells[0].outputs[0]["data"] == {"text/plain": "3"}
@requires_nbconvert
@skip_on_windows
def test_execute(tmpdir, caplog, capsys):
tmp_ipynb = str(tmpdir.join("notebook.ipynb"))
tmp_py = str(tmpdir.join("notebook.py"))
with open(tmp_py, "w") as fp:
fp.write(
"""1 + 2
"""
)
jupytext(args=[tmp_py, "--to", "ipynb", "--execute"])
nb = read(tmp_ipynb)
assert len(nb.cells) == 1
assert nb.cells[0].outputs[0]["data"] == {"text/plain": "3"}
@requires_nbconvert
def test_execute_readme_ok(tmpdir):
tmp_md = str(tmpdir.join("notebook.md"))
with open(tmp_md, "w") as fp:
fp.write(
"""
A readme with correct instructions
```python
1 + 2
```
"""
)
jupytext(args=[tmp_md, "--execute"])
@requires_nbconvert
@skip_on_windows
def test_execute_readme_not_ok(tmpdir):
tmp_md = str(tmpdir.join("notebook.md"))
with open(tmp_md, "w") as fp:
fp.write(
"""
A readme with incorrect instructions (a is not defined)
```python
a + 1
```
"""
)
import nbconvert
with pytest.raises(
nbconvert.preprocessors.execute.CellExecutionError, match="is not defined"
):
jupytext(args=[tmp_md, "--execute"])
@requires_nbconvert
@skip_on_windows
def test_execute_sync(tmpdir, caplog, capsys):
tmp_ipynb = str(tmpdir.join("notebook.ipynb"))
tmp_py = str(tmpdir.join("notebook.py"))
with open(tmp_py, "w") as fp:
fp.write(
"""1 + 2
"""
)
jupytext(args=[tmp_py, "--set-formats", "py,ipynb", "--sync", "--execute"])
nb = read(tmp_ipynb)
assert len(nb.cells) == 1
assert nb.cells[0].outputs[0]["data"] == {"text/plain": "3"}
@requires_nbconvert
@requires_ir_kernel
@skip_on_windows
def test_execute_r(tmpdir, caplog, capsys): # pragma: no cover
tmp_ipynb = str(tmpdir.join("notebook.ipynb"))
tmp_md = str(tmpdir.join("notebook.md"))
with open(tmp_md, "w") as fp:
fp.write(
"""```r
1 + 2 + 3
```
"""
)
jupytext(args=[tmp_md, "--to", "ipynb", "--execute"])
nb = read(tmp_ipynb)
assert len(nb.cells) == 1
assert nb.cells[0].outputs[0]["data"]["text/markdown"] == "6"
@requires_nbconvert
@skip_on_windows
def test_execute_in_subfolder(tmpdir, caplog, capsys):
subfolder = tmpdir.mkdir("subfolder")
tmp_csv = str(subfolder.join("inputs.csv"))
tmp_py = str(subfolder.join("notebook.py"))
tmp_ipynb = str(subfolder.join("notebook.ipynb"))
with open(tmp_csv, "w") as fp:
fp.write("1\n2\n")
with open(tmp_py, "w") as fp:
fp.write(
"""import ast
with open('inputs.csv') as fp:
text = fp.read()
sum(ast.literal_eval(line) for line in text.splitlines())
"""
)
jupytext(args=[tmp_py, "--to", "ipynb", "--execute"])
nb = read(tmp_ipynb)
assert len(nb.cells) == 3
assert nb.cells[2].outputs[0]["data"] == {"text/plain": "3"}
tmp2_py = str(tmpdir.mkdir("another_folder").join("notebook.py"))
tmp2_ipynb = str(tmpdir.join("another_folder", "notebook.ipynb"))
shutil.copy(tmp_py, tmp2_py)
    # Executing without --run-path fails: inputs.csv is not found from the new folder
import nbconvert
with pytest.raises(
nbconvert.preprocessors.execute.CellExecutionError,
match="No such file or directory: 'inputs.csv'",
):
jupytext(args=[tmp2_py, "--to", "ipynb", "--execute"])
    # Raise if the folder does not exist
with pytest.raises(ValueError, match="is not a valid path"):
jupytext(args=[tmp2_py, "--to", "ipynb", "--run-path", "wrong_path"])
# Execute in full path
jupytext(args=[tmp2_py, "--to", "ipynb", "--run-path", str(subfolder)])
nb = read(tmp2_ipynb)
assert len(nb.cells) == 3
assert nb.cells[2].outputs[0]["data"] == {"text/plain": "3"}
# Execute in path relative to notebook dir
jupytext(args=[tmp2_py, "--to", "ipynb", "--run-path", "../subfolder"])
nb = read(tmp2_ipynb)
assert len(nb.cells) == 3
assert nb.cells[2].outputs[0]["data"] == {"text/plain": "3"}
|