import argparse
import os
from tensorflow.contrib.learn.python.learn.utils import (
saved_model_export_utils)
from tensorflow.contrib.training.python.training import hparam
# ---------------------------------------------------
# Library used for loading a file from Google Storage
# ---------------------------------------------------
from tensorflow.python.lib.io import file_io
# ---------------------------------------------------
# Library used for uploading a file to Google Storage
# ---------------------------------------------------
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
import tensorflow as tf
import matplotlib as mpl
mpl.use('agg')
import matplotlib.pyplot as plt
import csv
import numpy as np
from sklearn.preprocessing import normalize
from sklearn.cluster import KMeans
def MinMaxScaler(data):
''' Min-Max Normalization
Parameters
----------
data : numpy.ndarray
input data to be normalized
shape: [Batch size, dimension]
Returns
-------
numpy.ndarray
data rescaled column-wise to the [0, 1] range
'''
numerator = data - np.min(data, 0)
denominator = np.max(data, 0) - np.min(data, 0)
# noise term prevents the zero division
return numerator / (denominator + 1e-7)
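# For instance, applied column-wise to [[1., 10.], [2., 20.], [3., 30.]] this maps
# each column onto [0, 1] (up to the 1e-7 noise term):
# MinMaxScaler(np.array([[1., 10.], [2., 20.], [3., 30.]]))
# -> [[0., 0.], [0.5, 0.5], [1., 1.]]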
def load_series(filename):
filename = filename[0] #filename : route (--train-files <route>)
try:
with file_io.FileIO(filename, mode='r') as csvfile:
print("===in load_series function, fileIO===")
csvreader = csv.reader(csvfile)
data = [row for row in csvreader if len(row) > 0]
return data
except IOError:
return None
def sampling_data(data, window_size, slide_len):
# This function samples time-series data with a sliding window
# window_size : length of each sample (the sliding window)
# slide_len : step between consecutive window start positions
samples = [] # will become 3-D data: (n_samples, window_size, dim)
for i in range(0,len(data),slide_len):
sample = np.copy(data[i:i+window_size])
if len(sample) != window_size:
continue
samples.append(sample)
#print(np.array(samples).shape)
return samples
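# Illustration: with window_size=10 and slide_len=2, samples start at rows 0, 2, 4, ...
# so consecutive samples overlap by 8 rows, and any trailing chunk shorter than
# window_size is dropped.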
def reconstruct(data, window, clusterer):
# Reconstruct data from the centroids of the clusterer
# data : input data
# window : window function that tapers each segment's start and end points to zero
# clusterer : fitted scikit-learn clustering model
print("====in reconstruct function====")
window_len = len(window)
slide_len = int(window_len/2)
[n, e] = data.shape
print(data.shape)
# split data with slide length window_len/2 (consecutive segments overlap)
segments = sampling_data(data, window_len, slide_len)
print("====segments shape in reconstruct====")
print(np.array(segments).shape) # should be 3-D
reconstructed_data = np.zeros((len(data), e))
# find nearest centroid among clusters for reconstruction
for segment_n, segment in enumerate(segments):
segment *= window
segment = np.reshape(segment,(1, (window_len*e) ))
nearest_match_idx = clusterer.predict(segment)[0] # only one sample was passed, so take element [0]
nearest_match = np.copy(clusterer.cluster_centers_[nearest_match_idx])
if(segment_n < 10):
print(np.array(nearest_match).shape)
print(window_len*e)
nearest_match = np.reshape(nearest_match, (window_len, e))
pos = segment_n*slide_len
if(pos+(window_len) < len(data)):
reconstructed_data[pos:pos+(window_len)] += nearest_match
return reconstructed_data
def run_experiment(hparams):
data = load_series(hparams.train_files)
print("=====run experiment=====")
# data is a list of strings, so cast it to a float np.array
data = np.array(data)
data = np.delete(data, (0), axis=0)
data = data.astype(float)
#print(data)
# min-max scaling
xy = MinMaxScaler(data)
#xy = data
#x = xy[:,0:-1]
#build a dataset
print("========data building started========")
data_X = np.array(xy)
#normalization
#data_X = normalize(data_X, axis=0, norm='l2')
#train/test split
print("=====train/test split started=====")
train_size = int(len(data_X)*0.8)
train_X, test_X = np.array(data_X[0:train_size]), np.array(data_X[train_size:len(data_X)])
#train_Y, test_Y = np.array(data_Y[0:train_size]), np.array(data_Y[train_size:len(data_X)])
#hyperparameter
window_len = 10
n_clusters = 150
# weight function is sin^2
weight_rads = np.linspace(0,np.pi,window_len)
weight_func = np.sin(weight_rads)**2
weight_func = np.reshape(weight_func, (-1,1))
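# A sin^2 window is zero at both segment ends, and at 50% overlap
# (slide length = window length / 2) overlapping windows sum to roughly a
# constant, so the reconstructed segments blend without seams.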
#print(weight_func)
#print(weight_func.shape)
#sampling
print("========segmenting started========")
segments = sampling_data(train_X,window_len,2)
###### worked through up to this point
print("========segments shape========")
print(np.array(segments).shape)
weighted_segments = []
for segment in segments:
segment *= weight_func
weighted_segments.append(segment)
weighted_segments = np.array(weighted_segments)
print(weighted_segments.shape) # 3-D? (?, 10, 21)
[n,w,e] = weighted_segments.shape
k = w*e
weighted_segments = np.reshape(weighted_segments, (n, k))
print(weighted_segments.shape) # 2-D? (?, 10*21)
weighted_segments = weighted_segments.tolist()
#Clustering
print("========modeling(clustering) started========")
clusterer = KMeans(n_clusters=n_clusters)
clusterer.fit(weighted_segments)
#Reconstructing
print("========predict(reconstruct) started========")
reconstruction = reconstruct(test_X, weight_func, clusterer)
# Anomaly detection
print("========Checking anomaly started========")
error = reconstruction - test_X
mse = np.mean(error**2, axis=1)
print("========saving graph started========")
# plotting; save the figure straight to a file
'''plt.figure()
#n_plot_samples = 500
plt.plot(test_X[:,0],label="os_0") # blue
plt.plot(test_X[:,1],label="os_1") # orange
plt.plot(test_X[:,2],label="os_2") # green
plt.plot(test_X[:,3],label="os_9") #
plt.plot(test_X[:,4],label="stat_6") #
plt.plot(test_X[:,5],label="stat_7") #
plt.plot(test_X[:,6],label="stat_8") #
plt.plot(test_X[:,7],label="stat_19") #
plt.plot(test_X[:,8],label="stat_20") # '''
plt.figure()
ax11=plt.subplot(331)
ax12=plt.subplot(332)
ax13=plt.subplot(333)
ax21=plt.subplot(334)
ax22=plt.subplot(335)
ax23=plt.subplot(336)
ax31=plt.subplot(337)
ax32=plt.subplot(338)
ax33=plt.subplot(339)
ax11.plot(test_X[:,0],label="os_0") # blue
ax11.plot(reconstruction[:,0]) # orange
ax11.plot(error[:,0], ls=":") # green, dotted
ax12.plot(test_X[:,1],label="os_1")
ax12.plot(reconstruction[:,1])
ax12.plot(error[:,1], ls=":")
ax13.plot(test_X[:,2],label="os_2")
ax13.plot(reconstruction[:,2])
ax13.plot(error[:,2], ls=":")
ax21.plot(test_X[:,3],label="os_9")
ax21.plot(reconstruction[:,3])
ax21.plot(error[:,3], ls=":")
ax22.plot(test_X[:,4],label="stat_6")
ax22.plot(reconstruction[:,4])
ax22.plot(error[:,4], ls=":")
ax23.plot(test_X[:,5],label="stat_7")
ax23.plot(reconstruction[:,5])
ax23.plot(error[:,5], ls=":")
ax31.plot(test_X[:,6],label="stat_8")
ax31.plot(reconstruction[:,6])
ax31.plot(error[:,6], ls=":")
ax32.plot(test_X[:,7],label="stat_19")
ax32.plot(reconstruction[:,7])
ax32.plot(error[:,7], ls=":")
ax33.plot(test_X[:,8],label="stat_20")
ax33.plot(reconstruction[:,8])
ax33.plot(error[:,8], ls=":")
plt.xlabel("Time Index")
plt.savefig('STS_Kmeans_1.png')
credentials = GoogleCredentials.get_application_default()
service = discovery.build('storage', 'v1', credentials=credentials)
filename = 'STS_Kmeans_1.png'
bucket = 'adam-models/mga_apm_ora/STS_Kmeans'
body = {'name': 'graphs/STS_Kmeans_1.png'}
req = service.objects().insert(bucket=bucket, body=body, media_body=filename)
resp = req.execute()
plt.show()
if __name__ == '__main__':
# ---------------------------------------------
# command parsing from Google ML Engine Example
# ---------------------------------------------
parser = argparse.ArgumentParser()
# Input Arguments
parser.add_argument(
'--train-files',
help='GCS or local paths to training data',
nargs='+',
required=True
)
parser.add_argument(
'--num-epochs',
help="""\
Maximum number of training data epochs on which to train.
If both --max-steps and --num-epochs are specified,
the training job will run for --max-steps or --num-epochs,
whichever occurs first. If unspecified will run for --max-steps.\
""",
type=int,
)
parser.add_argument(
'--train-batch-size',
help='Batch size for training steps',
type=int,
default=40
)
parser.add_argument(
'--eval-batch-size',
help='Batch size for evaluation steps',
type=int,
default=40
)
# -------------------------------
# If evaluation file is prepared,
# change 'required' value
# -------------------------------
parser.add_argument(
'--eval-files',
help='GCS or local paths to evaluation data',
nargs='+',
required=False
)
# Training arguments
parser.add_argument(
'--embedding-size',
help='Number of embedding dimensions for categorical columns',
default=8,
type=int
)
parser.add_argument(
'--first-layer-size',
help='Number of nodes in the first layer of the DNN',
default=100,
type=int
)
parser.add_argument(
'--num-layers',
help='Number of layers in the DNN',
default=4,
type=int
)
parser.add_argument(
'--scale-factor',
help='How quickly should the size of the layers in the DNN decay',
default=0.7,
type=float
)
parser.add_argument(
'--job-dir',
help='GCS location to write checkpoints and export models',
required=True
)
# Argument to turn on all logging
parser.add_argument(
'--verbosity',
choices=[
'DEBUG',
'ERROR',
'FATAL',
'INFO',
'WARN'
],
default='INFO',
)
# Experiment arguments
parser.add_argument(
'--train-steps',
help="""\
Steps to run the training job for. If --num-epochs is not specified,
this must be. Otherwise the training job will run indefinitely.\
""",
type=int
)
parser.add_argument(
'--eval-steps',
help='Number of steps to run evaluation for at each checkpoint',
default=100,
type=int
)
parser.add_argument(
'--export-format',
help='The input format of the exported SavedModel binary',
choices=['JSON', 'CSV', 'EXAMPLE'],
default='JSON'
)
args = parser.parse_args()
# Set python level verbosity
tf.logging.set_verbosity(args.verbosity)
# Set C++ Graph Execution level verbosity
os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(
tf.logging.__dict__[args.verbosity] / 10)
# Run the training job
hparams=hparam.HParams(**args.__dict__)
run_experiment(hparams)
from lib.list import List
def partition_list(items, node):
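"""Return a new List containing every value smaller than node.data, then node itself,
then every value larger than node.data. Values equal to node.data (other than node) are
dropped, and at least one smaller value is assumed to exist."""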
lower = List()
upper = List()
current = items.head
while current:
if current.data < node.data:
lower.add(current.data)
elif current.data > node.data:
upper.add(current.data)
current = current.next
lower.tail.next = node
node.next = upper.head
lower.tail = upper.tail
return lower
items = List()
items.add(10)
items.add(4)
items.add(-2)
items.add(3)
items.add(95)
assert str(partition_list(items, items.find(3))) == '-2,3,10,4,95'
# encoding: UTF-8
from __future__ import absolute_import
import six
from json import dumps
from datetime import datetime
from time import mktime
from jinja2 import nodes
from jinja2.ext import Extension
from jinja2.exceptions import TemplateAssertionError
from jinja2.filters import do_mark_safe
from jinja2.utils import contextfunction
from tml import full_version
from tml.config import CONFIG
from tml.logger import get_logger
from tml.session_vars import get_current_context
__author__ = 'xepa4ep'
def ts():
return int(mktime(datetime.utcnow().timetuple()))
def _make_new_callable(fn):
def inner(*args, **kwargs):
return fn(*args, **kwargs)
return inner
def dummy_tr(label, data=None, description=None, options=None):
logger = get_logger()
logger.warning("You forget to install `tr(label, data=None, description=None, options=None)` helper for the following arguments: `label=%s`, `description=%s`", label, description)
return None, label, None
SYSTEM_TEMPLATES = {
'inline': """
{% if data.caller == "middleware" or not data.force_injection %}
<script>
(function() {
var script = window.document.createElement('script');
script.setAttribute('id', 'tml-agent');
script.setAttribute('type', 'application/javascript');
script.setAttribute('src', '{{ data.agent_host }}');
script.setAttribute('charset', 'UTF-8');
script.onload = function() {
Trex.init('{{ data.application_key }}', {{ data.agent_config|safe }});
};
window.document.getElementsByTagName('head')[0].appendChild(script);
})();
</script>
{% endif %}
""",
'language_selector': '<div data-tml-language-selector="{{ type }}" {{ opts }}></div>'
}
class TMLExtension(Extension):
# a set of names that trigger the extension.
tags = set(['trs', 'tr', 'tropts', 'tml_inline', 'tml_language_selector'])
def __init__(self, environment):
super(TMLExtension, self).__init__(environment)
self.environment.filters.update(trs=self.trs_filter)
self.environment.globals.update(
tr=dummy_tr,
translate_trs=self._translate_trs,
translate_tr=self._translate_tr
)
environment.extend(
install_tr_callables=self._install_callables
)
def _install_callables(self, **callables):
new_callables = {fn_key : _make_new_callable(fn) for fn_key, fn in six.iteritems(callables)}
self.environment.globals.update(new_callables)
def _fetch_tr(self):
return self.environment.globals['tr']
def parse(self, parser):
"""
{% trs "Hello" %}
{% tropts with source="" target_locale="" %}
{% trs user.name, description="Hello" source="" target_locale="" %}
...
{% endtropts %}
"""
token = next(parser.stream)
lineno = token.lineno
fn_name = 'parse_{}'.format(token.value)
if not hasattr(self, fn_name) or not callable(getattr(self, fn_name)):
raise Exception("`%s` method does not exist. Add it to your extension" % fn_name)
fn = getattr(self, fn_name)
output = fn(parser=parser, lineno=lineno)
return output
def parse_tr(self, parser, lineno):
node = nodes.Scope(lineno=lineno)
assignments = []
while parser.stream.current.type != 'block_end':
lineno = parser.stream.current.lineno
if assignments:
parser.stream.expect('comma')
target = parser.parse_assign_target(name_only=True)
parser.stream.expect('assign')
expr = parser.parse_expression()
assignments.append(nodes.Keyword(target.name, expr, lineno=lineno))
body = parser.parse_statements(('name:endtr',),
drop_needle=True)
return nodes.CallBlock(nodes.Call(
nodes.Name('translate_tr', 'load'), [], assignments, None, None),
[], [], body).set_lineno(lineno)
def parse_trs(self, parser, lineno):
lineno = lineno
args = [parser.parse_expression()]
variables = {}
while parser.stream.current.type != 'block_end':
parser.stream.expect('comma')
name = parser.stream.expect('name')
if name.value in variables:
parser.fail('translatable variable %r defined twice.' %
name.value, name.lineno,
exc=TemplateAssertionError)
if parser.stream.current.type == 'assign':
next(parser.stream)
variables[name.value] = var = parser.parse_expression()
else:
variables[name.value] = var = nodes.Name(name.value, 'load')
kwargs =[]
if 'description' in variables:
kwargs = [
nodes.Keyword('description', variables.get('description', ''))]
return nodes.Output([nodes.Call(nodes.Name('translate_trs', 'load'), args, kwargs, None, None)]).set_lineno(lineno)
def parse_tropts(self, parser, lineno):
"""
{% tropts source="index" %}
{% tr %} {% endtr %}
{% tr %} {% endtr %}
{% tr %} {% endtr %}
{% endtropts %}
"""
node = nodes.Scope(lineno=lineno)
assignments = []
while parser.stream.current.type != 'block_end':
lineno = parser.stream.current.lineno
if assignments:
parser.stream.expect('comma')
key = parser.parse_assign_target() # a=b (a is key)
parser.stream.expect('assign')
value = parser.parse_expression() # a=b (b is expression)
assignments.append(nodes.Keyword(key.name, value))
node.body = parser.parse_statements(('name:endtropts',), drop_needle=True)
for item in node.body:
if isinstance(item, (nodes.Call, nodes.CallBlock)) and item.call.node.name in ('_translate_trs', '_translate_tr'):
used_keys = set(arg.key for arg in item.call.args)
allowed_assignments = [assign for assign in assignments
if not assign.key in used_keys]
item.call.args += allowed_assignments
return node
def parse_tml_inline(self, parser, lineno):
caller="";
while parser.stream.current.type != 'block_end':
caller = parser.parse_expression().value
context = get_current_context()
agent_config = dict((k, v) for k, v in six.iteritems(CONFIG.get('agent', {})))
agent_host = agent_config.get('host', CONFIG.agent_host())
if agent_config.get('cache', None):
t = ts()
t -= (t % agent_config['cache'])
agent_host += "?ts=%s" % t
agent_config['locale'] = context.locale
agent_config['source'] = context.source
agent_config['css'] = context.application.css
agent_config['sdk'] = full_version()
languages = agent_config.setdefault('languages', [])
for language in context.application.languages:
languages.append({
'locale': language.locale,
'native_name': language.native_name,
'english_name': language.english_name,
'flag_url': language.flag_url})
data = {
'agent_config': dumps(agent_config),
'agent_host': agent_host,
'application_key': context.application.key,
'caller': caller,
'force_injection': agent_config.get('force_injection', False)
}
output = self.environment.from_string(SYSTEM_TEMPLATES['inline']).render(data=data)
return nodes.Output([nodes.Const(do_mark_safe(output))]).set_lineno(lineno)
def parse_tml_language_selector(self, parser, lineno):
args = parser.parse_expression()
variables = {}
while parser.stream.current.type != 'block_end':
parser.stream.expect('comma')
name = parser.stream.expect('name')
if name.value in variables:
parser.fail('translatable variable %r defined twice.' %
name.value, name.lineno,
exc=TemplateAssertionError)
if parser.stream.current.type == 'assign':
next(parser.stream)
variables[name.value] = var = parser.parse_expression()
else:
variables[name.value] = var = nodes.Name(name.value, 'load')
data = {
'type': args.value
}
if 'opts' in variables:
data['opts'] = variables.get('opts', '').value
else:
data['opts'] = ""
output = self.environment.from_string(SYSTEM_TEMPLATES['language_selector']).render(type=data['type'], opts=data['opts'])
return nodes.Output([nodes.Const(do_mark_safe(output))]).set_lineno(lineno)
def _translate_trs(self, value, description=None, **kwargs):
opts = self._filter_options(kwargs)
tr = self._fetch_tr()
_, value, _err = tr(value, description=description, options=opts)
return do_mark_safe(value)
def _translate_tr(self, **kwargs):
body = kwargs.pop('caller',)()
description = kwargs.pop('description', '')
options = kwargs.pop('options', {})
options.update(self._filter_options(kwargs))
tr = self._fetch_tr()
_, value, _err = tr(body, data=kwargs, description=description, options=options)
return do_mark_safe(value)
def _filter_options(self, options):
return {k: options[k] for k in
CONFIG['supported_tr_opts'] if k in options}
def trs_filter(self, value, *args):
argc = len(args)
data, description, options = {}, '', {}
if argc > 0:
description = args[0]
if argc > 1:
options = args[1]
# if argc > 2:
# data = args[2]
try:
tr = self._fetch_tr()
_, trans_value, _ = tr(
value, data=data, description=description, options=options)
return trans_value
except Exception as e:
get_logger().exception(e)
return value
from collections import namedtuple
import numpy as np
from ._base import Base
from ._common import ifunc
from ._cps import swegn96
from ._exception import DispersionError
__all__ = [
"RayleighEllipticity",
"Ellipticity",
]
RayleighEllipticity = namedtuple(
"RayleighEllipticity", ("period", "ellipticity", "mode")
)
class Ellipticity(Base):
def __init__(
self, thickness, velocity_p, velocity_s, density, algorithm="dunkin", dc=0.005,
):
"""
Ellipticity class (only Rayleigh-wave).
Parameters
----------
thickness : array_like
Layer thickness (in km).
velocity_p : array_like
Layer P-wave velocity (in km/s).
velocity_s : array_like
Layer S-wave velocity (in km/s).
density : array_like
Layer density (in g/cm3).
algorithm : str {'dunkin', 'fast-delta'}, optional, default 'dunkin'
Algorithm to use for computation of Rayleigh-wave dispersion:
- 'dunkin': Dunkin's matrix (adapted from surf96),
- 'fast-delta': fast delta matrix (after Buchen and Ben-Hador, 1996).
dc : scalar, optional, default 0.005
Phase velocity increment for root finding.
"""
super().__init__(thickness, velocity_p, velocity_s, density, algorithm, dc)
def __call__(self, t, mode=0):
"""
Compute Rayleigh-wave ellipticity for input period axis and mode.
Parameters
----------
t : array_like
Periods (in s).
mode : int, optional, default 0
Mode number (0 if fundamental).
Returns
-------
:class:`disba.RayleighEllipticity`
Rayleigh-wave ellipticity as a namedtuple (period, ellipticity, mode).
"""
ell = []
for i, tt in enumerate(t):
try:
eig = swegn96(
tt,
self._thickness,
self._velocity_p,
self._velocity_s,
self._density,
mode,
ifunc[self._algorithm]["rayleigh"],
self._dc,
)[:, :2]
ell.append(eig[0, 0] / eig[0, 1])
except DispersionError:
i -= 1
break
return RayleighEllipticity(t[: i + 1], np.array(ell), mode)
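# Example usage (an illustrative sketch; the layer model values are made up):
# import numpy as np
# velocity_model = np.array([
#     [10.0, 7.00, 3.50, 2.00],   # thickness (km), Vp (km/s), Vs (km/s), density (g/cm3)
#     [10.0, 6.80, 3.40, 2.00],
#     [0.0, 8.00, 4.50, 3.00],
# ])
# ell = Ellipticity(*velocity_model.T)
# rel = ell(np.linspace(1.0, 20.0, 50), mode=0)   # RayleighEllipticity(period, ellipticity, mode)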
"""Handles reports over scheduler data.
"""
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import print_function
import bz2
import datetime
import fnmatch
import io
import itertools
import logging
import time
import numpy as np
import pandas as pd
import six
from treadmill import scheduler
_LOGGER = logging.getLogger(__name__)
def servers(cell):
"""Prepare DataFrame with server information."""
# Hard-code order of columns
columns = [
'name', 'location', 'partition', 'traits',
'state', 'valid_until',
'mem', 'cpu', 'disk',
'mem_free', 'cpu_free', 'disk_free'
]
def _server_location(node):
"""Recursively yield the node's parents."""
while node:
yield node.name
node = node.parent
def _server_row(server):
"""Transform server into a DataFrame-ready dict."""
partition = list(server.labels)[0]
row = {
'name': server.name,
'location': '/'.join(reversed(list(
_server_location(server.parent)
))),
'partition': partition if partition else '-',
'traits': server.traits.traits,
'state': server.state.value,
'valid_until': server.valid_until,
'mem': server.init_capacity[0],
'cpu': server.init_capacity[1],
'disk': server.init_capacity[2],
'mem_free': server.free_capacity[0],
'cpu_free': server.free_capacity[1],
'disk_free': server.free_capacity[2]
}
return row
rows = [_server_row(server) for server in cell.members().values()]
frame = pd.DataFrame.from_dict(rows).astype({
'mem': 'int',
'cpu': 'int',
'disk': 'int',
'mem_free': 'int',
'cpu_free': 'int',
'disk_free': 'int'
})
if frame.empty:
frame = pd.DataFrame(columns=columns)
return frame[columns].sort_values(
by=['partition', 'name']).reset_index(drop=True)
def iterate_allocations(path, alloc):
"""Generate (path, alloc) tuples for the leaves of the allocation tree."""
if not alloc.sub_allocations:
return iter([('/'.join(path), alloc)])
else:
def _chain(acc, item):
"""Chains allocation iterators."""
name, suballoc = item
return itertools.chain(
acc,
iterate_allocations(path + [name], suballoc)
)
return six.moves.reduce(
_chain,
six.iteritems(alloc.sub_allocations),
[]
)
def allocations(cell):
"""Prepare DataFrame with allocation information."""
# Hard-code order of columns
columns = [
'partition', 'name', 'mem', 'cpu', 'disk',
'rank', 'rank_adj', 'traits', 'max_util'
]
def _alloc_row(partition, name, alloc):
"""Transform allocation into a DataFrame-ready dict."""
if not name:
name = 'root'
if not partition:
partition = '-'
return {
'partition': partition,
'name': name,
'mem': alloc.reserved[0],
'cpu': alloc.reserved[1],
'disk': alloc.reserved[2],
'rank': alloc.rank,
'rank_adj': alloc.rank_adjustment,
'traits': alloc.traits,
'max_util': alloc.max_utilization,
}
frame = pd.DataFrame.from_dict(
[
_alloc_row(label, name, alloc)
for label, partition in six.iteritems(cell.partitions)
for name, alloc in iterate_allocations(
[], partition.allocation
)
]
)
if frame.empty:
frame = pd.DataFrame(columns=columns)
return frame[columns].astype({
'mem': 'int',
'cpu': 'int',
'disk': 'int'
}).sort_values(by=['partition', 'name']).reset_index(drop=True)
def apps(cell):
"""Prepare DataFrame with app and queue information."""
# Hard-code order of columns
columns = [
'instance', 'allocation', 'rank', 'affinity', 'partition',
'identity_group', 'identity',
'order', 'lease', 'expires', 'data_retention',
'pending', 'server', 'util0', 'util1',
'mem', 'cpu', 'disk'
]
def _app_row(item):
"""Transform app into a DataFrame-ready dict."""
rank, util0, util1, pending, order, app = item
return {
'instance': app.name,
'affinity': app.affinity.name,
'allocation': app.allocation.name,
'rank': rank,
'partition': app.allocation.label or '-',
'util0': util0,
'util1': util1,
'pending': pending,
'order': order,
'identity_group': app.identity_group,
'identity': app.identity,
'mem': app.demand[0],
'cpu': app.demand[1],
'disk': app.demand[2],
'lease': app.lease,
'expires': app.placement_expiry,
'data_retention': app.data_retention_timeout,
'server': app.server
}
queue = []
for partition in cell.partitions.values():
allocation = partition.allocation
queue += allocation.utilization_queue(cell.size(allocation.label))
frame = pd.DataFrame.from_dict([_app_row(item) for item in queue]).fillna({
'expires': -1,
'identity': -1,
'data_retention': -1
})
if frame.empty:
frame = pd.DataFrame(columns=columns)
return frame[columns].astype({
'mem': 'int',
'cpu': 'int',
'disk': 'int',
'order': 'int',
'expires': 'int',
'data_retention': 'int',
'identity': 'int'
}).sort_values(by=['partition',
'rank',
'util0',
'util1',
'pending',
'order']).reset_index(drop=True)
def utilization(prev_utilization, apps_df):
"""Returns dataseries describing cell utilization.
prev_utilization - utilization dataframe before current.
apps_df - app queue dataframe.
"""
# Passed by ref.
row = apps_df.reset_index()
if row.empty:
return row
row['count'] = 1
row['name'] = row['instance'].apply(lambda x: x.split('#')[0])
row = row.groupby('name').agg({'cpu': np.sum,
'mem': np.sum,
'disk': np.sum,
'count': np.sum,
'util0': np.max,
'util1': np.max})
row = row.stack()
dt_now = datetime.datetime.fromtimestamp(time.time())
current = pd.DataFrame([row], index=pd.DatetimeIndex([dt_now]))
if prev_utilization is None:
return current
else:
return prev_utilization.append(current)
def reboots(cell):
"""Prepare dataframe with server reboot info."""
# Hard-code order of columns
columns = [
'server', 'valid-until', 'days-left',
]
def _reboot_row(server, now):
valid_until = datetime.datetime.fromtimestamp(server.valid_until)
return {
'server': server.name,
'valid-until': valid_until,
'days-left': (valid_until - now).days,
}
now = datetime.datetime.now()
frame = pd.DataFrame.from_dict([
_reboot_row(server, now)
for server in cell.members().values()
])
return frame[columns]
class ExplainVisitor(object):
"""Scheduler visitor"""
def __init__(self):
"""Initialize result"""
self.result = []
def add(self, alloc, entry, acc_demand):
"""Add new row to result"""
rank, util_before, util_after, _pending, _order, app = entry
alloc_name = ':'.join(alloc.path)
self.result.append({
'alloc': alloc_name,
'rank': rank,
'util0': util_before,
'util1': util_after,
'memory': int(acc_demand[0]),
'cpu': int(acc_demand[1]),
'disk': int(acc_demand[2]),
'name': app.name,
})
def finish(self):
"""Post-process result array"""
def _sort_order(entry):
return (entry['alloc'],
entry['util0'],
entry['util1'])
result = sorted(self.result, key=_sort_order)
# annotate with position in alloc queue
pos = 1
alloc = ''
for row in result:
if row['alloc'] != alloc:
alloc = row['alloc']
pos = 1
row['pos'] = pos
pos = pos + 1
self.result = result
def filter(self, pattern):
"""Filter result to rows with matching app instances"""
self.result = [row for row in self.result
if fnmatch.fnmatch(row['name'], pattern)]
def explain_queue(cell, partition, pattern=None):
"""Compute dataframe for explaining app queue"""
alloc = cell.partitions[partition].allocation
size = cell.size(partition)
visitor = ExplainVisitor()
queue = alloc.utilization_queue(size, visitor.add)
# we run the generator to completion, and this builds up the
# visitor as a side-effect
for _ in queue:
pass
visitor.finish()
if pattern:
visitor.filter(pattern)
return pd.DataFrame(visitor.result)
def _preorder_walk(node, _app=None):
"""Walk the tree in preorder"""
return itertools.chain(
[node],
*[_preorder_walk(child) for child in node.children]
)
def _servers_walk(cell, _app):
"""Return servers only
"""
return list(six.itervalues(cell.members()))
def _limited_walk(node, app):
"""Walk the tree like preorder, expand nodes iff placement is feasible."""
if node.check_app_constraints(app):
return itertools.chain(
[node],
*[_limited_walk(child, app) for child in node.children]
)
else:
return [node]
WALKS = {
'servers': _servers_walk,
'full': _preorder_walk,
'default': _limited_walk,
}
def explain_placement(cell, app, mode):
"""Explain placement for app"""
result = []
for node in WALKS[mode](cell, app):
is_server = False
if isinstance(node, scheduler.Server):
is_server = True
lifetime = node.check_app_lifetime(app)
else:
lifetime = True
capacity = node.free_capacity > app.demand
result.append({
'name': node.name,
'server': is_server,
'affinity': node.check_app_affinity_limit(app),
'traits': node.traits.has(app.traits),
'partition': app.allocation.label in node.labels,
'feasible': node.check_app_constraints(app),
'state': node.state == scheduler.State.up,
'lifetime': lifetime,
'memory': capacity[0],
'cpu': capacity[1],
'disk': capacity[2],
})
# Hard-code order of columns
columns = [
'partition', 'traits', 'affinity', 'state', 'lifetime',
'memory', 'cpu', 'disk', 'name', 'server'
]
return pd.DataFrame(result, columns=columns)
def serialize_dataframe(report, compressed=True):
"""Serialize a dataframe for storing.
The dataframe is serialized as CSV and, when `compressed` is true, compressed with bzip2.
"""
result = report.to_csv(index=False)
if compressed:
result = bz2.compress(result.encode())
return result
def deserialize_dataframe(report):
"""Deserialize a dataframe.
The dataframe is serialized as CSV and compressed with bzip2.
"""
try:
content = bz2.decompress(report)
except IOError:
content = report
return pd.read_csv(io.StringIO(content.decode()))
tabby_cat = "\tI'm tabbed in."
persian_cat = "I'm split\non a line."
backslash_cat = "I'm \\ a \\ cat."
fat_cat = '''
I'll do a list:
\t* Cat food
\t* Fishies
\t* Catnip\n\t* Grass
'''
print tabby_cat
print persian_cat
print backslash_cat
print fat_cat
# while True:
# for i in ["/","-", "|","\\","|"]:
# print "%s\r" % i,
escape = '\nAh! \nhow \nmany \nnew \nlines \nin \nthis \nlist: {}'
print escape.format('\n\tLoads!\n\tAnd some more')
from django.db import models
from django.urls import reverse
from tinymce.models import HTMLField
from django.contrib.contenttypes.fields import GenericRelation
from administrator.models import MenuItem
class Category(models.Model):
title = models.CharField(max_length=255, verbose_name='Название')
description = HTMLField(blank=True, null=True, verbose_name='Описание')
image = models.ImageField(upload_to='catalog/category', blank=True, null=True, verbose_name='Изображение')
is_promo = models.BooleanField(default=False, verbose_name='Это акция?')
is_home = models.BooleanField(default=False, verbose_name='Показывать на главной')
is_active = models.BooleanField(default=True, verbose_name='Активная')
slug = models.SlugField(blank=True, null=True, unique=True, verbose_name='Ссылка')
menu_items = GenericRelation(MenuItem)
def get_absolute_url(self):
return reverse('catalog:category', args=[str(self.slug)])
def __str__(self):
return self.title
class Meta:
ordering = ['pk']
verbose_name = 'Раздел'
verbose_name_plural = 'Разделы'
def product_image_path(instance, filename):
return f'catalog/products/{instance.category.slug}/{filename}'
class Product(models.Model):
title = models.CharField(max_length=255, verbose_name='Название')
category = models.ForeignKey(Category, related_name='products', on_delete=models.CASCADE, verbose_name='Раздел')
short_desc = models.TextField(blank=True, null=True, verbose_name='Короткое описание')
full_desc = HTMLField(blank=True, null=True, verbose_name='Полное описание')
image = models.ImageField(upload_to=product_image_path, blank=True, null=True, verbose_name='Изображение')
price = models.DecimalField(max_digits=15, decimal_places=2, default=0, verbose_name='Цена')
label = models.CharField(max_length=255, blank=True, null=True, verbose_name='Ярлык товара')
is_active = models.BooleanField(default=True, verbose_name='Активный')
position = models.PositiveIntegerField(default=0, blank=False, null=False, verbose_name='Порядок')
def __str__(self):
return self.title
class Meta:
ordering = ['position']
verbose_name = 'Товар'
verbose_name_plural = 'Товары'
from string import ascii_lowercase
from performance import (contains, contains_fast,
ordered_list_max, ordered_list_max_fast,
list_concat, list_concat_fast,
list_inserts, list_inserts_fast,
list_creation, list_creation_fast)
alist = list(range(1000000))
aset = set(alist)
listofstrings = list(ascii_lowercase) * 1000
def test_contains():
t1, res1 = contains(alist, 500)
t2, res2 = contains_fast(aset, 1000)
assert res1 == res2
assert t1 > t2
def test_ordered_max():
t1, res1 = ordered_list_max(alist)
t2, res2 = ordered_list_max_fast(alist)
assert res1 == res2
assert t1 > t2
def test_concat():
t1, res1 = list_concat(listofstrings)
t2, res2 = list_concat_fast(listofstrings)
assert res1 == res2
assert t1 > t2
def test_list_insert():
t1, res1 = list_inserts(10000)
t2, res2 = list_inserts_fast(10000)
assert list(res1) == list(res2)
assert t1 > t2
def test_list_creation():
t1, res1 = list_creation(10000)
t2, res2 = list_creation_fast(10000)
assert list(res1) == list(res2)
assert t1 > t2
# -*- coding: utf-8 -*-
'''
Creates a user in the database.
PYTHONPATH="../../../python" python3 createUser.py dni name lastname
No email or anything else additional.
'''
from model.registry import Registry
from model.connection import connection
from model.users import users
import systems
import logging
def generatePassword(con, uid):
''' Generates a password for the user if they do not already have one '''
passw = users.UserPasswordDAO.findByUserId(con, uid)
if len(passw) > 0:
#logging.debug('{} already has a password'.format(uid))
return 0
u = users.UserDAO.findById(con, uid)
up = users.UserPassword()
up.userId = uid
up.username = u.dni
up.password = '{}-autogenerado'.format(u.dni)
users.UserPasswordDAO.persist(con, up)
logging.debug('{} {} {}'.format(up.userId, up.username, up.password))
return 1
def createUser(con, dni, name, lastname, password=None):
u = users.UserDAO.findByDni(con, dni)
if u is not None:
logging.warning('User already exists')
logging.warning(u)
return
user = users.User()
user.name = name
user.lastname = lastname
user.dni = dni
uid = users.UserDAO.persist(con, user)
up = users.UserPassword()
up.userId = uid
up.username = dni
if password is None:
up.password = '{}-autogenerado'.format(dni)
else:
up.password = password
users.UserPasswordDAO.persist(con, up)
return uid
def showUserInfo(con, dni):
import pprint
u = users.UserDAO.findByDni(con, dni)
if u is None:
logging.info('That user does not exist in the database')
return
(uid, version) = u
us = users.UserDAO.findById(con, [uid])
u = us[0]
logging.info(pprint.pformat(u.__dict__))
ups = users.UserPasswordDAO.findByUserId(con, u.id)
for up in ups:
logging.info(pprint.pformat(up.__dict__))
emails = users.Mail.findByUserId(con, u.id)
for e in emails:
logging.info(pprint.pformat(e.__dict__))
if __name__ == '__main__':
import sys
dni = sys.argv[1]
name = sys.argv[2]
lastname = sys.argv[3]
assert dni is not None
assert name is not None
assert lastname is not None
import inject
#inject.configure()
reg = inject.instance(Registry)
conn = connection.Connection(reg.getRegistry('dcsys'))
con = conn.get()
try:
createUser(con, dni, name, lastname)
con.commit()
finally:
conn.put(con)
#!/usr/bin/env python
# _*_ coding: utf-8 _*_
# @Time : 2021/4/8 19:07
# @Author :'liuyu'
# @Version:V 0.1
# @File :
# @desc :
def clean(keys):
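"""Keep only the '-'-separated tokens of keys that contain at most one '%';
when a token does contain '%', the part before it must be all digits and
nothing may follow it."""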
res = []
for key in keys.split('-'):
ga = key.split('%')
if len(ga) > 2: continue
if len(ga) == 2:
if (not ga[0].isdigit()) or (len(ga[1]) > 0):
continue
res.append(key)
return res
from django.urls import path
from rest_framework.urlpatterns import format_suffix_patterns
from meeting import views
from django.conf.urls import include
urlpatterns = [
path('users/', views.UsersList.as_view()),
path('users/<int:pk>/', views.UsersDetail.as_view()),
path('meetings/', views.MeetingsList.as_view()),
path('meetings/<int:pk>/', views.MeetingsDetail.as_view()),
path('meetingRooms/', views.MeetingRoomsList.as_view()),
path('meetingRooms/<int:pk>/', views.MeetingRoomsDetail.as_view()),
]
urlpatterns += [
path('api-auth/', include('rest_framework.urls')),
]
urlpatterns = format_suffix_patterns(urlpatterns)
#!/usr/bin/env python3
# Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
from idlnode import *
def render(idl_node, indent_str=' '):
output = []
indent_stack = []
def begin_indent():
indent_stack.append(indent_str)
def end_indent():
indent_stack.pop()
def sort(nodes):
return sorted(nodes, key=lambda node: node.id)
def wln(node=None):
"""Writes the given node and adds a new line."""
w(node)
output.append('\n')
def wsp(node):
"""Writes the given node and adds a space if there was output."""
mark = len(output)
w(node)
if mark != len(output):
w(' ')
def w(node, list_separator=None):
"""Writes the given node.
Args:
node -- a string, IDLNode instance or a list of such.
list_separator -- if provided, and node is a list,
list_separator will be written between the list items.
"""
if node is None:
return
elif isinstance(node, str):
if output and output[-1].endswith('\n'):
# Auto-indent.
output.extend(indent_stack)
output.append(node)
elif isinstance(node, list):
for i in range(0, len(node)):
if i > 0:
w(list_separator)
w(node[i])
elif isinstance(node, IDLFile):
w(node.interfaces)
w(node.enums)
w(node.typeDefs)
elif isinstance(node, IDLModule):
wsp(node.annotations)
wsp(node.ext_attrs)
wln('module %s {' % node.id)
begin_indent()
w(node.interfaces)
w(node.enums)
w(node.typeDefs)
end_indent()
wln('};')
elif isinstance(node, IDLEnum):
w('enum %s {}' % node.id)
# TODO(antonm): emit values as well.
elif isinstance(node, IDLInterface):
if node.annotations:
wln(node.annotations)
if node.ext_attrs:
wln(node.ext_attrs)
w('interface %s' % node.id)
begin_indent()
begin_indent()
if node.parents:
wln(' :')
w(node.parents, ',\n')
wln(' {')
end_indent()
if node.constants:
wln()
wln('/* Constants */')
w(sort(node.constants))
if node.attributes:
wln()
wln('/* Attributes */')
w(sort(node.attributes))
if node.operations:
wln()
wln('/* Operations */')
w(sort(node.operations))
end_indent()
wln('};')
elif isinstance(node, IDLParentInterface):
wsp(node.annotations)
w(node.type.id)
elif isinstance(node, IDLAnnotations):
sep = ''
for (name, annotation) in sorted(node.items()):
w(sep)
sep = ' '
if annotation and len(annotation):
subRes = []
for (argName, argValue) in sorted(annotation.items()):
if argValue is None:
subRes.append(argName)
else:
subRes.append('%s=%s' % (argName, argValue))
w('@%s(%s)' % (name, ', '.join(subRes)))
else:
w('@%s' % name)
elif isinstance(node, IDLExtAttrs):
if len(node):
w('[')
i = 0
for k in sorted(node):
if i > 0:
w(', ')
w(k)
v = node[k]
if v is not None:
if isinstance(v, IDLExtAttrFunctionValue):
if v.id:
w('=')
w(v)
elif isinstance(v, list):
assert k == 'Constructor'
w(v[0])
for c in v[1:]:
w(', ')
w(k)
w(c)
else:
w('=%s' % v.__str__())
i += 1
w(']')
elif isinstance(node, IDLExtAttrFunctionValue):
if node.id:
w(node.id)
w('(')
w(node.arguments, ', ')
w(')')
elif isinstance(node, IDLAttribute):
wsp(node.annotations)
wsp(node.ext_attrs)
if node.is_read_only:
w('readonly ')
w('attribute ')
w(node.type.id)
if (node.type.nullable):
w('?')
w(' ')
w(node.id)
wln(';')
elif isinstance(node, IDLConstant):
wsp(node.annotations)
wsp(node.ext_attrs)
wln('const %s %s = %s;' % (node.type.id, node.id, node.value))
elif isinstance(node, IDLOperation):
wsp(node.annotations)
wsp(node.ext_attrs)
if node.is_static:
w('static ')
if node.specials:
w(node.specials, ' ')
w(' ')
w(node.type.id)
if (node.type.nullable):
w('?')
w(' ')
w(node.id)
w('(')
w(node.arguments, ', ')
w(')')
wln(';')
elif isinstance(node, IDLArgument):
wsp(node.ext_attrs)
if (node.optional):
w('optional ')
w(node.type.id)
if node.type.nullable:
w('?')
w(' %s' % node.id)
else:
raise TypeError("Expected str or IDLNode but %s found" % type(node))
w(idl_node)
return ''.join(output)
def foursome(numbers):
sum = 0
for i in range(len(numbers)):
sum += numbers[i]
return sum
def main():
numbers = [5, 10, 15, 20, 25]
print(foursome(numbers))
if __name__ == "__main__":
main()
from Claket import db
from sqlalchemy import ForeignKey
from sqlalchemy.orm import relationship
class TabelaPlano(db.Model):
__tablename__ = 'Plano'
id = db.Column(db.INT(), primary_key=True, nullable=False)
preco= db.Column(db.FLOAT(), nullable=False)
nome= db.Column(db.VARCHAR(45), nullable=False)
img= db.Column(db.VARCHAR(45), nullable=False)
qtd_total_roteiros= db.Column(db.INT(), nullable=False)
def __init__(self, id, preco, nome, img, qtd_total_roteiros):
self.id= id
self.preco=preco
self.nome=nome
self.img=img
self.qtd_total_roteiros=qtd_total_roteiros
class TabelaUsuario(db.Model):
__tablename__ = 'Usuario'
cpf = db.Column(db.VARCHAR(45), primary_key=True, nullable=False)
email = db.Column(db.VARCHAR(45), nullable=False)
senha = db.Column(db.VARCHAR(45), nullable=False)
nome = db.Column(db.VARCHAR(45), nullable=False)
qtd_roteiros_avaliados = db.Column(db.INT())
data_aquisicao_plano = db.Column(db.DATE(), nullable = False)
id_plano = db.Column(db.INT(), ForeignKey('Plano.id'), nullable = False)
fk_id_plano = relationship(TabelaPlano, foreign_keys=[id_plano])
def __init__(self, cpf, email, senha, nome, qtd_roteiros_avaliados, qtd_total_roteiros, data_aquisicao_plano, id_plano):
self.cpf = cpf
self.email = email
self.senha = senha
self.nome = nome
self.qtd_roteiros_avaliados = qtd_roteiros_avaliados
self.data_aquisicao_plano = data_aquisicao_plano
self.id_plano = id_plano
class TabelaGenero(db.Model):
__tablename__ = 'Genero'
id = db.Column(db.INT(), primary_key = True)
genero = db.Column(db.VARCHAR(45), nullable = False)
def __init__(self, id, genero):
self.id = id
self.genero = genero
class TabelaPalavraChave(db.Model):
__tablename__ = 'PalavraChave'
id = db.Column(db.INT(), primary_key = True , nullable = False)
palavra = db.Column(db.VARCHAR(45), nullable = False)
def __init__(self, id, palavra):
self.id = id
self.palavra = palavra
class TabelaRoteiro(db.Model):
__tablename__ = 'Roteiro'
id = db.Column(db.INT(), primary_key = True)
titulo = db.Column(db.VARCHAR(45), nullable = False)
aceitacao = db.Column(db.VARCHAR(45), nullable = True)
id_usuario = db.Column(db.VARCHAR(45), ForeignKey('Usuario.cpf'), nullable = False)
data_avaliacao= db.Column(db.DATE(), nullable = False)
id_Usuario_Roteiro = relationship(TabelaUsuario, foreign_keys = [id_usuario])
def __init__(self, id, titulo, aceitacao, id_usuario, data_avaliacao):
self.id = id
self.titulo = titulo
self.aceitacao = aceitacao
self.id_usuario = id_usuario
self.data_avaliacao = data_avaliacao
class TabelaRoteiroPalavraChave(db.Model):
__tablename__ = 'RoteiroPalavraChave'
id_roteiro = db.Column(db.INT(), ForeignKey('Roteiro.id'), nullable=False, primary_key=True)
id_palavra_chave = db.Column(db.INT(), ForeignKey('PalavraChave.id'), nullable=False, primary_key=True)
id_Roteiro = relationship(TabelaRoteiro, foreign_keys = [id_roteiro])
id_Palavra_Chave = relationship(TabelaPalavraChave, foreign_keys = [id_palavra_chave])
def __init__(self, id_roteiro, id_palavra_chave):
self.id_roteiro = id_roteiro
self.id_palavra_chave = id_palavra_chave
class TabelaRoteiroGenero(db.Model):
__tablename__ = 'RoteiroGenero'
id_roteiro = db.Column(db.INT(), ForeignKey('Roteiro.id'), nullable = False, primary_key = True)
id_genero = db.Column(db.INT(), ForeignKey('Genero.id'), nullable = False, primary_key = True)
id_Roteiro = relationship(TabelaRoteiro, foreign_keys=[id_roteiro])
id_Genero = relationship(TabelaGenero, foreign_keys=[id_genero])
def __init__(self, id_roteiro, id_genero):
self.id_roteiro = id_roteiro
self.id_genero = id_genero
class TabelaSentimento(db.Model):
__tablename__ = 'Sentimento'
id = db.Column(db.INT(), primary_key = True, nullable = False)
sentimento = db.Column(db.VARCHAR(45), nullable = False)
def __init__(self, id, sentimento):
self.id = id
self.sentimento = sentimento
class TabelaSentimentoPalavraChave(db.Model):
__tablename__ = 'SentimentoPalavraChave'
id_sentimento = db.Column(db.INT(), ForeignKey('Sentimento.id'), primary_key = True, nullable = False)
id_palavra_chave = db.Column(db.INT(), ForeignKey('PalavraChave.id'), primary_key = True, nullable = False)
data = db.Column(db.DATE(), nullable = False)
qnt_de_tweets = db.Column(db.INT(), nullable= False)
id_Sentimento = relationship(TabelaSentimento, foreign_keys = [id_sentimento])
id_Palavrachave = relationship(TabelaPalavraChave, foreign_keys = [id_palavra_chave])
def __init__(self, id_sentimento, id_palavra_chave,data,qnt_de_tweets):
self.id_sentimento = id_sentimento
self.id_palavra_chave = id_palavra_chave
self.data = data
self.qnt_de_tweets = qnt_de_tweets
print("---Basic Lambda---")
answer = (lambda x:x * x)
print(answer(3))
# With keyword arguments the order doesn't matter, since each value is bound to its parameter by name
math_operation = (lambda a, b, c, d: (a + b) / (c - d))
print(math_operation(4, 3, 2, 1))
print(math_operation(d=1, a=4, c=2, b=3))
print("\n---Map Lambda---")
numerals = [1, 2, 3, 4, 5]
squared = list(map(lambda x: x ** 2, numerals))
cubed = list(map(lambda x: x ** 3, numerals))
print("Squares: ", squared)
print("Cubes: ", cubed)
print("\n---Filter---")
numerals_range = range(-10, 10)
negative = list(filter(lambda x: x < 0, numerals_range))
print(negative)
print("\n---Reduce---")
from functools import reduce
numerals = [1, 2, 3, 4, 5]
result = reduce(lambda x, y : x * y, numerals)
print(result)
# reduce is equivalent to the loop below
result = 1
for iteration in numerals:
result = result * iteration
print(result)
"""
Test whether mtcnn_freezed_model.pb is usable
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from scipy import misc
import tensorflow as tf
import numpy as np
import sys
import os
import detect_face
import cv2
from tensorflow.python.framework.graph_util import convert_variables_to_constants
img_size=40
def prewhiten(x):
mean = np.mean(x)
std = np.std(x)
std_adj = np.maximum(std, 1.0/np.sqrt(x.size))
y = np.multiply(np.subtract(x, mean), 1/std_adj)
return y
def loadImage(path):
img=misc.imread(path)
img=misc.imresize(img,(img_size,img_size),interp='bilinear')
return img
#return prewhiten(img)
if __name__=="__main__":
#load pb
with open("mtcnn_freezed_model.pb","rb") as f:
#(1) load model from protocol buffer
sess=tf.Session()
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def)
#(2)save tensorboard
tf.summary.FileWriter("./tboard",sess.graph)
#(3) PNET
minsize = 38 # minimum size of face
threshold = [ 0.6, 0.7, 0.7 ] # thresholds for the three stages
factor = 0 # scale factor
p_t_input =tf.get_default_graph().get_tensor_by_name("import/pnet/input:0")
p_t_prob =tf.get_default_graph().get_tensor_by_name("import/pnet/prob1:0")
p_t_bbr =tf.get_default_graph().get_tensor_by_name("import/pnet/conv4-2/BiasAdd:0")
img=[misc.imread("timg.jpg",mode="RGB")]
p_prob,p_bbr=sess.run((p_t_prob,p_t_bbr),feed_dict={p_t_input:img})
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
@author: hogelog
'''
from setuptools import setup
setup(name="kestrel-cli",
version="0.0.6",
description="kestrel command-line interface",
license="MIT",
author="hogelog",
author_email="hogelog@hogel.org",
url="http://github.com/hogelog/kestrel-cli",
install_requires=["pykestrel"],
package_dir={"kestrelcli": "kestrelcli"},
packages=["kestrelcli"],
entry_points={
"console_scripts": {
"kestrel-cli = kestrelcli.cli:main"
}
},
keywords=["kestrel cli commandline"],
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: Console",
"Topic :: Utilities"
])
from django import forms
from .models import Rate
from account.models import MyUser
class PostRateForm(forms.ModelForm):
class Meta:
model = Rate
fields = ('account',
'liner',
'pol',
'pod',
'buying20',
'selling20',
'buying40',
'selling40',
'buying4H',
'selling4H',
'loadingFT',
'dischargingFT',
'offeredDate',
'effectiveDate',
'remark',
'deleted',
)
class PostSearchForm(forms.ModelForm):
inputperson = forms.ModelChoiceField(queryset = MyUser.objects.all())
class Meta:
model = Rate
fields = ('account',
'liner',
'pol',
'pod',
)
from math import sqrt
x1=float(input("x1= "))
x2=float(input("x2= "))
x3=x2-x1
if x3<=(-0.1):
print(x3*(-1))
else:
print(x3)
# coding: utf-8
# 1
"""
Implement a context manager just like open,
but with a check for the file's existence,
i.e. if the file does not exist the construct must not crash with an error:
with MyOpen('wrong_file_path') as fh:
fh.readlines()
"""
import os
class MyOpen(object):
def __init__(self, file_name, mode='r'):
self.file, self.mode = file_name, mode
def __enter__(self):
# create file if there is no one
if not os.path.isfile(self.file):
fh = open(self.file, 'w')
fh.close()
self.fh = open(self.file, self.mode)
return self.fh
def __exit__(self, type, value, traceback):
self.fh.close()
# JTSK-350112
# deadoralive.py
# Taiyr Begeyev
# t.begeyev@jacobs-university.de
import random
class Card(object):
""" A card object with a suit and rank."""
RANKS = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13)
SUITS = ('Spades', 'Diamonds', 'Hearts', 'Clubs')
def __init__(self, rank, suit):
"""Creates a card with the given rank and suit."""
self.rank = rank
self.suit = suit
def __str__(self):
"""Returns the string representation of a card."""
if self.rank == 1:
rank = 'Ace'
elif self.rank == 11:
rank = 'Jack'
elif self.rank == 12:
rank = 'Queen'
elif self.rank == 13:
rank = 'King'
else:
rank = self.rank
return str(rank) + ' of ' + self.suit
class Deck(object):
""" A deck containing 52 cards."""
def __init__(self):
"""Creates a full deck of cards."""
self._cards = []
for suit in Card.SUITS:
for rank in Card.RANKS:
c = Card(rank, suit)
self._cards.append(c)
def shuffle(self):
"""Shuffles the cards."""
random.shuffle(self._cards)
def deal(self):
"""Removes and returns the top card or None
if the deck is empty."""
if len(self) == 0:
return None
else:
return self._cards.pop(0)
def __len__(self):
"""Returns the number of cards left in the deck."""
return len(self._cards)
def __str__(self):
"""Returns the string representation of a deck."""
result = ''
for c in self._cards:
result = result + str(c) + '\n'
return result
class Player(object):
"""This class represents a player in
a dead or alive game."""
def __init__(self, cards):
self._cards = cards
def __str__(self):
"""Returns string rep of cards and points."""
result = ", ".join(map(str, self._cards))
result += "\n " + str(self.getPoints()) + " points"
return result
def hit(self, card):
self._cards.append(card)
def getCard(self):
return self._cards.pop()
def getPoints(self):
"""Returns the number of points in the hand."""
count = 0
for card in self._cards:
if card.rank > 9:
count += 10
elif card.rank == 1:
count += 11
else:
count += card.rank
# Deduct 10 if Ace is available and needed as 1
for card in self._cards:
if count <= 21:
break
elif card.rank == 1:
count -= 10
return count
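# For example, a hand of Ace, 6, 9 first counts 11 + 6 + 9 = 26; since that
# exceeds 21, the Ace is demoted to 1 and getPoints() returns 16.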
def hasBlackjack(self):
"""Dealt 21 or not."""
return len(self._cards) == 2 and self.getPoints() == 21
class Dealer(Player):
"""Like a Player, but with some restrictions."""
def __init__(self, cards):
"""Initial state: show one card only."""
Player.__init__(self, cards)
self._showOneCard = True
def __str__(self):
"""Return just one card if not hit yet."""
if self._showOneCard:
return str(self._cards[0])
else:
return Player.__str__(self)
def hit(self, deck):
"""Add cards while points < 17,
then allow all to be shown."""
self._showOneCard = False
while self.getPoints() < 17:
self._cards.append(deck.deal())
class DeadOrAlive(object):
def __init__(self):
self._deck=Deck()
self._deck.shuffle()
self._player = Player([])
self._dealer = Player([])
def play(self):
cnt1 = 0
cnt2 = 0
for i in range(52):
choice = input("Do you want to play [y/n]?: ")
if choice == 'Y' or choice == 'y':
self._player.hit(self._deck.deal())
self._dealer.hit(self._deck.deal())
card1 = self._player.getCard()
card1a = card1.rank
card2 = self._dealer.getCard()
card2a = card2.rank
print("Player 1:\n",card1a)
print("Player 2:\n",card2a)
if card1a > card2a:
print("Player 1 won")
cnt1 += 1
elif card2a > card1a:
print("Player 2 won")
cnt2 += 1
else:
if card1.suit == "Spades":
print("Player 2 won with", card2.suit)
cnt2 += 1
elif card1.suit == "Diamonds" and (card2.suit=="Hearts"
or card2.suit=="Clubs"):
print("Player 2 won with", card2.suit)
cnt2 += 1
elif card1.suit == "Hearts" and card2.suit=="Clubs":
print("Player 2 won with", card2.suit)
cnt2 += 1
elif card1.suit == card2.suit:
print("There is a tie")
else:
print("Player 1 won with", card1.suit)
cnt1 += 1
if i == 25:
print("\nPlayer 1:", cnt1, "wins")
print("Player 2:", cnt2, "wins")
else:
print("Player 1: ", cnt1, "wins")
print("Player 2: ", cnt2 ,"wins")
break
class Blackjack(object):
def __init__(self):
self._deck = Deck()
self._deck.shuffle()
self._player = Player([self._deck.deal(), self._deck.deal()])
self._dealer = Dealer([self._deck.deal(), self._deck.deal()])
def play(self):
print("Player:\n", self._player)
print("Dealer:\n", self._dealer)
while True:
choice = input("Do you want a hit? [y/n]: ")
if choice in ("Y", "y"):
self._player.hit(self._deck.deal())
points = self._player.getPoints()
print("Player:\n", self._player)
if points >= 21:
break
else:
break
playerPoints = self._player.getPoints()
if playerPoints > 21:
print("You bust and lose")
else:
self._dealer.hit(self._deck)
print("Dealer:\n", self._dealer)
dealerPoints = self._dealer.getPoints()
if dealerPoints > 21:
print("Dealer busts and you win")
elif dealerPoints > playerPoints:
print("Dealer wins")
elif dealerPoints < playerPoints and playerPoints <= 21:
print("You win")
elif dealerPoints == playerPoints:
if self._player.hasBlackjack() and not self._dealer.hasBlackjack():
print("You win")
elif not self._player.hasBlackjack() and self._dealer.hasBlackjack():
print("Dealer wins")
else:
print("There is a tie")
def main():
game = DeadOrAlive()
game.play()
main()
MIN_LENGTH = 4
print("Please enter a valid password, with a length no less than {}".format(MIN_LENGTH))
password = input("> ")
while len(password) < MIN_LENGTH:
print("Invalid password length")
print("Please enter a valid password, with a length no less than {}".format(MIN_LENGTH))
password = input("> ")
print("Your password has been approved:")
for i in range(len(password)):
print("*", end=' ')
print()
def unordered_search(givenList, value):
for index in range(len(givenList)):
if value == givenList[index]:
return index
return None
def ordered_search(givenList, value):
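"""Linear search over a list sorted in ascending order; stops early once a
value larger than the target is reached."""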
for index in range(len(givenList)):
if value == givenList[index]:
return index
elif givenList[index] > value:
return None
return None
"""
Postfix notation
https://www.acmicpc.net/problem/1918
The first line contains an infix expression. Its operands are uppercase letters, each appearing exactly once in the expression.
Expressions where - comes first (like -A+B) or where * is omitted (like AB) are not given.
The expression consists only of uppercase letters and +, -, *, /, (, ), and its length does not exceed 100.
"""
"""
Approach:
Operator precedence matters:
( )  <  + -  <  * /
 0       1       2
When ) appears, keep popping until ( shows up.
"""
data = input()
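# A minimal sketch (not part of the original submission) of the stack-based
# infix-to-postfix conversion described in the notes above.
priority = {'(': 0, '+': 1, '-': 1, '*': 2, '/': 2}
stack = []
result = []
for ch in data:
    if ch.isalpha():                # operands go straight to the output
        result.append(ch)
    elif ch == '(':
        stack.append(ch)
    elif ch == ')':                 # pop until the matching '('
        while stack and stack[-1] != '(':
            result.append(stack.pop())
        stack.pop()                 # discard the '('
    else:                           # operator: pop operators of equal or higher priority
        while stack and priority[stack[-1]] >= priority[ch]:
            result.append(stack.pop())
        stack.append(ch)
while stack:
    result.append(stack.pop())
print(''.join(result))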
# This program uses turtle graphics and nested loops to draw a snowflake.
import turtle
win = turtle.Screen()
t = turtle.Turtle()
# set pen size and color
t.pensize(10)
t.pencolor("purple")
# Using nested loops, draw a snowflake
for i in range(6):
# Draw one hexagon; each turn is 360 / 6 = 60 degrees
for j in range(6):
t.forward(130)
t.left(60)
# Turn before drawing the next hexagon of the snowflake
t.left(60)
# -*- coding: utf-8 -*-
""" Administration forms used to edit news """
from app.forms import custom_validators, CustomGrid, CustomFieldSet, \
create_date_field
from app.models import News
from web import config
import datetime
# Patterns & formats used by the validators
DT_FORMAT = "%d/%m/%Y"
# Lambda methods used to enrich the fields with labels & validators
FORMATTED_DT = lambda field: field.label(u"Date").validate(custom_validators.dt_validator(DT_FORMAT))
NEWS = lambda field: field.label(u"News")
class EditNewsGrid(CustomGrid):
""" Administration grid used to edit news """
def __init__(self):
# Grid initialization
super(EditNewsGrid, self).__init__(News, News.all())
# Creation of a customized date field to edit the news' date
self.append(create_date_field("formatted_news_dt", "news_dt", DT_FORMAT))
# Grid configuration
inc = [FORMATTED_DT(self.formatted_news_dt), NEWS(self.news)]
self.configure(include=inc)
def post_sync(self):
# Parses the entered date and updates the model
self.model.news_dt = datetime.datetime.strptime(self.formatted_news_dt.value, DT_FORMAT).date()
class NewNewsFieldSet(CustomFieldSet):
""" Administration form used to create news """
def __init__(self):
# FieldSet initialization
super(NewNewsFieldSet, self).__init__(News)
# Creation of a customized date field to edit the news' date
self.append(create_date_field("formatted_news_dt", "news_dt", DT_FORMAT))
# FieldSet configuration
inc = [FORMATTED_DT(self.formatted_news_dt), NEWS(self.news)]
self.configure(include=inc)
def post_sync(self):
# Parses the entered date and updates the model
self.model.news_dt = datetime.datetime.strptime(self.formatted_news_dt.value, DT_FORMAT).date()
# Appends the news to the session
config.orm.add(self.model)
|
import os
import traceback
import subprocess
import tempfile
import dj_database_url
from xml.etree import ElementTree
import re
import logging
from xml.dom import minidom
from django.views.generic import View
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse,HttpResponseBadRequest,HttpResponseServerError
from django.template import Context, Template
from tablemanager.models import ForeignTable
# Create your views here.
logger = logging.getLogger(__name__)
class VRTFileView(View):
"""
Process vrt file
"""
http_method_names = ['post']
fdw_dict = dj_database_url.parse(settings.FDW_URL)
_field_re = re.compile("[ \t]*(?P<type>[a-zA-Z0-9]+)[ \t]*(\([ \t]*(?P<width>[0-9]+)\.(?P<precision>[0-9]+)\))?[ \t]*")
_datasource_info_re = re.compile("[(\n)|(\r\n)](?P<key>[a-zA-Z0-9_\-][a-zA-Z0-9_\- ]*[a-zA-Z0-9_\-]?)[ \t]*:(?P<value>[^\r\n]*([(\r\n)|(\n)](([ \t]+[^\r\n]*)|(GEOGCS[^\r\n]*)))*)")
@method_decorator(login_required)
@method_decorator(csrf_exempt)
def dispatch(self, *args, **kwargs):
return super(VRTFileView, self).dispatch(*args, **kwargs)
def post(self,request,*args,**kwargs):
"""
update vrt file
"""
action = request.POST.get('action')
try:
if not action:
return HttpResponseBadRequest(reason="Empty action.")
elif action == "insert_fields":
return self._insert_fields(request)
else:
return HttpResponseBadRequest(reason="Unrecoginzed action({0}).".format(action))
except Exception as ex:
logger.error(traceback.format_exc())
            return HttpResponseServerError(reason="{0}:{1}".format(type(ex), str(ex)))
def _get_datasource_info(self,vrt):
"""
Return datasource info as a list of (key, value) items.
"""
vrt_file = tempfile.NamedTemporaryFile()
try:
vrt_file.write(vrt)
vrt_file.flush()
output = subprocess.check_output(["ogrinfo", "-ro", "-al" , "-so", vrt_file.name], stderr=subprocess.STDOUT)
if output.find("ERROR") > -1:
logger.error(output)
raise Exception(l)
else:
if output[0:1] != "\n" and output[0:2] != "\r\n":
output = "\n" + output
result = self._datasource_info_re.findall(output)
return [(item[0],item[1]) for item in result]
finally:
vrt_file.close()
def _xmltostring(self,element):
"""
        Return a pretty-formatted xml string.
"""
new_vrt = ElementTree.tostring(element,"UTF-8")
root = minidom.parseString(new_vrt)
new_vrt = root.toprettyxml(indent=" ")
return "\n".join([line for line in new_vrt.splitlines() if line.strip()])
def _insert_fields(self,request):
"""
Insert all fields of data source into vrt file.
        If some fields are already in the vrt file, those fields are preserved
        and only the missing ones are inserted.
"""
#retrieve the parameters from request
vrt = request.POST.get('vrt')
if not vrt:
return HttpResponseBadRequest(reason="Empty vrt.")
name = request.POST.get('name')
if not name:
return HttpResponseBadRequest(reason="Missing input name.")
foreign_table_id = request.POST.get('foreign_table')
if foreign_table_id:
try:
foreign_table_id = int(foreign_table_id)
except:
return HttpResponseBadRequest(reason="Foreign table identity is not integer.")
        foreign_table = None
        d = {'name': name}
        # look the table up only when an id was actually supplied
        if foreign_table_id:
try:
foreign_table = ForeignTable.objects.get(pk = foreign_table_id)
except:
return HttpResponseBadRequest(reason="Foreign table does not exist")
d['name'] = foreign_table.name
d.update(fdw_dict)
#instantiate vrt template
vrt = Template(vrt).render(Context({"self": d}))
root = None
try:
root = ElementTree.fromstring(vrt)
except:
return HttpResponseBadRequest("Invalid xml format.")
layer = list(root)[0]
#find the first non OGRVRTWarpedLayer layer
while layer.tag == "OGRVRTWarpedLayer":
layer = layer.find("OGRVRTLayer") or layer.find("OGRVRTUnionLayer") or layer.find("OGRVRTWarpedLayer")
union_layer = None
if layer.tag == "OGRVRTUnionLayer":
            # currently only unions of similar layers sharing the same table structure
            # are supported; all fields are configured in the first layer
union_layer = layer
layer = list(union_layer)[0]
while layer.tag == "OGRVRTWarpedLayer":
layer = layer.find("OGRVRTLayer") or layer.find("OGRVRTUnionLayer") or layer.find("OGRVRTWarpedLayer")
            # a union layer that includes another union layer is currently not supported
            if layer.tag == "OGRVRTUnionLayer":
                return HttpResponseBadRequest(reason="A union layer that includes another union layer is not supported.")
field_childs = layer.findall("Field") or []
#remove fields first
for f in field_childs:
layer.remove(f)
if union_layer is not None:
#remove all fields from union layer
for f in union_layer.findall("Field") or []:
union_layer.remove(f)
#remove all fields from included layers
for l in list(union_layer):
while l.tag == "OGRVRTWarpedLayer":
l = layer.find("OGRVRTLayer") or layer.find("OGRVRTUnionLayer") or layer.find("OGRVRTWarpedLayer")
#currently,only does not support union layer include another union layer .
if l.tag == "OGRVRTUnionLayer":
return HttpResponseBadRequest(reason="Does not support union layer includes another union layer.")
for f in l.findall("Field") or []:
l.remove(f)
            # replace any existing field strategy on the union layer
            field_strategy = union_layer.find("FieldStrategy")
            if field_strategy is not None:
                union_layer.remove(field_strategy)
            # use the first layer's field set for the union layer
            field_strategy = ElementTree.Element("FieldStrategy")
            field_strategy.text = "FirstLayer"
union_layer.append(field_strategy)
vrt = self._xmltostring(root)
#get data source information.
info = None
try:
info = self._get_datasource_info(vrt)
except Exception as ex:
return HttpResponseBadRequest(reason="{0}:{1}".format(type(ex),ex.message))
fields = []
for k,v in info:
if k in ("INFO","Layer name","Geometry","Metadata","Feature Count","Extent","Layer SRS WKT"):
continue
if k.find(" ") >= 0:
#include a emptry, can't be a column
continue
m = self._field_re.search(v)
if m:
#convert the column name to lower case
fields.append((k.lower(),m.group('type'),m.group('width'),m.group('precision')))
#convert the column name into lower case
for f in field_childs:
f.set('name',f.get('name').lower())
field_child_dict = dict(zip([f.get('name') for f in field_childs],field_childs))
        # re-add all the fields
element_attrs = {}
for f in fields:
if f[0] in field_child_dict:
layer.append(field_child_dict[f[0]])
else:
element_attrs['name'] = f[0]
element_attrs['type'] = f[1]
if f[2] and f[2] != "0":
element_attrs['width'] = f[2]
elif 'width' in element_attrs:
del element_attrs['width']
if f[3] and f[3] != "0":
element_attrs['precision'] = f[3]
elif 'precision' in element_attrs:
del element_attrs['precision']
layer.append(ElementTree.Element("Field",attrib=element_attrs))
return HttpResponse(self._xmltostring(root), content_type="text/plain")
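# Illustrative client call (hypothetical URL routing; the urlconf is not shown
# in this module):
#
#   import requests
#   resp = requests.post("http://localhost:8000/vrtfile/", data={
#       "action": "insert_fields",
#       "name": "my_layer",
#       "vrt": "<OGRVRTDataSource>...</OGRVRTDataSource>",
#   })
#   print(resp.text)  # pretty-printed VRT with the datasource's fields inserted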
|
from unittest.mock import patch
from family_foto.app import add_user
from tests.base_test_case import BaseTestCase
class BaseLoginTestCase(BaseTestCase):
"""
Test Case that provides a logged in user mock.
"""
def setUp(self):
super().setUp()
self.patcher = patch('flask_login.utils._get_user')
self.mock_current_user = self.patcher.start()
user = add_user('marcel', '1234')
self.mock_current_user.return_value = user
self.mock_current_user.id = user.id
def tearDown(self):
self.patcher.stop()
super().tearDown()
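# Illustrative subclass (hypothetical test): anything inheriting from
# BaseLoginTestCase runs with the mocked 'marcel' user already logged in.
class ExampleLoggedInTest(BaseLoginTestCase):
    def test_user_mock_is_active(self):
        # the patched flask_login user is available without a real login flow
        self.assertIsNotNone(self.mock_current_user.return_value)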
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 18 23:40:23 2020
@author: eugeniy
"""
from pylibdmtx.pylibdmtx import encode
from PIL import Image
from docx import Document
from docx.shared import Mm
def marks_download (data=''):
marks = [['123123323133dsfsdds3213ssdcfsdvsd12312','Обувь мужская','М10','Импорт'],
['12312332sd234sdfsdf3dsfsdfsdfsdfsdfsd','Обувь женская','М10','Импорт']]
return marks
def page_generation (doc, mark):
dtmx = get_image(mark[0])
doc.add_picture(dtmx)
doc.add_paragraph(mark[1])
doc.add_paragraph(mark[2])
doc.add_paragraph(mark[3])
doc.add_page_break()
def docx_file_generation():
document = Document()
document.add_heading('Document Title', 0)
document.settings
section = document.sections[0]
section.page_height = Mm(58)
section.page_width = Mm(60)
section.left_margin = Mm(2)
section.right_margin = Mm(2)
section.top_margin = Mm(2)
section.bottom_margin = Mm(2)
section.header_distance = Mm(0)
section.footer_distance = Mm(0)
marks=marks_download()
for mark in marks:
page_generation(document, mark)
document.add_page_break()
document.save('demo.docx')
def get_image(data ='Some text'):
encoded = encode(data)
img = Image.frombytes('RGB', (encoded.width, encoded.height), encoded.pixels)
filename = data+'.png'
img.save(filename)
return filename
docx_file_generation()
|
import pandas as pd
data = pd.read_csv('psy_20151221.csv', encoding='cp1251', sep=';', index_col=False, na_values='?', decimal=',')
nominal_data = '0:1,2'
for nominal_line in nominal_data.split(';'):
nom_column, nom_data = nominal_line.split(':')
nom_id = int(nom_column)
data.iloc[:, nom_id] = data.iloc[:, nom_id].astype('category')
data.iloc[:, nom_id] = data.iloc[:, nom_id].cat.rename_categories(nom_data.split(','))
# discretize all non categorical columns
for index, column in enumerate(data.columns.values):
if not data[column].dtype.name == 'category':
data[column] = pd.cut(data[column], 3)
data[column] = data[column].cat.rename_categories(['1', '2', '3'])
f = open('psy_20151221_d.gqj', 'w')
f.write('1\n')
f.write(','.join(map(str, range(1, data.shape[1] + 1))) + '\n')
f.write('1:1,2;' + ';'.join([str(x) + ':1,2,3' for x in range(2, data.shape[1] + 1)]) + '\n')
f.flush()
f.close()
data.to_csv('psy_20151221_d.gqj', encoding='cp1251', sep='\t', index=False, mode='a', float_format=',', na_rep='?',
header=True)
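# For reference, a tiny standalone illustration (hypothetical data) of the
# equal-width binning pd.cut performs above:
demo = pd.cut(pd.Series([1.0, 5.0, 9.0]), 3).cat.rename_categories(['1', '2', '3'])
# demo now holds ['1', '2', '3']: three equal-width bins over the value range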
|
class Solution:
def solve(self, s):
tokens=s.split()
string=""
for word in tokens:
if word != "and":
string += str(word[0])
return string.upper()
ob = Solution()
print(ob.solve("Indian Space Research Organisation"))
|
import asyncio
import redis
import json
import aiohttp
from colorama import Back, Fore
class ProxyChecker:
test_url = "http://ya.ru"
timeout_sec = 10
# read the list of proxy IPs in proxyList from the first Argument given
r = redis.Redis()
redis_key = 'proxy'
proxy_list = []
async def is_bad_proxy(self, ipport):
async with aiohttp.ClientSession() as client:
try:
async with client.get(self.test_url, proxy=ipport, timeout=self.timeout_sec) as resp:
if resp.status == 200:
self.r.rpush('proxy', bytes(json.dumps({'proxy':ipport, 'checked': True}), 'utf-8'))
print(Fore.GREEN + "Working:", ipport + Fore.RESET)
else:
print(Fore.RED + "Not Working:", ipport + Fore.RESET)
except:
print(Back.LIGHTRED_EX + ipport + Back.RESET)
def __init__(self):
l = self.r.llen(self.redis_key)
while l > 0:
l -= 1
item = json.loads(self.r.lpop(self.redis_key).decode('utf-8'))
if item['checked'] == False:
self.proxy_list.append(item)
else:
self.r.rpush('proxy', bytes(json.dumps(item), 'utf-8'))
def __call__(self):
print(Back.GREEN + "Starting... \n" + Back.RESET)
loop = asyncio.get_event_loop()
tasks = []
for item in self.proxy_list:
proxy = "http://" + item['proxy']
tasks.append(asyncio.ensure_future(self.is_bad_proxy(proxy)))
loop.run_until_complete(asyncio.wait(tasks))
loop.close()
print(Back.GREEN + "\n...Finished" + Back.RESET) |
#
# Copyright © 2021 Uncharted Software Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib.machinery
import importlib.util
import io
import sys
import cProfile
import pstats
from typing import Any, Callable, Dict, Sequence, Tuple
import logging
import time
from d3m import container
from d3m.metadata import base
MISSING_VALUE_INDICATOR = "__miss_salt_8acf6447-fd14-480e-9cfb-0cb46accfafd"
SINGLETON_INDICATOR = "__sing_salt_6df854b8-a0ba-41ba-b598-ddeba2edfb53"
CATEGORICALS = (
"https://metadata.datadrivendiscovery.org/types/CategoricalData",
"https://metadata.datadrivendiscovery.org/types/OrdinalData",
"http://schema.org/DateTime",
"http://schema.org/Boolean",
)
VECTOR = "https://metadata.datadrivendiscovery.org/types/FloatVector"
def metadata_to_str(metadata: base.Metadata, selector: base.Selector = None) -> str:
buf = io.StringIO()
metadata.pretty_print(selector, buf)
return buf.getvalue()
def get_operating_columns(
inputs: container.DataFrame,
use_columns: Sequence[int],
semantic_types: Sequence[str],
require_attribute: bool = True,
) -> Sequence[int]:
# use caller supplied columns if supplied
cols = set(use_columns)
type_cols = set(inputs.metadata.list_columns_with_semantic_types(semantic_types))
if require_attribute:
attributes = set(
inputs.metadata.list_columns_with_semantic_types(
("https://metadata.datadrivendiscovery.org/types/Attribute",)
)
)
type_cols = type_cols & attributes
if len(cols) > 0:
cols = type_cols & cols
else:
cols = type_cols
return list(cols)
def get_operating_columns_structural_type(
inputs: container.DataFrame,
use_columns: Sequence[int],
structural_types: Sequence[str],
require_attribute: bool = True,
) -> Sequence[int]:
# use caller supplied columns if supplied
cols = set(use_columns)
type_cols = set(
inputs.metadata.list_columns_with_structural_types(structural_types)
)
if require_attribute:
attributes = set(
inputs.metadata.list_columns_with_semantic_types(
("https://metadata.datadrivendiscovery.org/types/Attribute",)
)
)
type_cols = type_cols & attributes
if len(cols) > 0:
cols = type_cols & cols
else:
cols = type_cols
return list(cols)
def lazy_load(fullname: str):
    # lazy load a module - needed for imports that trigger long running static
    # model loads
    if fullname in sys.modules:
        return sys.modules[fullname]
    spec = importlib.util.find_spec(fullname)
    # wrap the loader before creating the module so execution is deferred
    spec.loader = importlib.util.LazyLoader(spec.loader)
    module = importlib.util.module_from_spec(spec)
    # register the module so repeated calls resolve to the same object
    sys.modules[fullname] = module
    spec.loader.exec_module(module)
    return module
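# Illustrative use (hypothetical heavy dependency): the import cost is paid on
# first attribute access rather than when lazy_load returns.
#   torch = lazy_load("torch")   # cheap: returns a lazily-initialized module
#   torch.zeros(1)               # the real import happens here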
# an annotation that can be added to function calls to time their execution
def timed(fcn: Callable) -> Callable:
def wrapped(*args: Tuple, **kwargs: Dict[str, Any]) -> Any:
logger = logging.getLogger(__name__)
start = time.time()
logger.debug(f"Executing: {fcn.__module__}.{fcn.__name__}")
result = fcn(*args, **kwargs)
end = time.time()
logger.debug(f"Finished: {fcn.__module__}.{fcn.__name__} in {end - start} ms")
return result
return wrapped
# an annotation that can be added to function calls to fully profile their execution
def profiled(fcn: Callable) -> Callable:
    def wrapped(*args: Tuple, **kwargs: Dict[str, Any]) -> Any:
        pr = cProfile.Profile()
        pr.enable()
        result = fcn(*args, **kwargs)
        pr.disable()
        s = io.StringIO()
        sortby = "cumulative"
        ps = pstats.Stats(pr, stream=s).sort_stats(sortby).strip_dirs()
        ps.print_stats()
        print(s.getvalue())
        # return the wrapped function's result so the decorator is transparent
        return result
    return wrapped
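# Illustrative use of the decorators above (hypothetical function):
@timed
def _example_sum(n: int) -> int:
    return sum(range(n))
# calling _example_sum(1_000_000) logs start/finish with the elapsed time;
# stacking @profiled instead prints a full cProfile report to stdout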
|
import wx
import wx.lib.newevent
import utility
import os
from appdirs import user_config_dir
PrefsChangedEvent, EVT_PREFS_CHANGED = wx.lib.newevent.NewEvent()
_config = None
_defaults = {
'font' : wx.Font( 12, wx.TELETYPE, wx.NORMAL, wx.NORMAL ).GetNativeFontInfoDesc(),
'save_window_size' : True,
'window_width' : 800,
'window_height' : 600,
'input_height' : 25,
'use_ansi' : True,
'use_ansi_blink' : True,
'highlight_urls' : True,
'save_mcp_window_size' : True,
'autoconnect_last_world' : True,
'local_echo' : False,
'scroll_on_output' : True,
'mcp_window_width' : 600,
'mcp_window_height' : 400,
'external_editor' : 'gvim -f',
'use_x_copy_paste' : utility.platform == 'linux',
'theme' : 'ANSI',
}
def prefs_dir():
config_dir = user_config_dir('wxpymoo')
if not os.path.exists(config_dir):
os.makedirs(config_dir)
return config_dir
def Initialize():
global _config
config_file = os.path.join(prefs_dir(), 'config')
_config = wx.FileConfig(localFilename = config_file)
    for key, def_val in _defaults.items():
        # if nothing exists for that key, set it to the default.
        if get(key) is None:
            set(key, str(def_val))
def get(key, default = None):
val = _config.Read(key)
# ugly string -> Boolean handling. Hope we never have a value actually named "True" or "False"
if val == "True": val = True
if val == "False": val = False
if val == "" : val = default
return val
def set(param, val):
_config.Write(param, str(val))
_config.Flush()
def update(pw):
# pw == prefs_window
# This is doing some nasty GetAsString and GetNativeFontInfoDesc foo here,
# instead of encapsulated in prefs, which I think I'm OK with.
set('save_window_size', pw.general_page.save_size_checkbox.GetValue() )
set('autoconnect_last_world', pw.general_page.autoconnect_checkbox.GetValue() )
set('use_x_copy_paste', pw.general_page.xmouse_checkbox.GetValue() )
set('local_echo', pw.general_page.local_echo_checkbox.GetValue() )
set('scroll_on_output', pw.general_page.scroll_on_output_checkbox.GetValue() )
set('font', pw.fonts_page.font_ctrl.GetSelectedFont().GetNativeFontInfoDesc())
set('use_ansi', pw.fonts_page.ansi_checkbox.GetValue() )
set('use_ansi_blink', pw.fonts_page.ansi_blink_checkbox.GetValue() )
set('theme', pw.fonts_page.theme_picker.GetStringSelection() )
set('external_editor', pw.paths_page.external_editor.GetValue() )
wx.PostEvent(pw.parent, PrefsChangedEvent())
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Notes(models.Model):
user_id=models.ForeignKey(User,on_delete=models.CASCADE)
title=models.CharField(max_length=255)
notes_data=models.TextField()
thumbnail=models.FileField(default="")
created_at=models.DateTimeField(auto_now_add=True)
|
# -*- coding=utf-8 -*-
# @Time: 2020/10/12 10:42 AM
# Author: 王文娜
# @File:useragents.py
# @Software:PyCharm
ua_list=['Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36'
,'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:50.0) Gecko/20100101 Firefox/50.0'] |
#!/usr/bin/python3
"""
Base class
"""
import uuid
import datetime as dt
from . import storage
class BaseModel():
"""
Defines all common attributes/methods for other classes
"""
def __init__(self, *args, **kwargs):
"""
        Create the instance attributes:
        id         - unique id, generated with uuid4 and stored as a string
        created_at - set to the current datetime when the instance is created
        updated_at - refreshed with the current datetime every time the
                     object is changed
"""
if not kwargs:
self.id = str(uuid.uuid4())
self.created_at = dt.datetime.now()
self.updated_at = dt.datetime.now()
storage.new(self)
else:
for k, v in kwargs.items():
if k == "created_at" or k == "updated_at":
setattr(self, k, dt.datetime.strptime(
v, "%Y-%m-%dT%H:%M:%S.%f"))
elif k == "__class__":
pass
else:
setattr(self, k, v)
def __str__(self):
"""
        __str__ - returns "[{}] ({}) {}" filled with:
        field 1 - class name (BaseModel)
        field 2 - id (1234574587453)
        field 3 - the instance's __dict__
"""
return ("[{}] ({}) {}".format(self.__class__.__name__,
self.id, self.__dict__))
def save(self):
"""
        save - update the public instance attribute
        updated_at with the current datetime
"""
self.updated_at = dt.datetime.now()
storage.new(self)
storage.save()
def to_dict(self):
"""
        to_dict - return a copy of __dict__ with the class name added and
        created_at/updated_at serialized to ISO-format strings
"""
dict_1 = self.__dict__.copy()
dict_1["__class__"] = self.__class__.__name__
for k, v in self.__dict__.items():
if k == "created_at" or k == "updated_at":
v = self.__dict__[k].isoformat()
dict_1[k] = v
return (dict_1)
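# Illustrative round trip (hypothetical interactive session):
#   >>> b = BaseModel()
#   >>> d = b.to_dict()          # datetimes serialized via isoformat()
#   >>> copy = BaseModel(**d)    # kwargs path re-parses created_at/updated_at
#   >>> copy.id == b.id
#   True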
|
#!/usr/bin/env python
import os
import sys
import time
import sqlite3
import argparse
from subprocess import PIPE
from subprocess import Popen
class Capture():
def __init__(self, iface):
self.nodes_ita = {}
exists = os.path.isfile('DB')
self.db = sqlite3.connect('DB')
self.ieee = {}
for line in open('ieee.db', 'r'):
for element in line.split(',,,'):
try:
mac, comp = element.split('===')
self.ieee[mac] = comp
except: pass
self.capture = self._start_capture(iface)
if not exists:
self._setup_tables()
def _setup_tables(self):
self.db.execute('create table prober(id integer primary key autoincrement, src char(20) not null unique, company char(100), name char(100));')
self.db.execute('create table probed(id integer primary key autoincrement, dest char(20) not null unique, essid char(100));')
self.db.execute('create table packet(timestamp char(20), signal int, channel int, prober_id int, probed_id int);')
self.db.commit()
def _start_capture(self, iface):
airmon = Popen(['sudo', 'airmon-ng', 'stop', 'mon0'], stdout=PIPE, stderr=PIPE)
airmon.wait()
airmon = Popen(['sudo', 'airmon-ng', 'start', iface], stdout=PIPE, stderr=PIPE)
airmon.wait()
return Popen(['sudo', 'tcpdump', '-e', '-i', 'mon0'], stdout=PIPE, stderr=PIPE)
def _parse_probe(self, packet):
if "Probe Response" in packet:
return {'essid': packet.split("Probe Response (")[1].split(") ")[0],
'dest': packet.split("BSSID:")[1].split(" (")[0],
'signal': packet.split(" signal antenna")[0].split(" ")[-1],
'channel': packet.split("CH: ")[1].split(",")[0].rstrip('\n'),
'src': packet.split("Unknown) DA:")[1].split(" (oui")[0]}
def parse(self):
try:
last_printed = time.time()
while True:
line = self.capture.stdout.readline()
                try:
                    response = self._parse_probe(line)
                except:
                    # reset so a parse failure can't reuse the previous packet's data
                    response = None
                    print "[Error] could not parse\n\n%s" % line
if response:
response['timestamp'] = time.time()
try:
response['company'] = self.ieee[response['src'][:8]]
except:
response['company'] = 'unknown'
self._save_to_db(response)
self._is_it_new(response)
if time.time() - last_printed > 1:
self._print_data(response)
last_printed = time.time()
except KeyboardInterrupt:
pass
def _is_it_new(self, response):
probe_id, name = self.db.execute("select id, name from prober where src = ?;", (response['src'],)).fetchone()
if name != "none":
return
timestamp1 = self.db.execute("select max(timestamp) from packet where prober_id = ?", (probe_id,)).fetchone()[0]
timestamp2 = self.db.execute("select max(timestamp) from packet where prober_id = ? and timestamp < ?", (probe_id, timestamp1)).fetchone()[0]
if timestamp2 == None or time.time() - float(timestamp2) > 1800:
name = raw_input("\n%s\t%s\t%s\nEnter the name of the new node? " % (probe_id, response['src'], response['company']))
if len(name) < 1:
name = 'none'
self.db.execute("update prober set name=? where id=?;", (name, probe_id))
self.db.commit()
return True
def _print_data(self, response):
keepers = {}
ct = time.time()
self.nodes_ita[response['src']] = response
os.system('clear')
print '{:<10} {:<20} {:<15} {:<15} {:<20} {:<20}\n'.format('probe_id', 'name', 'timestamp', 'last seen', 'src', 'company')
for node in self.nodes_ita:
if ct - self.nodes_ita[node]['timestamp'] < 360:
keepers[node] = self.nodes_ita[node]
probe_id, name = self.db.execute("select id, name from prober where src = ?;", (self.nodes_ita[node]['src'],)).fetchone()
timestamp = self.db.execute("select max(timestamp) from packet where prober_id = ?", (probe_id,)).fetchone()[0]
last_seen = time.time() - float(timestamp)
timestamp = time.strftime("%H:%M:%S", time.localtime(float(timestamp)))
last_seen = time.strftime("%M:%S", time.localtime(float(last_seen)))
print '{:<10} {:<20} {:<15} {:<15} {:<20} {:<25}'.format(probe_id, name, timestamp, last_seen, self.nodes_ita[node]['src'], self.nodes_ita[node]['company'])
self.nodes_ita = keepers
def _save_to_db(self, response):
try:
self.db.execute("insert into prober (src, name, company) values (?, ?, ?)",
(response['src'], 'none', response['company']))
except sqlite3.IntegrityError:
pass
try:
self.db.execute("insert into probed (dest, essid) values (?, ?)",
(response['dest'], response['essid']))
except sqlite3.IntegrityError:
pass
self.db.commit()
prober_id = self.db.execute("select id from prober where src = (?)", (response['src'],)).fetchone()[0]
probed_id = self.db.execute("select id from probed where dest = (?)", (response['dest'],)).fetchone()[0]
signal = int(response['signal'].rstrip('dB'))
self.db.execute("insert into packet (timestamp, signal, channel, prober_id, probed_id) values (?, ?, ?, ?, ?)",
(response['timestamp'], signal, int(response['channel']), prober_id, probed_id))
self.db.commit()
def __exit__(self):
airmon = Popen(['sudo', 'airmon-ng', 'stop', 'mon0'], stdout=PIPE, stderr=PIPE)
airmon.wait()
print '\n\tThanks for using my script'
if __name__ == "__main__":
parse = argparse.ArgumentParser()
parse.add_argument('iface', help='interface to grab capture from')
parse.add_argument('--routers', help='display routers near me')
args = parse.parse_args()
capture = Capture(args.iface)
capture.parse()
|
import sqlite3
''' #==============================================================================
Database Interactions
''' #==============================================================================
class DatabaseInfo:
def __init__(self):
self.database = sqlite3.connect('ServerDatabase.sql')
self.cursor = self.database.cursor()
''' #==============================================================================
Database Setup
''' # ==============================================================================
# Create the tables in the database
def setup_database(self):
self.setup_room_table()
self.setup_user_table()
self.setup_hero_table()
print("Creating Database.")
# Create table to store all rooms
def setup_room_table(self):
self.cursor.execute('''
CREATE TABLE IF NOT EXISTS rooms(
room_name TEXT PRIMARY KEY)
''')
# Add the table
self.database.commit()
# Create table to store all users
def setup_user_table(self):
self.cursor.execute('''
CREATE TABLE IF NOT EXISTS users(
username TEXT PRIMARY KEY,
password TEXT,
salt TEXT)
''')
self.database.commit()
# Create table to store all players
def setup_hero_table(self):
self.cursor.execute('''
CREATE TABLE IF NOT EXISTS heroes(
id INTEGER PRIMARY KEY,
owner TEXT,
hero_name TEXT,
current_room TEXT)
''')
self.database.commit()
''' #==============================================================================
Login and Account functions
''' #==============================================================================
    # Checks that the given password matches the one stored for this user
    def check_login_details(self, username_login, password_login):
        try:
            # parameterized queries avoid SQL injection via user-supplied values
            self.cursor.execute("SELECT password FROM users WHERE username = ?", (username_login,))
password_in_database = self.cursor.fetchone()[0]
if password_login == password_in_database:
return True
else:
return False
except:
print('!LOGIN ERROR! - PASSWORD NOT IN DATABASE')
return False
# Adds new account details to database
def create_account(self, username, password, salt):
try:
self.cursor.execute("select * from Users where username == '" + username + "'")
rows = self.cursor.fetchall()
if len(rows) == 0:
self.cursor.execute('insert into Users(username, password, salt) VALUES(?,?,?)',
(username, password, salt))
# Add to SQL database
self.database.commit()
return True
else:
return False
except:
print('Failed to add to DB')
return False
# Checks if the account is in the database already
def check_account(self, username_login):
try:
self.cursor.execute("SELECT username FROM users WHERE username == '" + username_login + "'")
username_in_database = self.cursor.fetchone()[0]
if username_login == username_in_database:
return True
else:
return False
except:
print('LOGIN ERROR! - USERNAME ERROR')
return False
# Get this users password salt to send across
def fetch_salt_in_database(self, username_login):
try:
self.cursor.execute("SELECT salt FROM users WHERE username == '" + username_login + "'")
salt_in_database = self.cursor.fetchone()[0]
return salt_in_database
except:
print('LOGIN ERROR! - SALT FETCH ERROR')
return False
''' #==============================================================================
Player Information
''' #==============================================================================
# Adds a new hero to the database
def create_hero(self, username, hero_name, starting_room):
try:
self.cursor.execute("SELECT * FROM heroes WHERE hero_name == '" + hero_name + "'")
rows = self.cursor.fetchall()
if len(rows) == 0:
self.cursor.execute('insert into heroes(hero_name, owner, current_room) VALUES(?,?,?)',
(hero_name, username, starting_room))
self.database.commit()
return True
else:
return False
except:
print('HERO CREATION ERROR!')
return False
# Gets a list of all the heroes that user has created
def list_of_heroes(self, username):
try:
self.cursor.execute("SELECT * FROM heroes WHERE owner == '" + username + "'")
heroes_owned = self.cursor.fetchall()
return heroes_owned
except:
print('HERO FETCH ERROR!')
return False
    # Selects a hero from the database
    def choose_hero(self, username, hero_name):
        try:
            self.cursor.execute("SELECT * FROM heroes WHERE hero_name = ? AND owner = ?",
                                (hero_name, username))
hero = self.cursor.fetchone()
if hero is not None:
return hero
else:
return None
except:
print('HERO SELECTION ERROR!')
return False
    # Gets the room the hero is currently in
    def get_current_room(self, hero_name):
        try:
            self.cursor.execute("SELECT current_room FROM heroes WHERE hero_name = ?", (hero_name,))
hero_room = self.cursor.fetchone()
if hero_room is not None:
return hero_room[0]
else:
return None
except:
print('ROOM GET ERROR!')
return False
    # Change the hero's current room in the database
    def update_hero_room(self, hero_name, new_room):
        try:
            self.cursor.execute("SELECT * FROM heroes WHERE hero_name = ?", (hero_name,))
hero = self.cursor.fetchone()
if hero:
self.cursor.execute("UPDATE heroes SET current_room = '" + new_room + "'" + " WHERE hero_name == '" + hero_name + "'")
self.database.commit()
else:
return None
except:
print('ROOM UPDATE ERROR!')
return False
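# Illustrative setup (hypothetical values; callers are expected to hash the
# password and generate the salt before storing them):
if __name__ == "__main__":
    db = DatabaseInfo()
    db.setup_database()
    db.create_account("alice", "hashed_pw", "salt123")
    print(db.check_account("alice"))  # -> True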
|
import sys
input = sys.stdin.readline
word1 = sorted(list(input().strip()))
word2 = sorted(list(input().strip()))
meh = word1[:]
for i in range(len(word2)):
if word1[i] == word2[i]:
        meh.remove(word1[i])
print(meh[0])
|
#!/usr/bin/env python2
"""
replacement for submatrix.awk
"""
import sys
start = int(sys.argv[1])
end = int(sys.argv[2])
in_fn = sys.argv[3]
for line in open(in_fn).readlines():
sl = line.split()
col_one = int(sl[0])
if (col_one >= start) and (col_one <= end):
col_one = col_one - start + 1
print col_one, sl[1], sl[2]
|
"""
LIBRARY_NAME -> str
get_initial_conditions(reporting_unit: int) -> List[InitialConditionNonSpatialDistributionSerializer]
create_spatial_initial_conditions
"""
import os
import sys
from importlib.util import spec_from_file_location, module_from_spec
from landscapesim.importers import ProjectImporter, ScenarioImporter, ReportImporter
REGISTERED_LIBRARY_PROCESSORS = {}
def _load_library_processors():
""" Find and import all possible contrib modules to facilitate project, scenario and report imports. """
    # Don't load dunder names (e.g. __init__.py, __pycache__); only .py files qualify
    possible_module_paths = [(os.path.join(os.path.dirname(__file__), x), x)
                             for x in os.listdir(os.path.dirname(__file__))
                             if '__' not in x and x.endswith('.py')]
# Now try loading each module
for full_path, file_name in possible_module_paths:
name = file_name.split('.')[-2]
module_name = 'landscapesim.contrib.{}'.format(name)
try:
spec = spec_from_file_location(module_name, full_path)
mod = module_from_spec(spec)
sys.modules[module_name] = mod
spec.loader.exec_module(mod)
REGISTERED_LIBRARY_PROCESSORS[name] = module_name
except ImportError:
print("Could not load {} at path {}.".format(name, full_path))
# Now find available library modules
_load_library_processors()
def find_library_module(library_name):
""" Find a contrib module based on the STSim library name. """
for name, mod_name in REGISTERED_LIBRARY_PROCESSORS.items():
mod = sys.modules[mod_name]
        if getattr(mod, 'LIBRARY_NAME', None) == library_name:
return mod
return None
def get_project_importer_cls(library_name):
""" Return a ProjectImporter class to handle importing a specific library's project information. """
lib_module = find_library_module(library_name)
if lib_module:
try:
return lib_module.PROJECT_IMPORTER_CLASS
except AttributeError:
pass
return ProjectImporter
def get_scenario_importer_cls(library_name):
""" Return a ScenarioImporter class to handle importing a specific library's scenario information. """
lib_module = find_library_module(library_name)
if lib_module:
try:
return lib_module.SCENARIO_IMPORTER_CLASS
except AttributeError:
pass
return ScenarioImporter
def get_report_importer_cls(library_name):
""" Return a ReportImporter class to handle importing a specific library's report information. """
lib_module = find_library_module(library_name)
if lib_module:
try:
return lib_module.REPORT_IMPORTER_CLASS
except AttributeError:
pass
return ReportImporter
def get_initial_conditions(library_name, scenario, reporting_unit=None):
lib_module = find_library_module(library_name)
if lib_module and reporting_unit is not None:
return lib_module.get_initial_conditions(scenario, reporting_unit)
return None
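# Illustrative contrib module (hypothetical file landscapesim/contrib/demo.py)
# satisfying the contract the loader above expects:
#
#   LIBRARY_NAME = 'Demo Library'
#
#   # optional overrides; the getter functions fall back to the defaults
#   PROJECT_IMPORTER_CLASS = ProjectImporter
#   SCENARIO_IMPORTER_CLASS = ScenarioImporter
#   REPORT_IMPORTER_CLASS = ReportImporter
#
#   def get_initial_conditions(scenario, reporting_unit):
#       return []  # serializer data for the given reporting unit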
|
import os
from subprocess import call
import sys
import tkinter as tk
def click_checkinn():
call(["python", "checkin_gui_and_program.py"])
def click_list():
call(["python", "listgui.py"])
def click_checkout():
call(["python", "checkoutgui.py"])
def click_getinfo():
call(["python","getinfoui.py"])
class FindMyLocker:
def __init__(self):
root = tk.Tk()
'''This class configures and populates the toplevel window.
top is the toplevel containing window.'''
_bgcolor = '#d9d9d9' # X11 color: 'gray85'
_fgcolor = '#000000' # X11 color: 'black'
_compcolor = '#ffffff' # X11 color: 'white'
_ana1color = '#ffffff' # X11 color: 'white'
_ana2color = '#ffffff' # X11 color: 'white'
font14 = "-family {Segoe UI} -size 15 -weight bold -slant " \
"roman -underline 0 -overstrike 0"
font16 = "-family {Swis721 BlkCn BT} -size 35 -weight bold " \
"-slant roman -underline 0 -overstrike 0"
font9 = "-family {Segoe UI} -size 9 -weight normal -slant " \
"roman -underline 0 -overstrike 0"
root.geometry("963x749+540+110")
root.title("FIND MY LOCKER")
root.configure(background="#D1D0CE")
root.configure(highlightbackground="#d9d9d9")
root.configure(highlightcolor="black")
self.menubar = tk.Menu(root,font=font9,bg=_bgcolor,fg=_fgcolor)
root.configure(menu = self.menubar)
self.Frame1 = tk.Frame(root)
self.Frame1.place(relx=0.02, rely=0.03, relheight=0.94, relwidth=0.96)
self.Frame1.configure(relief=tk.GROOVE)
self.Frame1.configure(borderwidth="2")
self.Frame1.configure(relief=tk.GROOVE)
self.Frame1.configure(background="#E5E4E2") #Gray
self.Frame1.configure(highlightbackground="#d9d9d9")
self.Frame1.configure(highlightcolor="black")
self.Frame1.configure(width=1000)
self.Message6 = tk.Message(self.Frame1)
self.Message6.place(relx=0.09, rely=0.01, relheight=0.15, relwidth=0.86)
self.Message6.configure(background="#E5E4E2")
self.Message6.configure(font=font16)
self.Message6.configure(foreground="#000000")
self.Message6.configure(highlightbackground="#d9d9d9")
self.Message6.configure(highlightcolor="black")
self.Message6.configure(text='WELCOME TO FIND MY LOCKER')
self.Message6.configure(width=900)
self.Button2 = tk.Button(self.Frame1)
self.Button2.place(relx=0.18, rely=0.17, height=103, width=566)
self.Button2.configure(activebackground="#adddce")
self.Button2.configure(activeforeground="#000000")
self.Button2.configure(background="#adddce")
self.Button2.configure(disabledforeground="#bfbfbf")
self.Button2.configure(font=font14)
self.Button2.configure(foreground="#000000")
self.Button2.configure(highlightbackground="#d9d9d9")
self.Button2.configure(highlightcolor="black")
self.Button2.configure(pady="0")
self.Button2.configure(text='''1) CHECK IN''')
self.Button2.configure(width=566)
self.Button2.configure(command=click_checkinn)
self.Button3 = tk.Button(self.Frame1)
self.Button3.place(relx=0.18, rely=0.33, height=93, width=566)
self.Button3.configure(activebackground="#9C8ADE")
self.Button3.configure(activeforeground="#000000")
self.Button3.configure(background="#9C8ADE")
self.Button3.configure(disabledforeground="#bfbfbf")
self.Button3.configure(font=font14)
self.Button3.configure(foreground="#000000")
self.Button3.configure(highlightbackground="#d9d9d9")
self.Button3.configure(highlightcolor="black")
self.Button3.configure(pady="0")
self.Button3.configure(text='''2) SHOW GUESTS LIST''')
self.Button3.configure(width=566)
self.Button3.configure(command=click_list)
self.Button4 = tk.Button(self.Frame1)
self.Button4.place(relx=0.18, rely=0.47, height=93, width=566)
self.Button4.configure(activebackground="#E69288")
self.Button4.configure(activeforeground="#000000")
self.Button4.configure(background="#E69288")
self.Button4.configure(disabledforeground="#bfbfbf")
self.Button4.configure(font=font14)
self.Button4.configure(foreground="#000000")
self.Button4.configure(highlightbackground="#d9d9d9")
self.Button4.configure(highlightcolor="black")
self.Button4.configure(pady="0")
self.Button4.configure(text='''3) CHECK OUT''')
self.Button4.configure(width=566)
self.Button4.configure(command=click_checkout)
self.Button5 = tk.Button(self.Frame1)
self.Button5.place(relx=0.18, rely=0.61, height=103, width=566)
self.Button5.configure(activebackground="#FEB68E")
self.Button5.configure(activeforeground="#000000")
self.Button5.configure(background="#FEB68E")
self.Button5.configure(disabledforeground="#bfbfbf")
self.Button5.configure(font=font14)
self.Button5.configure(foreground="#000000")
self.Button5.configure(highlightbackground="#d9d9d9")
self.Button5.configure(highlightcolor="black")
self.Button5.configure(pady="0")
self.Button5.configure(text='''4) GET INFO OF ANY GUESTS''')
self.Button5.configure(width=566)
self.Button5.configure(command=click_getinfo)
self.Button6 = tk.Button(self.Frame1)
self.Button6.place(relx=0.18, rely=0.77, height=103, width=566)
self.Button6.configure(activebackground="#E8CCC0")
self.Button6.configure(activeforeground="#000000")
self.Button6.configure(background="#E8CCC0")
self.Button6.configure(disabledforeground="#bfbfbf")
self.Button6.configure(font=font14)
self.Button6.configure(foreground="#000000")
self.Button6.configure(highlightbackground="#d9d9d9")
self.Button6.configure(highlightcolor="black")
self.Button6.configure(pady="0")
self.Button6.configure(text='''5) EXIT''')
self.Button6.configure(width=566)
self.Button6.configure(command=quit)
root.mainloop()
if __name__ == '__main__':
GUEST = FindMyLocker()
|
"""backend URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
from tastypie.api import Api
# Import our tastypie API resources
from azzaip.api import MessageResource, AccessResource, ManageResource
# Import our github webhook view
from backend import views
# Register a URL endpoint for each api resource
API = Api(api_name='v1')
API.register(MessageResource())
API.register(AccessResource())
API.register(ManageResource())
# Route the actual URL patterns
urlpatterns = [
url(r'', include('azzaip.urls')),
url(r'^admin/', admin.site.urls),
url(r'^api/', include(API.urls)),
url(r'^hook/$', views.hook, name='hook')
]
|
# -*- encoding: utf-8 -*-
from django import forms
class SearchForm(forms.Form):
query = forms.CharField(min_length=3, required=False)
|
#I pledge my honor that I have abided by the stevens honor system.
#OmP1.py
def main():
n = int(input("How many numbers would you like in the list?"))
numlist = list(float(x) for x in input("Enter a list of numbers separated by spaces.").strip().split())[:n]
print("The entries of the list are", numlist)
for i in range(n):
numlist[i] = numlist[i] ** 2
print("Each of these values squared is", numlist)
main()
|
import random
def twoLists():
set1 = random.sample(range(20), 10)
print("Первый список: " + str(set1))
set2 = random.sample(range(20), 10)
print("Второй список: " + str(set2))
result = set(set1).union(set(set2))
print("Общий список: " + str(result))
twoLists()
|
import os
ACCESS_TOKEN_URI = 'https://www.googleapis.com/oauth2/v4/token'
AUTHORIZATION_URL = 'https://accounts.google.com/o/oauth2/v2/auth?access_type=offline&prompt=consent'
AUTHORIZATION_SCOPE = 'openid email profile'
AUTH_REDIRECT_URI = "http://localhost:8080/api/auth/signin/google/callback" # os.environ.get("FN_AUTH_REDIRECT_URI", default=False)
BASE_URI = "http://localhost:3000" # 8080" # os.environ.get("FN_BASE_URI", default=False)
CLIENT_ID = "671001533244-kf1k1gmp6mnl0r030qmvdu6v36ghmim6.apps.googleusercontent.com"
CLIENT_SECRET = "Yo4qbKZniqvojzUQ60iKlxqR"
AUTH_TOKEN_KEY = 'auth_token'
AUTH_STATE_KEY = 'auth_state'
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import http
import math
import os
import json
import time
from enum import Enum
from os.path import expanduser
from electrum.util import Ticker, make_aiohttp_session
import requests
# from eth_accounts.account_utils import AccountUtils
from eth_keyfile import keyfile
from eth_utils import to_checksum_address
from web3 import HTTPProvider, Web3
from .eth_transaction import Eth_Transaction
from decimal import Decimal
import sqlite3
from .network import Network
from electrum.constants import read_json
eth_servers = {}
ETHERSCAN_API_KEY = "R796P9T31MEA24P8FNDZBCA88UHW8YCNVW"
INFURA_PROJECT_ID = "f001ce716b6e4a33a557f74df6fe8eff"
ROUND_DIGITS = 3
DEFAULT_GAS_PRICE_GWEI = 4
DEFAULT_GAS_LIMIT = 21000
GWEI_BASE = 1000000000
DEFAULT_GAS_SPEED = 1
KEYSTORE_DIR_PREFIX = expanduser("~")
# default pyethapp keystore path
KEYSTORE_DIR_SUFFIX = ".electrum/eth/keystore/"
# REQUESTS_HEADERS = {
# "User-Agent": "https://github.com/AndreMiras/PyWallet",
# }
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
class InsufficientFundsException(Exception):
"""
Raised when user want to send funds and have insufficient balance on address
"""
pass
class InsufficientERC20FundsException(Exception):
"""
Raised when user want to send ERC20 contract tokens and have insufficient balance
of these tokens on wallet's address
"""
pass
class ERC20NotExistsException(Exception):
"""
Raised when user want manipulate with token which doesn't exist in wallet.
"""
pass
class InvalidTransactionNonceException(Exception):
"""
Raised when duplicated nonce occur or any other problem with nonce
"""
pass
class InvalidValueException(Exception):
"""
Raised when some of expected values is not correct.
"""
pass
class InvalidAddress(ValueError):
"""
The supplied address does not have a valid checksum, as defined in EIP-55
"""
pass
class InvalidPasswordException(Exception):
"""
Raised when invalid password was entered.
"""
pass
class InfuraErrorException(Exception):
"""
Raised when wallet cannot connect to infura node.
"""
class UnknownEtherscanException(Exception):
pass
class NoTransactionFoundException(UnknownEtherscanException):
pass
def get_abi_json():
root_dir = os.path.dirname(os.path.abspath(__file__))
abi_path = os.path.join(root_dir, '.', 'abi.json')
with open(abi_path) as f:
fitcoin = json.load(f)
return fitcoin
def handle_etherscan_response_json(response_json):
"""Raises an exception on unexpected response json."""
status = response_json["status"]
message = response_json["message"]
if status != "1":
if message == "No transactions found":
raise NoTransactionFoundException()
else:
raise UnknownEtherscanException(response_json)
#assert message == "OK"
def handle_etherscan_response_status(status_code):
"""Raises an exception on unexpected response status."""
if status_code != http.HTTPStatus.OK:
raise UnknownEtherscanException(status_code)
def handle_etherscan_response(response):
"""Raises an exception on unexpected response."""
handle_etherscan_response_status(response.status_code)
handle_etherscan_response_json(response.json())
def requests_get(url):
    # the previous try/except only re-raised; let exceptions propagate directly
    return requests.get(url, timeout=2, verify=False)
headers = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36"
}
class PyWalib:
web3 = None
market_server = None
tx_list_server = None
gas_server = None
symbols_price = {}
config = None
chain_type = None
chain_id = None
conn = None
cursor = None
def __init__(self, config, chain_type="mainnet", path=""):
PyWalib.chain_type = chain_type
PyWalib.config = config
PyWalib.conn = sqlite3.connect(path)
PyWalib.cursor = self.conn.cursor()
self.create_db()
self.init_symbols()
def create_db(self):
PyWalib.cursor.execute("CREATE TABLE IF NOT EXISTS txlist (tx_hash TEXT PRIMARY KEY, address TEXT, time INTEGER, tx TEXT)")
def init_symbols(self):
symbol_list = self.config.get("symbol_list", {'ETH':'','EOS':''})
for symbol in symbol_list:
PyWalib.symbols_price[symbol] = PyWalib.get_currency(symbol, 'BTC')
global symbol_ticker
symbol_ticker = Ticker(1*60, self.get_symbols_price)
symbol_ticker.start()
def get_symbols_price(self):
try:
for symbol, price in PyWalib.symbols_price.items():
PyWalib.symbols_price[symbol] = self.get_currency(symbol, 'BTC')
PyWalib.config.set_key("symbol_list", PyWalib.symbols_price)
time.sleep(1)
except BaseException as e:
raise e
    @classmethod
    async def get_json(cls, url):
        network = Network.get_instance()
        proxy = network.proxy if network else None
        # aiohttp sessions and responses are async context managers
        async with make_aiohttp_session(proxy) as session:
            async with session.get(url) as response:
                response.raise_for_status()
                # set content_type to None to disable checking MIME type
                return await response.json(content_type=None)
@classmethod
def get_currency(cls, from_cur, to_cur):
try:
out_price = {}
for server in PyWalib.market_server:
for name, url in server.items():
if name == "coinbase":
url += from_cur
response = requests.get(url, timeout=5, verify=False)
json = response.json()
return [str(Decimal(rate)) for (ccy, rate) in json["data"]["rates"].items() if ccy == to_cur][0]
# if name == "binance":
# url += from_cur.upper()+to_cur.upper()
# try:
# response = requests.get(url, timeout=3, verify=False)
# obj = response.json()
# out_price[name] = obj['data']['lastprice']
# except BaseException as e:
# pass
elif name == 'bixin':
url += from_cur.upper() + '/' + to_cur.upper()
try:
response = requests.get(url, timeout=3, verify=False)
obj = response.json()
#out_price[name] = obj['data']['price']
return obj['data']['price']
except BaseException as e:
pass
# elif name == 'huobi':
# url += from_cur.lower() + to_cur.lower()
# try:
# response = requests.get(url, timeout=3, verify=False)
# obj = response.json()
# out_price[name] = (obj['data']['bid'][0] + obj['data']['ask'][0])/2.0
# except BaseException as e:
# pass
# elif name == 'ok':
# print("TODO")
# return_price = 0.0
# for price in out_price.values():
# return_price += float(price)
# return return_price/len(out_price)
except BaseException as e:
print(f"get symbol price error {e}")
pass
@staticmethod
def get_web3():
return PyWalib.web3
@staticmethod
def set_server(info):
PyWalib.market_server = info['Market']
PyWalib.tx_list_server = info['TxliServer']
PyWalib.gas_server = info['GasServer']
for i in info['Provider']:
if PyWalib.chain_type in i:
url = i[PyWalib.chain_type]
chain_id = i['chainid']
PyWalib.web3 = Web3(HTTPProvider(url))
PyWalib.chain_id = chain_id
@staticmethod
def get_coin_price(from_cur):
try:
from_cur = from_cur.upper()
if from_cur in PyWalib.symbols_price:
symbol_price = PyWalib.symbols_price[from_cur]
return symbol_price if symbol_price is not None else PyWalib.get_currency(from_cur, 'BTC')
else:
symbol_price = PyWalib.get_currency(from_cur, 'BTC')
PyWalib.symbols_price[from_cur] = symbol_price
PyWalib.config.set_key("symbol_list", PyWalib.symbols_price)
return symbol_price
except BaseException as e:
raise e
def get_gas_price(self):
try:
#response = requests.get(eth_servers['GasServer'], headers=headers)
if PyWalib.gas_server is not None:
response = requests.get(PyWalib.gas_server, headers=headers)
obj = response.json()
out = dict()
if obj['code'] == 200:
                    for speed, wei in obj['data'].items():
                        # assumes gasnow-style tiers; skip non-tier keys such as "timestamp"
                        if speed == "rapid":
                            eta = "15 Seconds"
                        elif speed == "fast":
                            eta = "1 Minute"
                        elif speed == "standard":
                            eta = "3 Minutes"
                        elif speed == "slow":
                            eta = "> 10 Minutes"
                        else:
                            continue
                        out[speed] = {'price': int(self.web3.fromWei(wei, "gwei")), 'time': eta}
return json.dumps(out)
except BaseException as ex:
raise ex
def get_max_use_gas(self, gas_price):
gas = gas_price * DEFAULT_GAS_LIMIT
return self.web3.fromWei(gas * GWEI_BASE, 'ether')
def get_transaction(self, from_address, to_address, value, contract=None, gasprice = DEFAULT_GAS_PRICE_GWEI):
try:
float(value)
except ValueError:
raise InvalidValueException()
if contract is None: # create ETH transaction dictionary
tx_dict = Eth_Transaction.build_transaction(
to_address=self.web3.toChecksumAddress(to_address),
value=self.web3.toWei(value, "ether"),
gas=DEFAULT_GAS_LIMIT, # fixed gasLimit to transfer ether from one EOA to another EOA (doesn't include contracts)
#gas_price=self.web3.eth.gasPrice * gas_price_speed,
gas_price=self.web3.toWei(gasprice, "gwei"),
# be careful about sending more transactions in row, nonce will be duplicated
nonce=self.web3.eth.getTransactionCount(self.web3.toChecksumAddress(from_address)),
chain_id=int(PyWalib.chain_id)
)
else: # create ERC20 contract transaction dictionary
erc20_decimals = contract.get_decimals()
# token_amount = int(float(value) * (10 ** erc20_decimals))
token_amount = int(float(value))
data_for_contract = Eth_Transaction.get_tx_erc20_data_field(to_address, token_amount)
# check whether there is sufficient ERC20 token balance
_, erc20_balance = self.get_balance(self.web3.toChecksumAddress(from_address), contract)
if float(value) > erc20_balance:
raise InsufficientERC20FundsException()
addr = self.web3.toChecksumAddress(contract.get_address())
#calculate how much gas I need, unused gas is returned to the wallet
estimated_gas = self.web3.eth.estimateGas(
{'to': contract.get_address(),
'from': self.web3.toChecksumAddress(from_address),
'data': data_for_contract
})
tx_dict = Eth_Transaction.build_transaction(
to_address=contract.get_address(), # receiver address is defined in data field for this contract
value=0, # amount of tokens to send is defined in data field for contract
gas=estimated_gas,
gas_price=self.web3.toWei(gasprice, "gwei"),
# be careful about sending more transactions in row, nonce will be duplicated
nonce=self.web3.eth.getTransactionCount(self.web3.toChecksumAddress(from_address)),
chain_id=int(PyWalib.chain_id),
data=data_for_contract
)
# check whether to address is valid checksum address
if not self.web3.isChecksumAddress(self.web3.toChecksumAddress(to_address)):
raise InvalidAddress()
# check whether there is sufficient eth balance for this transaction
#_, balance = self.get_balance(from_address)
balance = self.web3.fromWei(self.web3.eth.getBalance(self.web3.toChecksumAddress(from_address)), 'ether')
transaction_const_wei = tx_dict['gas'] * tx_dict['gasPrice']
transaction_const_eth = self.web3.fromWei(transaction_const_wei, 'ether')
if contract is None:
if (transaction_const_eth + Decimal(value)) > balance:
raise InsufficientFundsException()
else:
if transaction_const_eth > balance:
raise InsufficientFundsException()
return tx_dict
def sign_and_send_tx(self, account, tx_dict):
tx_hash = Eth_Transaction.send_transaction(account, self.web3, tx_dict)
print('Pending', end='', flush=True)
while True:
tx_receipt = self.web3.eth.getTransactionReceipt(tx_hash)
            if tx_receipt is None:
                print('.', end='', flush=True)
                # time is already imported at module level
                time.sleep(1)
else:
print('\nTransaction mined!')
break
return tx_hash
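    # Illustrative send flow (hypothetical values; assumes set_server() has
    # configured a web3 provider and `account` is an unlocked local account):
    #   wallib = PyWalib(config, path="wallet.db")
    #   tx = wallib.get_transaction(from_addr, to_addr, "0.01", gasprice=4)
    #   tx_hash = wallib.sign_and_send_tx(account, tx)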
def serialize_and_send_tx(self, tx_dict, vrs):
tx_hash = Eth_Transaction.serialize_and_send_tx(self.web3, tx_dict, vrs)
print('Pending', end='', flush=True)
while True:
tx_receipt = self.web3.eth.getTransactionReceipt(tx_hash)
            if tx_receipt is None:
                print('.', end='', flush=True)
                time.sleep(1)
else:
print('\nTransaction mined!')
break
@staticmethod
def get_balance(wallet_address, contract=None):
if contract is None:
eth_balance = PyWalib.get_web3().fromWei(PyWalib.get_web3().eth.getBalance(wallet_address), 'ether')
return "eth", eth_balance
else:
erc_balance = contract.get_balance(wallet_address)
return contract.get_symbol(), erc_balance
# def get_balance_web3(self, address):
# """
# The balance is returned in ETH rounded to the second decimal.
# """
# address = to_checksum_address(address)
# balance_wei = self.web3.eth.getBalance(address)
# balance_eth = balance_wei / float(pow(10, 18))
# balance_eth = round(balance_eth, ROUND_DIGITS)
# return balance_eth
@staticmethod
def get_transaction_history(address, recovery=False):
tx_list = PyWalib.get_transaction_history_fun(address, recovery=recovery)
if len(tx_list) == 0 and not recovery:
PyWalib.cursor.execute("SELECT * FROM txlist WHERE address=? ORDER BY time DESC Limit 10", (address,))
result = PyWalib.cursor.fetchall()
for info in result:
tx_list.append(info)
return tx_list
@classmethod
def tx_list_ping(cls, recovery=False):
try:
speed_list = {}
for server in PyWalib.tx_list_server:
for key, value in server.items():
try:
if -1 == key.find(PyWalib.chain_type):
continue
else:
if recovery:
if -1 == key.find("trezor"):
continue
speed_list[key] = value
return speed_list
except BaseException as e:
pass
return None
except BaseException as e:
raise e
    @staticmethod
    def get_tx_from_etherscan(address, url):
url += (
'?module=account&action=txlist'
'&sort=asc'
f'&address={address}'
f'&apikey={ETHERSCAN_API_KEY}'
)
try:
response = requests_get(url)
handle_etherscan_response(response)
response_json = response.json()
        except BaseException as e:
            print(f"error....when get_eth_history.....{e}")
            return []
transactions = response_json['result']
out_tx_list = []
for transaction in transactions:
value_wei = int(transaction['value'])
value_eth = value_wei / float(pow(10, 18))
value_eth = round(value_eth, ROUND_DIGITS)
from_address = to_checksum_address(transaction['from'])
to_address = transaction['to']
# on contract creation, "to" is replaced by the "contractAddress"
if not to_address:
to_address = transaction['contractAddress']
to_address = to_checksum_address(to_address)
sent = from_address == address
received = not sent
extra_dict = {
'time': transaction['timeStamp'],
'value_eth': value_eth,
'sent': sent,
'received': received,
'from_address': from_address,
'to_address': to_address,
}
time = int(transaction['timeStamp'])
PyWalib.cursor.execute("INSERT OR IGNORE INTO txlist VALUES(?, ?,?,?)", (transaction['hash'], address, time, json.dumps(extra_dict)))
out_tx_list.append(extra_dict)
PyWalib.conn.commit()
        out_tx_list.sort(key=lambda x: x['time'])
        # slicing copes with fewer than 10 entries on its own
        return out_tx_list[:10]
    @staticmethod
    def get_recovery_flag_from_trezor(address, url):
try:
url += f'/address/{address}'
response = requests_get(url)
#handle_etherscan_response(response)
response_json = response.json()
txs = response_json['txs']
return response_json['txids'] if txs != 0 else []
        except BaseException as e:
            print(f"get_tx_flag ...error {e}")
        return []
    @staticmethod
    def get_tx_from_trezor(address, url):
        base_url = url
        url += f'/address/{address}'
        try:
            response = requests_get(url)
            handle_etherscan_response(response)
            response_json = response.json()
            txids = response_json['txids']
        except BaseException as e:
            print(f"error .....get address from trezor....{e}")
            return []
        out_tx_list = []
        for txid in txids:
            # build each tx url from the base so the path doesn't accumulate
            tx_url = f'{base_url}/tx/{txid}'
            try:
                response = requests_get(tx_url)
                handle_etherscan_response(response)
                response_json = response.json()
            except BaseException as e:
                print(f"error .....get tx from trezor....{e}")
                continue
value_wei = int(response_json['value'])
value_eth = value_wei / float(pow(10, 18))
value_eth = round(value_eth, ROUND_DIGITS)
from_address = to_checksum_address(response_json['vin'][0]['addresses'])
to_address = response_json['vout'][0]['addresses']
# on contract creation, "to" is replaced by the "contractAddress"
# if not to_address:
# to_address = transaction['contractAddress']
to_address = to_checksum_address(to_address)
sent = from_address == address
received = not sent
extra_dict = {
'time': response_json['blockTime'],
'value_eth': value_eth,
'sent': sent,
'received': received,
'from_address': from_address,
'to_address': to_address,
}
time = int(response_json['blockTime'])
PyWalib.cursor.execute("INSERT OR IGNORE INTO txlist VALUES(?, ?,?,?)",
(response_json['txid'], address, time, json.dumps(extra_dict)))
out_tx_list.append(extra_dict)
PyWalib.conn.commit()
out_tx_list.sort(key=lambda x: x['time'])
out_len = 10 if len(out_tx_list) >= 10 else len(out_tx_list)
return out_tx_list[:out_len]
def get_transaction_history_fun(address, recovery=False):
"""
Retrieves the transaction history from server list
"""
address = to_checksum_address(address)
tx_list = []
speed_list = PyWalib.tx_list_ping(recovery=recovery)
for server_key, url in speed_list.items():
if -1 != server_key.find("trezor"):
if recovery:
print(f"get_transaction history from trezor to recovery....{address, url}")
tx_list = PyWalib.get_recovery_flag_from_trezor(address, url)
else:
print(f"get_transaction history from trezor....{address, url}")
tx_list = PyWalib.get_tx_from_trezor(address, url)
elif -1 != server_key.find("etherscan"):
print(f"get_transaction history from etherscan....{address, url}")
tx_list = PyWalib.get_tx_from_etherscan(address, url)
if len(tx_list) != 0:
return tx_list
return tx_list
# @staticmethod
# def get_out_transaction_history(address):
# """
# Retrieves the outbound transaction history from Etherscan.
# """
# transactions = PyWalib.get_transaction_history(address, PyWalib.chain_id)
# out_transactions = []
# for transaction in transactions:
# if transaction['extra_dict']['sent']:
# out_transactions.append(transaction)
# return out_transactions
# TODO: can be removed since the migration to web3
@staticmethod
def get_nonce(address):
"""
Gets the nonce by counting the list of outbound transactions from
Etherscan.
"""
try:
out_transactions = PyWalib.get_out_transaction_history(
address, PyWalib.chain_id)
except NoTransactionFoundException:
out_transactions = []
nonce = len(out_transactions)
return nonce
@staticmethod
def handle_web3_exception(exception: ValueError):
"""
Raises the appropriated typed exception on web3 ValueError exception.
"""
error = exception.args[0]
code = error.get("code")
if code in [-32000, -32010]:
raise InsufficientFundsException(error)
else:
raise UnknownEtherscanException(error) |
from .train import Train
from .infer import Infer
from ._logging import Logging
class Runner():
def __init__(self, param):
self.exp_param = param["exp_param"]
self.train_param = param["train_param"]
self.log_param = param["log_param"]
self.train_param.update(self.exp_param)
self.log_param.update(self.exp_param)
def __call__(self):
Trainer = Train(self.train_param)
Inferer = Infer(self.train_param)
Logger = Logging(self.log_param)
Trainer()
Inferer()
Logger() |
from Up import Person
from getconn import getconn
from video import video
import logging
from Up import up
import re
import time
uplist=list(map(str,[39180492,382666849]))
conn = getconn()
# Crawl the uploader's basic profile info
time1 = 0
for uid in uplist:
per = Person(uid)
basic = per.getbasic()
print(time1,basic["name"] )
keys = ','.join(basic.keys())
values = list(basic.values())
try:
cursor = conn.cursor()
# sql="insert into upbasic({keys})values({a1},{a2},{a3},{a4},{a5},{a6},{a7},{a8},{a9},{a10})".format(keys=keys,a1=values[0],a2=values[1],a3=values[2],a4=5,a5=values[4],a6=values[5],a7=values[6],a8=values[7],a9=values[8],a10=values[9])
sql = "insert into upbasic(mid,name,sex,sign,level,birthday,coins,following,black,follower)values('%d','%s','%s','%s','%d','%s','%d','%d','%d','%d')" % (
values[0], values[1], values[2], values[3], values[4], values[5], values[6], values[7], values[8], values[9])
cursor.execute(sql)
conn.commit()
print("up基本信息爬取成功!!!!")
except Exception as e:
print(e)
# Crawl the uploader's video list
u = up(uid)
videos = list(set(u.getvideos()))
videos = ';'.join(videos)
try:
cursor = conn.cursor()
sql = "insert into bvlist(UID,BVlist)values('%s','%s')" % (uid, videos)
cursor.execute(sql)
conn.commit()
print('Uploader video list crawled successfully!')
except Exception as e:
print(e)
listvideo=list(set(u.getvideos()))
# Crawl basic info for each video
for av in listvideo:
av1=video(av)
bv = av1.putbv()
videos = video(bv)
# print(video.getbasic())
basic = videos.getbasic()
# print(basic.keys())
# print(basic.values())
# for i in basic.values():
# print(type(i))
videolist = u.moreinfo
last = videolist[int(av)]
typeid_union = list(last.values())  # get the video info fields other than those from the video object
values = list(basic.values())
print(values)
values.extend(typeid_union)
try:
cursor = conn.cursor()
sql = "insert into videobasic(aid,`view`,danmaku,reply,favorite,coin,`share`,now_rank,his_rank,`like`,dislike,no_reprint,copyright,videos,tid,tname,title,`desc`,typeid,created,length,is_union_video)" \
"values('%d','%d','%d','%d','%d','%d','%d','%d','%d','%d','%d','%d','%d','%d','%d','%s','%s','%s','%d','%d','%s','%d')" \
% (
values[0], values[1], values[2], values[3], values[4], values[5], values[6], values[7], values[8],
values[9],
values[10], values[11], values[12], values[13], values[14], values[15], values[16], values[17],
values[18],
values[19], values[20], values[21])
sql = sql.replace('\n', '')
cursor.execute(sql)
conn.commit()
print(bv + "视频基本信息爬取成功")
except Exception as e:
print(e)
# Crawl video comments
try:
reply=';'.join(videos.getreply(100))
res=re.compile("[^\u4e00-\u9fa5^a-z^A-Z^0-9^;]")
reply=res.sub('',reply)
except Exception as e:
print(e)
try:
cursor=conn.cursor()
sql="insert into comments(BV,comment)values('%s','%s')"%(bv,reply)
cursor.execute(sql)
conn.commit()
print(bv+"视频评论爬取成功!!!!")
except Exception as e:
print(e) |
n1 = 0
n2 = 1
i = 3
while i <= 20:
n3 = n1 + n2
n1 = n2
n2 = n3
i += 1
print(n3)
# Filename: exercise0710.py
# Author: Kaiching Chang
# Date: July, 2014
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import os
import re
LOG = logging.getLogger('tests.common.environ')
test_start_cluster_args = os.environ.get("TEST_START_CLUSTER_ARGS", "")
IMPALA_HOME = os.environ.get("IMPALA_HOME", "")
# Find the likely BuildType of the running Impala. Assume it's found through the path
# $IMPALA_HOME/be/build/latest as a fallback.
build_type_arg_regex = re.compile(r'--build_type=(\w+)', re.I)
build_type_arg_search_result = re.search(build_type_arg_regex, test_start_cluster_args)
if build_type_arg_search_result is not None:
build_type_dir = build_type_arg_search_result.groups()[0].lower()
else:
build_type_dir = 'latest'
# Resolve any symlinks in the path.
impalad_basedir = \
os.path.realpath(os.path.join(IMPALA_HOME, 'be/build', build_type_dir)).rstrip('/')
class SpecificImpaladBuildTypes:
"""
Represents the possible CMAKE_BUILD_TYPE values. These specific build types are needed
by Python test code, e.g. to set different timeouts for different builds. All values
are lower-cased to enable case-insensitive comparison.
"""
# ./buildall.sh -asan
ADDRESS_SANITIZER = 'address_sanitizer'
# ./buildall.sh
DEBUG = 'debug'
# ./buildall.sh -release
RELEASE = 'release'
# ./buildall.sh -codecoverage
CODE_COVERAGE_DEBUG = 'code_coverage_debug'
# ./buildall.sh -release -codecoverage
CODE_COVERAGE_RELEASE = 'code_coverage_release'
# ./buildall.sh -tsan
TSAN = 'tsan'
# ./buildall.sh -ubsan
UBSAN = 'ubsan'
VALID_BUILD_TYPES = [ADDRESS_SANITIZER, DEBUG, CODE_COVERAGE_DEBUG, RELEASE,
CODE_COVERAGE_RELEASE, TSAN, UBSAN]
@classmethod
def detect(cls, impala_build_root):
"""
Determine the build type based on the .cmake_build_type file created by
${IMPALA_HOME}/CMakeLists.txt. impala_build_root should be the path of the
Impala source checkout, i.e. ${IMPALA_HOME}.
"""
build_type_path = os.path.join(impala_build_root, ".cmake_build_type")
try:
with open(build_type_path) as build_type_file:
build_type = build_type_file.read().strip().lower()
except IOError:
LOG.error("Could not open %s assuming DEBUG", build_type_path)
return cls.DEBUG
if build_type not in cls.VALID_BUILD_TYPES:
raise Exception("Unknown build type {0}".format(build_type))
LOG.debug("Build type detected: %s", build_type)
return build_type
class ImpaladBuild(object):
"""
Acquires and provides characteristics about the way the Impala under test was compiled
and its likely effects on its responsiveness to automated test timings. Currently
assumes that the Impala daemon under test was built in our current source checkout.
TODO: we could get this information for remote cluster tests if we exposed the build
type via a metric or the Impalad web UI.
"""
def __init__(self, impala_build_root):
self._specific_build_type = SpecificImpaladBuildTypes.detect(impala_build_root)
@property
def specific_build_type(self):
"""
Return the correct SpecificImpaladBuildTypes for the Impala under test.
"""
return self._specific_build_type
def has_code_coverage(self):
"""
Return whether the Impala under test was compiled with code coverage enabled.
"""
return self.specific_build_type in (SpecificImpaladBuildTypes.CODE_COVERAGE_DEBUG,
SpecificImpaladBuildTypes.CODE_COVERAGE_RELEASE)
def is_asan(self):
"""
Return whether the Impala under test was compiled with ASAN.
"""
return self.specific_build_type == SpecificImpaladBuildTypes.ADDRESS_SANITIZER
def is_tsan(self):
"""
Return whether the Impala under test was compiled with TSAN.
"""
return self.specific_build_type == SpecificImpaladBuildTypes.TSAN
def is_ubsan(self):
"""
Return whether the Impala under test was compiled with UBSAN.
"""
return self.specific_build_type == SpecificImpaladBuildTypes.UBSAN
def is_dev(self):
"""
Return whether the Impala under test is a development build (i.e., any debug or ASAN
build).
"""
return self.specific_build_type in (
SpecificImpaladBuildTypes.ADDRESS_SANITIZER, SpecificImpaladBuildTypes.DEBUG,
SpecificImpaladBuildTypes.CODE_COVERAGE_DEBUG,
SpecificImpaladBuildTypes.TSAN, SpecificImpaladBuildTypes.UBSAN)
def runs_slowly(self):
"""
Return whether the Impala under test "runs slowly". For our purposes this means
either compiled with code coverage enabled or one of the sanitizers.
"""
return self.has_code_coverage() or self.is_asan() or self.is_tsan() or self.is_ubsan()
IMPALAD_BUILD = ImpaladBuild(IMPALA_HOME)
def specific_build_type_timeout(
default_timeout, slow_build_timeout=None, asan_build_timeout=None,
code_coverage_build_timeout=None):
"""
Return a test environment-specific timeout based on the sort of
SpecificImpaladBuildTypes value under test.
Required parameter: default_timeout - default timeout value. This applies when Impala is
a standard release or debug build, or if no other timeouts are specified.
Optional parameters:
slow_build_timeout - timeout to use if we're running against *any* build known to be
slow. If specified, this will preempt default_timeout if Impala is expected to be
"slow". You can use this as a shorthand in lieu of specifying all of the following
parameters.
The parameters below correspond to specific build types. These preempt both
slow_build_timeout and default_timeout, if the Impala under test is a build of the
applicable type:
asan_build_timeout - timeout to use if Impala with ASAN is running
code_coverage_build_timeout - timeout to use if Impala with code coverage is running
(both debug and release code coverage)
"""
if IMPALAD_BUILD.is_asan() and asan_build_timeout is not None:
timeout_val = asan_build_timeout
elif IMPALAD_BUILD.has_code_coverage() and code_coverage_build_timeout is not None:
timeout_val = code_coverage_build_timeout
elif IMPALAD_BUILD.runs_slowly() and slow_build_timeout is not None:
timeout_val = slow_build_timeout
else:
timeout_val = default_timeout
return timeout_val
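# Illustrative usage (a sketch, not part of the test framework; the literal
# timeout values below are hypothetical): a test can ask for a longer timeout
# when the Impala under test is an ASAN or code-coverage build.
#
#   query_timeout = specific_build_type_timeout(
#       60, slow_build_timeout=300, asan_build_timeout=600)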
|
import os
import time
import fnmatch
import datetime
#get directory list of files
#find only files that are with DY*EU
#find only files that are with DY*
#find only files that are with DY*CA
"""
Renames data drop files within the same directory so repeated drops do not collide:
appends a date-time stamp to the name of every file that starts with 'DY'.
Usage:
python rename.py
"""
#import my date function
#path = 'H:/Data Warehouse/KWI Refresh Data/'
path = 'H:/Data Warehouse/KWI Refresh Data/'
#S:\SHARED-DEPARTMENT-FOLDERS\SALES ADMINISTRATION\Retail Buying\KWI Uploads\MinMax\drop
#path = 'C:/Data/'
allFiles = os.listdir(path)
for filename in allFiles:
print "Filename is " + filename
filenameM = os.path.join(path, filename)
#if fnmatch.fnmatch(filenameM, path+'DY*.txt'):
if filename.startswith('DY'):
print "YYYYYYYYYY "+(filename)
st = datetime.datetime.now() #strftime('%Y-%m-%d%H:%M:%S')
formattedDate = str(st.strftime("%m%d%y"))+"-"+str(st.strftime("%H%M%S"))
splitFilename = filename.split('.')
newFilename = splitFilename[0]+formattedDate+str('.txt')
print "testing " + newFilename
#os.rename(filename, newFilename[0]+formattedDate+str('.txt'))
os.rename(os.path.join(path,filename), os.path.join(path, newFilename))
#print "PPPPP " + filename
#print "MMMMM " + newFilename[0]
#print "MMMMM " + newFilename
#newFilename =
# os.rename(filename, newFilename.append("bbbb").upper())
# >>> d.strftime("%d/%m/%y")
#'11/03/02'
#>>> d.strftime("%A %d. %B %Y") |
class Beverage(object):
"""
Beverage Class, parent class to all other beverage classes.
Attributes:
name: beverage name.
price: beverage price.
time: time it takes to make beverage.
"""
def __init__(self, name, price, time):
self._name = name
self._price = price
self._time = time
@property
def name(self):
## Getter of the beverage name
return self._name
@property
def price(self):
## Getter of the beverage price
return self._price
@property
def time(self):
## Getter of the beverage time takes to make
return self._time
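# Minimal usage sketch (not part of the original module; the values are made up):
# the subclasses below pass fixed name/price/time to Beverage.__init__ via super(),
# and callers read them back through the read-only properties.
#
#   drink = Beverage("espresso", 2500, 8)
#   print(drink.name, drink.price, drink.time)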
class Americano(Beverage):
"""
Americano Class, parent class is a Beverage class.
description is a class variable that represents beverage info.
Using super method, set name, price and time of the beverage.
"""
description = "아메리카노의 가격은 3000원, 소요시간은 10초 입니다."
def __init__(self):
super(Americano, self).__init__("아메리카노", 3000, 10)
class CafeLatte(Beverage):
"""
CafeLatte Class, parent class is a Beverage class.
description is a class variable that represents beverage info.
Using super method, set name, price and time of the beverage.
"""
description = "카페라떼의 가격은 3500원, 소요시간은 12초 입니다."
def __init__(self):
super(CafeLatte, self).__init__("카페라떼", 3500, 12)
class Cappuccino(Beverage):
"""
Cappuccino Class, parent class is a Beverage class.
description is a class variable that represents beverage info.
Using super method, set name, price and time of the beverage.
"""
description = "카푸치노의 가격은 4000원, 소요시간은 13초 입니다."
def __init__(self):
super(Cappuccino, self).__init__("카푸치노", 4000, 13)
class VanillaLatte(Beverage):
"""
VanillaLatte Class, parent class is a Beverage class.
description is a class variable that represents beverage info.
Using super method, set name, price and time of the beverage.
"""
description = "바닐라라떼의 가격은 4000원, 소요시간은 13초 입니다."
def __init__(self):
super(VanillaLatte, self).__init__("바닐라라떼", 4000, 13)
class CafeMocha(Beverage):
"""
CafeMocha Class, parent class is a Beverage class.
description is a class variable that represents beverage info.
Using super method, set name, price and time of the beverage.
"""
description = "카페모카의 가격은 4500원, 소요시간은 15초 입니다."
def __init__(self):
super(CafeMocha, self).__init__("카페모카", 4500, 15)
class CaramelMaki(Beverage):
"""
CaramelMaki Class, parent class is a Beverage class.
description is a class variable that represents beverage info.
Using super method, set name, price and time of the beverage.
"""
description = "카라멜 마끼아또의 가격은 4500원, 소요시간은 15초 입니다."
def __init__(self):
super(CaramelMaki, self).__init__("카라멜마끼아또", 4500, 15)
class HotChocolate(Beverage):
"""
HotChocolate Class, parent class is a Beverage class.
description is a class variable that represents beverage info.
Using super method, set name, price and time of the beverage.
"""
description = "핫초코의 가격은 4000원, 소요시간은 10초 입니다."
def __init__(self):
super(HotChocolate, self).__init__("핫초코", 4000, 10)
class MintChocolate(Beverage):
"""
MintChocolate Class, parent class is a Beverage class.
description is a class variable that represents beverage info.
Using super method, set name, price and time of the beverage.
"""
description = "민트초코의 가격은 4500원, 소요시간은 12초 입니다."
def __init__(self):
super(MintChocolate, self).__init__("민트초코", 4500, 12) |
def isLeapYear(year):
if year%4==0 and (year%100!=0 or year%400==0):
return True
return False
'''
In this kata you should simply determine, whether a given year is a leap year or not.
In case you don't know the rules, here they are:
years divisible by 4 are leap years
but years divisible by 100 are no leap years
but years divisible by 400 are leap years
Additional Notes:
Only valid years (positive integers) will be tested, so you don't have to validate them
Examples can be found in the test fixture.
'''
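# A few quick checks of the rules described above (added for illustration):
assert isLeapYear(2012)          # divisible by 4
assert not isLeapYear(1900)      # divisible by 100 but not by 400
assert isLeapYear(2000)          # divisible by 400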
|
###############################
#
# Name: PyDrive
# Author: Jessie Ray
# Purpose: Provide Google Drive
# cross platform
#
###############################
def main():
print("test")
main() |
# Generated by Django 2.2.3 on 2020-09-12 08:38
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('email', models.EmailField(max_length=60, unique=True, verbose_name='email')),
('username', models.EmailField(max_length=30, unique=True)),
('date_joined', models.DateTimeField(auto_now_add=True, verbose_name='date_joined')),
('last_login', models.DateTimeField(auto_now_add=True, verbose_name='last_login')),
('is_admin', models.BooleanField(default=False)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('is_superuser', models.BooleanField(default=False)),
('first_name', models.CharField(max_length=60)),
('last_name', models.CharField(max_length=60)),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
},
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('price', models.DecimalField(decimal_places=2, default=0, max_digits=10)),
('Ticket', models.ImageField(upload_to='uploads/')),
],
),
migrations.CreateModel(
name='Review',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sender_name', models.CharField(blank=True, max_length=50, null=True)),
('comment', models.CharField(max_length=255)),
('rating', models.CharField(choices=[('0', 'None'), ('1', 'Very Poor'), ('2', 'Bad'), ('3', 'Average'), ('4', 'Good'), ('5', 'Excellent')], default='0', max_length=20)),
('timestamp', models.DateTimeField(default=django.utils.timezone.now)),
('calculated_review', models.BooleanField(default=False)),
('product_item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='product', to='reviewapp.Product')),
('sender', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='reviewsender', to=settings.AUTH_USER_MODEL)),
],
),
]
|
import numpy as np
import pylab
n = int(raw_input("Enter the number of nodes in the beam: "))
noelem = n - 1
K = np.zeros([n, n])
F = np.zeros([n, 1])
EI = float(raw_input("Enter the bending stiffness of the beam(Ncm^2): "))
L = float(raw_input("Enter the length of the beam(cm): "))
M = float(raw_input("Enter the internal bending moment(Ncm): "))
lenelem = L/noelem
elestif = EI/lenelem
elestif1 = -elestif
elefor = -(M * lenelem) / 2
for element in range(n - 1):
K[element, element] += elestif
K[element + 1, element + 1] += elestif
K[element, element + 1] += elestif1
K[element + 1, element] += elestif1
F[element, 0] += elefor
F[element + 1, 0] += elefor
nodalsol = np.zeros([n, 1])
nodalsol[0] = float(raw_input("Enter the displacement value at node1: "))
nodalsol[noelem] = float(raw_input("Enter the displacement value at node" + str(n) + ": "))
numiter = 10
soleqn = range(1, noelem)
for iteration in range(numiter):
for row in soleqn:
const = F[row]
for col in range(n):
if row != col:
const = const - (K[row, col] * nodalsol[col])
nodalsol[row] = const/K[row, row]
xlen = [0]
for length in range(1, n):
xlen.append(length * lenelem)
ylen = []
for values in range(n):
ylen.append(nodalsol[values, 0])
pylab.plot(xlen, ylen,)
pylab.title('Simply Supported Beam Subjected to Concentrated End Moments')
pylab.xlabel('Length of Beam (cm)')
pylab.ylabel('Beam Deflection (cm)')
pylab.grid(True)
pylab.show()
|
from django.apps import AppConfig
class PerpostConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'perpost'
|
import os
class PDMLConverter:
def convertPCAP(self, pcap):
self.pcap = pcap
if pcap is None:
print("Empty File")
else:
pdml = self.pcap.split('.')
cmd = "tshark -T pdml -r " + str(self.pcap) + " > " + pdml[0] + ".pdml"
os.system(cmd)
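# Illustrative usage (a sketch; "capture.pcap" is a hypothetical file and tshark
# must be available on the PATH for the conversion to work):
#
#   PDMLConverter().convertPCAP("capture.pcap")   # writes capture.pdml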
|
# -*- coding: utf-8 -*-
class Interval:
def __init__(self, s=0, e=0):
self.start = s
self.end = e
def __eq__(self, other):
return self.start == other.start and self.end == other.end
class Solution:
def merge(self, intervals):
intervals.sort(key=lambda interval: interval.start)
result = []
i = 0
while i < len(intervals):
newInterval = Interval(intervals[i].start, intervals[i].end)
i += 1
while i < len(intervals) and intervals[i].start <= newInterval.end:
newInterval = Interval(
min(newInterval.start, intervals[i].start),
max(newInterval.end, intervals[i].end),
)
i += 1
result.append(newInterval)
return result
if __name__ == "__main__":
solution = Solution()
assert [Interval(1, 6), Interval(8, 10), Interval(15, 18),] == solution.merge(
[
Interval(1, 3),
Interval(2, 6),
Interval(8, 10),
Interval(15, 18),
]
)
assert [Interval(1, 5),] == solution.merge(
[
Interval(1, 4),
Interval(4, 5),
]
)
assert [Interval(0, 0), Interval(1, 4),] == solution.merge(
[
Interval(1, 4),
Interval(0, 0),
]
)
|
from django.shortcuts import render,get_list_or_404,get_object_or_404
from django.contrib.auth import get_user_model
from notification.models import Notification
from django.utils import timezone
from rest_framework import generics,status,viewsets
from rest_framework.views import APIView
from rest_framework.response import Response
from datetime import datetime
from rest_framework_jwt.settings import api_settings
from drf_yasg.utils import swagger_auto_schema
from api.serializer import notification
from rest_framework.permissions import AllowAny
from drf_yasg.openapi import Schema, TYPE_OBJECT, TYPE_STRING, TYPE_ARRAY
from rest_framework.decorators import action
# Create your views here.
class NotificationViewSet(viewsets.ViewSet):
"""
Get Resident Model.
"""
@swagger_auto_schema(responses={200: notification.NotificationSerializer()})
def list(self, request):
queryset = Notification.objects.all().order_by('-created_at')
r = get_list_or_404(queryset, user_id=request.user.id)
serializer = notification.NotificationSerializer(r,many=True)
return Response(serializer.data)
@action(detail=False, methods=['get'])
def count(self, request):
queryset = Notification.objects.filter(is_read=False)
r = get_list_or_404(queryset, user_id=request.user.id)
return Response({'count':len(r)},
status=status.HTTP_200_OK)
def update(self, request, pk=None):
response_data = {'status':'success'}
newPk = pk[:2]
if(newPk == 'id'):
Notification.objects.filter(object_id=pk[2:]).update(is_read = 1)
return Response(response_data, status=status.HTTP_200_OK)
else:
queryset = Notification.objects.all()
track = get_object_or_404(queryset, pk=pk)
serializer = notification.NotificationSerializer(track, data=request.data,context = {'request': self.request},partial=True)
if serializer.is_valid():
serializer.save()
return Response(response_data, status=status.HTTP_200_OK)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@action(detail=False, methods=['delete'])
def destroyAll(self, request):
queryset = Notification.objects.all()
r = get_list_or_404(queryset, user_id=request.user.id)
for a in r:
a.delete()
response_data = {'status':'success'}
return Response(response_data, status=status.HTTP_200_OK)
def destroy(self, request, pk=None):
queryset = Notification.objects.all()
notification = get_object_or_404(queryset, pk=pk)
notification.delete()
response_data = {'status':'success'}
return Response(response_data, status=status.HTTP_200_OK) |
from rlpy.Tools.run import run
run("examples/gridworld/posterior_sampling.py","./Results/Tests/gridworld/PSRL",ids=range(5), parallelization ="joblib")
run("examples/gridworld/lspi.py","./Results/Tests/gridworld/LSPI",ids=range(5), parallelization ="joblib")
run("examples/gridworld/sarsa.py","./Results/Tests/gridworld/SARSA",ids=range(5), parallelization ="joblib")
run("examples/gridworld/ucrl.py","./Results/Tests/gridworld/UCRL",ids=range(5), parallelization ="joblib")
|
from flask import Flask, request, url_for, render_template, flash, redirect
from flask_wtf import FlaskForm, CSRFProtect
from wtforms import StringField, TextField, SubmitField, IntegerField, RadioField
from wtforms.validators import DataRequired, Length, NumberRange, ValidationError
import os
import pickle
class InferenceForm(FlaskForm):
"""Inference form"""
type_bien = RadioField(
'Type du bien', validators=[DataRequired(message=("Choisissez un champ"))],
choices=[('appartement','appartement'),('maison','maison')])
surface = IntegerField(
'Surface habitable en m2', validators=[DataRequired(message="Entrez un nombre"),
NumberRange(min=9, max=500, message="Doit être compris entre 9 et 500")])
nb_pieces = IntegerField(
'Nombre de pièces principales', validators=[DataRequired(message="Entrez un nombre"),
NumberRange(min=1, max=50, message="Doit être compris entre 1 et 50")])
code_postal = IntegerField(
"Code postal", validators=[DataRequired(message="Entrez un nombre"),
NumberRange(min=1000, max=98000, message="Doit être compris entre 1000 et 98000")])
submit = SubmitField('Prédire')
# Inference function; parameters have already passed the form validators before reaching this function.
# The user can still input a nonexistent postal code (verification could be implemented from a flat file or db).
def make_inference(type_bien: str, surface: int, nb_pieces: int, code_postal: int):
with open(f"ml_models/tree_{type_bien}.pkl", "rb") as file:
tree = pickle.load(file)
prediction = tree.predict([[surface, nb_pieces, code_postal]])
return prediction[0]
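# Illustrative call (a sketch; it assumes the matching pickled model exists under
# ml_models/ and that the feature order is surface, nb_pieces, code_postal as above;
# the argument values are hypothetical):
#
#   estimated_price = make_inference("appartement", surface=45, nb_pieces=2,
#                                    code_postal=75011)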
csrf = CSRFProtect()
app = Flask(__name__)
try:
app.config.from_pyfile('config_dev.py')
except:
app.config['SECRET_KEY'] = os.environ["FLASK_KEY"]
csrf.init_app(app)
@app.route('/', methods=['GET'])
@app.route('/home', methods=['GET'])
def home():
return render_template('home.html')
@app.route('/about', methods=['GET'])
def about():
return render_template('about.html')
@app.route('/immobilier/presentation', methods=['GET'])
def immo_presentation():
return render_template('immobilier/presentation.html')
@app.route('/immobilier/estimation', methods=['GET', 'POST'])
def immo_estimation():
form = InferenceForm()
if form.validate_on_submit():
type_bien = form.type_bien.data
surface = form.surface.data
nb_pieces = form.nb_pieces.data
code_postal = form.code_postal.data
result = round(make_inference(type_bien, surface, nb_pieces, code_postal))
return render_template('immobilier/predictions/resultat_estimation.html', result=result)
else:
print(form.errors.items())
return render_template('immobilier/predictions/estimation.html', form=form)
#page that contain map
@app.route('/immobilier/map_departement', methods=['GET'])
def immo_map_departement():
map_name = "map_departement_folium"
return render_template('immobilier/maps/map_departement.html', map_name=map_name)
#only the folium map, it will be rendered inside map_departement
@app.route('/immobilier/show_map', methods=['GET'])
def immo_show_map():
map_name = request.args.get("map_name")
return render_template(f'immobilier/maps/{map_name}.html')
# @app.route('/show_map/<map_name>', methods=['GET'])
# def show_map(map_name):
# return render_template(f'immobilier/maps/{map_name}.html')
# DVF_2019_raport_initial
@app.route('/immobilier/data_exploration', methods=['GET'])
def immo_data_exploration():
data = {"notebook1_name": "immobilier_EDA_1",
"notebook1_height" : 2680,
"report1_name": "DVF_2019_raport_initial",
"report1_height" : 17560}
return render_template('immobilier/data_exploration/data_exploration.html', data=data)
@app.route('/immobilier/show_notebook/<notebook>')
def immo_show_notebook(notebook):
return render_template(f'immobilier/notebooks/{notebook}.html')
@app.route('/immobilier/pandas_profiling')
def immo_pandas_profiling():
report_name = request.args.get("report")
report_height = request.args.get("height")
report = {"name": report_name,
"height": report_height}
return render_template(f"immobilier/data_exploration/pandas_profiling.html", report=report)
@app.route('/immobilier/show_pandas_profiling')
def immo_show_pandas_profiling():
report_name = request.args.get("report_name")
return render_template(f'immobilier/data_exploration/{report_name}.html')
@app.route('/climat/presentation')
def cl_presentation():
return render_template('climat/presentation.html')
@app.route('/climat/specifications_donnees')
def cl_specifications_donnees():
return render_template("climat/specifications_donnees.html")
@app.route('/climat/traitement_donnees')
def cl_traitement_donnees():
data = {"notebook1_name": "traitement_donnees",
"notebook1_height" : 13290}
return render_template("climat/traitement_donnees.html", data=data)
@app.route('/climat/clusterisation')
def cl_clusterisation():
data = {"notebook1_name": "clusterisation",
"notebook1_height" : 10640}
return render_template("climat/clusterisation.html", data=data)
@app.route('/climat/modelisation_pbi')
def cl_modelisation_pbi():
return render_template("climat/modelisation_pbi.html")
@app.route('/climat/rapport_power_bi')
def cl_rapport_power_bi():
return render_template("climat/rapport_power_bi.html")
@app.route('/climat/show_notebook/<notebook>')
def cl_show_notebook(notebook):
return render_template(f'climat/notebooks/{notebook}.html')
@app.route('/trading/presentation')
def tr_presentation():
return render_template('trading/presentation.html')
@app.route('/trading/rapport')
def tr_rapport():
return render_template('trading/rapport.html')
|
from yolact import Yolact
from utils.augmentations import FastBaseTransform
from layers.output_utils import postprocess
import pycocotools.mask  # the mask submodule is needed for RLE encoding below
from data import cfg, set_cfg
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import time
import json
import os
import cv2
class YolactInterface(object):
def __init__(self, model_pth, output_num=5):
self.output_num = output_num
with torch.no_grad():
set_cfg("yolact_base_config")
torch.cuda.set_device(0)
cudnn.benchmark = True
cudnn.fastest = True
torch.set_default_tensor_type('torch.cuda.FloatTensor')
self.net = Yolact()
self.net.load_weights(model_pth)
self.net.eval()
self.net = self.net.cuda()
print("load model complete")
def run_once(self, src):
self.net.detect.cross_class_nms = True
self.net.detect.use_fast_nms = True
cfg.mask_proto_debug = False
with torch.no_grad():
frame = torch.Tensor(src).cuda().float()
batch = FastBaseTransform()(frame.unsqueeze(0))
time_start = time.perf_counter()  # time.clock() was removed in Python 3.8
preds = self.net(batch)
time_elapsed = (time.perf_counter() - time_start)
h, w, _ = src.shape
t = postprocess(preds, w, h, visualize_lincomb=False, crop_masks=True,
score_threshold=0.) # TODO: give a suitable threshold
torch.cuda.synchronize()
classes, scores, bboxes, masks = [x[:self.output_num].cpu().numpy() for x in
t] # TODO: Only 5 objects for test
print(time_elapsed)
instances = self.build_up_result(masks.shape[0], classes, bboxes, masks, scores)
return {"instances": instances}
def build_up_result(self, num, classes, bboxes, masks, scores):
instances = []
for i in range(num):
bbox = [bboxes[i, 0], bboxes[i, 1], bboxes[i, 2] - bboxes[i, 0], bboxes[i, 3] - bboxes[i, 1]]
# Round to the nearest 10th to avoid huge file sizes, as COCO suggests
bbox = [round(float(x) * 10) / 10 for x in bbox]
# encode segmentation with RLE
rle = pycocotools.mask.encode(
np.asfortranarray(masks[i, :, :].astype(np.uint8))) # rle binary encoding
rle['counts'] = rle['counts'].decode('ascii') # json.dump doesn't like bytes strings
# create one instance json
instances.append({
'category_id': int(classes[i]), # TODO: origin: get_coco_cat(int(category_id))
'bbox': {"b": bbox},
"segmentation": rle,
'score': float(scores[i])
})
return instances
|
def is_isogram(string):
return is_isogram_help(string.lower())
def is_isogram_help(string):
if len(string) <= 1:
return True
elif not (string[0].isalpha()):
return is_isogram_help(string[1:])
else:
print(string[0], string[1:])
return (not (string[0] in string[1:])) and is_isogram_help(string[1:])
|
from __future__ import absolute_import
from collections import deque
class Instruction(object):
def __call__(self, vm, match):
pass
class Fetch(Instruction):
def __init__(self, pos):
self._pos = pos
def __call__(self, vm, match):
vm.push(match.group(self._pos))
def __repr__(self):
return "<fetch:{d}>".format(d=repr(self._pos))
class Push(Instruction):
def __init__(self, value):
self._value = value
def __call__(self, vm, match):
vm.push(self._value)
def __repr__(self):
return "<push:{d}>".format(d=repr(self._value))
class CallFunc(Instruction):
def __init__(self, func):
self._func = func
def __call__(self, vm, match):
self._func(vm, match)
def __repr__(self):
return "<call:{d}>".format(d=repr(self._func))
class NateVm(deque, object):
def __init__(self):
deque.__init__(self)
def run(self, match, code):
self.clear()
for instruction in code:
instruction(self, match)
def push(self, *atoms):
for atom in atoms:
self.appendleft(atom)
def get(self):
self.reverse()
return list(self)
|
even_numbers = [2, 4, 6, 8, 10]
heros = ['Ironman','Thor','Hulk','Spiderman']
info = ['Batman', 4500, 6375.60]
numbers = list(range(5))
numlist = [4] * 5
dlist = [1, 2, 3] * 3
print(even_numbers)
print(heros)
print(info)
print(numbers)
print(numlist)
print(dlist) |
import math
import os
import random
import re
import sys
def solve(string):
letters = list(string)
result = ""
for index in range(len(letters)):
if index == 0:
letters[index] = letters[index].capitalize()
if letters[index] == " ":
letters[index+1]=letters[index+1].capitalize()
result = result+letters[index]
return result
string = input()
print(solve(string))
|
name = 'mini'
age = 17 # not a lie
height = 64 # inches
weight = 120 # lbs
eyes = 'Black'
teeth = 'White'
hair = 'Black'
print "Let's talk about %r." % name
print "She's %r inches tall." % height
print "She's %f pounds heavy." % weight
print "Actually that's heavy."
print "She's got %s eyes and %s hair." % (eyes, hair)
print "Her teeth are usually %s depending on the coffee." % teeth
# this line is tricky, try to get it exactly right
print "If I add %d, %d, and %d I get %d." % (
age, height, weight, age + height + weight) |
"""
Prepares musdb18 stem files: extracts the audio tracks with ffmpeg and mixes them into volume-normalized instrumental and vocal wav files
"""
import sys
from os import listdir, rename, makedirs, remove
from os.path import join, isfile, dirname, exists
import shutil
from pydub import AudioSegment
import subprocess
__author__ = "kaufmann-a@hotmail.ch"
temp_path = "./temp"
def copy_files(sourcedir, outputdir, suffix, maxCopy, override):
src_files= listdir(sourcedir)
for file in src_files:
if maxCopy == 0: break
new_songname = file[:-suffix.__len__()]
old_file = join(sourcedir, file)
new_folder = join(outputdir, new_songname)
new_songname_instr = 'instrumental_' + new_songname + '.wav'
new_songname_vocals = 'vocals_' + new_songname + '.wav'
new_songfile_instr = join(new_folder, new_songname_instr)
new_songfile_vocals = join(new_folder, new_songname_vocals)
if not exists(new_folder): makedirs(new_folder)
if exists(new_songfile_instr) and override: remove(new_songfile_instr)
if exists(new_songfile_vocals) and override: remove(new_songfile_vocals)
if (not exists(new_songfile_vocals) and not exists(new_songfile_instr)) or override:
# Extract wav files from mp4 and put to temp-path
try:
cmd = "ffmpeg -i \"" + old_file + "\" -map 0:1 -ac 2 ./temp/line1.wav -map 0:2 -ac 2 ./temp/line2.wav -map 0:3 -ac 2 ./temp/line3.wav -map 0:4 -ac 2 ./temp/line4.wav"
subprocess.check_call(cmd, shell=True) # cwd = cwd
except Exception as stats:
print(type(stats))
# Create a fullmix of all audio tracks
try:
cmd = "ffmpeg" + ' -i "' + join(temp_path, "line1.wav") + '" -i "' + join(temp_path, "line2.wav") + '" -i "' + join(temp_path, "line3.wav") + '" -i "' \
+ join(temp_path, "line4.wav") + "\" -filter_complex \"[0:0][1:0][2:0][3:0]amix=inputs=4[mix]\" -map [mix] -ac 2 \"" \
+ join(temp_path, "fullmix_" + new_songname + ".wav") + "\""
subprocess.check_call(cmd, shell=True)
except Exception as stats:
print(type(stats))
# Calculate the average volume of the fullmix and subtract it from the reference normalization volume
try:
avg_db = AudioSegment.from_file(join(temp_path, "fullmix_" + new_songname + ".wav")).dBFS
differenz_db = -20 - avg_db
except Exception as inst:
print(str(type(inst)) + ": Audiosegment couldn't read audiofile " + new_songname + "\n")
differenz_db = -20
# Finally mix vocals and instrumentals separately and apply the volume difference to the tracks
try:
cmd = "ffmpeg" + ' -i "' + join(temp_path, "line1.wav") + '" -i "' + join(temp_path, "line2.wav") + '" -i "' + join(temp_path, "line3.wav") + '" -i "' + join(temp_path, "line4.wav") + \
"\" -filter_complex \"[0:0]volume=0.75[bass];[1:0]volume=0.75[drums];[2:0]volume=0.75[restinst];[3:0]volume=0.25[vocals];" \
"[bass][drums][restinst]amix=inputs=3,volume=" + str(differenz_db) + "dB[instr];[vocals]volume=" \
+ str(differenz_db) + "dB[vocs]\" -map [instr] -ac 2 \"" + new_songfile_instr + "\" -map [vocs] -ac 2 \"" + new_songfile_vocals + "\""
subprocess.check_call(cmd, shell=True)
# For testing purposes
cmd_test = "ffmpeg -i \"" + join(temp_path,
"fullmix_" + new_songname + ".wav") + "\" -filter_complex \"[0:0]volume=" + str(
differenz_db) + "dB\" -y -ar 44100 \"" + join(temp_path,
"fullmix_norm_" + new_songname + ".wav") + "\""
subprocess.call(cmd_test, shell=True)
except Exception as inst:
print(type(inst))
try:
files = listdir(temp_path)
for f in files:
remove(join(temp_path, f))
except:
print("tempfolder could not be emptied")
maxCopy -= 1
if __name__ == '__main__':
#Call script with scriptname maxfiles override
#Example call: musdb18_fileprocessing.py 20 True
#This will convert the first twenty files in the source dir and override already existing files in the outputdir
maxCopy = -1
override = True
unmix_server = '//192.168.1.29/unmix-server'
print('Argument List:', str(sys.argv))
if sys.argv.__len__() == 2:
unmix_server = sys.argv[1]
# Setup paths
sources = unmix_server + "/1_sources/musdb18/songs"
destination = unmix_server + "/2_prepared/musdb18"
if not exists(temp_path): makedirs(temp_path)
copy_files(sources, destination, '.stem.wav', maxCopy, override)
print('Finished converting')
|
"""
Sorting people into different files according to the clustering
Author : Diviyan Kalainathan
Date : 28/06/2016
"""
from lib_lopez_paz import experiment_challenge as lp
from multiprocessing import Process
import os,sys
inputdata = 'obj8'
lopez_paz = True
max_proc=int(sys.argv[1])
# Creating parameters
cluster_n = 1
jobs = []
while os.path.exists('output/' + inputdata + '/split_data/cluster_' + str(cluster_n)) :
inputfilespath = []
outputfilespath = []
print('Cluster '+str(cluster_n))
part_number = 0
while os.path.exists('output/' + inputdata + '/split_data/cluster_' + str(cluster_n) + '/pairs_c_' + str(
cluster_n) + '_p' + str(part_number) + '.csv'):
inputfilespath.append('output/' + inputdata + '/split_data/cluster_' + str(cluster_n) + '/pairs_c_' + str(
cluster_n) + '_p' + str(part_number) + '.csv')
outputfilespath.append('output/' + inputdata + '/split_data/cluster_' + str(cluster_n) + '/results_lp_c_' + str(
cluster_n) + '_p' + str(part_number) + '.csv')
part_number += 1
# Creating process
lp.predict(inputfilespath,outputfilespath,max_proc)
cluster_n += 1
print('End of program.') |
def message(dct):
name = dct['name']
role = dct['role']
movie = dct['movie']
print(f'In {movie}, {name} is a {role}')
message(
{
"name": "Han Solo",
"role": "smuggler",
"movie": "Star Wars"
}
)
|
"""Routine to plot the neutrino power spectrum, as output by this code."""
import math
import numpy as np
import scipy.interpolate
def load_genpk(path,box):
"""Load a GenPk format power spectum, plotting the DM and the neutrinos (if present)
Does not plot baryons."""
#Load DM P(k)
matpow=np.loadtxt(path)
scale=2*math.pi/box
#Adjust Fourier convention to match CAMB.
simk=matpow[1:,0]*scale
Pk=matpow[1:,1]/scale**3*(2*math.pi)**3
return (simk,Pk)
def get_nu_power(filename):
"""Reads the neutrino power spectrum.
Format is: ( k, P_nu(k) ).
Units are: 1/L, L^3, where L is
Gadget internal length units for
Gadget-2 and Mpc/h for MP-Gadget."""
data = np.loadtxt(filename)
k = data[:,0]
#Convert fourier convention to CAMB.
pnu = data[:,1]
return (k, pnu)
def get_camb_nu_power(matpow, transfer):
"""Plot the neutrino power spectrum from CAMB.
This is just the matter power multiplied
by the neutrino transfer function.
CAMB internal units are used.
Assume they have the same k binning."""
matter = np.loadtxt(matpow)
trans = np.loadtxt(transfer)
#Adjust Fourier convention to match CAMB.
tnufac = (trans[:,5]/trans[:,6])**2
return matter[:,0], matter[:,1]*tnufac
def get_hyb_nu_power(nu_filename, genpk_neutrino, box, npart, part_prop):
"""Get the total matter power spectrum when some of it is in particles, some analytic."""
(k_part,pk_part)=load_genpk(genpk_neutrino,box)
(k_sl, pk_sl) = get_nu_power(nu_filename)
intp=scipy.interpolate.InterpolatedUnivariateSpline(np.log(k_part),pk_part)
pk_part_r = intp(np.log(k_sl))
shot=(box/npart)**3/(2*math.pi**2)*np.ones(np.size(pk_part_r))
pk = (part_prop*np.sqrt(pk_part_r-shot)+(1-part_prop)*np.sqrt(pk_sl))**2
return (k_sl, pk)
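# Illustrative usage (a sketch; the file names, box size and particle fraction
# below are hypothetical):
#
#   k_nu, p_nu = get_nu_power("powerspec_nu_005.txt")
#   k_hyb, p_hyb = get_hyb_nu_power("powerspec_nu_005.txt", "PK-nu-snap_005",
#                                   box=512, npart=512, part_prop=0.5)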
|
""" Assignment 2: Trees for Treemap
=== CSC148 Fall 2016 ===
##################################
Department of Computer Science,
University of Toronto
=== Module Description ===
This module contains the basic tree interface required by the treemap
visualiser. You will both add to the abstract class, and complete a
concrete implementation of a subclass to represent files and folders on
your computer's file system.
"""
import os
from random import randint
import math
class AbstractTree:
""" A tree class that stores the value and its size
This is an abstract class
=== Private Attributes ===
@type _root: obj
The value of the tree
@type _subtrees: list[AbstractTree]
A list of subtrees
@type _parent_tree: AbstractTree | None
The parent tree of this tree
=== Public Attributes ===
@type colour: (int, int, int)
Colour of this tree, in RGB form
@type data_size: int
The size of this tree
=== Static Attributes ===
@type _stored_treedata: list[((int, int, int, int), (int, int, int), AbstractTree)]
Cached tree_data, with the first being the rectangle of the tree, second being
the tree's color, and third being the tree object itself
=== Representation Invariants ===
○ An empty tree is when _root is None, data_size is 0, and _parent_tree is None
○ Every integer in color must be between [0, 255]
○ _subtrees must not have any None values; though it can have empty trees
"""
_stored_treedata = None
def __init__(self, root, subtrees, data_size=0):
""" Constructs the AbstractTree
Will compute the total data_size if it is NOT a leaf
Pre-conditions:
○ An empty tree is when root is None, and subtrees is []
○ A leaf is when root is NOT none, subtrees is [], and data_size is specified
○ When subtrees is NOT empty, leave data_size to 0
○ subtrees must not have any None values
@type self: AbstractTree
The current AbstractTree object
@type root: obj
The name of the tree
@type subtrees: list[AbstractTree]
A list of abstract trees
@type data_size: int
The size of the leaf
@rtype: None
"""
self._root = root
self._subtrees = subtrees
self._parent_tree = None
self.colour = (randint(0, 255), randint(0, 255), randint(0, 255))
# If subtrees is empty
if len(self._subtrees) == 0:
self.data_size = data_size
# Otherwise, the tree's data_size is computed from the data_size
# of the subtrees
else:
# Set the parent of its subtrees
for subtree in subtrees:
subtree._parent_tree = self
# Compute the datasize
self.data_size = 0
for subtree in self._subtrees:
# data_size is equal to the sum of the data_sizes of its
# sub_trees
self.data_size += subtree.data_size
def is_empty(self):
""" Checks if the current leaf is empty
@type self: AbstractTree
The current tree
@rtype: True | False
Will return true if it is empty; else false
"""
return self._root is None
def increase_size(self):
""" Increase the leaf's data_size by 1%
Will readjust the data_size of its ancestors
Pre-conditions: self must be a leaf
@type self: AbstractTree
@rtype: None
"""
AbstractTree._stored_treedata = None
amount_to_increase = math.ceil(self.data_size * 0.01)
self._change_datasize(amount_to_increase)
def decrease_size(self):
""" Decreases the leaf's data_size by 1%.
<data_size> will not go any lower than 1
Will readjust the data_size of its ancestors
Pre-conditions: self must be a leaf
@type self: AbstractTree
The current tree
@rtype: None
"""
AbstractTree._stored_treedata = None
amount_to_decrease = math.ceil(self.data_size * 0.01)
if self.data_size - amount_to_decrease < 1:
amount_to_decrease = self.data_size - 1
self._change_datasize(amount_to_decrease * -1)
def _change_datasize(self, amount):
""" Changes the current tree's datasize AND its ancestors by
<amount>
If <amount> <= 0 then it will subtract the tree and the
ancestor's data_size. If <amount> > 0 then it will add
the tree and the ancestor's data_size
@type self: AbstractTree
The current AbstractTree object
@type amount: int
The amount to add/subtract to data_size
@rtype: None
"""
self.data_size += amount
if self._parent_tree:
self._parent_tree._change_datasize(amount)
def _get_non_empty_trees(self):
""" Returns a list of trees whos data_size > 0
Post-condition: Will not mutate self._subtrees
@type self: AbstractTree
The current AbstractTree object
@rtype: list[AbstractTree]
Trees with data_size > 0
"""
valid_trees = []
for tree in self._subtrees:
if tree.data_size > 0:
valid_trees.append(tree)
return valid_trees
def _create_vertical_treemap(self, rect):
""" A helper function for _create_treemap(). It
creates rectangles for each subtree vertically.
Pre-condition: <rect> is a PyGame rectangle
@type self: AbstractTree
The current AbstractTree object
@type rect: (int, int, int, int)
The leftX, topY, width, and height of rectangle
@rtype: list[((int, int, int, int), (int, int, int), AbstractTree)]
Returns the rectangles, colors, and the
associated AbstractTree object
"""
x, y, width, height = rect
rectangles = []
left_x = x
non_empty_subtrees = self._get_non_empty_trees()
# Get the (n - 1) subtrees to have rectangles based on proportions
for i in range(len(non_empty_subtrees) - 1):
subtree = non_empty_subtrees[i]
proportions = subtree.data_size / self.data_size
sub_width = math.floor(proportions * width)
sub_rect = (left_x, y, sub_width, height)
rectangles += subtree._create_treemap(sub_rect)
left_x += sub_width
# Allow the last subtree to have the remaining area of the rect
if len(non_empty_subtrees) > 0:
last_subtree = non_empty_subtrees[len(non_empty_subtrees) - 1]
last_rect = (left_x, y, width - (left_x - x), height)
rectangles += last_subtree._create_treemap(last_rect)
return rectangles
def _create_horizontal_treemap(self, rect):
""" A helper function for _create_treemap(). It
creates rectangles for each subtree horizontally.
Pre-condition: <rect> is a PyGame rectangle
@type self: AbstractTree
The current AbstractTree object
@type rect: (int, int, int, int)
The leftX, topY, width, and height of rectangle
@rtype: list[((int, int, int, int), (int, int, int), AbstractTree)]
Returns the rectangles, colors, and the
associated AbstractTree object
"""
x, y, width, height = rect
rectangles = []
top_y = y
non_empty_subtrees = self._get_non_empty_trees()
# Get the (n - 1) subtrees to have rectangles based on proportions
for i in range(len(non_empty_subtrees) - 1):
subtree = non_empty_subtrees[i]
proportions = subtree.data_size / self.data_size
sub_height = math.floor(proportions * height)
sub_rect = (x, top_y, width, sub_height)
rectangles += subtree._create_treemap(sub_rect)
top_y += sub_height
# Allow the last subtree to have the remaining area of the rect
if len(non_empty_subtrees) > 0:
last_subtree = non_empty_subtrees[len(non_empty_subtrees) - 1]
last_rect = (x, top_y, width, height - (top_y - y))
rectangles += last_subtree._create_treemap(last_rect)
return rectangles
def _create_treemap(self, rect):
""" Creates rectangles for the subtree horizontally
and vertically that would fit in <rect>
If the rect's width > height, it will create rectangles
vertically; else it will create rectangles horizontally
Pre-condition: <rect> is a PyGame rectangle
@type self: AbstractTree
The current AbstractTree object
@type rect: (int, int, int, int)
The leftX, topY, width, and height of rectangle
@rtype: list[((int, int, int, int), (int, int, int), AbstractTree)]
Returns the rectangles, colors, and the
associated AbstractTree object
"""
# Basecase: If it is an empty tree or <data_size> is 0
if self.is_empty() or self.data_size == 0:
return []
# Basecase: If it is a leaf, cover up the entire rectangle
if len(self._subtrees) == 0 and self.data_size > 0:
return [(rect, self.colour, self)]
# Create rectangles vertically if width > height
if rect[2] > rect[3]:
return self._create_vertical_treemap(rect)
else:
# Create rectangles horizontally otherwise (height >= width)
return self._create_horizontal_treemap(rect)
def generate_treemap(self, rect):
""" Run the treemap algorithm on this tree and return the rectangles.
Each returned tuple contains a pygame rectangle and a colour:
((x, y, width, height), (r, g, b)).
One tuple should be returned per non-empty leaf in this tree.
@type self: AbstractTree
The current abstract tree object
@type rect: (int, int, int, int)
Rectangle is in the format: (leftx, topy, width, height)
@rtype: list[ ( (int, int, int, int), (int, int, int) ) ]
A list of rectangles (1D) to be drawn to the screen
First tuple is the rectangle to be drawn to the screen
(x, y, width, height)
and the second are colors in RGB form
"""
if AbstractTree._stored_treedata is None:
print("Updated cache")
AbstractTree._stored_treedata = self._create_treemap(rect)
rects = [(rect[0], rect[1]) for rect in AbstractTree._stored_treedata]
return rects
def get_selected_leaf(self, coordinates, rect):
""" Return the selected leaf based on coordinates that the user
selected. If cannot be found, it returns None.
== Pre-Condition ==
○ The tree exists
○ <coordinates> must all be positive numbers
○ <rect> must be a valid PyGame rect
@type self: AbstractTree
The current tree object
@type coordinates: tuple(int, int)
Coordinates from treemap_visualiser
@type rect: tuple(int, int, int, int)
Rectangle is in the format: (leftx, topy, width, height)
@rtype: AbstractTree | None
Returns the AbstractTree or a None if the tree is empty or
has an empty size
"""
if AbstractTree._stored_treedata is None:
print("Updated cache")
AbstractTree._stored_treedata = self._create_treemap(rect)
for rectangle in AbstractTree._stored_treedata:
x, y, width, height = rectangle[0]
obj = rectangle[2]
if x <= coordinates[0] <= x + width:
if y <= coordinates[1] <= y + height:
return obj
def get_separator(self):
""" Return the string used to separate nodes in the string
representation of a path from the tree root to a leaf.
Used by the treemap visualiser to generate a string displaying
the items from the root of the tree to the current leaf.
This should be overridden by each AbstractTree subclass, to
customize how these items are separated for different
data domains.
@type self: AbstractTree
@rtype: str
"""
raise NotImplementedError
def delete(self):
""" Deletes the leaf by sets the data_size to 0.
Will readjust the data_size of its ancestors
Pre-conditions: self must be a leaf
@type self: AbstractTree
@rtype: None
"""
AbstractTree._stored_treedata = None
self._change_datasize(self.data_size * -1)
class FileSystemTree(AbstractTree):
""" A tree representation of files and folders in a file system.
The internal nodes represent folders, and the leaves represent
regular files (e.g., PDF documents, movie files, Python files, etc.)
The _root attribute stores the *name* of the folder or file, not its
full path. E.g., store 'assignments', not '/csc148/assignments'.
The data_size attribute for regular files is simply the size of the
file, as reported by os.path.getsize.
"""
def __init__(self, path):
""" Store the file tree structure contained in the given file or folder.
Precondition: <path> is a valid path for this computer.
@type self: FileSystemTree
@type path: str
@rtype: None
"""
# Basecase: If the filepath refers to a file
if not os.path.isdir(path):
file_size = os.path.getsize(path)
basename = os.path.basename(path)
AbstractTree.__init__(self, basename, [], file_size)
return
# If it is a folder
else:
# Grab its subitems
contents = os.listdir(path)
basename = os.path.basename(path)
subitems = []
for content in contents:
item = FileSystemTree(os.path.join(path, content))
subitems.append(item)
item._parent_tree = self
# Create the folder
AbstractTree.__init__(self, basename, subitems)
def get_separator(self):
""" Returns a file system directory format of the node.
@type self: FileSystemTree
The current node of the tree
@rtype: str
The string representation of the tree's file directory
"""
if self.is_empty():
return ""
# If it is the root node
if self._parent_tree is None:
return self._root
parent_directory = self._parent_tree.get_separator()
return os.path.join(parent_directory, self._root)
if __name__ == '__main__':
import python_ta
# Remember to change this to check_all when cleaning up your code.
python_ta.check_errors(config='pylintrc.txt')
python_ta.check_all(config='pylintrc.txt') |
from .tum_validation_data_module import TumValidationDataModule
from .tum_validation_dataset import TumValidationDataset
from ..concat_dataset import ConcatDataset
class TumValidationDataModuleFactory(object):
def __init__(self, dataset_folders):
if type(dataset_folders) is not list:
dataset_folders = [dataset_folders]
self._dataset_folders = dataset_folders
def make_data_module(self, final_image_size, batch_size, num_workers):
dataset = ConcatDataset([TumValidationDataset(x, final_image_size) for x in self._dataset_folders])
return TumValidationDataModule(dataset, batch_size, num_workers)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Build a trial loop Step 2
Use this template to turn Step 1 into a loop
@author: katherineduncan
"""
#%%
#In the task, there are 8 trials. To begin a trial, press the space bar. After a
#short delay, a face will be shown. After another brief delay, a second face will
#be shown. If the second face is the same as the first, press 1. If different,
#press 0. Once you submit your response to the second image, the screen will go
#blank and you can begin the next trial by pressing the space bar. Once you
#complete the last trial, the program will close automatically. The script outputs
#a 'test_data' text file that contains the correct answer to each trial, and the
#participant's response in separate columns.
#%%
import numpy as np
import pandas as pd
import os, sys
import random
from psychopy import visual, core, event
#Initialize answer and response arrays
correct_answer = []
response = []
#Initialize stim lists
target = list(range(1,8))
probe = list(range(1,8))
np.random.shuffle(target)
np.random.shuffle(probe)
#%%
for x in range(len(target)):
win = visual.Window(fullscr=True,
allowGUI=False,
color='white',
unit='height'
)
fixation = visual.GratingStim(win=win,
size=0.01,
pos=[0,0],
sf=0,
rgb='black')
fixation.draw()
keys = event.waitKeys(keyList =['space']) # Wait for space bar press to begin trial
event.Mouse(visible=False)
win.flip()
core.wait(0.5)
win.close()
#Present target face image
win2 = visual.Window(fullscr=True,
allowGUI=False,
color='white',
unit='height'
)
temp = target[x]
file = '/Users/jmsaito/Documents/GitHub/trialloops-jmsaito25/faces/face' + str(temp) + '.jpg'
current_face = visual.ImageStim(win2, image=file, pos=(0,0))
current_face.draw()
event.Mouse(visible=False)
win2.flip()
core.wait(0.5)
win2.close()
#Delay Period
win3 = visual.Window(fullscr=True,
allowGUI=False,
color='white',
                         units='height'
)
fixation = visual.GratingStim(win=win3,
size=0.01,
pos=[0,0],
sf=0,
                                  rgb='black')
    fixation.draw()  # draw the fixation point during the delay
event.Mouse(visible=False)
win3.flip()
core.wait(3)
win3.close()
#Present probe face
win4 = visual.Window(fullscr=True,
allowGUI=False,
color='white',
                         units='height'
)
temp2 = probe[x]
file = '/Users/jmsaito/Documents/GitHub/trialloops-jmsaito25/faces/face' + str(temp2) + '.jpg'
current_face = visual.ImageStim(win4, image=file, pos=(0,0))
current_face.draw()
if temp == temp2:
correct_answer.append(1)
else:
correct_answer.append(0)
event.Mouse(visible=False)
win4.flip()
#Gather participant same/different response
keys = event.waitKeys(keyList =['1','0'])
    response.append(keys[0])  # store the key itself rather than the one-element list
win4.close()
#End loop with blank screen to indicate end of trial
win5 = visual.Window(fullscr=True,
allowGUI=False,
color='white',
                         units='height'
)
event.Mouse(visible=False)
win5.flip()
#%%
core.wait(0.5)
win5.close()
data = pd.DataFrame({'answer':correct_answer,'response':response})
data.to_csv('test_data', sep='\t') |
from abc import ABC, abstractmethod
class AcquisitionError(Exception):
pass
class Acquisition(ABC):
"""Acquisition abstract class
"""
@abstractmethod
def get_data(self):
        return iter([])
@abstractmethod
def terminate(self):
pass
acquisition_strategies = {}
def register_acquisition(cls):
""" register a new strategy to acquisition dictionary
This function was made to be used as decorator on
subclass of bcpy.acquisition.Acquisition
Parameters
----------
- cls : subclass of bcpy.acquisition.Acquisition
        subclass that will be registered as an available strategy
Returns
-------
- subclass of bcpy.acquisition.Acquisition
class passed on parameter
Raises
------
- AcquisitionError
        raised when the class is already registered in the dictionary
"""
if (cls.__name__ in acquisition_strategies):
        raise AcquisitionError(
            "Acquisition strategy " + cls.__name__ +
            " already registered in acquisition_strategies")
acquisition_strategies[cls.__name__] = cls
return cls
def getdata(strategy, *args, **kargs):
"""Get data using some strategy
Parameters
----------
- strategy: `str` or `Acquisition`
Strategy to get data
Returns
-------
- data: `generator` of `[n_channels]`
"""
if isinstance(strategy, str):
if not (strategy in acquisition_strategies):
raise AcquisitionError(
"Unknown acquisition {a}".format(a=strategy))
acq = acquisition_strategies[strategy](**kargs)
# acq.get_data(*args)
return acq.get_data()
elif isinstance(strategy, Acquisition):
return strategy.get_data(*args)
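# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the library): how a hypothetical strategy
# could be registered with the decorator above and consumed through getdata().
# "DummyAcquisition" and its fixed sample values are illustrative assumptions.
# Kept as comments so importing this module stays side-effect free.
#
# @register_acquisition
# class DummyAcquisition(Acquisition):
#     def __init__(self, n_samples=3):
#         self.samples = [[0.1], [0.2], [0.3]][:n_samples]
#
#     def get_data(self):
#         # one fake single-channel reading per iteration
#         return iter(self.samples)
#
#     def terminate(self):
#         pass
#
# for sample in getdata("DummyAcquisition", n_samples=2):
#     print(sample)  # [0.1] then [0.2]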
|
# Author:ambiguoustexture
# Date: 2020-02-24
from chunk_analysis import chunk_analysis
file_parsed = './neko.txt.cabocha'
file_result = './verbs_case_result.txt'
with open(file_parsed, 'r') as text_parsed, open(file_result, 'w') as text_result:
sentences = chunk_analysis(text_parsed)
for sentence in sentences:
for chunk in sentence:
verbs = chunk.get_morphs_by_pos('動詞')
if len(verbs) < 1:
continue
particles = []
for src in chunk.srcs:
particles_in_chunk = sentence[src].get_morphs_by_pos('助詞')
if len(particles_in_chunk) > 1:
case_particles = sentence[src].get_morphs_by_pos('助詞', '格助詞')
if len(case_particles) > 0:
particles_in_chunk = case_particles
if len(particles_in_chunk) > 0:
particles.append(particles_in_chunk[-1])
if len(particles) < 1:
continue
text_result.write('{}\t{}\n'\
.format(verbs[0].base, ' '.join(sorted(particle.surface for particle in particles))))
|
#!/usr/local/bin/python3.8
'''
In case you want to modify a variable you defined outside of a function (a global variable),
you can do so using the 'global' keyword
Limitation:
    > you cannot declare 'global' for a name that is also one of the function's parameters
Side note: you can also create a brand-new global variable from inside a function
'''
########################################
print('BEFORE GLOBAL')
########################################
y = 5
def func(y):
print('Inner y:', y)
    # global y  # SyntaxError: name 'y' is parameter and global
func(10)
print('outer y:', y)
########################################
print('\nAFTER GLOBAL\n')
########################################
y = 5
def func(z):
    c = z # copy the parameter into a local name so the global name is free below
global y # you can use the global variable because the parameter is not a global name
global x # creating a new global variable
    y = c # assign the global variable through the local copy
x = 7
print('y before func:', y)
func(10)
print('y after func:', y)
print('x after func:', x)
|
import pygame
from view.game_view import GameView
from model.game_model import GameModel
from controller.player_input import player_input, player_input2
from controller.enemy_input import enemy_input
from model.vehicle_handling.spawn_enemies import spawn_chance
from global_variables import MOVEMENT_PATTERNS
import time
def p2_start(window):
game_view = GameView(window)
game_model = GameModel(2)
has_not_quit_game = True
while has_not_quit_game:
events = pygame.event.get()
spawn_chance(game_model.vehicles)
has_not_quit_game = player_input(game_model.player, events)
if len(game_model.vehicles) > 1 and game_model.player2.movement_pattern == MOVEMENT_PATTERNS[1]:
player_input2(game_model.vehicles[1], events)
# print(self.game_model.player.cur_x_vel, self.game_model.player.reaction_x_vel, self.game_model.player.cur_y_vel, self.game_model.player.reaction_y_vel)
enemy_input(game_model.vehicles)
game_model.update()
game_view.update(game_model.vehicles)
if not game_model.check_if_player_is_alive(game_model.player) or\
not game_model.check_if_player_is_alive(game_model.player2):
time.sleep(2)
break
# print(window.clock.get_fps())
window.clock.tick(120) |
import os
import random
import torch
import numpy as np
from torch.utils.data import Dataset
from .common import PairedDataset
from .davis2017 import davis2017
def attrib_basic(_sample, class_id):
"""
Add basic attribute
Args:
_sample: data sample
        class_id: class label associated with the data
            (sometimes indicating from which subset the data are drawn)
"""
return {'class_id': class_id}
def get_fg_mask(label, class_id):
"""
    Generate a foreground (FG) mask from the segmentation mask
    Args:
        label:
            semantic mask
        class_id:
            semantic class of interest
"""
# Dense Mask
fg_mask = torch.where(label == class_id,
torch.ones_like(label), torch.zeros_like(label))
return {'fg_mask': fg_mask}
def get_bg_mask(label, class_ids):
    """Generate a background (BG) mask: 1 everywhere except pixels belonging to the given class ids."""
bg_mask = torch.ones_like(label)
for class_id in class_ids:
bg_mask[label == class_id] = 0
return {'bg_mask': bg_mask}
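# ---------------------------------------------------------------------------
# Minimal sketch (illustrative, not part of the dataset code): how the two mask
# helpers above behave on a toy 2x2 label map. Kept as comments so importing
# this module has no side effects.
#
# toy_label = torch.tensor([[1, 2],
#                           [0, 1]])
# get_fg_mask(toy_label, class_id=1)         # {'fg_mask': tensor([[1, 0], [0, 1]])}
# get_bg_mask(toy_label, class_ids=[1, 2])   # {'bg_mask': tensor([[0, 0], [1, 0]])}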
# def fewShot(paired_sample, n_ways, n_shots, cnt_query, davis=True):
# """
# Postprocess paired sample for fewshot settings
#
# Args:
# paired_sample:
# data sample from a PairedDataset
# n_ways:
# n-way few-shot learning
# n_shots:
# n-shot few-shot learning
# cnt_query:
# number of query images for each class in the support set
# """
# ###### Compose the support and query image list ######
# cumsum_idx = np.cumsum([0,] + [n_shots + x for x in cnt_query])
#
#
# class_ids = [paired_sample[cumsum_idx[i]]['basic_class_id'] for i in range(n_ways)]
#
# # support images
# support_images = [[paired_sample[cumsum_idx[i] + j]['image'] for j in range(n_shots)]
# for i in range(n_ways)]
# support_num_objs = [[paired_sample[cumsum_idx[i] + j]['obj_ids'] for j in range(n_shots)]
# for i in range(n_ways)]
# support_num_objs = support_num_objs[0][0] #
# support_images_t = [[paired_sample[cumsum_idx[i] + j]['image_t'] for j in range(n_shots)]
# for i in range(n_ways)]
#
# # support image labels
# if davis:
# support_labels = [[paired_sample[cumsum_idx[i] + j]['label'][class_ids[i]]
# for j in range(n_shots)] for i in range(n_ways)]
# else:
# raise ValueError("When 'davis=true', you should use davis2017 dataset")
#
# # query images, masks and class indices
# query_images = [paired_sample[cumsum_idx[i+1] - j - 1]['image'] for i in range(n_ways)
# for j in range(cnt_query[i])]
# query_images_t = [paired_sample[cumsum_idx[i+1] - j - 1]['image_t'] for i in range(n_ways)
# for j in range(cnt_query[i])]
#
# if davis:
# query_labels = [paired_sample[cumsum_idx[i+1] - j - 1]['pre_label'][class_ids[i]]
# for i in range(n_ways) for j in range(cnt_query[i])]
# label_t = [paired_sample[cumsum_idx[i + 1] - j - 1]['label_t'][class_ids[i]]
# for i in range(n_ways) for j in range(cnt_query[i])]
# else:
# raise ValueError("When 'davis=true', you should use davis2017 dataset")
#
# ###### Generate support image masks ######
# # need to ensure the following line is right
# support_fg_mask = [[get_fg_mask(support_labels[0][shot], support_num_objs[way])
# for shot in range(n_shots)] for way in range(len(support_num_objs))]
# support_bg_mask = [[get_bg_mask(support_labels[way][shot],support_num_objs)
# for shot in range(n_shots)] for way in range(n_ways)]
# # ###### Generate query label (class indices in one episode, i.e. the ground truth)######
#
# query_labels_tmp = [torch.zeros_like(x) for x in label_t]
# for i, query_label_tmp in enumerate(query_labels_tmp):
# query_label_tmp[label_t[i] == 255] = 255
# for j in range(len(support_num_objs)):
# query_label_tmp[label_t[i] == support_num_objs[j]] = j+1 # it doesn't have to be j + 1
#
#
# return {'class_ids': class_ids,
#
# 'support_images_t': support_images_t,
# 'support_images': support_images,
# 'support_fg_mask': support_fg_mask, # length of support mask is not way
# 'support_bg_mask': support_bg_mask,
# # 'support_num_objs':support_num_objs,
# 'query_images_t': query_images_t,
# 'query_images': query_images,
# 'query_labels': query_labels_tmp,
# 'query_masks': query_labels,
# }
class davis2017_test_loader(Dataset):
def __init__(self, datasets, num_objects,base_dir):
super().__init__()
self.datasets = datasets
self.n_datasets = len(self.datasets)
self.n_data = [len(dataset) for dataset in self.datasets]
self.num_objects = num_objects
self.base_dir = base_dir
val_dir = base_dir + '/ImageSets/2017/val.txt'
with open(val_dir) as f:
class_name = f.readlines()
class_names = [i.rstrip('\n') for i in class_name]
self.class_names = sorted(class_names)
def __len__(self):
return self.n_datasets
def __getitem__(self, idx):
img = []
        for i in self.datasets[idx]:
img.append(i)
return img, self.num_objects[idx],self.class_names[idx]
def davis2017_test(base_dir, split, transforms, to_tensor, labels):
# load image ids for each class
davisseg = davis2017(base_dir,split,transforms,to_tensor)
davisseg.add_attrib('basic',attrib_basic, {})
sub_ids = [davisseg.get_Imgids(i) for i in labels]
num_objects = [len(i) for i in sub_ids]
subsets = davisseg.subsets(sub_ids, [{'basic': {'class_id': i}} for i in labels])
paired_data = davis2017_test_loader(subsets, num_objects,base_dir)
return paired_data
if __name__ == '__main__':
pass
|
import math
# I started the financial calculator by printing out 3 statements.
# The 1st statement explains to the user what to do.
# The 2nd and 3rd ones explain what the two options are, namely bond and investment.
print("Choose either 'investment' or 'bond' from menu below: \n")
print("Investment -to calculate the amount of interest you'll earn on interest.")
print("Bond -to calculate the amount you'll have to pay on a home loan. \n")
# I then wrote an input statement requesting the user to input which option they would like.
# I stored the user's input in a variable called menu_select.
menu_select=input("Enter either 'investment' or 'bond':").lower()
# I lower-cased the input so the user can type the word in capitals or lower case.
# If the user's input was investment, the program asks for the details needed for the investment.
# It stores the user's input in the variables money_deposit, interest_rate, num_years and interest.
if menu_select == "investment":
print("You have selected investment. \n")
money_deposit=float(input("Enter how much money you are depositing:"))
interest_rate=int(input("Enter the interest rate:"))
num_years=int(input("Enter the amount of years you would like to invest for:"))
interest=input("Would you like 'simple' or 'compound' Interest?:").lower()
# Below are 2 indented if statements which depend on the user's input for interest.
# If the user enters simple, it calculates simple interest using all of the user's inputs,
# then prints out the answer.
if interest == "simple" :
simple_interest = money_deposit * (1 + interest_rate / 100 * num_years)
print("The amount with simple interest is R",simple_interest)
# If the user enters compound, it calculates the compound interest for the investment,
# then prints out the answer.
if interest == "compound" :
compound_interest = money_deposit * math.pow((1 + interest_rate / 100), num_years)
compound_int=round(compound_interest, 2)
print("The amount with compound interest is R",compound_int)
# I used elif for the bond option; the input is lower-cased so the program runs whether the
# user types it in capitals or lower case. It prints that they have selected bond.
# Once they enter bond, the program asks them for the details below
# and stores them in variables called value_house, interest_rate and num_months.
# It then calculates bond_repayment and prints out a statement with the answer.
elif menu_select == "bond":
print("You have selected bond. \n")
value_house = int(input("Enter present value of house eg. 100000:"))
interest_rate = int(input("Enter the interest rate eg. 7:"))
num_months = int(input("Enter number of months in which bond will be repaid eg. 120:"))
monthly_interest = (interest_rate / 100)
monthly_interest = (monthly_interest / 12)
bond_repayment =(monthly_interest*value_house) / (1 - (1 + monthly_interest)**(- num_months))
bond_repayment = round(bond_repayment, 2)
print("Your bond repayment is R",bond_repayment)
# I used an else statement so that if the user does not enter investment or bond correctly,
# it prints out an error message saying so.
else:
print("You have not selected one of the available options.")
|
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 9 13:47:18 2019
@author: monre
"""
#USED
import os
import open3d as o3d
import numpy as np
registered = 'C:/Users/monre/OneDrive - Imperial College London/ME4/FYP/DATA/OAI-ZIB/processed_data/04.registered_scaled_PCDs'
corresponded = 'C:/Users/monre/OneDrive - Imperial College London/ME4/FYP/DATA/OAI-ZIB/processed_data/05.corresponded_deformable_scaled_PCDs'
#centered = 'C:/Users/monre/OneDrive - Imperial College London/ME4/FYP/DATA/OAI-ZIB/processed_data/06.corresponded_deformable_centered_PCDs'
unscaled = 'C:/Users/monre/OneDrive - Imperial College London/ME4/FYP/DATA/OAI-ZIB/processed_data/06.corresponded_deformable_unscaled_PCDs'
i = 0
scaling = []
# READ SCALING FACTOR AND CREATE SCALING LIST
with open('C:/Users/monre/OneDrive - Imperial College London/ME4/FYP/DATA/OAI-ZIB/processed_data/04.registered_scaled_PCDs/scaling.txt') as f:
for item in f:
scaling.insert(i, float(item[:-1]))
i += 1
origin = 0
i = 0
for filename in os.listdir(corresponded):
if filename[0:10] == 'femur_bone':
ID = filename[-11:-4]
# CENTER THE FEMURS ABOUT THE ORIGIN - Save to centered:
femur_corresponded_load = o3d.io.read_point_cloud(corresponded + '/' + filename)
femur_corresponded = np.asarray(femur_corresponded_load.points)
if ID == '9001104':
origin = np.mean(femur_corresponded, axis=0)
femur_centered = femur_corresponded - origin
'''
os.chdir(centered)
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(femur_centered)
o3d.io.write_point_cloud('femur_bone_corresponded_centered_' + ID + '.ply', pcd)
'''
# REVERT SCALING FACTORS ON EACH FEMUR - Save to unscaled:
femur_unscaled = femur_centered / scaling[i]
i += 1
os.chdir(unscaled)
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(femur_unscaled)
o3d.io.write_point_cloud('femur_bone_corresponded_unscaled_' + ID + '.ply', pcd)
'''
# REVERT SCALING FACTORS ON EACH FEMUR - Save to unscaled:
i = 0
for filename in os.listdir(centered):
if filename[0:10] == 'femur_bone':
ID = filename[-11:-4]
femur_centered_load = o3d.io.read_point_cloud(centered + '/' + filename)
femur_centered = np.asarray(femur_centered_load.points)
femur_unscaled = femur_centered / scaling[i]
i += 1
os.chdir(unscaled)
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(femur_unscaled)
o3d.io.write_point_cloud('femur_bone_corresponded_unscaled_' + ID + '.ply', pcd)
'''
# FOR CHECKING PURPOSES - Center the initial femurs too
'''
normal = 'C:/Users/monre/OneDrive - Imperial College London/ME4/FYP/DATA/OAI-ZIB/processed_data/04.registered_centered_PCDs'
# CENTER THE FEMURS ABOUT THE ORIGIN - ON THE FEMURS THAT DON'T HAVE A POINT TO POINT CORRESPONDENCE AND HAVEN'T BEEN SCALED, TO MAKE SURE THEY'RE SIMILAR TO THE UNSCALED ONES LATER
femur_correspond_load = o3d.io.read_point_cloud(corresponded + '/femur_bone_corresponded_9001104.ply')
femur_correspond = np.asarray(femur_correspond_load.points)
origin = np.mean(femur_correspond, axis=0)
for filename in os.listdir(registered):
if filename[0:10] == 'femur_bone':
ID = filename[-11:-4]
femur_correspond_load = o3d.io.read_point_cloud(registered + '/' + filename)
femur_correspond = np.asarray(femur_correspond_load.points)
femur_correspond = femur_correspond - origin
os.chdir(normal)
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(femur_correspond)
o3d.io.write_point_cloud('femur_bone_registered_centered_' + ID + '.ply', pcd)
'''
|
# -*- coding: utf-8 -*-
class Connection:
def __init__(self,sock,addr):
self.sock = sock
self.addr = addr
self.readsize = 4096
self.timeout = 5
    def handle(self):
        for data in self.read():
            print(data)
            print(len(data))
def read(self):
while True:
buf = self.sock.recv(self.readsize)
if buf:
yield buf
else:
break
    def write(self, buf):
        self.sock.send(buf)
def close(self):
self.sock.close()
def set_timeout(self):
self.sock.settimeout(self.timeout)
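# ---------------------------------------------------------------------------
# Hedged usage sketch (assumption, not part of the original module): handing an
# accepted socket to the Connection class above. Kept as comments so importing
# the module has no side effects; the host and port are made up.
#
# import socket
#
# server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# server.bind(("127.0.0.1", 9000))
# server.listen(1)
# sock, addr = server.accept()
# conn = Connection(sock, addr)
# conn.set_timeout()
# conn.handle()   # prints each received chunk and its length until the peer closes
# conn.close()
# server.close()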
|
from .de import DE, AsyncDE
|
def degree_centrality(G, nodes): ...
def betweenness_centrality(G, nodes): ...
def closeness_centrality(G, nodes, normalized: bool = True): ...
|
from __future__ import unicode_literals
from django.db import models
class internal_key_data(models.Model):
key=models.CharField(max_length=200,blank=True,null=True)
value=models.CharField(max_length=200,blank=True,null=True)
# Create your models here.
|
"""
*********************************************************************
This file is part of:
The Acorn Project
https://wwww.twistedfields.com/research
*********************************************************************
Copyright (c) 2019-2021 Taylor Alexander, Twisted Fields LLC
Copyright (c) 2021 The Acorn Project contributors (cf. AUTHORS.md).
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*********************************************************************
"""
import redis
import time
import pickle
from scipy.interpolate import CubicSpline
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as mp_colors
import sys
from scipy.interpolate import splprep, splev
sys.path.append('../vehicle')
from remote_control_process import EnergySegment
_SMOOTH_MULTIPLIER = 0.00000000001
r = redis.Redis(
host='192.168.1.170',
port=6379)
for key in r.scan_iter():
#print(key)
if 'energy_segment' in str(key):
orig_x = []
orig_y1 = []
orig_y2 = []
orig_y3 = []
orig_y4 = []
orig_z = []
colors = []
min_colorval = 9999999
max_colorval = -9999999
print(key)
list_length = r.llen(key)
print("List Length {}".format(list_length))
first_stamp = pickle.loads(r.lindex(key, 0)).start_gps.time_stamp
colorby = ""
watt_seconds = False
now = time.time()
today = time.localtime(now)
power_vals = [[],[],[],[]]
total_meters_traveled = 0
while True:
print("loop")
day_index = 0
last_total = total_meters_traveled
total_meters_traveled = 0
list_length = r.llen(key)
for idx in range(list_length-1, 0, -1):
# print(idx)
segment = pickle.loads(r.lindex(key, idx))
this_stamp = segment.start_gps.time_stamp
stamp_localtime = time.localtime(this_stamp)
if stamp_localtime.tm_year == today.tm_year and stamp_localtime.tm_yday == today.tm_yday:
total_meters_traveled += segment.distance_sum
else:
print(total_meters_traveled)
if total_meters_traveled > last_total:
print("Still rollin.")
else:
print("##############################################################")
print("##############################################################")
print("##############################################################")
print("##############################################################")
print("##############################################################")
print("##############################################################")
print("##############################################################")
print("##############################################################")
print("##############################################################")
print("##############################################################")
time.sleep(5)
break
|
import re
import os
import math
import typing
import pathlib
import logging
from .logginglib import log_debug
from .logginglib import get_logger
# the regular expression that matches any valid format specification, each
# group contains one specification item
format_reg = re.compile(r"^((?:.(?=(?:<|>|\^)))?)([<>=^]?)([\-+ ]?)(#?)(0?)([\d]*)([_,]?)((?:\.[\d]+)?)([bcdeEfFgGnosxX%]?)$")
class Datatype:
"""A class providing a datatype encapsulation that allows parsing and
formatting values, mostly int or float values, into different input and
display formats.
Class attributes
----------------
int : Datatype
        An int datatype that always returns an int but also accepts floats and
        strings that contain floats, digits after the separator will be lost
        (always rounding down)
hex_int : Datatype
        An int that reads from and formats to hexadecimal input and output, the
        parsed value will always be a normal int, the formatted output and the
        parsed input will be a hexadecimal number (with leading "0x")
dirpath : Datatype
        A directory path that will always format to the absolute path of a
        directory, to set the base path for relative paths use
        `Datatype.dirpath.withbase("<base>")`
filepath : Datatype
        A path that will always format to an absolute path, GUIs will show a
        file, to set the base path for relative paths use
        `Datatype.filepath.withbase("<base>")`
options : class
The `OptionDatatype` class so it can be used more intuitively like the
other datatypes, this expresses a list of values that are valid, use
with `Datatype.options(("opt 1", "opt 2"))`
float_np : function
A float with the precision of `<n>`, the parsed value will always be a
normal float, the formatted output is rounded to the `<n>` digits
default_parse : any
The default parsed value for this datatype if a value is not parsable,
has to be anything else than None, default: None
"""
def __init__(self, name: str,
format: typing.Callable[[typing.Union[int, float, str], str], typing.Union[int, float, str]],
parse: typing.Optional[typing.Callable[[typing.Union[int, float, str]], typing.Union[int, float, str]]]=None,
**kwargs) -> None:
"""Create a new datatype.
The `format` callback gets two arguments. The first one is the value,
the second one is the format string (between the two curly brackets,
including the leading colon). It has to return the value as a string
        representation. The value should be formatted as well as possible. If
it is impossible to format, an empty string should be returned.
```
str_value = format(value, str_format)
```
The `parse` callback will get the value to parse as the only parameter.
This can be of any type. The `parse` function has to return the value
        in the correct format, preferably a number. If the value is not
parsable, it has to raise a ValueError.
```
try:
value = parse(value)
except ValueError:
pass
```
Example:
-------
```python
>>> Datatype("string",
... lambda v, f: ("{" + f + "}").format(v),
... lambda x: str(x))
```
Parameters
----------
name : str
The name to show
format : callable
A function that will get the value to format as the first argument
and a format string (as defined in
https://docs.python.org/library/string.html#formatspec, with just
the arguments after the colon) as the second argument, note that
this can be empty too
parse : callable
A function that will get the value (can be any type) as the
parameter and has to return the parsed value in the datatype this
object is representing
Keyword Arguments
-----------------
Any keyword arguments are stored in the `Datatype.data` attribute which
is used by some datatypes.
"""
self.name = name
self._format = format
self._parse = parse
self.default_parse = None
self._logger = get_logger(self)
self.data = kwargs
def parse(self, value: typing.Any) -> typing.Any:
"""Parse the `value`.
Raises
------
ValueError
If the `value` is not parsable
Parameters
----------
value : any
The value to parse
Returns
-------
any
The parsed value as the current datatype defines
"""
if callable(self._parse):
ret = self._parse(value)
log_debug(self._logger, "Parsing value '{}' to '{}'".format(value, ret))
return ret
else:
return value
def format(self, value: typing.Any, format: typing.Optional[str]="") -> str:
"""Format the `value`.
Parameters
----------
value : any
The value as the current datatype
format : str
The string format without curly brackets as defined in
https://docs.python.org/library/string.html#formatspec
Returns
-------
str
The string representation of the `value` in the current datatype
"""
ret = self._format(value, format)
log_debug(self._logger, "Formatting value '{}' to '{}'".format(value, ret))
return ret
def __call__(self, *args: typing.Any) -> typing.Any:
"""Parse the `value` to this datatype.
Parameters
----------
value : any
The value to parse
Returns
-------
any
The parsed value as the current datatype defines
"""
return self.parse(args[0])
@staticmethod
def split_format_spec(format_spec: str) -> typing.Tuple[str, str, str, str, str, typing.Union[str, int], str, typing.Union[str, int], str]:
"""Split the format specification as it is defined in
https://docs.python.org/library/string.html#formatspec.
Parameters
----------
format_spec : str
The format specification
Returns
-------
tupl or None
A tuple that represents the specification or None if the
            specification is invalid, the tuple has the following indices:
- 0: fill character or an empty string
- 1: alignment character or an empty string
- 2: sign character or an empty string
- 3: "#" for alternative representation, empty string for normal
- 4: "0" for zeros between sign and number, empty string otherwise
- 5: width as an int or an empty string
- 6: thousands grouping option ("_" or ",") or empty string for no
grouping
- 7: precision as an int or an empty string
- 8: type specifier
"""
matches = format_reg.match(format_spec)
if matches is not None:
pattern = []
for i, v in enumerate(matches.groups()):
if (i == 5 or i == 7) and v != "":
if v.startswith("."):
v = v[1:]
v = int(v)
pattern.append(v)
pattern = tuple(pattern)
else:
pattern = None
return pattern
@staticmethod
def join_format_spec(format_tuple: typing.Tuple[str, str, str, str, str, typing.Union[str, int], str, typing.Union[str, int], str], keyname: typing.Optional[str]="") -> str:
"""Join the `format_tuple` of the form as returned by
`Datatype::split_format_tuple()` to a valid format string **including
the curly brackets**.
This returns the format string including the curly brackets and the
colon. For using a key you can set the `keyname`.
The `format_tuple` must have the following form. At index:
- 0: fill character or an empty string
- 1: alignment character or an empty string
- 2: sign character or an empty string
- 3: "#" for alternative representation, empty string for normal
- 4: "0" for zeros between sign and number, empty string otherwise
- 5: width as an int or an empty string
- 6: thousands grouping option ("_" or ",") or empty string for no
grouping
- 7: precision as an int or an empty string
- 8: type specifier
Parameters
----------
format_tuple : tuple
The format tuple of the form as the `Datatype::split_format_tuple()`
returns
keyname : str, optional
The name of the key for the format string, this goes before the
colon (`{keyname:format_tuple}`)
Returns
-------
str
A valid format string
"""
format_tuple = list(format_tuple)
if format_tuple[8] in ("b", "c", "d", "o", "x", "X", "n"):
# precision is not allowed for integer types
format_tuple[7] = ""
elif format_tuple[7] != "":
format_tuple[7] = "." + str(format_tuple[7])
return "{" + str(keyname) + ":" + "".join(map(str, format_tuple)) + "}"
class OptionDatatype(Datatype):
"""A datatype that allows the selection of a variant of values.
Attributes
----------
options : sequence
The options that are allowed
exact_comparism : bool, optional
        Whether to do exact comparisons or not, for strings exact is case
sensitive, for floats exact is all digits, default: False
ignore_chars : list of strings
The list of strings to replace before comparing (and before converting
to lower case) if `OptionDatatype.exact_comparism` is False and the
option is a string, this can be for example white space to compare with
ignore whitespace, default: []
rel_tol : float
The relative tolerance to use if `OptionDatatype.exact_comparism` is
False and the option is a float, default: 0
abs_tol : float
The absolute tolerance to use if `OptionDatatype.exact_comparism` is
False and the option is a float, default: 1e-6
"""
def __init__(self, options: typing.Sequence,
exact_comparism: typing.Optional[bool]=False,
**kwargs) -> None:
"""Create a new option datatype.
        The `exact_comparism` flag tells whether to compare exactly or not. What
this means depends on the type of the options.
        - `options` are strings: Exact is case sensitive, inexact is case
          insensitive, `datatype.ignore_chars` can be a list of characters that
          will be removed from both the value and the option before the
          comparison
- `options` are floats: Exact is all digits, inexact uses
`math.isclose()`, `datatype.rel_tol` and `datatype.abs_tol` are given
directly to the function if they are ints
Parameters
----------
options : sequence
The options
exact_comparism : bool, optional
            Whether to do exact comparisons or not, for strings exact is case
sensitive, for floats exact is all digits, default: False
Keyword Arguments
-----------------
ignore_chars : list of strings
The list of strings to replace before comparing (and before
converting to lower case) if `OptionDatatype.exact_comparism` is
False and the option is a string, this can be for example white
space to compare with ignore whitespace, default: []
rel_tol : float
The relative tolerance to use if `OptionDatatype.exact_comparism`
is False and the option is a float, default: 0
abs_tol : float
The absolute tolerance to use if `OptionDatatype.exact_comparism`
is False and the option is a float, note that this is similar to
rounding *down*(!) at 0.5, default: 1e-6
"""
self.ignore_chars = []
self.rel_tol = 0
self.abs_tol = 1e-6
self.options = list(options)
self.exact_comparism = exact_comparism
if ("ignore_chars" in kwargs and
isinstance(kwargs["ignore_chars"], (list, tuple))):
self.ignore_chars = list(kwargs["ignore_chars"])
if ("rel_tol" in kwargs and
isinstance(kwargs["rel_tol"], (int, float))):
self.rel_tol = kwargs["rel_tol"]
if ("abs_tol" in kwargs and
isinstance(kwargs["abs_tol"], (int, float))):
self.abs_tol = kwargs["abs_tol"]
super().__init__("optionslist", self.format_options, self.parse_options)
if len(self.options) > 0:
self.default_parse = self.options[0]
def format_options(self, v: typing.Any, f: typing.Optional[str]="") -> str:
"""Format the given value for the given format.
Parameters
----------
v : any
The value to format
f : str
The format specification
Returns
-------
str
The formatted value
"""
return self.parse_options(v)
def parse_options(self, v: typing.Any):
"""Parse the given value.
Raises
------
ValueError
When the value `v` is not an allowed option.
        Parameters
        ----------
        v : int, float, str, any
            The value to match against the allowed options; the comparison is
            exact or tolerant depending on `OptionDatatype.exact_comparism`
        Returns
        -------
        any
            The matching option from `OptionDatatype.options`
        """
for o in self.options:
if (not self.exact_comparism and isinstance(v, (int, float)) and
isinstance(o, (int, float))):
c = math.isclose(v, o, abs_tol=self.abs_tol, rel_tol=self.rel_tol)
elif (not self.exact_comparism and isinstance(v, str) and
isinstance(o, str)):
vm = v
om = o
for r in self.ignore_chars:
vm = vm.replace(r, "")
om = om.replace(r, "")
c = vm.lower() == om.lower()
elif not self.exact_comparism:
c = o == v or str(o) == str(v)
else:
c = o == v
if c:
return o
raise ValueError("The value '{}' is not in the options.".format(v))
Datatype.options = OptionDatatype
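# ---------------------------------------------------------------------------
# Hedged usage sketch (assumption, the option values are made up): tolerant
# option matching with the class above.
#
# colors = Datatype.options(("Red", "Green", "Blue"), ignore_chars=[" "])
# colors.parse("  green ")    # -> "Green" (case and spaces ignored by the default inexact comparison)
#
# levels = Datatype.options((0.1, 0.2, 0.3))
# levels.parse(0.2000004)     # -> 0.2 (within the default abs_tol of 1e-6)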
class PathDatatype(Datatype):
def __init__(self, kind: str, base: typing.Optional[str]=None) -> None:
"""Get a path datatype of the given `kind`.
Parameters
----------
kind : str
The kind, use 'file' for files only and 'dir' for directories only
base : path-like, optional
The base path to format the path to if a relative path is given
"""
self.kind = kind
if self.kind == "file":
name = "filepath"
elif self.kind == "dir":
name = "dirpath"
else:
raise ValueError(("The `kind` '{}' is not supported for the " +
"PathDatatype. Use 'file' or 'dir'.").format(kind))
super().__init__(name, self.format_path, self.parse_path)
self.default_parse = ""
self.data["base"] = base
@property
def base(self):
try:
return self.data["base"]
except (TypeError, KeyError):
return None
@base.setter
def base(self, base):
self.data["base"] = base
def withbase(self, base: str) -> "PathDatatype":
"""Return a new `PathDatatype` object of the same `kind` but with the
given `base`.
This function is used for a better usage within the `Datatype` class
attribute.
Parameters
----------
base : path-like, optional
The base path to format the path to if a relative path is given
Returns
-------
PathDatatype
The new datatype object
"""
return PathDatatype(self.kind, base)
def parse_path(self, v: typing.Any) -> str:
"""Parse the given value to be the absolute path of the directory.
If a file is given, the directory name (the containing directory) is
returned, the absolute path of the `v` otherwise.
Raises
------
ValueError
When the value `v` could not be parsed
Parameters
----------
v : any
The path expression to parse
Returns
-------
str
The directory path
"""
try:
try:
base = self.data["base"]
except (TypeError, KeyError):
base = None
path_like = [str, pathlib.PurePath]
if hasattr(os, "PathLike"):
                # keep support for python 3.5.6, os.PathLike was introduced in python 3.6
path_like.append(os.PathLike)
if not isinstance(base, tuple(path_like)):
base = os.getcwd()
v = os.path.abspath(os.path.normpath(
os.path.join(base, os.path.expandvars(
os.path.expanduser(v)))))
v = str(v)
# if self.kind == "dir" and os.path.isfile(v):
# v = os.path.dirname(v)
# elif self.kind == "file" and not os.path.isfile(v):
# raise ValueError()
if self.kind == "dir" and (not v.endswith("/") and not v.endswith("\\")):
v += os.path.sep
return v
except (TypeError, ValueError) as e:
print(e)
raise ValueError(("The value '{}' could not be parsed to a directory " +
"path").format(v)) from e
def format_path(self, v: typing.Any, f: typing.Optional[str]="") -> str:
"""Format the given value to an absolute directory path.
If the value is a plain string, it will be relative to the current working
directory.
Parameters
----------
v : any
The value to format
f : str
The format specification
Returns
-------
str
The formatted value or an empty string if it is not formattable
"""
try:
return str(self.parse_path(v))
except ValueError:
return ""
Datatype.dirpath = PathDatatype("dir")
Datatype.filepath = PathDatatype("file")
from .default_datatypes import int_type
Datatype.int = int_type
from .default_datatypes import hex_int_type
Datatype.hex_int = hex_int_type
from .default_datatypes import float_np
Datatype.float_np = float_np |
import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
from flask_pymongo import PyMongo
from flask_mongoengine import MongoEngine
mongo = PyMongo()
me = MongoEngine()
db = SQLAlchemy()
ma = Marshmallow()
def create_app():
app = Flask(__name__)
secret_key = os.getenv('FLASK_SECRET_KEY')
app.config['SECRET_KEY'] = secret_key
app.config['SQLALCHEMY_DATABASE_URI'] = \
'postgresql+psycopg2://likit@localhost/education_dev'
app.config['MONGO_DBNAME'] = 'employees'
app.config['MONGODB_DB'] = 'employees'
me.init_app(app)
mongo.init_app(app)
db.init_app(app)
with app.app_context():
db.create_all()
from api import education_bp
app.register_blueprint(education_bp, url_prefix='/api')
return app |
import pbPlots as pbPlots
import supportLib as supportLib
series = pbPlots.GetDefaultScatterPlotSeriesSettings()
series.xs = [-2, -1, 0, 1, 2]
series.ys = [2, -1, -2, -1, 2]
series.linearInterpolation = True
series.lineType = "dashed"
series.lineThickness = 2
series.color = pbPlots.GetGray(0.3)
settings = pbPlots.GetDefaultScatterPlotSettings()
settings.width = 800
settings.height = 600
settings.autoBoundaries = True
settings.autoPadding = True
settings.title = "x^2 - 2"
settings.xLabel = "X axis"
settings.yLabel = "Y axis"
settings.scatterPlotSeries = [series]
imageReference = pbPlots.CreateRGBABitmapImageReference()
pbPlots.DrawScatterPlotFromSettings(imageReference, settings)
pngdata = pbPlots.ConvertToPNG(imageReference.image)
supportLib.WriteToFile(pngdata, "example2.png")
pbPlots.DeleteImage(imageReference.image)
|
class Animal:
def run(self):
        print('I am an animal')
class Dog(Animal):
pass
class Cat(Animal):
pass
dog = Dog()
dog.run() |
from app.DAOs.MasterDAO import MasterDAO
from app.DAOs.AuditDAO import AuditDAO
from psycopg2 import sql, errors
from flask import jsonify
import re
def Find(string):
"""
Private Method to verify if string is a website link (URL)
Uses :func:`~app.re.findall`
:param string: string to check URL for
    :returns list: the list of URL substrings found in the string (empty if none)
"""
    url = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', string)
return url
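# ---------------------------------------------------------------------------
# Hedged example (the URL is made up): Find() returns every URL-like substring
# it can match, as a list.
#
# Find("see https://example.com/events for details")
#   -> ['https://example.com/events']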
class WebsiteDAO(MasterDAO):
"""
All Methods in this DAO close connections upon proper completion.
Do not instantiate this class and assign it, as running a method
call will render it useless afterwards.
"""
def getWebsiteByID(self, wid):
"""
Query Database for an Website, given a website ID
:param wid: The website ID
:type wid: int
:return Tuple: SQL result of Query as a tuple.
"""
cursor = self.conn.cursor()
query = sql.SQL("select {fields} from {table1} "
"where {pkey1} = %s ").format(
fields=sql.SQL(',').join([
sql.Identifier('wid'),
sql.Identifier('url')
]),
table1=sql.Identifier('websites'),
pkey1=sql.Identifier('wid'))
cursor.execute(query, (int(wid),))
result = cursor.fetchone()
return result
def getWebsitesByEventID(self, eid):
"""
Query Database for an Website, given a event ID
:param eid: The event ID
:type eid: int
:return Tuple: SQL result of Query as a tuple.
"""
cursor = self.conn.cursor()
query = sql.SQL("select {fields} from {table1} "
"natural join {table2} "
"where {pkey1} = %s;").format(
fields=sql.SQL(',').join([
sql.Identifier('wid'),
sql.Identifier('url'),
sql.Identifier('wdescription')
]),
table1=sql.Identifier('eventwebsites'),
table2=sql.Identifier('websites'),
pkey1=sql.Identifier('eid'))
cursor.execute(query, (int(eid),))
result = []
for row in cursor:
result.append(row)
return result
def getWebsitesByServiceID(self, sid):
"""
Query Database for an Website, given a service ID
:param sid: The service ID
:type sid: int
:return Tuple: SQL result of Query as a tuple.
"""
cursor = self.conn.cursor()
query = sql.SQL("select {fields} from {table1} "
"natural join {table2} "
"where {pkey1} = %s and {pkey2} = %s and isdeleted = false ").format(
fields=sql.SQL(',').join([
sql.Identifier('wid'),
sql.Identifier('url'),
sql.Identifier('wdescription'),
sql.Identifier('isdeleted')
]),
table1=sql.Identifier('servicewebsites'),
table2=sql.Identifier('websites'),
pkey1=sql.Identifier('sid'),
pkey2=sql.Identifier('isdeleted'))
cursor.execute(query, (int(sid), False))
self.conn.commit()
result = []
for row in cursor:
result.append(row)
return result
#TODO: VERIFY WHY THIS METHOD DOES NOT COMMIT.
def createWebsite(self, url, uid):
"""
Create a new entry for service websites.
CURRENTLY DOES NOT COMMIT ANYTHING.
:param url: The website link
:type url: string
:param uid: The user id of the route caller
:type uid: int
:return Tuple: SQL result of Query as a tuple.
"""
try:
if url is not None and url != "":
cursor = self.conn.cursor()
audit = AuditDAO()
tablename = "websites"
pkey = "url"
oldValue = audit.getTableValueByIntID(
table=tablename, pkeyname=pkey, pkeyval=url, cursor=cursor)
query = sql.SQL("insert into {table1} "
"({insert_fields}) "
"values (%s) "
"on CONFLICT (url) do update "
"set url=%s "
"returning wid ").format(
table1=sql.Identifier('websites'),
insert_fields=sql.SQL(',').join([
sql.Identifier('url'),
]))
cursor.execute(query, (url, url))
result = cursor.fetchone()
newValue = audit.getTableValueByIntID(table=tablename, pkeyname=pkey, pkeyval=url, cursor=cursor)
audit.insertAuditEntry(changedTable=tablename, changeType=audit.INSERTVALUE, oldValue=oldValue,
newValue=newValue, uid=uid, cursor=cursor)
return result
else:
result = [None, None]
except:
result = [None, None]
return result
def addWebsite(self, url, cursor, uid):
"""
Create a new entry for a website
Uses :func:`~app.DAOs.AuditDAO.AuditDAO.getTableValueByIntID` &
:func:`~app.DAOs.AuditDAO.AuditDAO.insertAuditEntry`
:param url: The website link
:type url: string
:param uid: The user id of the route caller
:type uid: int
:param cursor: addWebsite method call connection cursor to database.
        :type cursor: connection cursor
:return Tuple: SQL result of Query as a tuple.
"""
        temp = url
url = Find(url)
cursor = cursor
if (url is not None) and (url != "") and len(url) > 0:
audit = AuditDAO()
tablename = "websites"
pkey = "url"
oldValue = audit.getTableValueByIntID(
table=tablename, pkeyname=pkey, pkeyval=temp, cursor=cursor)
query = sql.SQL("insert into {table1} "
"({insert_fields}) "
"values (%s) "
"on CONFLICT (url) do update "
"set url=%s "
"returning wid, url").format(
table1=sql.Identifier('websites'),
insert_fields=sql.SQL(',').join([sql.Identifier('url')]))
cursor.execute(query, (temp, temp))
result = cursor.fetchone()
newValue = audit.getTableValueByIntID(table=tablename, pkeyname=pkey, pkeyval=temp, cursor=cursor)
audit.insertAuditEntry(changedTable=tablename, changeType=audit.INSERTVALUE, oldValue=oldValue,
newValue=newValue, uid=uid, cursor=cursor)
return result
else:
raise ValueError("URL not valid: "+str(temp))
def addWebsitesToService(self, sid, wid, wdescription, cursor, uid):
"""
Create a new entry for a website services table.
Uses :func:`~app.DAOs.AuditDAO.AuditDAO.getTableValueByPkeyPair` &
:func:`~app.DAOs.AuditDAO.AuditDAO.insertAuditEntry`
:param sid: The Service ID
:type sid: int
:param wid: The website ID
:type wid: int
:param wdescription: Description for the website
:type wdescription: string
:param cursor: Method call connection cursor to database.
:type cursor: connection cursor
:param uid: User ID
:type uid: int
:return Tuple: SQL result of Query as a tuple.
"""
        cursor = cursor
audit = AuditDAO()
tablename = 'servicewebsites'
pkeys = ["sid", "wid"]
oldValue = audit.getTableValueByPkeyPair(table=tablename, pkeyname1=pkeys[0], pkeyname2=pkeys[1],
pkeyval1=sid, pkeyval2=wid, cursor=cursor)
query = sql.SQL("insert into {table1} "
"({insert_fields}) "
"values (%s, %s,%s,%s) "
"on CONFLICT (wid,sid) do update set isdeleted = %s "
"returning wid, sid, isdeleted, wdescription").format(
table1=sql.Identifier('servicewebsites'),
insert_fields=sql.SQL(',').join([
sql.Identifier('sid'),
sql.Identifier('wid'),
sql.Identifier('wdescription'),
sql.Identifier('isdeleted')
]))
try:
cursor.execute(query, (int(sid), str(wid), str(wdescription), False, False))
result = cursor.fetchone()
newValue = audit.getTableValueByPkeyPair(table=tablename, pkeyname1=pkeys[0], pkeyname2=pkeys[1],
pkeyval1=sid, pkeyval2=wid, cursor=cursor)
audit.insertAuditEntry(changedTable=tablename, changeType=audit.INSERTVALUE, oldValue=oldValue,
newValue=newValue, uid=uid, cursor=cursor)
return result
except:
return None
def addWebsitesToEvent(self, eid, wid, wdescription, cursor, uid):
"""
Create a new entry for a website events table.
Uses :func:`~app.DAOs.AuditDAO.AuditDAO.getTableValueByPkeyPair` &
:func:`~app.DAOs.AuditDAO.AuditDAO.insertAuditEntry`
:param eid: The Event ID
:type eid: int
:param wid: The website ID
:type wid: int
:param wdescription: Description for the website
:type wdescription: string
:param uid: The user id of the route caller
:type uid: int
:param cursor: Method call connection cursor to database.
        :type cursor: connection cursor
:return Tuple: SQL result of Query as a tuple.
"""
cursor = cursor
audit = AuditDAO()
tablename = 'eventwebsites'
pkeys = ["eid", "wid"]
oldValue = audit.getTableValueByPkeyPair(table=tablename, pkeyname1=pkeys[0], pkeyname2=pkeys[1],
pkeyval1=eid, pkeyval2=wid, cursor=cursor)
query = sql.SQL("insert into {table1} "
"({insert_fields}) "
"values (%s, %s, %s);").format(
table1=sql.Identifier('eventwebsites'),
insert_fields=sql.SQL(',').join([
sql.Identifier('eid'),
sql.Identifier('wid'),
sql.Identifier('wdescription')
]))
cursor.execute(query, (int(eid), int(wid), wdescription))
newValue = audit.getTableValueByPkeyPair(table=tablename, pkeyname1=pkeys[0], pkeyname2=pkeys[1],
pkeyval1=eid, pkeyval2=wid, cursor=cursor)
audit.insertAuditEntry(changedTable=tablename, changeType=audit.INSERTVALUE, oldValue=oldValue,
newValue=newValue, uid=uid, cursor=cursor)
return
def insertWebsiteToService(self, sites, sid, uid):
"""
Create a new entry for a website services table.
Uses :func:`~app.DAOs.WebsiteDAO.WebsiteDAO.addWebsite` &
:func:`~app.DAOs.WebsiteDAO.WebsiteDAO.addWebsitesToService`
:param sid: The Service ID
:type sid: int
:param sites: List of wids and wdescriptions
:type sites: array
:param uid: User ID
:type uid: int
:return Tuple: SQL result of Query as a tuple.
"""
print("START")
websites = []
cursor = self.conn.cursor()
for site in sites:
website = self.addWebsite(cursor=cursor, url=site['url'], uid=uid)
if website:
websites.append({"url": website[1], "wid": website[0], "wdescription": site['wdescription']})
else:
return jsonify(Error="Error creating website "+str(site['url']))
for site in websites:
result = self.addWebsitesToService(cursor=cursor, sid=sid, wid=site['wid'],
wdescription=site['wdescription'], uid=uid)
if result is None:
return jsonify(Error="Error assigning website to sid: "+str(sid)), 400
self.conn.commit()
return {"websites": websites}
def removeWebsitesGivenServiceID(self, wid, sid, uid):
"""
Remove an entry from a website services table
Uses :func:`~app.DAOs.AuditDAO.AuditDAO.getTableValueByPkeyPair` &
:func:`~app.DAOs.AuditDAO.AuditDAO.insertAuditEntry`
:param sid: The Service ID
:type sid: int
:param wid: The website ID
:type wid: int
:param uid: The user id of the route caller
:type uid: int
:return Tuple: SQL result of Query as a tuple.
"""
cursor = self.conn.cursor()
audit = AuditDAO()
tablename = 'servicewebsites'
pkeys = ["sid", "wid"]
oldValue = audit.getTableValueByPkeyPair(table=tablename, pkeyname1=pkeys[0], pkeyname2=pkeys[1],
pkeyval1=sid, pkeyval2=wid, cursor=cursor)
query = sql.SQL("update {table1} set isdeleted = True "
"where ( {pkey1} = %s AND {pkey2} = %s ) "
"returning {pkey1} ,sid,isdeleted,wdescription ").format(
table1=sql.Identifier('servicewebsites'),
pkey1=sql.Identifier('wid'),
pkey2=sql.Identifier('sid'))
try:
cursor.execute(query, (int(wid), int(sid)))
result = cursor.fetchone()
newValue = audit.getTableValueByPkeyPair(table=tablename, pkeyname1=pkeys[0], pkeyname2=pkeys[1],
pkeyval1=sid, pkeyval2=wid, cursor=cursor)
if oldValue and newValue:
audit.insertAuditEntry(changedTable=tablename, changeType=audit.UPDATEVALUE, oldValue=oldValue,
newValue=newValue, uid=uid, cursor=cursor)
self.conn.commit()
except errors.ForeignKeyViolation as e:
result = e
if result is None:
return None
return result[0]
|
from flask import Flask, render_template, url_for, send_from_directory
app = Flask(__name__)
app.debug = True
@app.route('/user/<username>')
def show_user_profile(username):
# show the user profile for that user
return 'User %s' % username
@app.route('/post/<int:post_id>')
def show_post(post_id):
# show the post with the given id, the id is an integer
return 'Post %d' % post_id
@app.route('/')
def index():
return render_template('Index.html')
@app.route('/about')
def about():
return render_template('Groups.html')
@app.route('/group/<name>')
def group(name=None):
if name == 'alkali':
return render_template('alkaliLayout.html', name=name)
elif name =='alkaline-earth':
return render_template('alkalinearthLayout.html', name=name)
elif name =='halogen':
return render_template('halogenLayout.html', name=name)
else:
return "Page not found!"
@app.route('/element/<name>')
def element(name=None):
if name == 'helium':
return render_template('Helium.html', name=name)
elif name =='hydrogen':
return render_template('Hydrogen.html', name=name)
elif name =='fluorine':
return render_template('Fluorine.html', name=name)
else:
return "Page not found!"
# @app.route('/css/<name>')
# def sstatic(name=None):
# return send_from_directory('css', filename=name)
#
# @app.route('/images/<name>')
# def images(name=None):
# return send_from_directory('images', filename=name)
# @app.route('/scripts/<name>')
# def scripts(name=None):
# return send_from_directory('scripts', filename=name)
if __name__ == '__main__':
    app.run()  # host='0.0.0.0' |
# -*- coding: utf-8 -*-
"""Top-level package for juliet."""
__author__ = """Raphael Gyory"""
__email__ = 'raphael@gyory.net'
__version__ = '0.1.0'
|
import cv2
import numpy as np
from PIL import Image
import time
def S_cut(img):
flag1 = 0
flag2 = 0
x1_list = []
x2_list = []
    print(img.shape)  # (height_Y, width_X)
#print(img[...,31])
print(img[...,33].all())# == False
print(img[...,34].any())# == True
#print(img[...,33].all()==0)
#print(img[...,34].any()!=0)
#print(img[...,33].all()==0 and img[...,34].any()!=0)
'''
for i in range(img.shape[1]-1):
if not(img[...,i].all()) and img[...,i+1].any() and flag1 == 0:
x1_list.append(i)
flag1 = 1
if img[...,i].any() and not(img[...,i+1].all()) and flag1 ==1:
x2_list.append(i+1)
flag1 = 0
print(x1_list,x2_list)
'''
for x in range(1,img.shape[1]-1):
if not(img[...,x-1].all()and img[...,x].any())and img[...,x+1].any() and flag1 == 0 :
x1_list.append(x)
flag1 = 1
if img[...,x].any() and not(img[...,x+1].all()) and (img[...,x-1].any()) and flag1 == 1:
x2_list.append(x+1)
flag1 = 0
print(x1_list,x2_list)
def Z_cut(img):
print(img.shape)
ls1 = []
ls2 = []
flag_left = 1
flag_right = 0
for x in range(img.shape[1]):
if img[...,x].any():
ls1.append(1)
else:
ls1.append(0)
#print(ls1)
### It seems that it has worked ###
flag_BJ = 1
i = 0
while True:
if i >(len(ls1)-1):
break
if ls1[i] == 1 and flag_BJ:
flag_BJ = 0
x_left = i
for j in range(i+1,len(ls1)):
i = j
if ls1[j] == 0 and not(flag_BJ):
x_right = j
flag_BJ = 1
ls2.append((x_left,x_right))
break
i+=1
print(ls2)
# if img[...,x].any() and flag_left==1:
# flag_left = 0
# flag_right = 1
# ls1.append(x)
# else :
# if flag_right == 1 and not(img[...,x].all()):
# flag_left = 1
# flag_right = 0
# ls2.append(x)
# print(ls1,"\n",ls2)
# """传入二值化后的图片进行垂直投影"""
# pixdata = img.load()
# w,h = img.size
# print(w,h)
# ver_list = []
    # # start the projection
# for x in range(w):
# black = 0
# for y in range(h):
# if pixdata[x,y] == 0:
# black += 1
# ver_list.append(black)
    # # determine the boundaries
# l,r = 0,0
# flag = False
# cuts = []
# for i,count in enumerate(ver_list):
    # # the threshold here is 0
# if flag is False and count > 0:
# l = i
# flag = True
# if flag and count == 0:
# r = i-1
# flag = False
# cuts.append((l,r))
# return cuts
t1 = time.time()
img = cv2.imread("cut_downer.png",0)
#img = Image.open("cut_upper.png")
Z_cut(img)
t2 = time.time()
print(t2-t1)
#img = cv2.imread("cut_upper.png",0)
# cv2.imshow("win",img)
# cv2.waitKey(0)
#S_cut(img)
#Z_cut() |
import sys
import time
import os
import random
### Terminal TIC TAC TOE ###
# v 1.0
# single player
# multiplayer soon
# add game stats: avg time to move, total length, number of invalid moves
# add help option
# 0 = blank, 1 = 'O', 9 = 'X'
def rand_start():
r = random.random()
if r < 0.5:
return 1
return 9
def name(num_player, players):
# use player names
if num_player == 9:
return players[0]
elif num_player == 1:
return players[1]
else:
return "NOBODY"
def change_player():
if player == 1:
return 9
return 1
def type_writer(string, t=0.1):
for letter in string:
print(letter, end="")
sys.stdout.flush()
time.sleep(random.uniform(0, t))
print("")
def clean_type_writer(string, t=0.03):
for letter in string:
print(letter, end="")
sys.stdout.flush()
time.sleep(t)
print("")
def game_check(board, size):
# check columns and rows in one pass
s = [0 for _ in range(size)]
zeros = 0
for i in range(size):
for j in range(size):
row = board[j]
s[i] += row[i]
if row[i] == 0:
zeros += 1
# check row at the beginning
if i == 0:
val = sum(row)
if val == 27:
return (True, 9)
elif val == 3:
return (True, 1)
# check diagonals
td = board[0][0] + board[1][1] + board[2][2]
du = board[0][2] + board[1][1] + board[2][0]
s.append(td)
s.append(du)
for elem in s:
if elem == 27:
# X wins
return (True, 9)
elif elem == 3:
# O wins
return (True, 1)
# check for catsgame
if zeros == 0:
return (True, 'Nobody')
return [False]
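# ---------------------------------------------------------------------------
# Why the sums of 3 and 27 work (worked arithmetic): a blank cell is 0, an 'O'
# is 1 and an 'X' is 9, so a line of three 'O's sums to 1+1+1 = 3 and a line of
# three 'X's sums to 9+9+9 = 27. No other combination of 0s, 1s and 9s over
# three cells reaches those exact totals (e.g. 9+9+1 = 19, 9+1+1 = 11), so each
# sum uniquely identifies a winning line.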
def conversion(char):
if char == 0:
return ' '
elif char == 1:
return 'O'
elif char == 'Nobody':
return char
else:
return "X"
def print_board(board, size):
rows = []
# O | O | X
line = "|-----------|"
for row in board:
string = "|"
for char in row:
string += " " + conversion(char) + " |"
rows.append(string)
print(line)
for r in rows:
print(r)
print(line)
def coord_translate(coord):
try:
if len(coord) != 2:
return "INVALID MOVE"
elif (int(coord[0]) >= size or int(coord[0]) < 0) or (int(coord[1]) >= size or int(coord[1]) < 0):
return "INVALID MOVE"
else:
return [size - 1 - int(coord[1]), int(coord[0])]
except:
return "INVALID MOVE"
def start_game():
os.system("clear")
h = open("header.txt", "r")
r = open("rules.txt", "r")
text = h.read()
rules = r.read()
clean_type_writer(text, 0.002)
time.sleep(0.2)
type_writer(rules)
type_writer("All Good?")
response = input()
count = 0
while response.lower() not in ["yup", "yes", "yeah", "ye", "y", "yea", "yep", "yee", "yeee", "yus", "yas"]:
# add regex
if count > 7:
type_writer("\nAlright this is ridiculous...")
time.sleep(2)
break
else:
time.sleep(1)
type_writer("\nHow About Now?")
count += 1
response = input()
print("\nPlayer X name:")
x = input()
print("\nPlayer O name:")
o = input()
os.system("clear")
type_writer("LETS DO THIS!")
return (x, o)
def end_game(winner):
e = open("end.txt", "r")
txt = e.read()
time.sleep(0.1)
os.system("clear")
print_board(board, size)
type_writer("THE WINNER IS " + winner + "!!!!")
clean_type_writer(txt, 0.004)
def best_move(board, size, player):
    # find the best location of a move given current board and player
    # TODO: currently a stub that always returns the centre square "11"
    return "11"
if __name__ == "__main__":
moves = []
turns = 0
board = [[0,0,0],
[0,0,0],
[0,0,0]]
size = len(board)
player = rand_start()
players = start_game()
print_board(board, size)
while (True):
result = game_check(board, size)
if (result[0]):
end_game(name(result[1], players))
break
# Wait for valid move
print("\n" * 13)
type_writer(name(player, players) + "'s (" + conversion(player) + ") Move:")
raw_move = input()
move = coord_translate(raw_move)
while raw_move in moves or type(move) == str:
type_writer("INVALID MOVE " + str(raw_move))
print_board(board, size)
print("\n" * 19)
type_writer(name(player, players) + "'s (" + conversion(player) + ") Move:")
raw_move = input()
move = coord_translate(raw_move)
moves.append(raw_move)
board[move[0]][move[1]] = player
turns += 1
player = change_player()
os.system("clear")
print_board(board, size)
|
"""initializing the sqlite database with all the tables
Revision ID: 2df7283702ad
Revises:
Create Date: 2020-08-31 18:57:40.394646
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2df7283702ad'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('firstname', sa.String(length=60), nullable=False),
sa.Column('lastname', sa.String(length=60), nullable=False),
sa.Column('email', sa.String(length=255), nullable=False),
sa.Column('username', sa.String(length=120), nullable=False),
sa.Column('password_hash', sa.String(length=120), nullable=False),
sa.Column('create_date', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('email'),
sa.UniqueConstraint('password_hash')
)
op.create_index(op.f('ix_user_create_date'), 'user', ['create_date'], unique=False)
op.create_index(op.f('ix_user_username'), 'user', ['username'], unique=True)
op.create_table('lecture',
sa.Column('employee_number', sa.String(length=10), nullable=False),
sa.Column('office_number', sa.String(length=10), nullable=True),
sa.Column('telephone_number', sa.String(length=12), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('employee_number'),
sa.UniqueConstraint('office_number'),
sa.UniqueConstraint('telephone_number')
)
op.create_table('student',
sa.Column('student_number', sa.String(length=10), nullable=False),
sa.Column('year_of_study', sa.String(length=2), nullable=False),
sa.Column('phone_number', sa.String(length=10), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('student_number')
)
op.create_table('tutor',
sa.Column('id_number', sa.String(length=10), nullable=False),
sa.Column('account_type', sa.String(length=60), nullable=True),
sa.Column('account_number', sa.String(length=60), nullable=True),
sa.Column('bank_name', sa.String(length=60), nullable=True),
sa.Column('branch_code', sa.String(length=20), nullable=True),
sa.Column('phone_number', sa.String(length=10), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('status', sa.Boolean(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id_number')
)
op.create_table('course',
sa.Column('course_code', sa.String(length=50), nullable=False),
sa.Column('name', sa.String(length=120), nullable=False),
sa.Column('venue', sa.String(length=120), nullable=False),
sa.Column('start_time', sa.String(length=120), nullable=False),
sa.Column('end_time', sa.String(length=120), nullable=False),
sa.Column('day', sa.String(length=120), nullable=False),
sa.Column('number_of_tutors', sa.Integer(), nullable=False),
sa.Column('Lecture_employee_number', sa.String(length=20), nullable=False),
sa.Column('key', sa.String(length=120), nullable=True),
sa.ForeignKeyConstraint(['Lecture_employee_number'], ['lecture.employee_number'], ),
sa.PrimaryKeyConstraint('course_code')
)
op.create_table('students_and_courses',
sa.Column('student_number', sa.String(length=20), nullable=False),
sa.Column('course_code', sa.String(length=20), nullable=False),
sa.ForeignKeyConstraint(['course_code'], ['course.course_code'], ),
sa.ForeignKeyConstraint(['student_number'], ['student.student_number'], ),
sa.PrimaryKeyConstraint('student_number', 'course_code')
)
op.create_table('tutors_and_courses',
sa.Column('id_number', sa.String(length=20), nullable=False),
sa.Column('course_code', sa.String(length=20), nullable=False),
sa.ForeignKeyConstraint(['course_code'], ['course.course_code'], ),
sa.ForeignKeyConstraint(['id_number'], ['tutor.id_number'], ),
sa.PrimaryKeyConstraint('id_number', 'course_code')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('tutors_and_courses')
op.drop_table('students_and_courses')
op.drop_table('course')
op.drop_table('tutor')
op.drop_table('student')
op.drop_table('lecture')
op.drop_index(op.f('ix_user_username'), table_name='user')
op.drop_index(op.f('ix_user_create_date'), table_name='user')
op.drop_table('user')
# ### end Alembic commands ###
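# ----------------------------------------------------------------------------
# Typical usage (a sketch, assuming a standard Alembic project layout with this
# file under versions/ and sqlalchemy.url in alembic.ini pointing at the SQLite
# database):
#
#   alembic upgrade head       # run upgrade() and create all of the tables above
#   alembic downgrade base     # run downgrade() and drop them again
# ----------------------------------------------------------------------------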
|