#!/usr/bin/env python
# coding: utf-8
# Copyright (c) Qotto, 2019
""" BaseProducer class
All producers must inherit from this class
"""
from abc import ABCMeta, abstractmethod
from typing import Union, Awaitable, List, Dict
from tonga.models.records.base import BaseRecord
from tonga.models.store.store_record import StoreRecord
from tonga.models.structs.positioning import BasePositioning
__all__ = [
'BaseProducer',
]
class BaseProducer(metaclass=ABCMeta):
""" BaseProducer all producer must be inherit form this class
"""
@abstractmethod
async def start_producer(self) -> None:
"""
Start producer
Raises:
NotImplementedError: Abstract def
Returns:
None
"""
raise NotImplementedError
@abstractmethod
async def stop_producer(self) -> None:
"""
Stop producer
Raises:
NotImplementedError: Abstract def
Returns:
None
"""
raise NotImplementedError
@abstractmethod
def is_running(self) -> bool:
"""
        Return whether the producer is running
Raises:
NotImplementedError: Abstract def
Returns:
bool
"""
raise NotImplementedError
@abstractmethod
async def send_and_wait(self, msg: Union[BaseRecord, StoreRecord], topic: str) -> BasePositioning:
"""
        Send a message and wait for acknowledgment
        Args:
            msg (Union[BaseRecord, StoreRecord]): Record to send
            topic (str): Topic name
        Raises:
            NotImplementedError: Abstract def
        Returns:
            BasePositioning: Position of the sent record
"""
raise NotImplementedError
@abstractmethod
async def send(self, msg: Union[BaseRecord, StoreRecord], topic: str) -> Awaitable:
"""
        Send a message and return an awaitable resolving on acknowledgment
        Args:
            msg (Union[BaseRecord, StoreRecord]): Record to send in Kafka, inherits from BaseRecord
            topic (str): Topic name to send the message to
        Raises:
            NotImplementedError: Abstract def
        Returns:
            Awaitable: Future resolving when the record is acknowledged
"""
raise NotImplementedError
@abstractmethod
async def partitions_by_topic(self, topic: str) -> List[int]:
"""
Get partitions by topic name
Args:
topic (str): topic name
Returns:
List[int]: list of partitions
"""
raise NotImplementedError
@abstractmethod
def init_transaction(self):
"""
Sugar function, inits transaction
Raises:
NotImplementedError: Abstract def
"""
raise NotImplementedError
@abstractmethod
async def end_transaction(self, committed_offsets: Dict[str, BasePositioning], group_id: str) -> None:
"""
Ends transaction
Args:
committed_offsets (Dict[str, BasePositioning]): Committed offsets during transaction
group_id (str): Group_id to commit
Returns:
None
"""
raise NotImplementedError
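# A minimal sketch (not part of tonga) of a concrete producer built on the
# interface above. The in-memory "broker" dict, and the assumption that
# BasePositioning can be constructed directly, are for illustration only; a
# real implementation would wrap an actual Kafka client.
class InMemoryProducer(BaseProducer):
    def __init__(self) -> None:
        self._running = False
        self._topics: Dict[str, List[Union[BaseRecord, StoreRecord]]] = {}

    async def start_producer(self) -> None:
        self._running = True

    async def stop_producer(self) -> None:
        self._running = False

    def is_running(self) -> bool:
        return self._running

    async def send_and_wait(self, msg: Union[BaseRecord, StoreRecord], topic: str) -> BasePositioning:
        # Store the record, then return its "position" in the fake broker.
        self._topics.setdefault(topic, []).append(msg)
        return BasePositioning()  # assumption: a real producer returns broker-supplied positioning

    async def send(self, msg: Union[BaseRecord, StoreRecord], topic: str) -> Awaitable:
        # Return the coroutine without awaiting it, leaving the ack to the caller.
        return self.send_and_wait(msg, topic)

    async def partitions_by_topic(self, topic: str) -> List[int]:
        return [0]  # the fake broker has a single partition per topic

    def init_transaction(self):
        pass  # no-op: the in-memory broker has no transactions

    async def end_transaction(self, committed_offsets: Dict[str, BasePositioning], group_id: str) -> None:
        pass  # no-op for the same reason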
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
logs = sys.stderr
import mxnet as mx
import numpy as np
import time
import math
import logging
from collections import namedtuple
import data_iter
ReLUModel = namedtuple("ReLUModel", ['relu_exec', 'symbol',
'data', 'label', 'param_blocks'])
class GreedyParser(object):
"""Builds a Chen & Manning style greedy neural net parser.
Args:
num_actions: int size of the set of parser actions.
num_features: int list of dimensions of the feature vectors.
num_feature_ids: int list of same length as num_features corresponding to the sizes of the input feature spaces.
embedding_sizes: int list of same length as num_features of the desired embedding layer sizes.
"""
def __init__(self,
num_actions,
num_features,
num_feature_ids,
embedding_sizes,
hidden_layer_sizes,
learning_rate=0.1,
max_grad_norm=5.0,
epoch=13,
optimizer='sgd',
decay_steps=4000):
self._num_actions = num_actions
self._num_features = num_features
self._num_feature_ids = num_feature_ids
self._embedding_sizes = embedding_sizes
self._hidden_layer_sizes = hidden_layer_sizes
self._learning_rate = learning_rate
self._max_grad_norm = max_grad_norm
self._optimizer = optimizer
self._epoch = epoch
self._feature_size = len(embedding_sizes)
self._decay_steps = decay_steps
self._step = 0
self._decay_rate = 0.96
@property
def embedding_size(self,):
size = 0
for i in range(self._feature_size):
size += self._num_features[i] * self._embedding_sizes[i]
return size
def _AddParam(self, shape, initializer=None, return_average=False):
pass
def _AddEmbedding(self, num_features, vocab_size, embedding_size, name):
data = mx.sym.Variable('feature_%s_data' % name)
embed_weight = mx.sym.Variable('%s_embed_weight' % name)
hidden = mx.sym.Embedding(data=data, weight=embed_weight,
input_dim=vocab_size, output_dim=embedding_size)
hidden = mx.sym.Reshape(hidden, target_shape=(0, num_features * embedding_size))
return hidden
def _BuildNetwork(self,):
# Create embedding layer.
embeddings = []
for i in range(self._feature_size):
embeddings.append(self._AddEmbedding(self._num_features[i],
self._num_feature_ids[i], self._embedding_sizes[i], i))
last_layer = mx.sym.Concat(*embeddings, dim=1)
last_layer_size = self.embedding_size
# Create ReLU layers.
for i, hidden_layer_size in enumerate(self._hidden_layer_sizes):
i2h_weight = mx.sym.Variable('t_%d_i2h_weight' % i)
i2h_bias = mx.sym.Variable('t_%d_i2h_bias' % i)
            last_layer = mx.sym.FullyConnected(data=last_layer,
                                               weight=i2h_weight,
                                               bias=i2h_bias,
                                               num_hidden=hidden_layer_size)
last_layer = mx.sym.Activation(data=last_layer, act_type='relu')
last_layer_size = hidden_layer_size
# Create Softmax layer.
label = mx.sym.Variable('label')
softmax_weight = mx.sym.Variable('softmax_weight')
softmax_bias = mx.sym.Variable('softmax_bias')
fc = mx.sym.FullyConnected(data=last_layer, weight=softmax_weight,
bias=softmax_bias, num_hidden=self._num_actions)
sm = mx.sym.SoftmaxOutput(data=fc, label=label)
return sm
def _IsParameter(self, name):
return name.endswith('weight') or name.endswith('bias')
def SetupModel(self, ctx, batch_size, initializer=mx.initializer.Uniform(0.2)):
self._batch_size = batch_size
relu_model = self._BuildNetwork()
arg_names = relu_model.list_arguments()
# Setup input data shape
input_shapes = {}
for i in range(self._feature_size):
input_shapes['feature_%d_data' % i] = (batch_size, self._num_features[i])
# Infer shape
arg_shape, out_shape, aux_shape = relu_model.infer_shape(**input_shapes)
arg_arrays = [mx.nd.zeros(s, ctx) for s in arg_shape]
arg_grads = {}
for shape, name in zip(arg_shape, arg_names):
if self._IsParameter(name):
arg_grads[name] = mx.nd.zeros(shape, ctx)
relu_exec = relu_model.bind(ctx=ctx, args=arg_arrays,
args_grad=arg_grads, grad_req='add')
param_blocks = []
arg_dict = dict(zip(arg_names, relu_exec.arg_arrays))
for i, name in enumerate(arg_names):
if self._IsParameter(name):
initializer(name, arg_dict[name])
param_blocks.append( (i, arg_dict[name], arg_grads[name], name) )
out_dict = dict(zip(relu_model.list_outputs(), relu_exec.outputs))
data = [relu_exec.arg_dict['feature_%d_data' % i] for i in range(self._feature_size)]
label = relu_exec.arg_dict['label']
self._relu_model = ReLUModel(relu_exec=relu_exec, symbol=relu_model,
data=data, label=label, param_blocks=param_blocks)
def TrainModel(self, X_train, y_train):
m = self._relu_model
# Create optimizer
opt = mx.optimizer.create(self._optimizer)
opt.lr = self._learning_rate
opt.wd = 0.0001
opt.momentum = 0.9
updater = mx.optimizer.get_updater(opt)
print >> logs, "start training..."
for iteration in range(self._epoch):
tic = time.time()
num_correct = 0
num_total = 0
# TODO:: use dataIter instead (for padding).
for begin in range(0, X_train.shape[0], self._batch_size):
batchX = X_train[begin:begin+self._batch_size]
batchY = y_train[begin:begin+self._batch_size]
if batchX.shape[0] != self._batch_size:
continue
# decay learning rate.
if self._step > self._decay_steps and self._step % self._decay_steps == 0:
self._learning_rate *= self._decay_rate ** (int(self._step /
self._decay_steps))
opt.lr = self._learning_rate
print >> logs, 'decay learning rate, now lr is [%.6f], global step [%d]' % (opt.lr, self._step)
                # accumulating step.
self._step += 1
start = 0
for i in range(self._feature_size):
m.data[i][:] = batchX[:,start:start+self._num_features[i]]
start = start + self._num_features[i]
m.label[:] = batchY
m.relu_exec.forward(is_train=True)
m.relu_exec.backward()
num_correct += sum(batchY == np.argmax(m.relu_exec.outputs[0].asnumpy(), axis=1))
num_total += len(batchY)
# update weights
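                # Global gradient-norm clipping: sum the squared L2 norms of all
                # parameter gradients, then rescale each gradient if the combined
                # norm exceeds max_grad_norm before applying the update.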
norm = 0
for idx, weight, grad, name in m.param_blocks:
grad /= self._batch_size
l2_norm = mx.nd.norm(grad).asscalar()
norm += l2_norm * l2_norm
norm = math.sqrt(norm)
for idx, weight, grad, name in m.param_blocks:
if norm > self._max_grad_norm:
grad *= self._max_grad_norm / norm
updater(idx, grad, weight)
# Reset gradient to zero
grad[:] = 0.0
# End of training loop
toc = time.time()
train_time = toc - tic
train_acc = num_correct * 100 / float(num_total)
print >> logs, 'Iter [%d] Train: Time: %.3fs, Training Accuracy: %.3f' % (iteration, train_time, train_acc)
if iteration == 9:
prefix = 'greedy'
self._relu_model.symbol.save('%s-symbol.json' % prefix)
save_dict = {('arg:%s' % k) : v for k, v in self._relu_model.relu_exec.arg_dict.items()
if self._IsParameter(k) }
save_dict.update({('aux:%s' % k) : v for k, v in self._relu_model.relu_exec.aux_dict.items() })
param_name = '%s-%04d.params' % (prefix, iteration)
mx.nd.save(param_name, save_dict)
print >> logs, 'Saved model %s' % param_name
def plot_network(self):
symbol = self._relu_model.symbol
dot = mx.viz.plot_network(symbol, title='greedy_parser')
dot.render(filename='greedy_parser')
def main():
num_actions = 37
num_features = [20, 20, 12]
num_feature_ids = [34346, 34, 21]
embedding_sizes = [64, 32, 32]
hidden_layer_sizes = [200, 200]
batch_size = 32
xdata, ydata = data_iter.read_data(sys.argv[1])
parser = GreedyParser(num_actions, num_features, num_feature_ids, embedding_sizes, hidden_layer_sizes)
parser.SetupModel(mx.gpu(0), batch_size=batch_size)
parser.TrainModel(xdata, ydata)
# parser.plot_network()
if __name__ == '__main__':
main()
|
import json
import logging
from constants import *
import utils
class DialogState:
"""State of dialog agent.
Attributes:
trigger (dict): Slots and values related to Triggers. The structure can
be found in `state-example.py`.
action (dict): Slots and values related to Actions. The structure can be
found in `state-example.py`.
"""
def __init__(self):
self.trigger = {}
self.action = {}
self._initialize_state()
def __repr__(self):
return "IF:\n{}\n\nTHEN:\n{}".format(
json.dumps(self.trigger, indent=4),
json.dumps(self.action, indent=4))
def update_non_fields_slots(self, parse):
"""Updates Trigger- and Action-related slots, except Fields, in the
state.
Args:
parse (dict): Dictionary mapping:
1. `Slot.trigger_channel` to the Trigger Channel parsed from
utterance, and/or
2. `Slot.trigger_fn` to the Trigger Function parsed from
utterance, and/or
3. `Slot.action_channel` to the Action Channel parsed from
utterance, and/or
4. `Slot.action_fn` to the Action Function parsed from
utterance.
"""
if Slot.trigger_channel in parse:
self.trigger = self._new_channel_entry(parse[Slot.trigger_channel])
if Slot.trigger_fn in parse:
self.trigger[FUNCTION] = self._new_function_entry(
self.trigger, parse[Slot.trigger_fn])
if Slot.action_channel in parse:
self.action = self._new_channel_entry(parse[Slot.action_channel])
if Slot.action_fn in parse:
self.action[FUNCTION] = self._new_function_entry(
self.action, parse[Slot.action_fn])
def update_from_confirmation(self, slot, value, response):
"""Updates state from a confirmation-type user-response.
Args:
slot (`constants.Slot`): Slot being confirmed.
value (str): Value of the slot being confirmed.
response (`constants.Confirmation`): Type of response: yes or no.
"""
if response is Confirmation.yes:
self._update_from_affirm(slot, value)
elif response is Confirmation.no:
self._update_from_deny(slot, value)
elif response is Confirmation.unknown:
# User neither said yes nor no. Don't update the state.
pass
else:
logging.error("%s: Illegal response type %s",
self.update_from_confirmation.__name__, response)
raise TypeError
def _initialize_state(self):
"""Initializes state dictionaries."""
self.trigger = utils.channel_template()
self.action = utils.channel_template()
def _new_channel_entry(self, parsed_channel):
"""Returns a new entry for the state using the Channel in
`parsed_channel`.
Args:
parsed_channel (tuple): Tuple containing Channel and the
associated confidence value.
Returns:
dict: New state dictionary with the new Channel.
"""
new_entry = utils.channel_template()
new_entry[ID] = parsed_channel[0]
new_entry[CONF] = parsed_channel[1]
return new_entry
def _new_function_entry(self, state_component, parsed_fn):
"""Returns a new entry for the `FUNCTION` part of the state with the
one of the Functions in `parsed_fn`.
If the parsed Function is not compatible with the Channel value already
present in the `state_component`, the parsed Trigger Function is ignored
and a Function entry with empty `ID` and zero `CONF` is returned.
This is because Channels take precedence over Functions, and Channels
will always be confirmed before Functions, thereby making their values
in the state more authoritative. Even if their values are not yet
confirmed, the accuracy of channel-predictor models is higher than that
of function-predictor models, further justifying this design choice.
Args:
state_component (dict): One of the two components of the state, i.e.
either `self.trigger` or `self.action`.
parsed_fn (`tuple` of `str`, `float`): Tuple containing Function
(in dot format, such as "facebook.upload_photo") and the
associated confidence value.
Returns:
dict: New dictionary for the `FUNCTION` slot in state.
"""
channel, fn = parsed_fn[0].split('.')
new_entry = utils.functions_template()
if channel == state_component[ID]:
new_entry[ID] = parsed_fn[0]
new_entry[CONF] = parsed_fn[1]
else:
new_entry[ID] = ""
new_entry[CONF] = 0.0
return new_entry
def _update_from_affirm(self, slot, value):
"""Updates state from a affirmative response from the user.
Args:
slot (`constants.Slot`): Slot being confirmed.
value (str): Value of the slot being confirmed as correct.
"""
method_name = self._update_from_affirm.__name__
def affirm_helper(state_component):
if state_component[ID] == value:
state_component[CONF] = 1.0
else:
logging.error("%s: Trying to affirm the value %s for slot %s "
"which does not exist in state %s.",
method_name, value, slot, state_component)
if slot is Slot.trigger_channel:
affirm_helper(self.trigger)
elif slot is Slot.trigger_fn:
affirm_helper(self.trigger[FUNCTION])
elif slot is Slot.action_channel:
affirm_helper(self.action)
elif slot is Slot.action_fn:
affirm_helper(self.action[FUNCTION])
else:
logging.error("%s: Illegal slot type %s.", method_name, slot)
raise TypeError
def _update_from_deny(self, slot, value):
"""Updates state from a denial from the user.
Args:
slot (`constants.Slot`): Slot under consideration.
value (str): Value of the slot being denied.
"""
method_name = self._update_from_deny.__name__
def deny_helper(state_component):
if state_component[ID] == value:
state_component[ID] = ""
state_component[CONF] = 0.0
else:
logging.error(
"%s: Trying to affirm the value %s for slot %s "
"which does not exist in state %s.",
method_name, value, slot, state_component)
if slot is Slot.trigger_channel:
deny_helper(self.trigger)
elif slot is Slot.trigger_fn:
deny_helper(self.trigger[FUNCTION])
elif slot is Slot.action_channel:
deny_helper(self.action)
elif slot is Slot.action_fn:
deny_helper(self.action[FUNCTION])
else:
logging.error("%s: Illegal slot type %s.", method_name, slot)
raise TypeError
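# A minimal usage sketch, assuming `constants` defines the Slot and
# Confirmation enums used above; the channel name and confidence are made up.
if __name__ == "__main__":
    state = DialogState()
    # Hypothetical parse: Trigger Channel "facebook" with confidence 0.8.
    state.update_non_fields_slots({Slot.trigger_channel: ("facebook", 0.8)})
    # The user confirms the channel, which pins its confidence to 1.0.
    state.update_from_confirmation(Slot.trigger_channel, "facebook",
                                   Confirmation.yes)
    print(state)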
|
import os

security_groups = [
    "sg-0964887a556e1b9ff",
    "sg-0e42e8154106ab899",
    "sg-04b98dd0a613e4873",
    "sg-034aef491d100e6a1",
    "sg-0d778ff6c38911f20",
]
for sg in security_groups:
    os.system("aws ec2 delete-security-group --group-id {}".format(sg))
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 31 10:18:40 2017
@author: lcao
"""
import pandas as pd
from WeiboSpyder import weibo
Users_id = pd.read_csv('WeiboStat\Weibo_users_id.csv')
Users_id = Users_id['User_id']
print len(Users_id)
for i in range(2,len(Users_id)):
    # Example usage: given a user id, all of the user's information is stored in the wb instance
    user_id = Users_id[i]  # can be any valid user id (except the crawler account's own id)
    filter = 1  # 0 crawls all posts (originals + reposts); 1 crawls original posts only
    wb = weibo(user_id,filter)  # instantiate the weibo class as wb
    wb.start()  # crawl the Weibo data
    print 'User name: ' + wb.userName
    print 'Total posts: ' + str(wb.weiboNum)
    print 'Following: ' + str(wb.following)
    print 'Followers: ' + str(wb.followers)
    #print 'Latest post: ' + wb.weibos[0]  # with filter=1 this is the latest original post; raises if the user has no posts (len(wb.weibos)==0), same below
    #print 'Likes on the latest post: ' + str(wb.num_zan[0])
    #print 'Reposts of the latest post: ' + str(wb.num_forwarding[0])
    #print 'Comments on the latest post: ' + str(wb.num_comment[0])
    wb.writeTxt()  # writeTxt() only writes the data to a text file; rewrite it to fit your own needs
    if i == 2:  # first pass of this run (the range above starts at 2), so create df here
df = pd.DataFrame(data={'User_name':[wb.userName],
'User_id':[str(user_id)],
'Num_post':[wb.weiboNum],
'Num_original_post':[wb.weiboNum2],
'Num_following':[wb.following],
'Num_follower':[wb.followers],
'Num_comment':[sum(wb.num_comment)],
'Num_forward':[sum(wb.num_forwarding)],
'Num_good':[sum(wb.num_zan)]})
else:
df2 = pd.DataFrame(data={'User_name':[wb.userName],
'User_id':[str(user_id)],
'Num_post':[wb.weiboNum],
'Num_original_post':[wb.weiboNum2],
'Num_following':[wb.following],
'Num_follower':[wb.followers],
'Num_comment':[sum(wb.num_comment)],
'Num_forward':[sum(wb.num_forwarding)],
'Num_good':[sum(wb.num_zan)]})
df = df.append(df2, ignore_index = True)
df.to_csv('WeiboStat\Weibo_Stat.csv')
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# get df data from weibo txt files
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
from os import listdir
from os.path import isfile, join
files = [f for f in listdir('Weibo') if isfile(join('Weibo', f))]
print len(files)
import csv
for i in range(len(files)):
with open(join('Weibo', files[i])) as f:
reader = csv.reader(f)
row1 = next(reader)
row2 = next(reader) # User_name
row3 = next(reader) # User_id
row4 = next(reader) # Num_post
        row5 = next(reader) # Num_following
row6 = next(reader) # Num_follower
row7 = next(reader) # Num_comment
row8 = next(reader) # Num_forward
row9 = next(reader) # Num_good
|
for i in range(0, 5):
    print(str(i) * 5)  # the loop variable is rebound each iteration, so no manual increment is needed
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
from mock import patch
from wadebug.config import Config, ConfigLoadError
from yaml.parser import ParserError
class TestConfig(unittest.TestCase):
def test_should_not_be_in_dev_mode(self):
assert (
Config().development_mode is False
), "Config().development_mode should be False before committing code"
def test_network_should_not_be_disabled(self):
assert (
Config().disable_send_data is False
), "Config().disable_send_data should be False before committing code"
def test_should_return_correct_config(self):
mock_config = {"mock": "config"}
with patch(
"wadebug.config.Config._load_config_from_file", return_value=mock_config
):
assert Config().values == mock_config
assert Config().load_error == ConfigLoadError.NONE
def test_should_return_config_invalid(self):
mock_parse_exception = ParserError("something goes wrong!")
with patch(
"wadebug.config.Config._load_config_from_file",
side_effect=mock_parse_exception,
):
assert Config().load_error == ConfigLoadError.CONFIG_INVALID
assert Config().values == {}
def test_should_return_config_missing(self):
mock_exception = Exception("something goes wrong!")
with patch(
"wadebug.config.Config._load_config_from_file", side_effect=mock_exception
):
assert Config().load_error == ConfigLoadError.CONFIG_MISSING
assert Config().values == {}
|
#!/usr/bin/env python3
import argparse
import sys
import json
from genderbias import ALL_SCANNED_DETECTORS, Document
def main():
parser = argparse.ArgumentParser(
description="CLI for gender-bias detection"
)
parser.add_argument(
"--file", "-f", dest="file", required=False,
help="The file to check"
)
parser.add_argument(
"--json", "-j", dest="json", required=False, default=False, action='store_true',
help="Enable JSON output, instead of text"
)
parser.add_argument(
"--list-detectors", dest="list_detectors", default=False, action="store_true",
help="List the available detectors")
parser.add_argument(
"--detectors", dest="detectors", default="",
help="Use specific detectors, not all available")
args = parser.parse_args()
if args.list_detectors:
print("AVAILABLE DETECTORS:")
for class_name in ALL_SCANNED_DETECTORS:
print(" " + class_name)
sys.exit(0)
if args.detectors:
if args.detectors in ALL_SCANNED_DETECTORS:
detectors = [ALL_SCANNED_DETECTORS[args.detectors]]
else:
print("Detector named '{}' not available.".format(args.detectors))
sys.exit(1)
else:
detectors = ALL_SCANNED_DETECTORS.values()
if not args.file:
# If no file is passed, then read from stdin
doc = Document(sys.stdin.read())
else:
# Otherwise, load the file
doc = Document(args.file)
reports = []
for detector in detectors:
reports.append(detector().get_report(doc))
if not args.json:
print("\n".join(str(report) for report in reports))
else:
reports_data = [report.to_dict() for report in reports]
print(json.dumps(reports_data))
if __name__ == "__main__":
main()
|
#!/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import rootpy.ROOT as ROOT
class readIn:
############Constructor#########################
def __init__(self, filename):
self.pdfs = {}
self.voltDict={}
self.peakDict={}
self.nameList=[]
self.yDict={}
self.yDictTot={}
self.maxIvals={}
self.minIvals={}
f = ROOT.TFile(filename)
self.keyDict={}
self.rejected=[]
self.rejected2=[]
self.rejectedMpe=[]
self.mean=[]
self.sigma=[]
self.mean2=[]
self.sigma2=[]
self.meanMpe=[]
self.sigmaMpe=[]
self.ratio=[]
self.ratioSpe=[]
self.ratioBack=[]
#hist keys
self.keys = f.GetListOfKeys()
#list that contains hist
self.histList=[]
for key in self.keys:
hists = key.ReadObj()
self.keyDict[key.GetName()]=key
self.histList.append(hists)
######list of key name
self.nameList.append(key.GetName())
self.pdfs[key.GetName()]=hists
######Same as nameList but only for hist over 1700
self.label=[]
############serial number
self.sn=[]
self.snDict={}
for i in range(0,len(self.nameList)):
b=self.nameList[i][-41:]
c= b[-10:-6]
d=b[-14:-11]
volt=int(c)
self.voltDict[self.nameList[i]]=volt
if volt >= 1700:
self.sn.append(int(d))
self.snDict[self.nameList[i]]=int(d)
self.label.append(self.nameList[i])
#########Get a histogram of spe peaks#############
def getSpes(self):
print "Getting SPE Peaks"
for lab in self.label:
self.__reduceRes(lab)
for lab in self.label:
print "finding peak "+str(lab)
            self.__split(lab)
            self.__fitSpePeak(lab)
for name in self.rejected:
print name+" not fitted"
for name in self.rejected2:
print name+" not fitted again"
for lab in self.label:
self.__findMpe(lab)
for name in self.rejectedMpe:
print name+" not fitted"
plt.hist(self.sigmaMpe,50)
for i in xrange(0,len(self.mean2)):
m=self.mean2[i]
s=self.sigma2[i]
rat=m/s
self.ratioSpe.append(rat)
for i in xrange(0,len(self.mean)):
m=self.mean[i]
s=self.sigma[i]
rat=m/s
self.ratioBack.append(rat)
ave=np.array(self.sigmaMpe).mean()
print "sigma mpe" + str(ave)
plt.title("sigma MPE")
plt.show()
ave=np.array(self.meanMpe).mean()
print "mean Mpe"+str( ave)
plt.hist(self.meanMpe,50)
plt.title("mean MPE")
plt.show()
ave=np.array(self.ratio).mean()
print "ratio2 mean sigma"+str(ave)
plt.hist(self.ratio,50)
plt.title("ratio mean/sigma MPE")
plt.show()
plt.title("mean SPE")
ave=np.array(self.mean2).mean()
print "mean SPE"+str( ave)
plt.hist(self.mean2,50)
plt.show()
plt.title("sigma SPE")
ave=np.array(self.sigma2).mean()
print"sigma SPE" + str(ave)
plt.hist(self.sigma2,50)
plt.show()
plt.title("mean ratio mean/sigma SPE")
ave=np.array(self.ratioSpe).mean()
print "ratio"+str( ave)
plt.hist(self.ratioSpe,50)
plt.show()
plt.title("mean background")
ave=np.array(self.mean).mean()
print "mean background"+str(ave)
plt.hist(self.mean,50)
plt.show()
plt.title("sigma background")
ave=np.array(self.sigma).mean()
print "sigma background"+str(ave)
plt.hist(self.sigma,50)
plt.show()
plt.title("mean ratio mean/sigma background")
ave=np.array(self.ratioBack).mean()
print "ratio2"+str( ave)
plt.hist(self.ratioBack,50)
plt.show()
#########Find MPE peak#############################
def __findMpe(self,name):
hist=self.pdfs[name]
hist.Fit("gaus","","",1000,10000)
mean=hist.GetFunction("gaus").GetParameter(1)
sigma=hist.GetFunction("gaus").GetParameter(2)
if mean <0:
hist.Fit("gaus","","",500,2000)
mean=hist.GetFunction("gaus").GetParameter(1)
sigma=hist.GetFunction("gaus").GetParameter(2)
self.meanMpe.append(mean)
self.sigmaMpe.append(sigma)
rat=mean/sigma
self.ratio.append(rat)
return
#########Private increase bin size by 20###########
def __reduceRes(self,name):
hist=self.pdfs[name]
y=[]
x=[]
for i in xrange(0,1000,20):
tot=0
for j in xrange(i,i+20):
tot=tot+hist.GetBinContent(j)
y.append(tot/20)
x.append(i)
self.yDict[name]=y
#########Private increase bin size by 20###########
def __reduceResTot(self,name):
hist=self.pdfs[name]
y=[]
x=[]
for i in xrange(0,10000,20):
tot=0
for j in xrange(i,i+20):
tot=tot+hist.GetBinContent(j)
y.append(tot/20)
x.append(i)
self.yDictTot[name]=y
    #########Private: split out because of a matplotlib bug###########
    def __split(self,name):
y=self.yDict[name]
yMins=[]
yMaxs=[]
self.__findPeaks(y,'max',1,yMins,yMaxs)
self.maxIvals[name]=yMaxs
self.minIvals[name]=yMins
#########Private function to find Peaks###########
def __findPeaks(self,y,minmax,start,yMins,yMaxs):
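        # Alternate between scanning for a local maximum and the following
        # local minimum, recording each found interval, until the scan runs out.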
        while minmax == 'min' or minmax == 'max':
            if minmax == 'min':
res=self.__findMin(y,start)
if res !=0:
yMins.append(res)
start=res[-1]
minmax='max'
else:
minmax='0'
else:
res=self.__findMax(y,start)
if res !=0:
yMaxs.append(res)
start=res[-1]
minmax='min'
else:
minmax='0'
#########Find max ##############################################
def __findMax(self,y,start):
if start >= len(y)-1:
print "start hit end"
return 0
yStart=y[start]
i=start+1
end=i
while y[i]>yStart:
i+=1
end=i
if i>=len(y):
break
a = np.array(y[start:end], int)
res=[a.max(),a.argmax(),start,end]
start=end
return res
#########Find min ##############################################
def __findMin(self,y,start):
if start >= len(y)-1:
print "start hit end"
return 0
yStart=y[start]
min=yStart
index=start
count =0
for j in xrange(start+1,len(y)):
diff=min-y[j]
if diff > -1:
min=y[j]
index=j
count =0
else:
if index !=start:
count+=1
if count==5:
break
if index !=start:
end=index
else:
end=50
a = np.array(y[start:end], int)
res=[a.min(),a.argmin(),start,end]
start=end
return res
#########fitting gaussian ######################################
    def __fitSpePeak(self,name):
iVal=self.maxIvals[name][0]
start=iVal[2]*20
end=iVal[3]*20
hist=self.pdfs[name]
try:
hist.Fit("gaus","","",start,end)
self.mean.append(hist.GetFunction("gaus").GetParameter(1))
self.sigma.append(hist.GetFunction("gaus").GetParameter(2))
except TypeError:
print name+" was not fitted"
self.rejected.append(name)
if len(self.minIvals[name]) ==2:
iVal=self.maxIvals[name][1]
start=iVal[2]*20
end=iVal[3]*20
try:
hist.Fit("gaus","","",start,end)
self.mean2.append(hist.GetFunction("gaus").GetParameter(1))
self.sigma2.append(hist.GetFunction("gaus").GetParameter(2))
except TypeError:
print name+" was not fitted"
self.rejected2.append(name)
return
    #########Just a sanity check, delete later########################
    def __showYDict(self,name):
        y=self.yDict[name]
plt.title(name)
plt.plot(y)
plt.show()
    #########Just a sanity check, delete later########################
def __showYDictTot(self,name):
y=self.yDictTot[name]
plt.title(name)
plt.plot(y)
plt.show()
#########Get the numth hist########################
def getHist(self,num):
if type(num) is not int:
print "second arguement needs to be type int"
return
if num>=len(self.pdfs) or num <0:
print str(num)+" is outside range "+str(len(self.pdfs)-1)
return
return self.histList[num]
###########Get the number of Hists##################
def getNumOfHist(self):
return len(self.histList)
###########Get the number of Hists##################
def drawHist(self,num):
self.histList[num].Draw()
input("")
##################################################
|
import json
from Expedia import mainExpedia
import Redis
from models import Flight
def get_cheapest_flight(source, destination, start_date, end_date):
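    # Cache-aside lookup: try Redis first; on a miss, fetch the flight from
    # Expedia, store it JSON-serialized under the composite key, and return it.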
key = "%s:%s:%s:%s" % (source, destination, start_date, end_date)
cached_cheapest_flight = Redis.get_from_db(key)
if cached_cheapest_flight is None:
cheapest_flight = mainExpedia()
value = json.dumps(cheapest_flight.to_dict())
Redis.add_to_db(key, value)
print "from expedia"
return cheapest_flight
else:
print "from redis"
return Flight.from_dict(json.loads(cached_cheapest_flight))
|
number_list = int(input("Enter the number of elements in the list: "))
alist = []
for number in range(number_list):
    in_list = int(input("Enter a number for the list: "))
alist.append(in_list)
def sort_list(alist):
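    # Insertion sort: shift larger elements one slot right until the current
    # value's position is found, keeping alist[0:index] sorted at each step.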
for index in range(0, len(alist)):
current = alist[index]
position = index
while position > 0 and alist[position-1] > current:
alist[position] = alist[position-1]
position -=1
alist[position] = current
return alist
print("The sorted list is :", sort_list(alist))
|
"""
Ambience
This is a system for sending intermittent messages to a room to provide
ambience.
A series of mixins allows all objects to optionally hold messages which
have a chance to be intermittently displayed to the objects around them.
These messages are collected with the return_ambient_msgs() function.
By default:
Objects only return their own messages.
Characters return their own messages + the messages of worn clothing.
Rooms return their own messages + the messages returned by their contents.
A global script set at 120 second intervals determines which rooms have
players in them and triggers an ambient message picked at random from the
options returned per room.
Messages are stored in a dictionary on the object: {message: weight, ...}
TO DO:
- No repeats
- Ambience messages are tagged
- Switch to not return ambient_msgs
- Add origin to ambient messages
"""
from evennia import DefaultObject, DefaultCharacter, DefaultRoom, DefaultScript
from evennia import TICKER_HANDLER as tickerhandler
import random
from evennia.server.sessionhandler import SESSIONS
# -----------------------------------------------------------------------------
# Ambient Message Storage
# -----------------------------------------------------------------------------
class AmbientObj():
"""
Basic Mixin for the Ambient Objects.
Adds Database Attributes:
ambient_msgs (dict): Dict of ambient message strings and weighting.
Eg. {"The sun shines brightly": 1}
"""
def return_ambient_msgs(self):
"""
In the basic typeclass, merely returns the raw ambient_msgs dictionary.
"""
msgs = self.db.ambient_msgs
return msgs if msgs else {}
class AmbientCharacter():
"""
Typeclass for the Ambient Character.
Adds Database Attributes:
ambient_msgs (dict): Dict of ambient message strings and weighting.
Eg. {"The sun shines brightly": 1}
"""
def return_ambient_msgs(self):
"""
Collects the ambient messages from the characters worn equipment and
adds them to the characters own messages
"""
msgs = self.db.ambient_msgs
# Append equipment messages here.
return msgs if msgs else {}
class AmbientRoom():
"""
Typeclass for the Ambient Room.
Database Attributes:
ambient_msgs (dict): Dict of ambient message strings and weighting.
Eg. {"The sun shines brightly": 1}
"""
def return_ambient_msgs(self):
"""
Collects the ambient messages from room contents and
adds them to the Rooms own messages.
"""
msgs = self.db.ambient_msgs
msgs = msgs if msgs else {}
for obj in self.contents_get():
            try:
                msgs.update(obj.return_ambient_msgs())
            except AttributeError:
                # Skip objects that don't implement the ambient mixin.
                continue
return msgs
def display_ambient_msg(self, target = None):
"""
Displays an ambient message selected at random from list returned by
return_ambient_msgs().
"""
msgs = self.return_ambient_msgs()
if msgs:
# If single target, message target only.
if target:
target.msg(random.choices(list(msgs.keys()),
weights=list(msgs.values()),
k=1)[0])
return
            # Otherwise message the whole room.
self.msg_contents(random.choices(list(msgs.keys()),
weights=list(msgs.values()),
k=1)[0])
# -----------------------------------------------------------------------------
# Ambient Message Triggers
# -----------------------------------------------------------------------------
class AmbientScript(DefaultScript):
"""
    This is a Global Script. At each interval it collects a list of rooms
    which contain players. It then displays an ambience message to each
    room's contents, selected from the messages returned by its
    return_ambient_msgs function.
"""
def at_script_creation(self):
self.key = "ambiance_script"
self.desc = "Triggers ambient messages in rooms from contents."
self.interval = 120
self.persistent = True
def at_repeat(self):
"""
Called every self.interval seconds.
"""
# Get puppets with online players connected (and thus have a location)
online_chars = [session.puppet for session in SESSIONS
if session.puppet]
# Get puppet locations with no repeats
inhabited_rooms = list(set([puppet.location for puppet in online_chars]))
# Message room with random ambient message
for room in inhabited_rooms:
            try:
                room.display_ambient_msg()
            except AttributeError:
                # Skip rooms that don't implement the ambient mixin.
                continue
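# A minimal wiring sketch (class names are illustrative): in a real Evennia
# game the mixins would be combined with the default typeclasses like this,
# and AmbientScript created once as a global script.
class AmbienceCharacter(AmbientCharacter, DefaultCharacter):
    pass


class AmbienceRoom(AmbientRoom, DefaultRoom):
    pass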
|
from node import Node
from bst import BST
import random
class RedBlackNode(Node):
RED = True
BLACK = False
def __init__(self, key, val, color=RED):
self.color = color
super().__init__(key, val)
def __str__(self):
return "({},{},{})".format(self.key, self.val, "RED" if self.color else "BLACK")
def __repr__(self):
return "({},{},{})".format(self.key, self.val, "RED" if self.color else "BLACK")
class RedBlackBST(BST):
def put(self, key, val):
self.root = self.__put(self.root, key, val)
def __put(self, node, key, val):
if not node:
return RedBlackNode(key, val)
if node.key > key:
node.left = self.__put(node.left, key, val)
elif node.key < key:
node.right = self.__put(node.right, key, val)
else:
node.val = val
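        # Restore the left-leaning red-black invariants on the way back up:
        # rotate a right-leaning red link left, rotate two consecutive red
        # left links right, and color-flip a node with two red children.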
if self.__is_red(node.right) and not self.__is_red(node.left):
node = self.__rotate_left(node)
if self.__is_red(node.left) and self.__is_red(node.left.left):
node = self.__rotate_right(node)
if self.__is_red(node.left) and self.__is_red(node.right):
self.__flip_color(node)
return node
def __rotate_left(self, node):
assert self.__is_red(node.right) == True
new_root = node.right
node.right = new_root.left
new_root.left = node
new_root.color = node.color
node.color = RedBlackNode.RED
return new_root
def __rotate_right(self, node):
assert self.__is_red(node.left) == True
new_root = node.left
node.left = new_root.right
new_root.right = node
new_root.color = node.color
node.color = RedBlackNode.RED
return new_root
def __flip_color(self, node):
# assert self.__is_red(node) == False
assert self.__is_red(node.left) == True
assert self.__is_red(node.right) == True
node.left.color = RedBlackNode.BLACK
node.right.color = RedBlackNode.BLACK
node.color = RedBlackNode.RED
def __is_red(self, node):
if not node:
return False
return node.color == RedBlackNode.RED
if __name__ == "__main__":
rb_bst = RedBlackBST.build_random_tree(30)
print(rb_bst.get_vertical_order())
|
INF = float("inf")
class WEN: #Weighted edge node
    def __init__(self, node, weight=0):
        self.node = node
        self.weight = weight
class WG: #Weighted graph
def __init__(self,everticies):
self.evertices = everticies
self.adjencylist = {}
self.vertices = []
self.distance = {}
self.p = {}
        for x in range(1,everticies+1): # initialize the per-vertex structures
self.adjencylist[x] = []
self.vertices.append(x)
self.distance[x] = INF
self.p[x] = None
def dijkstra(data): # effectively the main function of this Dijkstra implementation: calls the other functions, adds the edges, etc.
    citiesnroads = data.readline().split()
    cities = int(citiesnroads[0]) # number of cities
    roads = int(citiesnroads[1]) # number of roads
    print('')
    print("Found {} cities and {} roads".format(cities, roads))
    print('')
    graph = WG(cities) # the graph
    print('Running the algorithm...')
    print('')
    for i in range(roads): # add the edges
        rivi = data.readline().split()
        solmu1 = int(rivi[0])
        solmu2 = int(rivi[1])
        paino = int(rivi[2])
        graph.adjencylist[solmu1].append(WEN(solmu2,paino))
        graph.adjencylist[solmu2].append(WEN(solmu1,paino))
    dest = int(data.readline())
    djikstraalgo(graph,1)
    print("Path with the lowest high point on the route 1 -> {}:".format(dest))
    route = []
    heights = [] # height differences, which are not actually used for anything (more on this in the report)
    maxi = path(graph,dest,route,heights) # build the route (and heights) lists
    print(route)
    #print(heights)
    #maxi = highest(heights,len(route)-1)
    print('')
    print("The highest point on this route is",maxi)
def djikstraalgo(gr,start): # runs the actual Dijkstra algorithm
for i in gr.vertices:
gr.distance[i] = INF
gr.p[i] = 0
gr.distance[start]=0
queue = [i for i in gr.vertices]
#print(queue)
while len(queue) > 0:
minv = INF
u = 0
for v in queue:
if gr.distance[v] < minv:
minv = gr.distance[v]
u = v
queue.remove(u)
for e in gr.adjencylist[u]:
v = e.node
            if gr.distance[v] > max(gr.distance[u],e.weight): # main difference from the basic version: find the path whose maximum edge weight is smallest, not the shortest path
                gr.distance[v] = max(gr.distance[u],e.weight) # this part was tricky, but a solution was found: https://cseweb.ucsd.edu/classes/sp00/cse202/graphs1.pdf
gr.p[v] = u
def path(gr,u,route,heights): # builds the list of cities on the route plus a rough list of weights; in practice it shows where the running maximum weight changed
if gr.p[u] != 0:
path(gr,gr.p[u],route,heights)
#print(u,gr.distance[u])
route.append(u)
heights.append(gr.distance[u])
return gr.distance[u]
def noroute(): # called if djikstraalgo raises an error (i.e. no route can be formed)
    print("Route construction failed!")
    print("Is there definitely a connected path between the cities?")
|
import math
import zbar
import cv2
import numpy
from PIL import Image
from img_data import QrData
MS = 10.0 # model side size
class ImgProcessor:
def __init__(self):
self.scanner = zbar.ImageScanner()
self.scanner.parse_config('enable')
self.qr_3d_model = numpy.array([
(0.0, 0.0, 0.0),
(MS, 0.0, 0.0),
(MS, MS, 0.0),
(0.0, MS, 0.0)])
self.camera_matrix = numpy.array(
[[5.2899351181828501e+02, 0., 2.9450258403806163e+02],
[0., 5.2899351181828501e+02, 2.2097639018482772e+02],
[0.0, 0.0, 1.0]])
self.rotation_matrix = numpy.array(
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0]], dtype=float)
self.distortion_coefs = numpy.array(
[1.1393838013330945e-01, 1.2711065646876812e-01,
-3.4306406160909859e-02, -1.0243554211321552e-02,
-1.1529950378308689e+00])
self.window_height = 480
self.fov = 90.0
self.fov_rad = (self.fov / 180.0) * math.pi
self.real_side_size = 27.5
# self.real_side_size = 19.2
# self.real_side_size = 9.7
self.axis_3d_model = numpy.array([(10, 0, 0), (0, 10, 0), (0, 0, 10)], dtype=float)
def extract_data(self, frame):
"""
Detects qr codes on image
:param frame: array of pixels
:return: data extracted from image
:rtype: QrData
"""
pil_image = ImgProcessor.convert_to_pil_format(frame)
width, height = pil_image.size
raw = pil_image.tobytes()
zbar_image = zbar.Image(width, height, 'Y800', raw)
qr_data = QrData()
self.scanner.scan(zbar_image)
# we assume there is only one code at a time
qr_code = [code for code in zbar_image]
if len(qr_code) == 0:
return qr_data
else:
qr_code = qr_code[0]
qr_data.set_text(qr_code.data)
qr_location = numpy.array(qr_code.location, dtype=float)
retval, rvec, tvec = cv2.solvePnP(self.qr_3d_model, qr_location, self.camera_matrix, self.distortion_coefs)
qr_data.set_rotation_and_translation(rvec, tvec)
self.draw_xyz_axis(qr_code.location, qr_data.r_vec, qr_data.t_vec, frame)
qr_data.set_camera_coordinates(self.get_camera_coordinates(qr_data))
qr_data.set_distance(self.distance_from_xyz(camera_position=qr_data.camera_coordinates))
return qr_data
@staticmethod
def average_side_size(qr_corners):
sides_lengths = []
n = len(qr_corners)
for idx in range(0, n):
x1 = qr_corners[idx][0]
y1 = qr_corners[idx][1]
x2 = qr_corners[(idx + 1) % n][0]
y2 = qr_corners[(idx + 1) % n][1]
sides_lengths.append(math.sqrt(math.pow(x2 - x1, 2.0) + (math.pow(y2 - y1, 2.0))))
average_size = sum(sides_lengths) / n
return average_size
def draw_xyz_axis(self, qr_corners, r_vec, t_vec, image):
corners = numpy.array(qr_corners, dtype=float)
projected, jac = cv2.projectPoints(self.axis_3d_model, r_vec, t_vec,
self.camera_matrix, self.distortion_coefs, corners)
axis_end_x = tuple(projected[0].ravel())
axis_end_y = tuple(projected[1].ravel())
axis_end_z = tuple(projected[2].ravel())
axis_end_x = tuple(int(coord) for coord in axis_end_x)
axis_end_y = tuple(int(coord) for coord in axis_end_y)
axis_end_z = tuple(int(coord) for coord in axis_end_z)
axis_ends = [axis_end_x, axis_end_y, axis_end_z]
cv2.line(image, qr_corners[0], axis_ends[0], (255, 0, 0), 5)
cv2.line(image, qr_corners[0], axis_ends[1], (0, 255, 0), 5)
cv2.line(image, qr_corners[0], axis_ends[2], (0, 0, 255), 5)
rotation_matrix = cv2.Rodrigues(r_vec)[0]
camera_position = - rotation_matrix.transpose() * t_vec
for i in range(len(camera_position[0])):
for j in range(len(camera_position[0])):
camera_position[i][j] = int(camera_position[i][j])
def get_camera_coordinates(self, qr_code):
"""
Calculates coordinates in object world.
:return: coordinates in qr code coordinate system
"""
r_vec = qr_code.r_vec
t_vec = qr_code.t_vec
cv2.Rodrigues(r_vec, self.rotation_matrix)
inv_rot = self.rotation_matrix.transpose()
camera_position = -numpy.matrix(inv_rot) * numpy.matrix(t_vec)
camera_position = camera_position.item(0), camera_position.item(1), camera_position.item(2)
return tuple(self.position_in_centimeters(camera_position))
@staticmethod
def convert_to_pil_format(gray):
pil = Image.fromarray(gray)
return pil
@staticmethod
def distance_from_xyz(camera_position):
return math.sqrt(
math.pow(camera_position[0], 2) + math.pow(camera_position[1], 2) + math.pow(camera_position[2], 2)
)
def position_in_centimeters(self, camera_position):
"""
Calculates real distance taking into account size of QR code
:param camera_position: coordinates calculated from rotation and translation
:return: coordinates of a camera with correct distance
"""
proportion = self.real_side_size / MS
camera_position_with_distance = [camera_position[0] * proportion,
camera_position[1] * proportion,
camera_position[2] * proportion]
return camera_position_with_distance
|
from __future__ import print_function
import numpy as np
import time
import tensorflow as tf
import ops as my_ops
import os
import re
import itertools as it
class Agent:
def __init__(self, sess, args):
'''Agent - powered by neural nets, can infer, act, train, test.
'''
self.sess = sess
# input data properties
self.state_imgs_shape = args['state_imgs_shape']
self.state_meas_shape = args['state_meas_shape']
self.meas_for_net = args['meas_for_net']
self.meas_for_manual = args['meas_for_manual']
self.discrete_controls = args['discrete_controls']
self.discrete_controls_manual = args['discrete_controls_manual']
self.opposite_button_pairs = args['opposite_button_pairs']
self.prepare_controls_and_actions()
# preprocessing
self.preprocess_input_images = args['preprocess_input_images']
self.preprocess_input_measurements = args['preprocess_input_measurements']
self.postprocess_predictions = args['postprocess_predictions']
# net parameters
self.conv_params = args['conv_params']
self.fc_img_params = args['fc_img_params']
self.fc_meas_params = args['fc_meas_params']
self.fc_joint_params = args['fc_joint_params']
self.target_dim = args['target_dim']
self.build_model()
def prepare_controls_and_actions(self):
self.discrete_controls_to_net = np.array([i for i in range(len(self.discrete_controls)) if not i in self.discrete_controls_manual])
self.num_manual_controls = len(self.discrete_controls_manual)
self.net_discrete_actions = []
if not self.opposite_button_pairs:
for perm in it.product([False, True], repeat=len(self.discrete_controls_to_net)):
self.net_discrete_actions.append(list(perm))
else:
for perm in it.product([False, True], repeat=len(self.discrete_controls_to_net)):
# remove actions where both opposite buttons are pressed
act = list(perm)
valid = True
for bp in self.opposite_button_pairs:
if act[bp[0]] == act[bp[1]] == True:
valid=False
if valid:
self.net_discrete_actions.append(act)
self.num_net_discrete_actions = len(self.net_discrete_actions)
self.action_to_index = {tuple(val):ind for (ind,val) in enumerate(self.net_discrete_actions)}
self.net_discrete_actions = np.array(self.net_discrete_actions)
self.onehot_discrete_actions = np.eye(self.num_net_discrete_actions)
def preprocess_actions(self, acts):
to_net_acts = acts[:,self.discrete_controls_to_net]
return self.onehot_discrete_actions[np.array([self.action_to_index[tuple(act)] for act in to_net_acts.tolist()])]
def postprocess_actions(self, acts_net, acts_manual=[]):
out_actions = np.zeros((acts_net.shape[0], len(self.discrete_controls)), dtype=np.int)
out_actions[:,self.discrete_controls_to_net] = self.net_discrete_actions[acts_net]
#print(acts_net, acts_manual, self.discrete_controls_to_net, out_actions)
if len(acts_manual):
out_actions[:,self.discrete_controls_manual] = acts_manual
return out_actions
def random_actions(self, num_samples):
acts_net = np.random.randint(0, self.num_net_discrete_actions, num_samples)
acts_manual = np.zeros((num_samples, self.num_manual_controls), dtype=np.bool)
return self.postprocess_actions(acts_net, acts_manual)
def make_net(self, input_images, input_measurements, input_actions, reuse=False):
if reuse:
tf.get_variable_scope().reuse_variables()
self.fc_val_params = np.copy(self.fc_joint_params)
self.fc_val_params['out_dims'][-1] = self.target_dim
self.fc_adv_params = np.copy(self.fc_joint_params)
self.fc_adv_params['out_dims'][-1] = len(self.net_discrete_actions) * self.target_dim
print(len(self.net_discrete_actions) * self.target_dim)
p_img_conv = my_ops.conv_encoder(input_images, self.conv_params, 'p_img_conv', msra_coeff=0.9)
p_img_fc = my_ops.fc_net(my_ops.flatten(p_img_conv), self.fc_img_params, 'p_img_fc', msra_coeff=0.9)
p_meas_fc = my_ops.fc_net(input_measurements, self.fc_meas_params, 'p_meas_fc', msra_coeff=0.9)
p_val_fc = my_ops.fc_net(tf.concat(1, [p_img_fc,p_meas_fc]), self.fc_val_params, 'p_val_fc', last_linear=True, msra_coeff=0.9)
p_adv_fc = my_ops.fc_net(tf.concat(1, [p_img_fc,p_meas_fc]), self.fc_adv_params, 'p_adv_fc', last_linear=True, msra_coeff=0.9)
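        # Value/advantage-style split (as in dueling networks): center the
        # per-action stream by subtracting its mean over actions, then add the
        # expectation stream to get one target_dim prediction per action.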
p_adv_fc_nomean = p_adv_fc - tf.reduce_mean(p_adv_fc, reduction_indices=1, keep_dims=True)
self.pred_all_nomean = tf.reshape(p_adv_fc_nomean, [-1, len(self.net_discrete_actions), self.target_dim])
self.pred_all = self.pred_all_nomean + tf.reshape(p_val_fc, [-1, 1, self.target_dim])
self.pred_relevant = tf.boolean_mask(self.pred_all, tf.cast(input_actions, tf.bool))
def build_model(self):
# prepare the data
self.input_images = tf.placeholder(tf.float32, [None] + [self.state_imgs_shape[1], self.state_imgs_shape[2], self.state_imgs_shape[0]],
name='input_images')
self.input_measurements = tf.placeholder(tf.float32, [None] + list(self.state_meas_shape),
name='input_measurements')
self.input_actions = tf.placeholder(tf.float32, [None, self.num_net_discrete_actions],
name='input_actions')
if self.preprocess_input_images:
self.input_images_preprocessed = self.preprocess_input_images(self.input_images)
if self.preprocess_input_measurements:
self.input_measurements_preprocessed = self.preprocess_input_measurements(self.input_measurements)
# make the actual net
self.make_net(self.input_images_preprocessed, self.input_measurements_preprocessed, self.input_actions)
# make the saver, lr and param summaries
self.saver = tf.train.Saver()
tf.initialize_all_variables().run(session=self.sess)
def act(self, state_imgs, state_meas, objective):
return self.postprocess_actions(self.act_net(state_imgs, state_meas, objective), self.act_manual(state_meas)), None # last output should be predictions, but we omit these for now
def act_net(self, state_imgs, state_meas, objective):
#Act given a state and objective
predictions = self.sess.run(self.pred_all, feed_dict={self.input_images: state_imgs,
self.input_measurements: state_meas[:,self.meas_for_net]})
objectives = np.sum(predictions[:,:,objective[0]]*objective[1][None,None,:], axis=2)
curr_action = np.argmax(objectives, axis=1)
return curr_action
def act_manual(self, state_meas):
if len(self.meas_for_manual) == 0:
return []
else:
assert(len(self.meas_for_manual) == 13) # expected to be [AMMO2 AMMO3 AMMO4 AMMO5 AMMO6 AMMO7 WEAPON2 WEAPON3 WEAPON4 WEAPON5 WEAPON6 WEAPON7 SELECTED_WEAPON]
assert(self.num_manual_controls == 6) # expected to be [SELECT_WEAPON2 SELECT_WEAPON3 SELECT_WEAPON4 SELECT_WEAPON5 SELECT_WEAPON6 SELECT_WEAPON7]
curr_act = np.zeros((state_meas.shape[0],self.num_manual_controls), dtype=np.int)
for ns in range(state_meas.shape[0]):
# always pistol
#if not state_meas[ns,self.meas_for_manual[12]] == 2:
#curr_act[ns, 0] = 1
# best weapon
curr_ammo = state_meas[ns,self.meas_for_manual[:6]]
curr_weapons = state_meas[ns,self.meas_for_manual[6:12]]
#print(curr_ammo,curr_weapons)
available_weapons = np.logical_and(curr_ammo >= np.array([1,2,1,1,1,40]), curr_weapons)
if any(available_weapons):
best_weapon = np.nonzero(available_weapons)[0][-1]
if not state_meas[ns,self.meas_for_manual[12]] == best_weapon+2:
curr_act[ns, best_weapon] = 1
return curr_act
def load(self, checkpoint_dir):
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
return True
else:
return False
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2018-01-18 02:50
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import helpers.base.jsonfield
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='ApplyTable',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=200, verbose_name='\u540d\u79f0')),
('desp', models.TextField(blank=True, verbose_name='\u63cf\u8ff0')),
('file', models.CharField(blank=True, max_length=500, verbose_name='\u6587\u4ef6\u6750\u6599')),
],
),
migrations.CreateModel(
name='CunWei',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=200, verbose_name='\u6751\u59d4')),
('zhureng', models.CharField(blank=True, max_length=100, verbose_name='\u4e3b\u4efb')),
('phone', models.CharField(blank=True, max_length=100, verbose_name='\u7535\u8bdd\u53f7\u7801')),
],
),
migrations.CreateModel(
name='JianFangInfo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=100, verbose_name='\u59d3\u540d')),
('date', models.DateField(auto_now_add=True, verbose_name='\u7533\u8bf7\u65e5\u671f')),
('state', models.IntegerField(blank=True, choices=[(1, '\u9547\u89c4\u4fdd\u529e\u521d\u5ba1'), (2, '\u8054\u5e2d\u4f1a\u8bae\u5ba1\u6838'), (3, '\u8054\u5408\u5ba1\u6279(\u89c4\u4fdd\u529e)'), (4, '\u8054\u5408\u5ba1\u6279(\u89c4\u571f\u6240)')], verbose_name='\u5f53\u524d\u6d41\u7a0b')),
('shenqing', helpers.base.jsonfield.JsonField(default=[], verbose_name='\u7533\u8bf7\u6750\u6599')),
('xieyi', helpers.base.jsonfield.JsonField(default=[], verbose_name='\u534f\u8bae')),
('cunwei', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to='liantang.CunWei', verbose_name='\u6751\u59d4')),
],
),
migrations.CreateModel(
name='Policy',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=200, verbose_name='\u540d\u79f0')),
('desp', models.TextField(blank=True, verbose_name='\u63cf\u8ff0')),
('file', models.CharField(blank=True, max_length=500, verbose_name='\u6587\u4ef6\u6750\u6599')),
],
),
migrations.CreateModel(
name='YinJiZhengGai',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('state', models.IntegerField(blank=True, choices=[(1, '\u9547\u89c4\u4fdd\u529e\u521d\u5ba1'), (2, '\u8054\u5e2d\u4f1a\u8bae\u5ba1\u6838'), (3, '\u8054\u5408\u5ba1\u6279(\u89c4\u4fdd\u529e)'), (4, '\u8054\u5408\u5ba1\u6279(\u89c4\u571f\u6240)')], verbose_name='\u6574\u6539\u72b6\u6001')),
('date', models.DateField(blank=True, verbose_name='\u65e5\u671f')),
('desp', models.TextField(blank=True, verbose_name='\u8fdd\u89c4\u9879\u76ee')),
('file', models.CharField(blank=True, max_length=500, verbose_name='\u6838\u5b9a\u8bc1\u660e')),
('jianfang', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='liantang.JianFangInfo', verbose_name='\u5efa\u623f\u4fe1\u606f')),
],
),
]
|
import os
import numpy as np
from imageio import imwrite
from ..io import segRelabel, mkdir, segToVast
class zwDecoder(object):
def __init__(self, aff_file, output_folder='./', job_id = 0, job_num = 1):
self.aff_file = aff_file
self.output_folder = output_folder
self.job_id = job_id
self.job_num = job_num
def affToSeg(self, vol_key = 'vol0', num_slice = -1, filename_suf = '', \
T_low = 0.15, T_high = 0.9, T_rel = False, \
T_thres = 150, T_dust = 150, \
T_dust_merge = 0.2, T_mst_merge = 0.7):
import h5py
from zwatershed import zwatershed
# sa2 zw2
aff = h5py.File(self.aff_file, 'r')[vol_key]
aff_size = aff.shape
output = self.output_folder + 'zw2d-%s/' % (vol_key)
mkdir(output)
num_slice = aff_size[1] if num_slice < 0 else num_slice
for zi in range(self.job_id, num_slice, self.job_num):
output_file = output + '%04d%s.png' % (zi, filename_suf)
if not os.path.exists(output_file):
print(zi)
aff_s = np.array(aff[:,zi:zi+1]).astype(np.float32)/255.
if aff_size[0] == 2:
                    # 2D affinities: prepend a zero z-channel to make the volume 3D
aff_s = np.concatenate([np.zeros([1, 1, aff_size[2], aff_size[3]], \
np.float32), aff_s], axis=0)
out = zwatershed(aff_s, T_threshes=[T_thres],
T_dust=T_dust, T_aff=[T_low,T_high,T_dust_merge],
T_aff_relative=T_rel, T_merge=T_mst_merge)[0][0][0]
imwrite(output_file, segToVast(segRelabel(out, do_type=True)))
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) Qotto, 2019
from kafka.partitioner.hashed import murmur2
from uuid import uuid4
def partitioner(key, all_partitions, available):
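    # murmur2 may return a negative value; masking with 0x7fffffff forces it
    # non-negative before taking the modulo over the partition count.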
idx = (murmur2(key) & 0x7fffffff) % len(all_partitions)
return idx
all_parts = [0, 1, 2, 3, 4, 5]
available_parts = [0, 1, 2, 3, 4, 5]
counts = [0] * len(all_parts)
for _ in range(10000):
    key = bytes(uuid4().hex, 'utf-8')
    part = partitioner(key, all_parts, available_parts)
    counts[part] += 1
print('\n'.join(f'p{i}: {count}' for i, count in enumerate(counts)))
|
from ._surface import Surface
from ._stream import Stream
from ._spaceframe import Spaceframe
from ._slices import Slices
from plotly.graph_objs.isosurface import slices
from ._lightposition import Lightposition
from ._lighting import Lighting
from ._hoverlabel import Hoverlabel
from plotly.graph_objs.isosurface import hoverlabel
from ._contour import Contour
from ._colorbar import ColorBar
from plotly.graph_objs.isosurface import colorbar
from ._caps import Caps
from plotly.graph_objs.isosurface import caps
|
import numpy as np
import pandas as pd
import matrix_factorization_utilities
# Load user ratings from both the training and testing csv files
raw_training_dataset_df = pd.read_csv('movie_ratings_data_set_training.csv')
raw_testing_dataset_df = pd.read_csv('movie_ratings_data_set_testing.csv')
# Convert the running list of user ratings into a matrix
ratings_training_df = pd.pivot_table(raw_training_dataset_df, index='user_id', columns='movie_id', aggfunc=np.max)
ratings_testing_df = pd.pivot_table(raw_testing_dataset_df, index='user_id', columns='movie_id', aggfunc=np.max)
# Apply matrix factorization on only the training data
# Regularization amount sets how much weight will be placed on a single attribute during matrix factorization
U, M = matrix_factorization_utilities.low_rank_matrix_factorization(ratings_training_df.as_matrix(),
num_features=11,
regularization_amount=1.1)
# Find all predicted ratings by multiplying U and M
predicted_ratings = np.dot(U, M)
# Calculate the error rates by calculating RMSE
rmse_training = matrix_factorization_utilities.RMSE(ratings_training_df.as_matrix(), predicted_ratings)
rmse_testing = matrix_factorization_utilities.RMSE(ratings_testing_df.as_matrix(), predicted_ratings)
print("Training RMSE: {}".format(rmse_training))
print("Testing RMSE: {}".format(rmse_testing))
|
number = int(input("Input your number: "))
result = ""
while number != 0:
    remainder = number % 2           # lowest remaining binary digit
    number = number // 2
    result = str(remainder) + result # prepend, so digits end up most-significant first
if result == "":
    result = "0"  # the loop never runs for 0, so handle it explicitly
print(result)
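# Cross-check: for positive inputs the loop above matches Python's built-in
# binary formatter, e.g. bin(12)[2:] == '1100'.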
|
# -*- coding: utf-8 -*-
import calendar, datetime, logging, uuid, pytz
import inject
from model.systems.assistance.date import Date
from model.systems.assistance.logs import Logs
from model.systems.assistance.justifications.exceptions import *
from model.systems.offices.offices import Offices
from model.systems.assistance.justifications.AAJustification import AAJustification
from model.systems.assistance.justifications.BSJustification import BSJustification
from model.systems.assistance.justifications.CJustification import CJustification
from model.systems.assistance.justifications.LAOJustification import LAOJustification
from model.systems.assistance.schedule import Schedule
from model.systems.assistance.schedule import ScheduleData
class Overtime:
offices = inject.attr(Offices)
date = inject.attr(Date)
schedule = inject.attr(Schedule)
logs = inject.attr(Logs)
"""
obtiene el ultimo estado del pedido de horas extras indicado por reqId
"""
def _getOvertimeRequestStatus(self,con,reqId):
cur = con.cursor()
cur.execute('select jrs.status from assistance.overtime_requests_status as jrs, (select request_id,max(created) as created from assistance.overtime_requests_status group by request_id) as r where r.created = jrs.created and r.request_id = jrs.request_id')
if cur.rowcount <= 0:
return None
return cur.fetchone()[0]
"""
obtiene todas los pedidos de horas extras que estan como ultimo estado en la lista de estados pasada como parametro.
status = una lista de estados posibles.
retora un dict con los ids como key y el estado como value
{ id: status }
"""
def _getOvertimesInStatus(self,con,status=[]):
cur = con.cursor()
if status is None or len(status) <= 0:
cur.execute('select jrs.request_id,jrs.status from assistance.overtime_requests_status as jrs, (select request_id,max(created) as created from assistance.overtime_requests_status group by request_id) as r where r.created = jrs.created and r.request_id = jrs.request_id')
else:
cur.execute('select jrs.request_id,jrs.status from assistance.overtime_requests_status as jrs, (select request_id,max(created) as created from assistance.overtime_requests_status group by request_id) as r where r.created = jrs.created and r.request_id = jrs.request_id and jrs.status in %s',(tuple(status),))
if cur.rowcount <= 0:
return {}
statusR = {}
for rs in cur:
statusR[rs[0]] = rs[1]
return statusR
def getWorkedOvertime(self, con, userId, date):
        '''
        Compute the overtime hours worked on a given date.
        @param con Database connection
        @param userId User id
        @param date Date for which the worked overtime is computed
        '''
        # fetch the day's approved overtime requests
overtimeRequests = self.getOvertimeRequests(con, ['APPROVED'], None, [userId], date)
if len(overtimeRequests) == 0:
return 0
        # determine the start datetime for the log search
schedules = None
dateAux = date
while schedules is None or len(schedules) == 0:
dateAux = dateAux - datetime.timedelta(days=1)
schedules = self.schedule.getSchedule(con, userId, dateAux)
datetimeAux = schedules[-1].getEnd(dateAux)
        datetimePre = datetimeAux + datetime.timedelta(hours=3) # TODO: should take the maximum of datetimeAux and any overtime defined between dateAux and date - 1 day (if one exists)
        # determine the end datetime for the log search
schedules = None
dateAux = date
while schedules is None or len(schedules) == 0:
dateAux = dateAux + datetime.timedelta(days=1)
schedules = self.schedule.getSchedule(con, userId, dateAux)
datetimeAux = schedules[0].getStart(dateAux)
        datetimePos = datetimeAux - datetime.timedelta(hours=3) # TODO: should take the minimum of datetimeAux and any overtime defined between date + 1 day and dateAux (if one exists)
        # get worked hours from the logs bounded by the previous and next schedules
logs = self.logs.findLogs(con, userId, datetimePre, datetimePos)
(workedHours, attlogs) = self.logs.getWorkedHours(logs)
        total = 0  # renamed from `sum` to avoid shadowing the builtin
        for o in overtimeRequests:
            for wh in workedHours:
                if(wh["start"] is None or wh["end"] is None):
                    continue
                if (wh["start"] <= o["begin"] and wh["end"] >= o["begin"]) or (wh["end"] <= o["end"] and wh["start"] >= o["begin"]):
                    # overlap: clamp the worked interval to the overtime window
                    start = o["begin"] if (o["begin"] - wh["start"]).total_seconds() >= 0 else wh["start"]
                    end = o["end"] if (o["end"] - wh["end"]).total_seconds() <= 0 else wh["end"]
                else:
                    continue
                total += (end - start).total_seconds()
        return total
def getOvertimeRequests(self, con, status=[], requestors=None, users=None, begin=None, end=None):
"""
obtiene todas los pedidos de horas extras con cierto estado
status es el estado a obtener. en el caso de que no sea pasado entonces se obtienen todas, en su ultimo estado
users es una lista de ids de usuarios para los que se piden los requests, si = None o es vacío entonces retorna todas.
requestors es una lista de ids de usuarios que piden los requests, si = None o es vacío entonces no se toma en cuenta.
"""
statusR = self._getOvertimesInStatus(con,status)
#logging.debug('in status = {} req {}'.format(status,statusR))
if len(statusR) <= 0:
return []
ids = tuple(statusR.keys())
params = (ids, )
sql = "select id,user_id,requestor_id,jbegin,jend,reason from assistance.overtime_requests where id in %s"
if users is not None and len(users) > 0:
users = tuple(users)
params = params + (users, )
sql += " AND user_id IN %s"
if requestors is not None and len(requestors) > 0:
requestors = tuple(requestors)
params = params + (requestors, )
sql += " AND requestor_id in %s"
if begin is not None and end is None:
params = params + (begin, )
sql += " AND jbegin::date = %s"
if begin is not None and end is not None:
params = params + (begin, end )
sql += " AND jbegin >= %s AND jend <= %s"
if end is not None and begin is None:
params = params + (end, )
sql += " AND jend::date = %s"
sql += ";"
cur = con.cursor()
cur.execute(sql, params)
if cur.rowcount <= 0:
return []
requests = []
for j in cur:
jid = j[0]
requests.append(
{
'id':jid,
'user_id':j[1],
'requestor_id':j[2],
'begin':j[3],
'end':j[4],
'reason':j[5],
'status':statusR[jid]
}
)
return requests
"""
obtiene todos los pedidos de horas extras que tiene permisos de manejar, en cierto estado.
group = ROOT|TREE --> ROOT = oficinas directas, TREE = oficinas directas y todas las hijas
"""
def getOvertimeRequestsToManage(self,con,userId,status,group='ROOT'):
tree = False
if group == 'TREE':
tree = True
offices = self.offices.getOfficesByUserRole(con,userId,tree,'horas-extras')
logging.debug('officesByUserRole : {}'.format(offices))
if offices is None or len(offices) <= 0:
return []
officesIds = list(map(lambda o: o['id'], offices))
users = self.offices.getOfficesUsers(con,officesIds)
logging.debug('getOfficesUsers : {}'.format(users))
while userId in users:
users.remove(userId)
if users is None or len(users) <= 0:
return []
overtimes = self.getOvertimeRequests(con,status,users=users)
return overtimes
"""
cambia el estado de un pedido al nuevo estado especificado por status.
retorna los eventos a ser disparados
estados posibles a cambiar : PENDING|APPROVED|REJECTED|CANCELED
"""
def updateOvertimeRequestStatus(self,con,userId,requestId,status):
cur = con.cursor()
cur.execute('insert into assistance.overtime_requests_status (request_id,user_id,status) values (%s,%s,%s)',(requestId,userId,status))
events = []
e = {
'type':'OvertimeStatusChangedEvent',
'data':{
'overtime_id':requestId,
'user_id':userId,
'status':status
}
}
events.append(e)
return events
"""
realiza el pedido de horas extras para ser aprobado
estado inicial del pedido = PENDING, con la fecha actual del servidor.
"""
def requestOvertime(self,con,requestorId,userId,begin,end,reason):
oid = str(uuid.uuid4())
cur = con.cursor()
cur.execute('set timezone to %s',('UTC',))
cur.execute('insert into assistance.overtime_requests (id,requestor_id,user_id,jbegin,jend,reason) values (%s,%s,%s,%s,%s,%s)',(oid,requestorId,userId,begin,end,reason))
events = []
e = {
'type':'OvertimesUpdatedEvent',
'data':{
'overtime_id': oid,
'user_id':userId,
'requestor_id':requestorId
}
}
events.append(e)
events.extend(self.updateOvertimeRequestStatus(con,requestorId,oid,'PENDING'))
return events
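# Hypothetical usage sketch (assumes a psycopg2-style connection `con` and the
# `inject` bindings configured elsewhere in the application):
# overtime = Overtime()
# events = overtime.requestOvertime(con, requestorId, userId, begin, end, 'month-end closing')
# events += overtime.updateOvertimeRequestStatus(con, approverId, events[0]['data']['overtime_id'], 'APPROVED')
# seconds_worked = overtime.getWorkedOvertime(con, userId, begin.date())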
|
#checks validity of a date
def main():
    right = True
    months_with_31days = [1, 3, 5, 7, 8, 10, 12]
    months_with_30days = [4, 6, 9, 11]
    months_with_28days = [2]
    date = input("Enter the date(mm/dd/yy format): ")
    mm, dd, yy = date.split('/')
    mm = int(mm)
    dd = int(dd)
    yy = int(yy)
    if mm > 12 or mm < 1:
        right = False
    elif dd > 31 or dd < 1:
        right = False
    elif mm in months_with_30days and dd == 31:
        right = False
    elif mm in months_with_28days and dd > 29:
        right = False
    elif mm in months_with_28days and dd == 29 and yy % 4 != 0:
        # two-digit years make the century rule ambiguous, so use the simple
        # every-fourth-year leap test
        right = False
    if right:
        print("The date", date, "is valid")
    else:
        print("The date", date, "is invalid")
main()
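# A sturdier alternative is to let the standard library validate the string,
# including the full leap-year rules (a sketch; not wired into main() above):
import datetime
def is_valid(date_str):
    try:
        datetime.datetime.strptime(date_str, "%m/%d/%y")
        return True
    except ValueError:
        return False
# print(is_valid("02/29/20"))  # True: 2020 is a leap year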
|
class CoordValue:
    def __set_name__(self, owner, name):
        # keep the value on the Point instance itself; storing it on the
        # descriptor (as the original did) would share one value between
        # every Point
        self.name = '_' + name
    def __get__(self, instance, owner):
        return getattr(instance, self.name)
    def __set__(self, instance, value):
        setattr(instance, self.name, value)
    def __delete__(self, instance):
        delattr(instance, self.name)
class Point:
    coordX = CoordValue()
    coordY = CoordValue()
    def __init__(self, x = 0, y = 0):
        self.coordX = x
        self.coordY = y
        # self.__x = x
        # self.__y = y
        # print("Created an instance")
#
# def __checkValue(x):
# if isinstance(x, int) or isinstance(x, float):
# return True
# return False
# @property
# def coordX(self):
# print("вызов __getCoordX")
# return self.__x
# @coordX.setter
# def coordX(self, x):
# if Point.__checkValue(x):
# print("вызов __setCoordX")
# self.__x = x
# else:
    # raise ValueError("Invalid data format")
# @coordX.deleter
# def coordX(self):
# print("Удаление свойства")
# del self.__x
# coordX = property(__getCoordX, __setCoordX, __delCoordX)
pt = Point(1, 2)
# print(pt.coordX)
# pt.coordX = 100
# a = pt.coordX
# print(a)
# del pt.coordX
# print(a)
# print(pt)
pt.coordX = 5
print(pt.coordX, pt.coordY)
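# With per-instance storage via __set_name__, two Points no longer share
# their coordinates -- a quick check:
p1, p2 = Point(1, 2), Point(10, 20)
print(p1.coordX, p2.coordX) # 1 10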
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import pytest
from kudu.schema import INT32
from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
from tests.common.kudu_test_suite import KuduTestSuite
KUDU_MASTER_HOSTS = pytest.config.option.kudu_master_hosts
LOG = logging.getLogger(__name__)
class TestKuduOperations(CustomClusterTestSuite, KuduTestSuite):
@classmethod
def get_workload(cls):
return 'functional-query'
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(impalad_args=\
"--use_local_tz_for_unix_timestamp_conversions=true")
def test_local_tz_conversion_ops(self, vector, unique_database):
"""IMPALA-5539: Test Kudu timestamp reads/writes are correct with the
use_local_tz_for_unix_timestamp_conversions flag."""
# These tests provide enough coverage of queries with timestamps.
self.run_test_case('QueryTest/kudu-scan-node', vector, use_db=unique_database)
self.run_test_case('QueryTest/kudu_insert', vector, use_db=unique_database)
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(impalad_args="-kudu_master_hosts=")
def test_kudu_master_hosts(self, cursor, kudu_client):
"""Check behavior when -kudu_master_hosts is not provided to catalogd."""
with self.temp_kudu_table(kudu_client, [INT32]) as kudu_table:
table_name = self.get_kudu_table_base_name(kudu_table.name)
props = "TBLPROPERTIES('kudu.table_name'='%s')" % (kudu_table.name)
try:
cursor.execute("CREATE EXTERNAL TABLE %s STORED AS KUDU %s" % (table_name,
props))
assert False
except Exception as e:
assert "Table property 'kudu.master_addresses' is required" in str(e)
cursor.execute("""
CREATE EXTERNAL TABLE %s STORED AS KUDU
TBLPROPERTIES ('kudu.master_addresses' = '%s',
'kudu.table_name'='%s')
""" % (table_name, KUDU_MASTER_HOSTS, kudu_table.name))
cursor.execute("DROP TABLE %s" % table_name)
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(impalad_args="-kudu_error_buffer_size=1024")
def test_error_buffer_size(self, cursor, unique_database):
"""Check that queries fail if the size of the Kudu client errors they generate is
greater than kudu_error_buffer_size."""
table_name = "%s.test_error_buffer_size" % unique_database
cursor.execute("create table %s (a bigint primary key) stored as kudu" % table_name)
# Insert a large number of a constant value into the table to generate many "Key
# already present" errors. 50 errors should fit inside the 1024 byte limit.
cursor.execute(
"insert into %s select 1 from functional.alltypes limit 50" % table_name)
try:
# 200 errors should overflow the 1024 byte limit.
cursor.execute(
"insert into %s select 1 from functional.alltypes limit 200" % table_name)
assert False, "Expected: 'Error overflow in Kudu session.'"
except Exception as e:
assert "Error overflow in Kudu session." in str(e)
class TestKuduClientTimeout(CustomClusterTestSuite, KuduTestSuite):
"""Kudu tests that set the Kudu client operation timeout to 1ms and expect
specific timeout exceptions. While we expect all exercised operations to take at
least 1ms, it is possible that some may not and thus the test could be flaky. If
this turns out to be the case, specific tests may need to be re-considered or
removed."""
@classmethod
def get_workload(cls):
return 'functional-query'
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(impalad_args="-kudu_operation_timeout_ms=1")
def test_impalad_timeout(self, vector):
"""Check impalad behavior when -kudu_operation_timeout_ms is too low."""
self.run_test_case('QueryTest/kudu-timeouts-impalad', vector)
|
from _typeshed import Incomplete
def group_betweenness_centrality(
G,
C,
normalized: bool = True,
weight: Incomplete | None = None,
endpoints: bool = False,
): ...
def prominent_group(
G,
k,
weight: Incomplete | None = None,
C: Incomplete | None = None,
endpoints: bool = False,
normalized: bool = True,
greedy: bool = False,
): ...
def group_closeness_centrality(G, S, weight: Incomplete | None = None): ...
def group_degree_centrality(G, S): ...
def group_in_degree_centrality(G, S): ...
def group_out_degree_centrality(G, S): ...
|
"""holds all the independent functions for the secret_messages.py game.
Specifically, when a cipher is selected from the pick_a_cipher function,
it runs the corresponding function from this module.
"""
import os
# Need a way to handle selections based on the chosen cipher
# one function per cipher with a description and specific handling
# define clear screen functions
def clear_screen():
"""Clears terminal/console screen.
Description:
Get a user selection of encryption method (encrypt or decrypt)
and send that method to the pick_a_cipher function
prints the return from pick_a_cipher.
Return:
Nothing.
Keyword Arguments:
*none*
"""
if os.name == 'nt':
os.system('cls')
else:
os.system('clear')
def affine_selected():
"""Use for when the Affine Cipher is selected.
Description:
Gets all of the necessary information from a user when the Affine
Cipher is selected.
Return:
Tuple with the a and b variables. To be used in the
AffineCipher.encrypt()/decrypt() method.
Keyword Arguments:
*none*
"""
coprimes = [2, 3, 4, 6, 7, 8, 9, 11, 12, 13, 14,
16, 17, 18, 21, 22, 23, 24, 26, 27, 28,
29, 31, 32, 33, 34, 36, 37, 39, 41, 42,
43, 44, 46, 47, 48, 49, 51, 52, 53, 54,
56, 58, 59, 61, 62, 63, 64, 66, 67, 68,
69, 71, 72, 73, 74, 77, 78, 79, 81, 82,
83, 84, 86, 87, 88, 89, 91, 92, 93, 94]
clear_screen()
print("""\nYou've selected the Affine Cipher.\n
The affine cipher is a type of monoalphabetic substitution cipher,
wherein each letter in an alphabet is mapped to its numeric equivalent,
encrypted using a simple mathematical function:
E(x) = (ax + b) mod m
where modulus m is the size of the alphabet
and a and b are the key of the cipher,
and converted back to a letter.\n
We need some values for a & b:\n
Please enter a coprime of 95 as the 'a' keyword...\n
If you need a list of coprimes, type 'L'
""")
try:
affine_a = input("a: ")
if str(affine_a.upper()) == 'L':
clear_screen()
print("\nPlease enter a coprime of 95 as the 'a' keyword...\n")
print("{}\n".format(', '.join(map(str, coprimes))))
affine_a = int(input("a: "))
print("""
Now choose your shift value 'b' (e.g. number between 2-100):\n""")
affine_b = int(input("b: "))
else:
affine_a = int(affine_a)
print("""
Now choose your shift value 'b' (e.g. number between 2-100):\n""")
affine_b = int(input("b: "))
except ValueError:
input("You must select a number...\n\nPress any key to try again...")
affine_a, affine_b = affine_selected()
return affine_a, affine_b
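# A minimal sketch of the affine map described in the prompt above, assuming
# m = 95 (the printable-ASCII alphabet starting at space, matching the
# "coprime of 95" requirement). This helper is hypothetical -- the project's
# real AffineCipher class lives in another module:
def _affine_encrypt_char(ch, a, b, m=95, offset=32):
    """Encrypt one printable-ASCII character via E(x) = (ax + b) mod m."""
    x = ord(ch) - offset
    return chr((a * x + b) % m + offset)
# e.g. _affine_encrypt_char('A', 3, 7) maps x = 33 to (3*33 + 7) % 95 = 11 -> '+'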
def alberti_selected():
"""Use for when the Alberti Cipher is selected.
Description:
Gets all of the necessary information from a user when the Alberti
Cipher is selected.
Return:
Index. To be used in the AlbertiCipher.encrypt()/decrypt() method.
Keyword Arguments:
*none*
"""
clear_screen()
print("""\nYou've selected the Alberti Cipher.\n
The Alberti cipher is a type of polyalphabetic substitution cipher,
with mixed alphabets and variable period, and is made up of two
concentric disks, attached by a common pin, which can rotate one
with respect to the other.
We need an index:\n""")
index = input("Index: ")
clear_screen()
# print("\nPlease enter the second keyword:")
# keyword2 = input("\nKeyword 2: ")
return index # keyword1, keyword2
def atbash_selected():
"""Use for when the Atbash Cipher is selected.
Description:
Simply shows what the Atbash Cipher is without asking for input.
Return:
*none*
Keyword Arguments:
*none*
"""
clear_screen()
input("""\nYou've selected the Atbash Cipher.\n
The Atbash cipher is a mono-alphabetic substitution cipher
that works by substituting the first letter of an alphabet for the last letter,
the second letter for the second to last and so on,
effectively reversing the alphabet\n
Press any key to continue...\n
""")
clear_screen()
def caesar_selected():
"""Use for when the Caesar Cipher is selected.
Description:
Gets the shift value the user wants to use.
Return:
Shift numerical value. To be used in the Caesar.encrypt()/decrypt() method
Keyword Arguments:
*none*
"""
clear_screen()
print("""\nYou've selected the Caesar Cipher.\n
The Caesar cipher is a type of substitution cipher
in which each letter in the plaintext is replaced by a
letter some fixed number of positions (shift) down the alphabet.\n""")
print("What would you like the shift to be?\n")
    while True:
        try:
            shift = int(input("Shift: "))
            break
        except ValueError:
            print("You must select a number...\n")
clear_screen()
return shift
def kw_selected():
"""Use for when the Keyword Cipher is selected.
Description:
Gets the user's keyword to be used in creating the character map.
Return:
Keyword. To be used in the KeywordCipher.encrypt()/decrypt() methods
Keyword Arguments:
*none*
"""
clear_screen()
print("""\nYou've selected the Keyword Cipher.\n
The keyword cipher is a form of monoalphabetic substitution wherein a keyword
is used as the key, and it determines the letter matchings of the cipher
alphabet to the plain alphabet. Repeats of letters in the word are removed,
then the cipher alphabet is generated with the keyword matching to A,B,C etc.
until the keyword is used up, whereupon the rest of the ciphertext letters
are used in alphabetical order, excluding those already used in the key.\n""")
print("What would you like the keyword to be?\n")
keyword = input("Keyword: ")
return keyword
|
#!/usr/bin/env python3
import boto3
def ssm_describe_instance_information(client):
    # page through DescribeInstanceInformation with the client passed in,
    # so the caller's region and credentials apply
    instances = []
    paginator = client.get_paginator('describe_instance_information')
    pages = paginator.paginate()
    for response in pages:
        for instanceinfo in response['InstanceInformationList']:
            if instanceinfo['InstanceId']:
                instances.append(instanceinfo['InstanceId'])
    return instances
def main():
    client = boto3.client('ssm', 'eu-central-1')
    instances = ssm_describe_instance_information(client)
    for i in instances:
        print(i)
if __name__ == '__main__':
main()
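# To list only instances whose agent is currently reachable, the same API
# accepts a PingStatus filter (a sketch; filtering is not used above):
# pages = paginator.paginate(Filters=[{'Key': 'PingStatus', 'Values': ['Online']}])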
|
import os
import re
import jieba
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
import collections
import gensim
from interval import Interval
try:
from langconv import *
except ImportError:
from data.langconv import *
#os.chdir(r'cnn_sentence_classification\data')
def clear_comment(path='./data'):
files = ['best','new','popular']
files = ['popular']
for file in files:
df = pd.read_csv(os.path.join(path,'comment_full_'+file+'.csv'), header=None, names=['username','score','comment','pub_time','votes'], encoding='utf-8')
df['comment'] = df['comment'].map(lambda com: re.sub(r'\s+',' ', str(com)))
        # replace NaN scores with the sentinel string 'NaN', then drop those rows
        df['score'] = df['score'].fillna('NaN')
        df = df[~df['score'].isin(['NaN'])]
        df.drop(['username','pub_time','votes'], axis=1, inplace=True)
        df['score'] = df['score'].astype(int)
        # shift scores from 1-5 down to 0-4
        df['score'] = df['score'].map(lambda score: score-1)
        df.to_csv(os.path.join(path,'comment_' + file+ '.csv'), sep='\t', index=False, header=False)
if os.path.exists(os.path.join(path,'comment.csv')):
os.remove(os.path.join(path,'comment.csv'))
with open(os.path.join(path,'comment.csv'), 'a+', encoding='utf-8') as cfile:
for file in files:
temp = open(os.path.join(path,'comment_' + file + '.csv'),'r', encoding='utf-8').readlines()
cfile.writelines(temp)
def Traditional2Simplified(sentence):
    '''
    Convert the traditional Chinese characters in sentence to simplified ones.
    :param sentence: the sentence to convert
    :return: the sentence with traditional characters converted to simplified
    '''
sentence = Converter('zh-hans').convert(sentence)
return sentence
# write score and content to the file at out_path
def df_to_csv(socre, content, out_path):
data = {'score':socre, 'content':content}
df_data = pd.DataFrame(data, columns=['score', 'content'])
df_data.to_csv(out_path, sep='\t', index=False, header=False)
return df_data
# drop duplicate rows from the csv file
# split the ~100K rows: 10K for testing, 20K for validation, the remaining ~70K for training
# scores > 2 form the positive class; scores <= 2 the negative class. Labels: {4: "highly recommend", 3: "recommend", 2: "okay", 1: "poor", 0: "very poor"}
# returns train, val, test
def unique_divide(file_csv, out_path='./data', is_2c=True):
df = pd.read_csv(file_csv, header=None, names=['score_content'])
data_unique = df.drop_duplicates()
# data_unique.to_csv(file_csv)
split_df = pd.DataFrame(data_unique.score_content.str.split('\t').tolist(), columns=["score", "content"])
    # keep only Chinese and English characters, convert traditional to simplified, and lowercase English
split_df['content'] = split_df['content'].map(lambda com: Traditional2Simplified(re.sub(r'[^\u4e00-\u9fa5^A-Z^a-z^\']', ' ', com)).lower().strip())
split_df['content'] = split_df['content'].map(lambda com: re.sub(r'\s+', ' ', com).strip())
split_df.info()
print(split_df['score'].value_counts(normalize=True,ascending=True))
    # drop comments shorter than 60 characters (currently disabled)
# split_df = split_df[~(split_df.content.str.len() < 60)]
print('\nafter deleted================\n')
    # load stopwords
stopwords = [line.strip() for line in open('data/stopwords.txt', encoding='utf-8').readlines()]
    token_range = Interval(5, 50)
    # tokenize each comment with jieba, drop stopwords, then drop rows whose token count falls outside [5, 50]
    rows=[i for i in split_df.index if len([j for j in jieba.lcut(str(split_df.iat[i,1]).strip()) if j not in stopwords and j.strip()]) not in token_range]
split_df=split_df.drop(rows,axis=0)
split_df.info()
print(split_df['score'].value_counts(normalize=True,ascending=True))
    # scores > 2 become class 1 (positive); scores <= 2 become class 0 (negative)
    # 5-class labels: {4: "highly recommend", 3: "recommend", 2: "okay", 1: "poor", 0: "very poor"}
if is_2c:
split_df['score']=split_df['score'].map(lambda s:1 if int(s[0])>2 else 0)
# split_df['score'] = np.where(split_df['score'] > 2, 1, 0)
    # data preview
    # split_df.head()
    # split the data into inputs and labels
score = split_df.iloc[:, 0:1].values
content = split_df.iloc[:, 1].values
    # split into train+val and test sets
score_tv, score_test, content_tv, content_test = train_test_split(score, content, test_size=0.1, random_state=20)
    # split train+val into training and validation sets
score_train, score_val, content_train, content_val = train_test_split(score_tv, content_tv, test_size=0.1, random_state=20)
test = df_to_csv([s[0] for s in score_test], np.array(content_test).flatten(), os.path.join(out_path,'test.txt'))
train = df_to_csv([s[0] for s in score_train], np.array(content_train).flatten(), os.path.join(out_path, 'train.txt'))
val = df_to_csv([s[0] for s in score_val], np.array(content_val).flatten(), os.path.join(out_path,'val.txt'))
# print(train_2c[train_2c['score']>0].count())
print('train:',str(train[train['score']>0].size/train.size))
print('val:',str(val[val['score']>0].size/val.size))
print('test:',str(test[test['score']>0].size/test.size))
return train,val,test
# get the class labels and the label-to-id mapping
# ids: {0: "very poor", 1: "poor", 2: "okay", 3: "recommend", 4: "highly recommend"}
def class_to_id(is_2c=True):
classes = ['neg', 'pos']
if not is_2c:
classes = ["很差","较差","还行","推荐","力荐"]
class_to_id = dict(zip(classes, range(len(classes))))
return classes,class_to_id
# build the vocabulary
def build_word_dict(train_df, val_df, out_path, stopwords_path, vocab_size=0):
    # load stopwords
stopwords = [line.strip() for line in open(stopwords_path, encoding='utf-8').readlines()]
df = [train_df["content"], val_df["content"]]
max_sen_len = 0
sum_sen_len = 0
words = list()
for contents in df:
for content in contents:
if content.strip():
token = jieba.lcut(content.strip())
token = [i for i in token if i not in stopwords and i.strip()]
# print(token)
sum_sen_len += len(token)
max_sen_len = max(max_sen_len, len(token))
for word in token:
words.append(word)
    print('>>> ave_sen_len:'+str(sum_sen_len/(len(df[0])+len(df[1])))) # average over both splits (the original double-counted df[0])
print('>>> max_sen_len:'+str(max_sen_len))
    # count word frequencies
word_counter = collections.Counter(words)
if vocab_size!=0 and len(word_counter) > vocab_size-3:
word_counter=word_counter.most_common(vocab_size-3)
else:
word_counter = word_counter.most_common()
word_dict = dict()
word_dict["<pad>"] = 0
word_dict["<unk>"] = 1
word_dict["<eos>"] = 2
for word, _ in word_counter:
word_dict[word] = len(word_dict)
vocab_size = len(word_dict)
print('>>> vocab_size:'+str(vocab_size))
with open(out_path, 'w', encoding='utf-8') as wd:
for w in word_dict:
wd.write(w+'\t')
wd.write(str(word_dict[w]))
wd.write('\n')
return word_dict,max_sen_len,vocab_size
# load the vocabulary
def load_word_dict(word_dict_path):
word_dict_df = pd.read_csv(word_dict_path, sep='\t', header=None, names=['word','id'], encoding='utf-8')
word_dict = dict()
for indexs in word_dict_df.index:
word_dict[word_dict_df.loc[indexs].values[0]] = word_dict_df.loc[indexs].values[1]
#print(word_dict)
return word_dict
# build word2vec vectors
def build_word2vec(fname, word_dict, save_to_path=None):
"""
:param fname: 预训练的word2vec.
:param word_dict: 语料文本中包含的词汇集.
:param save_to_path: 保存训练语料库中的词组对应的word2vec到本地
:return: 语料文本中词汇集对应的word2vec向量{id: word2vec}.
"""
n_words = max(word_dict.values()) + 1
model = gensim.models.KeyedVectors.load_word2vec_format(fname, binary=True)
word_vecs = np.array(np.random.uniform(-1., 1., [n_words, model.vector_size]))
for word in word_dict.keys():
try:
word_vecs[word_dict[word]] = model[word]
except KeyError:
pass
if save_to_path:
with open(save_to_path, 'w', encoding='utf-8') as f:
for vec in word_vecs:
vec = [str(w) for w in vec]
f.write(' '.join(vec))
f.write('\n')
return word_vecs
def load_corpus_word2vec(path):
"""加载语料库word2vec词向量,相对wiki词向量相对较小"""
word2vec = []
with open(path, encoding='utf-8') as f:
for line in f.readlines():
sp = [float(w) for w in line.strip().split()]
word2vec.append(sp)
return np.asarray(word2vec)
# load a corpus split: train/val/test
# x is the sequence of word ids for each comment; y is one-hot: pos-[0, 1], neg-[1, 0]
# 5-class one-hot: {4: [0,0,0,0,1], 3: [0,0,0,1,0], 2: [0,0,1,0,0], 1: [0,1,0,0,0], 0: [1,0,0,0,0]}
def build_word_dataset(df_data, word_dict, stopwords_path, max_sen_len=870, num_class=2):
# Shuffle dataframe
df = df_data.sample(frac=1)
    # load stopwords
stopwords = [line.strip() for line in open(stopwords_path, encoding='utf-8').readlines()]
x = [[j for j in jieba.lcut(df.iat[i,1].strip()) if j not in stopwords and str(j).strip()] for i in df.index]
x = list(map(lambda d: list(map(lambda w: word_dict.get(w, word_dict["<unk>"]), d)), x))
x = list(map(lambda d: d + [word_dict["<eos>"]], x))
x = list(map(lambda d: d[:max_sen_len], x))
x = list(map(lambda d: d + (max_sen_len - len(d)) * [word_dict["<pad>"]], x))
y = list(map(lambda d: [1 if i==d else 0 for i in range(num_class)], list(df["score"])))
return x, y
def cut_dataset(df_data, stopwords_path):
    # load stopwords
stopwords = [line.strip() for line in open(stopwords_path, encoding='utf-8').readlines()]
    # tokenize with jieba
def word_clean(content):
return ' '.join([j for j in jieba.lcut(content.strip()) if j not in stopwords and j.strip()])
df_data['content_cut'] = df_data.content.apply(word_clean)
score = df_data.iloc[:, 0].values
content = df_data.iloc[:, 2].values
return content,score
def build_tfidvec_dataset(train_df, val_df, stopwords_path, max_df=0.95, min_df=2, num_class=2):
train_content, train_score = cut_dataset(train_df, stopwords_path)
val_content, val_score = cut_dataset(val_df, stopwords_path)
    # compute the TF-IDF features
    # sublinear_tf=True yields a roughly Gaussian feature distribution and can add about 1-2 points of accuracy
vectorizer = TfidfVectorizer(max_df=max_df, min_df=min_df, smooth_idf=True, sublinear_tf=True)
    # fit on the training data
train_vec_data = vectorizer.fit_transform(train_content)
    # transform the validation data with the fitted vectorizer
val_vec_data = vectorizer.transform(val_content)
n_dim = len(vectorizer.get_feature_names())
print("关键词个数:"+str(n_dim))
return train_vec_data, train_score, val_vec_data, val_score, vectorizer
def load_tfidvec_dataset(df_data, stopwords_path, vectorizer):
test_content, test_score = cut_dataset(df_data, stopwords_path)
    # transform the test data with the fitted vectorizer
test_vec_data = vectorizer.transform(test_content)
return test_vec_data, test_score
def batch_index(length, batch_size, is_shuffle=True):
"""
生成批处理样本序列id.
:param length: 样本总数
:param batch_size: 批处理大小
:param is_shuffle: 是否打乱样本顺序
:return:
"""
index = [idx for idx in range(length)]
if is_shuffle:
np.random.shuffle(index)
for i in range(int(np.ceil(length / batch_size))):
yield index[i * batch_size:(i + 1) * batch_size]
if __name__ == "__main__":
# clear_comment('temp')
# train,val,test = unique_divide('./comment.csv', out_path='', is_2c=True)
# word_dict,max_sen_len,vocab_size = build_word_dict(train,val,"temp/word_dict.txt", './stopwords.txt')
# word_vecs = build_word2vec('./wiki_word2vec_50.bin', word_dict, './word_vecs.txt')
# x,y=build_word_dataset(train, word_dict, './stopwords.txt', max_sen_len=max_sen_len+1, num_class=2)
# cla,class2id=class_to_id(is_2c=True)
# print(class2id)
train_df = pd.read_csv('./train.txt', sep='\t', header=None, names=["score", "content"], encoding='utf-8')
val_df = pd.read_csv('./val.txt', sep='\t', header=None, names=["score", "content"], encoding='utf-8')
#word_dict,max_sen_len,vocab_size = build_word_dict(train_df,val_df,"./word_dict.txt", './stopwords.txt')
    build_tfidvec_dataset(train_df, val_df, './stopwords.txt', max_df=0.95, min_df=2, num_class=2) # pass val_df too; it was omitted, shifting every argument
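    # A minimal downstream sketch (assumption: scikit-learn's LogisticRegression,
    # which is not otherwise used in this module):
    # from sklearn.linear_model import LogisticRegression
    # train_x, train_y, val_x, val_y, vec = build_tfidvec_dataset(train_df, val_df, './stopwords.txt')
    # clf = LogisticRegression(max_iter=1000).fit(train_x, np.ravel(train_y))
    # print('val accuracy:', clf.score(val_x, np.ravel(val_y)))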
|
# A Python implementation of a BP (backpropagation) neural network
import numpy as np
from numpy import random
import sklearn.datasets
import matplotlib.pyplot as plt
# generate the data and split it into training and test sets
trainingSet, trainingLabels = sklearn.datasets.make_moons(400, noise=0.20)
plt.scatter(trainingSet[trainingLabels==1][:,0], trainingSet[trainingLabels==1][:,1], s=40, c='r', marker='x',cmap=plt.cm.Spectral)
plt.scatter(trainingSet[trainingLabels==0][:,0], trainingSet[trainingLabels==0][:,1], s=40, c='y', marker='+',cmap=plt.cm.Spectral)
plt.show()
testSet = trainingSet[320:]
testLabels = trainingLabels[320:]
trainingSet = trainingSet[:320]
trainingLabels = trainingLabels[:320]
# network hyper-parameters
layer =[2,3,1] # number of layers and nodes per layer
Lambda = 0.005 # regularization coefficient
alpha = 0.2 # learning rate
num_passes = 10000 # number of iterations
m = len(trainingSet) # number of samples
# build the network
# layer structure is stored in lists; depth and widths are freely configurable
b = [] # biases, len(layer)-1 entries; b[0] is the first hidden layer's bias (as a vector)
W = []
for i in range(len(layer)-1):
    W.append(random.random(size = (layer[i+1],layer[i]))) # W[i] maps layer i to layer i+1 (the input layer is layer 0)
    b.append(np.array([0.1]*layer[i+1])) # bias b[i] has shape 1 x (nodes in layer i+1)
a = [np.array(0)]*(len(W)+1) # a[0] = x (the input); a[i+1] = f(z[i]); a[-1] is the final output
z = [np.array(0)]*len(W) # z[i] is the pre-activation input to layer i+1
# note: the original wrapped W in np.array(W); with ragged layer shapes modern
# NumPy rejects that, and a plain list of arrays behaves identically here
def costfunction(predict,labels):
    # cost function without the regularization term
    # inputs are numpy vectors
    return sum((predict - labels)**2)
def error_rate(predict,labels):
    # error rate for binary classification with 0/1 labels
    # inputs are numpy vectors
    error =0.0
    for i in range(len(predict)):
        predict[i] = round(predict[i])
        if predict[i]!=labels[i]:
            error+=1
    return error/len(predict)
def sigmoid(z): # sigmoid activation
    return 1/(1+np.exp(-z))
def diff_sigmoid(z): # derivative of the sigmoid activation
    return sigmoid(z)*(1-sigmoid(z))
activation_function = sigmoid # chosen activation function
diff_activation_function = diff_sigmoid # derivative of the chosen activation
# train the BP network
a[0] = np.array(trainingSet).T # one column per sample, one row per feature
y = np.array(trainingLabels)
for v in range(num_passes):
    # forward propagation
    for i in range(len(W)):
        z[i] = np.dot(W[i],a[i])
        for j in range(m):
            z[i][:,j]+=b[i] # add the bias
        a[i+1] = activation_function(z[i]) # activate the nodes
    predict = a[-1][0] # a[-1] is the output layer, i.e. the prediction
    # backpropagation
    delta = [np.array(0)]*len(W) # delta[0] is the first hidden layer's residual, delta[-1] the output layer's
    # residual of the output layer
    delta[-1] = -(y-a[-1])*diff_activation_function(z[-1])
    # residuals of every layer except the output, from the second layer on
    for i in range(len(delta)-1):
        delta[-i-2] = np.dot(W[-i-1].T,delta[-i-1])*diff_activation_function(z[-i-2]) # traversed in reverse order
        # with index -i-2 as layer L: W[-i-1] maps L to L+1, delta[-i-1] is layer L+1's residual, z[-i-2] the pre-activation of layer L
    # accumulate the partial derivatives
    delta_w = [np.array(0)]*len(W)
    delta_b = [np.array(0)]*len(W)
    for i in range(len(W)):
        # matrix form: these two lines sum the gradients over all samples
        delta_w[i] = np.dot(delta[i],a[i].T)
        delta_b[i] = np.sum(delta[i],axis=1)
    # update the weights
    for i in range(len(W)):
        W[i] -= alpha*(Lambda*W[i]+delta_w[i]/m)
        b[i] -= alpha/m*delta_b[i]
print('training set unregularized cost:',costfunction(predict,np.array(trainingLabels)))
print('training set error rate:',error_rate(predict,np.array(trainingLabels)))
# evaluate the network on the test set
a[0] = np.array(testSet).T # one column per sample, one row per feature
# forward propagation
m = len(testSet)
for i in range(len(W)):
    z[i] = np.dot(W[i],a[i])
    for j in range(m):
        z[i][:,j]+=b[i] # add the full bias vector (the original added only b[i].T[0], a scalar)
    a[i+1] = activation_function(z[i])
predict = a[-1][0]
print('test set unregularized cost:',costfunction(predict,np.array(testLabels)))
print('test set error rate:',error_rate(predict,np.array(testLabels)))
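# For comparison, scikit-learn fits a similar 2-3-1 logistic-activation network
# in a few lines (a sketch; sklearn's MLP is not used by this script):
# from sklearn.neural_network import MLPClassifier
# clf = MLPClassifier(hidden_layer_sizes=(3,), activation='logistic', max_iter=10000)
# clf.fit(trainingSet, trainingLabels)
# print('sklearn test error:', 1 - clf.score(testSet, testLabels))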
|
import PySimpleGUI as sg
from process import process_crawl, validate_start_url
sg.theme('DarkAmber')
# Inside the window
layout = [
[sg.Text('Start URL:', key='start-url'), sg.InputText(tooltip='https://example.com')],
[sg.Text('Output:'), sg.InputText(), sg.FolderBrowse()],
[sg.Button('Start')]
]
# Create the window
window = sg.Window('SEO Spider', layout)
# Event Loop
while True:
    event, values = window.read()
    if event == sg.WIN_CLOSED: # values is None once the window is closed
        break
    start_url = values[0]
    output_location = values[1]
    if event == 'Start':
        if validate_start_url(start_url):
            process_crawl(start_url, output_location)
        else: # show an error when the http protocol is missing
            sg.Popup('Please include the http protocol', title='Error')
window.close()
|
from rest_framework.viewsets import GenericViewSet
from rest_framework.mixins import CreateModelMixin, ListModelMixin, UpdateModelMixin, RetrieveModelMixin, DestroyModelMixin
from rest_framework.permissions import IsAuthenticated, IsAdminUser
from rest_framework.exceptions import ValidationError, PermissionDenied
from rest_framework.decorators import action
from rest_framework.response import Response
from ...orders.v1.serializers import OrderSerializer
from ..models import Menu, Option
from ..permissions import IsPublicMenuAvailable, OptionBelongsToMenu, BelongsToMe
from .serializers import MenuSerializer, OptionSerializer
class MenuViewSet(CreateModelMixin, ListModelMixin, UpdateModelMixin, RetrieveModelMixin, GenericViewSet):
serializer_class = MenuSerializer
queryset = Menu.objects.all()
permission_classes = [IsAuthenticated, IsAdminUser, BelongsToMe]
def get_queryset(self):
return Menu.objects.filter(user=self.request.user)
def perform_create(self, serializer):
serializer.save(user=self.request.user)
def update(self, request, *args, **kwargs):
kwargs['partial'] = True
return super().update(request, *args, **kwargs)
@action(detail=True, url_path='orders')
def get_orders(self, request, pk=None):
instance = self.get_object()
orders = Menu.objects.get_orders(pk=instance.id)
serializer = OrderSerializer(orders, **{ 'context': self.get_serializer_context(), 'many': True})
return Response(serializer.data)
class OptionViewSet(CreateModelMixin, UpdateModelMixin, DestroyModelMixin, GenericViewSet):
serializer_class = OptionSerializer
queryset = Option.objects.all()
permission_classes = [IsAuthenticated, IsAdminUser, BelongsToMe, OptionBelongsToMenu]
def perform_create(self, serializer):
serializer.save(menus_pk=self.kwargs.get('menus_pk'))
def perform_update(self, serializer):
serializer.save(menus_pk=self.kwargs.get('menus_pk'))
def perform_destroy(self, instance):
if Menu.objects.is_editable(pk=self.kwargs.get('menus_pk')):
super().perform_destroy(instance)
class PublicMenuViewSet(RetrieveModelMixin, GenericViewSet):
serializer_class = MenuSerializer
queryset = Menu.objects.all()
permission_classes = [IsPublicMenuAvailable]
def permission_denied(self, request, message=None):
"""
Override the permission_denied method to avoid raising an AuthenticationError in a public handler
"""
raise PermissionDenied(detail=message)
|
import sys
import numpy as np
from Modules.helper import EventTimer
def MAP(T, R):
def AP(T, R):
precisions = []
cnt = 0
for i, d in enumerate(R):
if d in T:
cnt += 1
precisions.append(cnt / (i + 1))
return sum(precisions) / len(T)
APs = [AP(a, b) for a, b in zip(T, R)]
return np.mean(APs)
def main():
def getRetrievedDocs(path):
with open(path) as f:
lines = f.readlines()[1:]
return [line.split(',')[1].split(' ') for line in lines]
answerFile, predictionFile = sys.argv[1], sys.argv[2]
with EventTimer('Calculating MAP') as f:
groundTruth = getRetrievedDocs(answerFile)
rankedList = getRetrievedDocs(predictionFile)
print('MAP:', MAP(groundTruth, rankedList))
if __name__ == '__main__':
main()
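    # Tiny self-check: ground truth ['d1', 'd2'] against ranking ['d1', 'x', 'd2']
    # gives AP = (1/1 + 2/3) / 2 = 5/6, so MAP over this single query is ~0.8333:
    # print(MAP([['d1', 'd2']], [['d1', 'x', 'd2']]))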
|
class Solution:
    def combinationSum(self, candidates, target):
        self.line = []
        self.res = []
        self.helper(sorted(candidates), 0, target)
        return self.res
    def helper(self, data, start, target):
        if target == 0:
            self.res.append(list(self.line)) # record a copy of the current combination
            return
        for i in range(start, len(data)):
            if data[i] > target:
                return # data is sorted, so no later candidate can fit either
            self.line.append(data[i])
            self.helper(data, i, target - data[i]) # stay at i: a candidate may be reused
            self.line.pop()
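# Quick check of the backtracking above:
# print(Solution().combinationSum([2, 3, 6, 7], 7))  # expected [[2, 2, 3], [7]]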
|
from rest_framework import serializers
from dataprocessing.serializers import userProfileSerializer
from workprogramsapp.models import WorkProgram, WorkProgramInFieldOfStudy, EducationalProgram, AcademicPlan, ImplementationAcademicPlan
from workprogramsapp.workprogram_additions.models import StructuralUnit
from workprogramsapp.workprogram_additions.serializers import ShortStructuralUnitSerializer
class ImplementationAcademicPlanForStatisticSerializer(serializers.ModelSerializer):
class Meta:
model = ImplementationAcademicPlan
fields = ['id', 'year']
class WorkProgramDescriptionOnlySerializer(serializers.ModelSerializer):
class Meta:
model = WorkProgram
fields = ['id', 'title', 'description']
class AcademicPlansDescriptionWpSerializer(serializers.ModelSerializer):
wp_in_academic_plan = serializers.SerializerMethodField()
academic_plan_in_field_of_study = ImplementationAcademicPlanForStatisticSerializer(many=True)
def get_wp_in_academic_plan(self, instance):
return WorkProgramSerializerForStatistic(
instance=WorkProgram.objects.filter(
zuns_for_wp__work_program_change_in_discipline_block_module__discipline_block_module__descipline_block__academic_plan=instance).distinct(),
many=True).data
class Meta:
model = AcademicPlan
fields = ['id', 'academic_plan_in_field_of_study', 'wp_in_academic_plan', ]
class ShortAcademicPlan(serializers.ModelSerializer):
"""
Cериализатор УП
"""
class Meta:
model = AcademicPlan
fields = ["id", "educational_profile"]
class ShortStructuralUnitSerializer(serializers.ModelSerializer):
"""
Cериализатор подразделения разработчика РПД
"""
class Meta:
model = StructuralUnit
fields = "__all__"
class WorkProgramSerializerForStatistic(serializers.ModelSerializer):
editors = userProfileSerializer(many=True)
class Meta:
model = WorkProgram
fields = ['id', 'title', 'discipline_code', "editors"]
class WorkProgramSerializerForStatisticExtended(serializers.ModelSerializer):
editors = userProfileSerializer(many=True)
academic_plans = serializers.SerializerMethodField()
structural_unit = ShortStructuralUnitSerializer(many=False)
def get_academic_plans(self, instance):
return AcademicPlansStatisticSerializer(
instance=AcademicPlan.objects.filter(
discipline_blocks_in_academic_plan__modules_in_discipline_block__change_blocks_of_work_programs_in_modules__work_program=instance.id),
many=True).data
class Meta:
model = WorkProgram
fields = ['id', 'title', "structural_unit", 'discipline_code', "editors", "academic_plans"]
class WorkProgramInFieldOfStudySerializerForStatistic(serializers.ModelSerializer):
work_program = WorkProgramSerializerForStatistic(many=False)
class Meta:
model = WorkProgramInFieldOfStudy
fields = "__all__"
class StructuralUnitWithWpSerializer(serializers.ModelSerializer):
workprogram_in_structural_unit = WorkProgramSerializerForStatistic(many=True)
class Meta:
model = StructuralUnit
fields = ["id", "title", "workprogram_in_structural_unit"]
class SuperShortWorkProgramSerializer(serializers.ModelSerializer):
class Meta:
model = WorkProgram
fields = ['id', 'title', 'discipline_code']
class AcademicPlansStatisticSerializer(serializers.ModelSerializer):
class Meta:
model = AcademicPlan
fields = ['id', 'educational_profile', 'number', 'approval_date', 'year', 'education_form', 'qualification',
'author']
class RecordWorkProgramSerializer(serializers.ModelSerializer):
# editors = userProfileSerializer(many=True)
class Meta:
model = WorkProgram
fields = ['id', 'title', 'structural_unit', 'editors', 'language', 'discipline_sections',
'work_program_in_change_block']
class RecordAcademicPlanSerializer(serializers.ModelSerializer):
class Meta:
model = AcademicPlan
fields = ['number']
|
# insult simulator
import random as r
insult = r.randint(0, 100) # randint needs both bounds; it was called with one argument
if 0 <= insult < 50:
print('your fingers are fatter than your toes')
if 50 <= insult <= 75: # was 50 < insult, which left insult == 50 unhandled
print('uganda knuckles are more patriotic to mexico than you are to russia you communist')
if 75 < insult <= 90:
print('belgium makes decent waffles. They are good waffles. They are popular waffles')
if 90 < insult <= 100:
print('Say goodbye to your ankles, and your knuckles')
if insult == 1:
for i in range(1000):
print('hey guys its your boy eat that pussy 445 and about 30 to 45 minutes ago I beat the fuck out of my dick so god damn hard that I cant even feel my left leg')
|
import os
import re
from math import floor
class WorkOut:
def __init__(self, gender):
# temporary variables for workout names
TAKING_A_WALK = "taking a walk"
GOING_FOR_A_RUN = "going for a run"
RIDING_A_BIKE = "riding a bike"
self.DEFAULT_WORKOUTS = [TAKING_A_WALK, GOING_FOR_A_RUN, RIDING_A_BIKE]
# check if file exists
self.filePath = "./user/workout.txt"
if os.path.isfile(self.filePath):
            with open(self.filePath, "r") as f:
                s = f.readlines()
            lines = [line[:-1] for line in s]
# member constants for calorie ranges
self.BELOW = "below"
self.ABOVE = "above"
# member variables
self.RANGE = 10 # calorie range
self.calorieRanges = [self.BELOW, "55", "65", "75", self.ABOVE]
self.coefficient = [0.7, 0.8, 0.9, 1, 1.1]
self.countCalories = 5
self.limit = 10 # maximal number of workouts that can be in workOutList
self.workOutList = []
for line in lines:
words = line.split('\t')
name = words.pop(0)
self.workOutList.append([name, {self.calorieRanges[calorieIndex]: float(words[calorieIndex]) for calorieIndex in range(self.countCalories)}])
self.countWorkouts = len(self.workOutList)
# self.workOutList.append([words[0], {self.calorieRanges[calorieIndex]: int(words[calorieIndex]) for calorieIndex in range(self.countCalories)}])
# self.workOutList = [[words[0], {self.calorrieRanges[calorieIndex]: int(words[calorieIndex])
# for calorieIndex in range(self.countCalories)}] for words in line.split('\t') for line in lines]
return
# temporary variables for calorie consumption
if gender == "Male":
caloriesWalk = [3.4, 4.4, 5.4, 6.3, 7.2]
caloriesRun = [11.4, 12.4, 14.4, 16.5, 19.6]
caloriesBike = [4.4, 5.4, 6.1, 6.9, 7.6]
elif gender == "Female":
caloriesWalk = [2.4, 3.4, 4.4, 5.3, 6.2]
caloriesRun = [10.4, 11.4, 13.4, 15.5, 18.6]
caloriesBike = [3.4, 4.4, 5.1, 5.9, 6.6]
defaultCalories = [caloriesWalk, caloriesRun, caloriesBike]
# member constants for calorie ranges
self.BELOW = "below"
self.ABOVE = "above"
# member variables
self.RANGE = 10 # calorie range
self.calorieRanges = [self.BELOW, "55", "65", "75", self.ABOVE]
self.coefficient = [0.7, 0.8, 0.9, 1, 1.1]
self.countWorkouts = 3
self.countCalories = 5
self.limit = 10 # maximal number of workouts that can be in workOutList
self.workOutList = [[self.DEFAULT_WORKOUTS[workOutIndex], {self.calorieRanges[calorieIndex]: defaultCalories[workOutIndex][calorieIndex] for calorieIndex in range(self.countCalories)}] for workOutIndex in range(self.countWorkouts)]
# example of workOutList
# [[name1, {below: consumption1, 55: consumption2, 65: consumption3, 75: consumption3, above: consumption3}],
# [name2, {below: consumption1, 55: consumption2, 65: consumption3, 75: consumption3, above: consumption3}], ...]
self.rewrite()
def view(self):
OPTION1 = "1. View Exercise"
OPTION2 = "2. Add Exercise"
OPTION3 = "3. Back"
options = [OPTION1, OPTION2, OPTION3]
SELECT_MENU = "select menu: "
while True:
print("<View and Modify list of exercise>")
self.viewWorkOutList()
print()
for option in options:
print(option)
sel = input(SELECT_MENU)
os.system('cls')
p = re.search(r"^(1|2|3)$", sel)
if not p:
print("Invalid input. please try agian.")
input()
os.system('cls')
continue
break
return sel
def viewWorkOutList(self):
for index, workOut in enumerate(self.workOutList):
index_on_display = index
workOutName = workOut[0]
print(f"{index_on_display}. {workOutName}")
# Contains invalid characters!
# The number does not exist in the list!
def getWorkOutSelection(self, selStr="Input number of exercise to view: ", title = None):
index = 0
while (True):
if title:
print(title)
self.viewWorkOutList()
string = input(selStr)
            # accept any all-digit string without a leading zero (a bare "0" is allowed)
            p = re.search(r"^[0-9]+$", string) and (not re.search(r"^0.", string))
if not p:
print("Contains invalid characters!")
input()
os.system('cls')
continue
index = int(string)
if not (0 <= index < self.countWorkouts):
print("The number does not exist in the list!")
input()
os.system('cls')
continue
os.system('cls')
break
return index
def viewWorkOut(self, index):
selectedWorkOut = self.workOutList[index]
workOutName = selectedWorkOut[0]
while True:
print(f"Name of Exercise: {workOutName}")
print()
workOutCalories = selectedWorkOut[1]
print("Calorie consumption by section: ")
for calorieRange in self.calorieRanges:
calorie = workOutCalories[calorieRange]
if calorieRange == self.BELOW:
print(f"~{calorieRange}: {calorie}kcal")
elif calorieRange == self.ABOVE:
print(f"{calorieRange}~: {calorie}kcal")
else:
print(f"{calorieRange}~{int(calorieRange) + self.RANGE}: {calorie}kcal")
workOutName = self.workOutList[index][0]
# DEFAULT_WORKOUTS can't be edited
if workOutName in self.DEFAULT_WORKOUTS:
OPTION = "1. back"
print(OPTION)
SELECT_MENU = "select menu: "
sel = input(SELECT_MENU)
os.system('cls')
if sel != '1':
os.system('cls')
print("Invalid input. please try again.")
input()
os.system('cls')
continue
return ['3', '']
OPTION1 = "1. edit"
OPTION2 = "2. delete"
OPTION3 = "3. back"
options = [OPTION1, OPTION2, OPTION3]
countOptions = len(options)
for option in options:
print(option)
SELECT_MENU = "select menu"
sel = input("select menu: ")
os.system('cls')
            if len(sel) >= 2 or not ('1' <= sel <= str(countOptions)):
                os.system('cls')
                print("Invalid input. please try again.")
                input()
                os.system('cls')
                continue # re-show the menu rather than accepting the invalid input
return [sel, index]
def addWorkOut(self):
        if self.countWorkouts >= self.limit: # use the configured limit rather than a magic number
print("Max count of exercise limit is 10!\n You cannot over it!")
input()
return
name = ""
names = [workOut[0] for workOut in self.workOutList]
while True:
name = input("Input name of exercise: ")
p = re.search(r'^[\w ]{1,20}$', name)
if len(name) > 20:
print("Length of name must be 1 to 20.")
input()
os.system('cls')
elif not p:
print("Wrong input! Please Enter Again!")
input()
os.system('cls')
elif name in names:
print("Name already exists.")
input()
os.system('cls')
else:
os.system('cls')
break
consumption = ""
while True:
consumption = input("(65~75kg standard) Input calorie consumption per minute: ")
p = re.search(r'^([1-9]|[1-9][0-9]|[1-4][0-9][0-9]|500)$', consumption)
if not p:
os.system('cls')
print("Please enter digit between 1~500!")
input()
os.system('cls')
continue
os.system('cls')
break
self.workOutList.append([name, {self.calorieRanges[indexCalories]: self.rountUpWithinTwo(self.coefficient[indexCalories] * int(consumption)) for indexCalories in range(self.countCalories)}])
self.countWorkouts += 1
self.rewrite()
print("Exercise added.")
input()
os.system('cls')
def editWorkOut(self, index):
workOutName = self.workOutList[index][0]
if workOutName in self.DEFAULT_WORKOUTS:
print("this workout can't be deleted.")
input()
os.system('cls')
return
while True:
OPTION1 = "1. workout name"
OPTION2 = "2. calorie consumption"
options = [OPTION1, OPTION2]
for option in options:
print(option)
sel = input("please select an item to modify: ")
p = re.search(r'^[1|2]$', sel)
if not p:
os.system('cls')
print("invalid input. please try again.")
input()
os.system('cls')
continue
os.system('cls')
break
if sel == '1':
names = [workOut[0] for workOut in self.workOutList]
name = ""
while True:
name = input("Input name of exercise: ")
p = re.search(r'^[\w ]{1,20}$', name)
if len(name) > 20:
os.system('cls')
print("Length of name must be 1 to 20.")
input()
os.system('cls')
elif not p:
os.system('cls')
print("Wrong input! Please Enter Again!")
input()
os.system('cls')
elif name == workOutName:
os.system('cls')
print("Same as existing name.")
input()
os.system('cls')
elif name in names:
os.system('cls')
print("Name already exists.")
input()
os.system('cls')
else:
break
self.workOutList[index][0] = name
elif sel == '2':
consumption = ""
while True:
consumption = input("(65~75kg standard) Input calorie consumption per minute: ")
p = re.search(r'^([1-9]|[1-9][0-9]|[1-4][0-9][0-9]|500)$', consumption)
if not p:
os.system('cls')
print("Please enter digit between 1~500!")
input()
os.system('cls')
else:
for indexCalories in range(self.countCalories):
key = self.calorieRanges[indexCalories]
self.workOutList[index][1][key] = self.rountUpWithinTwo(self.coefficient[indexCalories] * int(consumption))
break
self.rewrite()
os.system('cls')
print("Successfully modified.")
input()
os.system('cls')
def deleteWorkOut(self, index):
self.workOutList.pop(index)
self.countWorkouts -= 1
self.rewrite()
os.system('cls')
print("finished deleting.")
input()
os.system('cls')
    def rewrite(self):
        # persist the current workout list ("w" mode truncates any existing file)
        with open(self.filePath, "w") as f:
            for workOutName, calorieConsumption in self.workOutList:
                consumptions = [str(value) for value in calorieConsumption.values()]
                f.write('\t'.join([workOutName] + consumptions) + '\n')
    def rountUpWithinTwo(self, num):
        # despite the name, this truncates (floors) to two decimal places
        return floor(num * 100) / 100
|
print ("Hello Poland again :)")
|
in_order = ['T', 'b', 'H', 'V', 'h', '3', 'o', 'g', 'P', 'W', 'F', 'L', 'u', 'A',
'f', 'G', 'r', 'm', '1', 'x', 'J', '7', 'w', 'e', '0', 'i', 'Q', 'Y',
'n', 'Z', '8', 'K', 'v', 'q', 'k', '9', 'y', '5', 'C', 'N', 'B', 'D',
'2', '4', 'U', 'l', 'c', 'p', 'I', 'E', 'M', 'a', 'j', '6', 'S', 'R',
'O', 'X', 's', 'd', 'z', 't']
post_order = ['T', 'V', 'H', 'o', '3', 'h', 'P', 'g', 'b', 'F', 'f', 'A', 'u', 'm',
'r', '7', 'J', 'x', 'e', 'w', '1', 'Y', 'Q', 'i', '0', 'Z', 'n', 'G',
'L', 'K', 'y', '9', 'k', 'q', 'v', 'N', 'D', 'B', 'C', '5', '4', 'c',
'l', 'U', '2', '8', 'E', 'I', 'R', 'S', '6', 'j', 'd', 's', 'X', 'O',
'a', 'M', 'p', 'W', 't', 'z']
#in_order = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I']
#post_order = ['A', 'C', 'E', 'D', 'B', 'H', 'I', 'G', 'F']
class Node:
def __init__(self, value):
self.left = None
self.right = None
self.data = value
'''
# logical error
def make_tree(node, left, right):
print(left, right)
data = post_order[right]
node = Node(data)
if left == right:
return
rooti = in_order.index(data)
make_tree(node.left, left, rooti - 1)
make_tree(node.right, rooti, right -1)
'''
'''
# can't change argument node
def make_tree(node, in_order, post_order):
print(in_order, post_order)
data = post_order[-1]
node = Node(data)
if len(post_order) > 1:
rooti = in_order.index(data)
if rooti != 0:
make_tree(node.left, in_order[:rooti], post_order[:rooti])
if rooti != len(in_order) - 1:
make_tree(node.right, in_order[rooti + 1:], post_order[rooti:-1])
'''
def make_tree(in_order, post_order):
if not in_order:
return
root = Node(post_order[-1])
rootPos = in_order.index(post_order[-1])
root.left = make_tree(in_order[ : rootPos], post_order[ : rootPos])
root.right = make_tree(in_order[rootPos + 1 : ], post_order[rootPos : -1])
return root
def show_tree(root):
if root is None:
return
show_tree(root.left)
print(root.data, end = ' ')
show_tree(root.right)
def find_deepest_path(tree):
#show_tree(tree)
#print()
if not tree:
return
path = [tree.data]
left_path = find_deepest_path(tree.left)
right_path = find_deepest_path(tree.right)
if (not left_path) and (right_path):
path.extend(right_path)
elif (left_path) and (not right_path):
path.extend(left_path)
elif (left_path) and (right_path):
if len(left_path) > len(right_path):
path.extend(left_path)
else:
path.extend(right_path)
return path
#tree = Node(post_order[-1])
#make_tree(tree, 0, len(post_order) -1)
#make_tree(tree, in_order, post_order)
tree = make_tree(in_order, post_order)
show_tree(tree)
deepest_path = find_deepest_path(tree)
print("".join(deepest_path)) # zWp8LGn01wxJ7
'''
echo "U2FsdGVkX1+gxunKbemS2193vhGGQ1Y8pc5gPegMAcg=" | openssl enc -aes-128-cbc -a -d -pass pass:"zWp8LGn01wxJ7"
nqueens
'''
|
from common.run_method import RunMethod
import allure
@allure.step("小程序/商品/课程套餐详情")
def applet_package_course_detail_get(params=None, header=None, return_json=True, **kwargs):
    '''
    :param: query-string parameters appended to the URL
    :body: request body
    :return_json: whether to return the response as JSON (default: yes)
    :header: request headers
    :host: target environment
    :return: JSON response by default; the raw response when return_json=False
    '''
name = "小程序/商品/课程套餐详情"
url = f"/service-gos/applet/package/course/detail"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("小程序/商品/课程单品详情")
def applet_goods_detail_get(params=None, header=None, return_json=True, **kwargs):
    '''
    :param: query-string parameters appended to the URL
    :body: request body
    :return_json: whether to return the response as JSON (default: yes)
    :header: request headers
    :host: target environment
    :return: JSON response by default; the raw response when return_json=False
    '''
name = "小程序/商品/课程单品详情"
url = f"/service-gos/applet/goods/detail"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
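# Hypothetical usage sketch -- the parameter name and auth header below are
# assumptions, not confirmed by this module:
# res = applet_goods_detail_get(params={"goodsId": 123}, header={"token": "..."})
# print(res)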
@allure.step("小程序/首页/校区详情")
def applet_schoolArea_detail_get(params=None, header=None, return_json=True, **kwargs):
    '''
    :param: query-string parameters appended to the URL
    :body: request body
    :return_json: whether to return the response as JSON (default: yes)
    :header: request headers
    :host: target environment
    :return: JSON response by default; the raw response when return_json=False
    '''
name = "小程序/首页/校区详情"
url = f"/service-gos/applet/schoolArea/detail"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("小程序/首页/附近校区列表")
def applet_schoolArea_nearList_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    :param: query-string parameters appended to the URL
    :body: request body
    :return_json: whether to return the response as JSON (default: yes)
    :header: request headers
    :host: target environment
    :return: JSON response by default; the raw response when return_json=False
    '''
name = "小程序/首页/附近校区列表"
url = f"/service-gos/applet/schoolArea/nearList"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("小程序/首页/优惠券可购买商品列表")
def applet_goods_forCoupon_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    :param: query-string parameters appended to the URL
    :body: request body
    :return_json: whether to return the response as JSON (default: yes)
    :header: request headers
    :host: target environment
    :return: JSON response by default; the raw response when return_json=False
    '''
name = "小程序/首页/优惠券可购买商品列表"
url = f"/service-gos/applet/goods/forCoupon"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("小程序/首页/banner详情")
def applet_banner_detail_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "小程序/首页/banner详情"
url = f"/service-gos/applet/banner/detail"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("小程序/首页/关于极客")
def applet_company_column_list_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "小程序/首页/关于极客"
url = f"/service-gos/applet/company/column/list"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("小程序/小程序码生成")
def applet_qr_getQr_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "小程序/小程序码生成"
url = f"/service-gos/applet/qr/getQr"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("小程序/入学诊断/学生可参加诊断列表")
def applet_diagnosis_usable_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "小程序/入学诊断/学生可参加诊断列表"
url = f"/service-gos/applet/diagnosis/usable"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("小程序/入学诊断/获取试卷")
def applet_diagnosis_exam_paper_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "小程序/入学诊断/获取试卷"
url = f"/service-gos/applet/diagnosis/exam/paper"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("小程序/入学诊断/提交答案")
def applet_diagnosis_submit_exam_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "小程序/入学诊断/提交答案"
url = f"/service-gos/applet/diagnosis/submit/exam"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("小程序/入学诊断/诊断报告详情")
def applet_diagnosis_report_detail_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "小程序/入学诊断/诊断报告详情"
url = f"/service-gos/applet/diagnosis/report/detail"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("小程序/入学诊断/试题讲解")
def applet_diagnosis_question_analysis_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "小程序/入学诊断/试题讲解"
url = f"/service-gos/applet/diagnosis/question/analysis"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("小程序/入学诊断/查询学生的诊断报告")
def applet_diagnosis_report_list_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "小程序/入学诊断/查询学生的诊断报告"
url = f"/service-gos/applet/diagnosis/report/list"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("小程序/入学诊断/诊断的成绩能报的班级")
def applet_diagnosis_able_class_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "小程序/入学诊断/诊断的成绩能报的班级"
url = f"/service-gos/applet/diagnosis/able/class"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("小程序/入学诊断/是否应该去诊断")
def applet_diagnosis_should_diagnosis_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "小程序/入学诊断/是否应该去诊断"
url = f"/service-gos/applet/diagnosis/should/diagnosis"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("小程序/入学诊断/学生是否可参加该诊断")
def applet_diagnosis_can_join_diagnosis_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "小程序/入学诊断/学生是否可参加该诊断"
url = f"/service-gos/applet/diagnosis/can/join/diagnosis"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
|
import random
Player_Hand = []
Shuffled_Deck = []
Deck = []
Suits = [" Of Spades", " Of Hearts", " Of Diamonds", " Of Clubs"]
Ranks = ["Ace", "2", "3", "4", "5", "6", "7", "8", "9", "10", "Jack", "Queen", "King"]
Random_Card1 = random.choice(Ranks)
Random_Card2 = random.choice(Suits)
print("You Drew A " + Random_Card1 + Random_Card2)
for suit in Suits:
    for rank in Ranks:
        # Ranks and Suits concatenate directly, e.g. "Ace Of Spades".
        newcard = rank + suit
        Deck.append(newcard)
print(Deck)
for d in Deck:
print(d)
|
from django.contrib import admin
from django.db.models.loading import get_models
for m in get_models():
exec "from %s import %s" % (m.__module__, m.__name__)
class ChoicesInline(admin.TabularInline):
model = QuestionChoice
extra = 0
class VarsInline(admin.TabularInline):
model = QuestionVariable
extra = 0
class QuestionChoiceAdmin(admin.ModelAdmin):
add_form_template = 'question/admin/change_form.html'
class QuestionAdmin(admin.ModelAdmin):
add_form_template = 'question/admin/change_form.html'
change_form_template = 'question/admin/change_form.html'
inlines = [VarsInline, ChoicesInline]
class ExposInline(admin.StackedInline):
model = Exposition
extra = 3
class CategoryAdmin(admin.ModelAdmin):
inlines = [ExposInline]
admin.site.register(Category, CategoryAdmin)
admin.site.register(Exposition)
admin.site.register(Submission)
admin.site.register(Vote)
admin.site.register(VoteCategory)
admin.site.register(Question, QuestionAdmin)
admin.site.register(QuestionChoice, QuestionChoiceAdmin)
|
from machine import I2C, Pin
import m5stack
import utime
from mpu6886 import MPU6886
from maze import *
import maze_reader as reader
from maze_renderer import MazeRenderer
print('Maze game started')
s = """
+-+-+-+-+-+-+-+-+
|. . .|o|. . . .|
+ +-+-+ +-+-+-+ +
|.|. . .|. .|. .|
+ + +-+-+ + + +-+
|.|.|.|. .|.|. .]
+ + + + + + +-+ +
|. .|. .|.|. . .|
+ +-+ +-+ +-+ + +
|. .|. .|.|. .|.|
+-+ +-+ + +-+-+-+
|. .|. .|. . . .|
+ +-+-+-+-+-+ + +
|. . .|. . .|.|.|
+-+-+ + +-+ +-+ +
|. . . .|. . . .|
+-+-+-+-+-+-+-+-+
"""
maze = reader.maze_from_string(s)
i2c = I2C(scl=Pin(22), sda=Pin(21))
sensor = MPU6886(i2c)
renderer = MazeRenderer(maze)
renderer.draw_maze()
start = utime.ticks_ms()
while True:
acc = sensor.acceleration
utime.sleep_ms(10)
renderer.erase_ball()
end = utime.ticks_ms()
elapsed = utime.ticks_diff(end, start)
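    # Milliseconds elapsed since the previous frame drive the physics step.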
start = end
maze.accelerate(acc, elapsed)
renderer.draw_ball()
if maze.is_ball_out:
renderer.print_game_over()
print('Game over')
break
print('Maze game finished')
|
#!/usr/bin/env python
import sys
for line in sys.stdin:
sys.stdout.write( line )
|
# Day 6: Custom Customs
# <ryc> 2021
def inputdata( ):
stream = open('day_06_2020.input')
data = [ ]
record = [ ]
for line in stream:
if len(line) == 1:
data.append(record)
record = [ ]
else:
record.append(line[ : -1 ])
data.append(record)
stream.close()
return data
def processing( groups ):
count_union = 0
count_intersection = 0
universe = { chr(i) for i in range(ord('a'),ord('z')+1) }
for group in groups:
union = set()
intersection = universe.copy()
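        # The union grows with any "yes" answer; the intersection keeps only
        # the questions every person in the group answered.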
for person in group:
answer = set(person)
union |= answer
intersection &= answer
count_union += len(union)
count_intersection += len(intersection)
return ( count_union, count_intersection )
if __name__ == '__main__':
print('\nDay 6: Custom Customs')
groups = inputdata()
( count_union, count_intersection ) = processing(groups)
print('\nCount union=', count_union)
    print('\nCount intersection=', count_intersection)
|
VOWELS = {'a', 'A', 'e', 'E', 'i', 'I', 'o', 'O', 'u', 'U'}
def vowel_2_index(string):
return ''.join(str(i) if a in VOWELS else a
for i, a in enumerate(string, start=1))
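# Example (assumed usage): vowel_2_index("this is my string") -> 'th3s 6s my str15ng'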
|
class node:
def __init__(self,val=None, next=None):
self.val = val
self.next = next
class List:
def createList(self):
nhead = node(0)
print("nhead.next", nhead.next)
temp = nhead
temp.next = node(2)
print("nhead.next", nhead.next)
temp= temp.next
temp.next = node(4)
temp= temp.next
temp.next = node(7)
temp= temp.next
temp.next = node(9)
temp= temp.next
temp.next = node(11)
temp= temp.next
return nhead
def createList2(self):
nhead = node(1)
print("nhead.next", nhead.next)
temp = nhead
temp.next = node(3)
print("nhead.next", nhead.next)
temp= temp.next
temp.next = node(5)
temp= temp.next
temp.next = node(10)
temp= temp.next
temp.next = node(12)
temp= temp.next
temp.next = node(13)
temp= temp.next
return nhead
def printList(self,nHead):
temp = nHead
while temp != None:
print(temp.val, end=" ")
temp = temp.next
    def mergeTwoList(self, nHead1, nHead2):
        # Merge two sorted lists by splicing nodes onto a dummy head.
        if nHead1 is None:
            return nHead2
        if nHead2 is None:
            return nHead1
        dummy = node(0)
        tail = dummy
        temp1, temp2 = nHead1, nHead2
        while temp1 is not None and temp2 is not None:
            if temp1.val < temp2.val:
                tail.next = temp1
                temp1 = temp1.next
            else:
                tail.next = temp2
                temp2 = temp2.next
            tail = tail.next
        tail.next = temp1 if temp1 is not None else temp2
        return dummy.next
def bubblesortNode(nHead):
if nHead == None:
return nHead;
nTempNode = nHead
nNum = 0
while nTempNode.next != None:
nTempNode = nTempNode.next
nNum+=1
nTail = nTempNode
endNode = nTail
nCur = nHead
nFlag = False
while nFlag == False:
nFlag = True
nCur = nHead
while nCur.next != None:
if nCur.val > nCur.next.val:
nCur.val, nCur.next.val = nCur.next.val, nCur.val
nFlag = False
nCur = nCur.next
return nHead
#def quicksortNode(nHead):
myList = List()
nhead = myList.createList()
temp = nhead
myList.printList(nhead)
print("nhead2.next", nhead.next)
ntail= temp
#print("ntail ",ntail.val)
temp = nhead.next
temppre = nhead
temppre.next = None
#temppre.next = None
print("nhead.val", nhead.val)
tempnext = temp.next
while temp != None:
#print(tempnext.val)
temp.next = temppre
temppre = temp
temp = tempnext
if None != tempnext:
tempnext = tempnext.next
nhead = temppre
temp = nhead
print("converse list")
curNode = nhead
while curNode != None:
if curNode.val == 2:
tempNode = curNode.next;
curNode.next = node(1)
curNode.next.next = tempNode
break
curNode = curNode.next
temp = nhead
print("inser node is")
myList.printList(nhead)
bubblesortNode(nhead)
temp = nhead
print("After sort")
myList.printList(nhead)
class Solution2(object):
    '''
    Problem: merge two sorted linked lists.
    '''
def mergeTwoLists(self, l1, l2):
dummy = node(999)
tmp = dummy #PreviousNode
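        # The dummy head lets the loop splice nodes without special-casing
        # the first element; dummy.next is the merged list's real head.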
while l1 != None and l2 != None:
print("tmp.val is ",tmp.val)
if l1.val < l2.val:
tmp.next = l1
l1 = l1.next
else:
tmp.next = l2
l2 = l2.next
tmp = tmp.next
if l1 != None:
tmp.next = l1
else:
tmp.next = l2
return dummy.next
mysolut2 = Solution2()
list1 = myList.createList()
list2 = myList.createList2()
myList.printList(list1)
print("")
myList.printList(list2)
print("")
res = mysolut2.mergeTwoLists(list1,list2)
print("mergeTwoList-------------------------------")
myList.printList(res)
|
from django import template
from django.contrib.auth.models import Group
register = template.Library()
@register.filter(name="add_class")
def add_class(field, class_name):
return field.as_widget(attrs={
"class": " ".join((field.css_classes(), class_name))
})
@register.filter(name='has_group')
def has_group(user, group_name):
group = Group.objects.get(name=group_name)
    return group in user.groups.all()
@register.filter(name="reduce_string")
def reduce_string(string, length):
return string if len(string) < int(length) else string[:int(length)]+"..."
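# Example template usage (illustrative):
#   {{ form.email|add_class:"form-control" }}
#   {% if request.user|has_group:"editors" %} ... {% endif %}
#   {{ article.title|reduce_string:40 }}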
|
#!/usr/bin/env python
import os
import sys
import time
import random
import requests
import argparse
parser = argparse.ArgumentParser(description="Kepps a webapp up.")
parser.add_argument("--pidfile", action="store",
default="/var/run/webapp-up-keeper.pid", help="where to save the pidfile.")
parser.add_argument("--logfile", action="store", help="set the log file",
default="/var/log/webapp-up-keeper.log")
__debug = False
if os.environ.get("DEBUG") == "TRUE" :
__debug = True
print "We will debug..."
## If not debugging, fork to background.
pid = None
if __debug :
pid = 0
else :
pid = os.fork()
## let's just prevent bad shit from happening
if pid == -1 :
print "COULD NOT FORK TO BACKGROUND, THIS IS NOT SUPPOSED TO HAPPEN."
sys.exit(1)
# program is forked, we can exit.
if pid != 0 :
sys.exit(0)
## We now write the pidfile...
args = parser.parse_args()
try :
with open(args.pidfile, "w") as pidfile :
pidfile.write("%d\n" % os.getpid())
except :
print "Could not open pidfile %s" % args.pidfile
## okay we're daemonized, let's behave properly...
if not __debug :
os.chdir("/")
sys.stdin.close()
sys.stdout.close()
sys.stderr.close()
sys.stdin = open("/dev/null", "r")
sys.stdout = open(args.logfile, "a+")
sys.stderr = sys.stdout
## Okay now we're a decent daemon, let's do our job...
target = os.environ.get("WEB_APP")
if not target :
print "$WEB_APP environment variable not set, exiting..." ;
sys.exit(0)
response = None
while True :
try :
if __debug :
print "sleeping..."
time.sleep(30 + random.randrange(0, 30))
response = requests.get(target) ;
except :
print "Unable to perform the request... (%s)" % time.ctime()
continue
if __debug :
print "%s: %s (%s)" % (time.ctime(), response.status_code, response.reason)
if response.status_code != requests.codes.ok :
print "HTTP server at %s reponded with status code %d (%s)" % (target, response.status_code, response.reason)
|
import numpy as np
from sklearn.manifold import TSNE
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
from PIL import Image
import numpy as np
import matplotlib.image
img=np.array(Image.open('/home/jiawei/Pictures/lookup-table.png'))
for i in range(512):
for j in range(512):
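        # Identity lookup table: an 8x8 grid of 64x64 tiles, with red varying
        # along rows within a tile, green along columns, and blue with the
        # tile's position in the grid.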
r=(i%64)*4
g=(j%64)*4
b=(i//64)*32+(j//64)*4
img[i,j]=(r,g,b)
matplotlib.image.imsave('/home/jiawei/Pictures/oricard.png',img)
from PIL import Image
import numpy as np
import matplotlib.image
img=np.array(Image.open('/home/jiawei/Pictures/timg.jpeg'))
style=np.array(Image.open('/home/jiawei/Pictures/pink.jpg'))
rows,cols,dims=img.shape
for i in range(rows):
for j in range(cols):
r, g, b=img[i,j]
m=b//4//8*64+r//4
n=b//4%8*64+g//4
img[i,j]=style[m,n]
matplotlib.image.imsave('/home/jiawei/Pictures/output.png',img)
|
from dateutil.parser import parse
from fileinput import input
from collections import defaultdict
import random
import re
class Guard:
def __init__(self, gid):
self.gid = gid
self.events = []
def addEvent(self, event):
self.events.append(event)
def __eq__(self, other):
return self.gid == other.gid
def __hash__(self):
        return hash(self.gid)
def __repr__(self):
return str(self)
def __str__(self):
return "Guard " + str(self.gid)
def timeSpentAsleep(self):
timeAsleep = 0
for i in range(int(len(self.events) / 2)):
timeAsleep += self.events[2*i + 1].time.minute - self.events[2*i].time.minute
return timeAsleep
def minuteMostSpentAsleep(self):
minutes = defaultdict(lambda: 0)
for i in range(int(len(self.events) / 2)):
for j in range(self.events[2*i].time.minute, self.events[2*i + 1].time.minute):
minutes[j] += 1
return max(minutes.keys(), key=lambda s: minutes[s])
def minutesMostSpentasleep(self):
minutes = defaultdict(lambda: 0)
for i in range(int(len(self.events) / 2)):
for j in range(self.events[2*i].time.minute, self.events[2*i + 1].time.minute):
minutes[j] += 1
return minutes
class Event:
def __init__(self, kind, time, id = 0):
self.time = time
self.kind = kind
self.id = id
def __str__(self):
return "Event time: " + str(self.time)
def __repr__(self):
return str(self)
def parseLine(line):
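    # Event kinds: 0 = guard begins shift, 1 = wakes up, 2 = falls asleep.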
if("Guard" in line):
reg = re.compile(r"#[0-9]+")
return Event(0, parse(line.split(']')[0][1:]), int(reg.findall(line)[0][1:]))
elif ("wakes" in line):
return Event(1, parse(line.split(']')[0][1:]))
else:
return Event(2, parse(line.split(']')[0][1:]))
lines = [parseLine(line) for line in input()]
lines.sort(key = lambda l: l.time)
guards = {}
currguard = None
for event in lines:
if event.kind == 0:
if event.id not in guards:
guards[event.id] = Guard(event.id)
currguard = guards[event.id]
else:
currguard.addEvent(event)
nwdct = [(0,0)] * 60
for guard in guards.values():
mins = guard.minutesMostSpentasleep()
for i in range(60):
if nwdct[i][0] < mins[i]:
nwdct[i] = (mins[i], guard.gid)
l = max(nwdct, key=lambda s: s[0])
print(l)
print(nwdct.index(l))
sleepy = max(guards.values(), key=lambda g: g.timeSpentAsleep())
print(sleepy.minuteMostSpentAsleep())
print(sleepy.gid)
|
from torch.nn.modules.module import Module
from ..functions.psroi_align import PSRoIAlignFunction
class PSRoIAlign(Module):
def __init__(self, out_size, spatial_scale, sample_num=0, output_dim=10, group_size=7):
super(PSRoIAlign, self).__init__()
self.out_size = out_size
self.spatial_scale = float(spatial_scale)
self.sample_num = int(sample_num)
self.output_dim = int(output_dim)
self.group_size = int(group_size)
def forward(self, features, rois):
return PSRoIAlignFunction.apply(features, rois, self.out_size,
self.spatial_scale, self.sample_num,
self.output_dim, self.group_size)
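# Hypothetical usage (shapes and values here are assumptions, not from this module):
#   psroi = PSRoIAlign(out_size=7, spatial_scale=1.0 / 16)
#   pooled = psroi(features, rois)  # rois: (N, 5) rows of [batch_idx, x1, y1, x2, y2]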
|
#Define an is_palindrome function that takes one word as a string
#and returns True if it is a palindrome, else False
a = input("Enter a word to check whether it is a palindrome: ")
def is_palindrome(a):
if a==a[::-1]:
return True
else:
return False
print(is_palindrome(a))
|
import pickle
import numpy as np
import pandas as pd
import sys
import math
import requests
from pyimzml.ImzMLParser import ImzMLParser
from annotation_pipeline.utils import logger, get_pixel_indices, append_pywren_stats, read_object_with_retry, \
read_cloud_object_with_retry, read_ranges_from_url
from concurrent.futures import ThreadPoolExecutor
import msgpack_numpy as msgpack
ISOTOPIC_PEAK_N = 4
MAX_MZ_VALUE = 10 ** 5
def get_imzml_reader(pw, imzml_path):
def get_portable_imzml_reader(storage):
imzml_stream = requests.get(imzml_path, stream=True).raw
parser = ImzMLParser(imzml_stream, ibd_file=None)
imzml_reader = parser.portable_spectrum_reader()
imzml_cobject = storage.put_cobject(pickle.dumps(imzml_reader))
return imzml_reader, imzml_cobject
memory_capacity_mb = 1024
future = pw.call_async(get_portable_imzml_reader, [])
imzml_reader, imzml_cobject = pw.get_result(future)
append_pywren_stats(future, memory_mb=memory_capacity_mb, cloud_objects_n=1)
return imzml_reader, imzml_cobject
def get_spectra(ibd_url, imzml_reader, sp_inds):
mz_starts = np.array(imzml_reader.mzOffsets)[sp_inds]
mz_ends = mz_starts + np.array(imzml_reader.mzLengths)[sp_inds] * np.dtype(imzml_reader.mzPrecision).itemsize
mz_ranges = np.stack([mz_starts, mz_ends], axis=1)
int_starts = np.array(imzml_reader.intensityOffsets)[sp_inds]
int_ends = int_starts + np.array(imzml_reader.intensityLengths)[sp_inds] * np.dtype(imzml_reader.intensityPrecision).itemsize
int_ranges = np.stack([int_starts, int_ends], axis=1)
ranges_to_read = np.vstack([mz_ranges, int_ranges])
data_ranges = read_ranges_from_url(ibd_url, ranges_to_read)
mz_data = data_ranges[:len(sp_inds)]
int_data = data_ranges[len(sp_inds):]
del data_ranges
for i, sp_idx in enumerate(sp_inds):
mzs = np.frombuffer(mz_data[i], dtype=imzml_reader.mzPrecision)
ints = np.frombuffer(int_data[i], dtype=imzml_reader.intensityPrecision)
mz_data[i] = int_data[i] = None # Avoid holding memory longer than necessary
yield sp_idx, mzs, ints
def chunk_spectra(pw, ibd_path, imzml_cobject, imzml_reader):
MAX_CHUNK_SIZE = 512 * 1024 ** 2 # 512MB
sp_id_to_idx = get_pixel_indices(imzml_reader.coordinates)
row_size = 3 * max(4,
np.dtype(imzml_reader.mzPrecision).itemsize,
np.dtype(imzml_reader.intensityPrecision).itemsize)
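    # Each buffered row holds (spectrum_idx, mz, intensity), so budget three
    # values per peak at the widest dtype in play.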
def plan_chunks():
        chunk_sp_inds = []
        estimated_size = 0  # bytes
        # Iterate in the same order that intensities are laid out in the file, hopefully this will
        # prevent fragmented read patterns
        for sp_i in np.argsort(imzml_reader.intensityOffsets):
            spectrum_size = imzml_reader.mzLengths[sp_i] * row_size
            if estimated_size + spectrum_size > MAX_CHUNK_SIZE:
                estimated_size = 0
                yield np.array(chunk_sp_inds)
                chunk_sp_inds = []
            estimated_size += spectrum_size
chunk_sp_inds.append(sp_i)
if chunk_sp_inds:
yield np.array(chunk_sp_inds)
def upload_chunk(ch_i, storage):
chunk_sp_inds = chunks[ch_i]
# Get imzml_reader from COS because it's too big to include via pywren captured vars
imzml_reader = pickle.loads(read_cloud_object_with_retry(storage, imzml_cobject))
        n_rows = sum(imzml_reader.mzLengths[sp_i] for sp_i in chunk_sp_inds)  # total peaks
        sp_mz_int_buf = np.zeros((n_rows, 3), dtype=imzml_reader.mzPrecision)
chunk_start = 0
for sp_i, mzs, ints in get_spectra(ibd_path, imzml_reader, chunk_sp_inds):
chunk_end = chunk_start + len(mzs)
sp_mz_int_buf[chunk_start:chunk_end, 0] = sp_id_to_idx[sp_i]
sp_mz_int_buf[chunk_start:chunk_end, 1] = mzs
sp_mz_int_buf[chunk_start:chunk_end, 2] = ints
chunk_start = chunk_end
by_mz = np.argsort(sp_mz_int_buf[:, 1])
sp_mz_int_buf = sp_mz_int_buf[by_mz]
del by_mz
chunk = msgpack.dumps(sp_mz_int_buf)
size = sys.getsizeof(chunk) * (1 / 1024 ** 2)
logger.info(f'Uploading spectra chunk {ch_i} - %.2f MB' % size)
chunk_cobject = storage.put_cobject(chunk)
logger.info(f'Spectra chunk {ch_i} finished')
return chunk_cobject
chunks = list(plan_chunks())
memory_capacity_mb = 3072
futures = pw.map(upload_chunk, range(len(chunks)), runtime_memory=memory_capacity_mb)
ds_chunks_cobjects = pw.get_result(futures)
append_pywren_stats(futures, memory_mb=memory_capacity_mb, cloud_objects_n=len(chunks))
return ds_chunks_cobjects
def define_ds_segments(pw, ibd_url, imzml_cobject, ds_segm_size_mb, sample_n):
def get_segm_bounds(storage):
imzml_reader = pickle.loads(read_cloud_object_with_retry(storage, imzml_cobject))
sp_n = len(imzml_reader.coordinates)
sample_sp_inds = np.random.choice(np.arange(sp_n), min(sp_n, sample_n))
print(f'Sampling {len(sample_sp_inds)} spectra')
spectra_sample = list(get_spectra(ibd_url, imzml_reader, sample_sp_inds))
spectra_mzs = np.concatenate([mzs for sp_id, mzs, ints in spectra_sample])
print(f'Got {len(spectra_mzs)} mzs')
total_size = 3 * spectra_mzs.nbytes * sp_n / len(sample_sp_inds)
segm_n = int(np.ceil(total_size / (ds_segm_size_mb * 2 ** 20)))
segm_bounds_q = [i * 1 / segm_n for i in range(0, segm_n + 1)]
segm_lower_bounds = [np.quantile(spectra_mzs, q) for q in segm_bounds_q]
return np.array(list(zip(segm_lower_bounds[:-1], segm_lower_bounds[1:])))
logger.info('Defining dataset segments bounds')
memory_capacity_mb = 1024
future = pw.call_async(get_segm_bounds, [], runtime_memory=memory_capacity_mb)
ds_segments = pw.get_result(future)
append_pywren_stats(future, memory_mb=memory_capacity_mb)
return ds_segments
def segment_spectra(pw, ds_chunks_cobjects, ds_segments_bounds, ds_segm_size_mb, ds_segm_dtype):
ds_segm_n = len(ds_segments_bounds)
# extend boundaries of the first and last segments
# to include all mzs outside of the spectra sample mz range
ds_segments_bounds = ds_segments_bounds.copy()
ds_segments_bounds[0, 0] = 0
ds_segments_bounds[-1, 1] = MAX_MZ_VALUE
# define first level segmentation and then segment each one into desired number
first_level_segm_size_mb = 512
first_level_segm_n = (len(ds_segments_bounds) * ds_segm_size_mb) // first_level_segm_size_mb
first_level_segm_n = max(first_level_segm_n, 1)
ds_segments_bounds = np.array_split(ds_segments_bounds, first_level_segm_n)
def segment_spectra_chunk(chunk_cobject, id, storage):
print(f'Segmenting spectra chunk {id}')
sp_mz_int_buf = read_cloud_object_with_retry(storage, chunk_cobject, msgpack.load)
def _first_level_segment_upload(segm_i):
l = ds_segments_bounds[segm_i][0, 0]
r = ds_segments_bounds[segm_i][-1, 1]
segm_start, segm_end = np.searchsorted(sp_mz_int_buf[:, 1], (l, r)) # mz expected to be in column 1
segm = sp_mz_int_buf[segm_start:segm_end]
return storage.put_cobject(msgpack.dumps(segm))
with ThreadPoolExecutor(max_workers=128) as pool:
sub_segms_cobjects = list(pool.map(_first_level_segment_upload, range(len(ds_segments_bounds))))
return sub_segms_cobjects
memory_safe_mb = 1536
memory_capacity_mb = first_level_segm_size_mb * 2 + memory_safe_mb
first_futures = pw.map(segment_spectra_chunk, ds_chunks_cobjects, runtime_memory=memory_capacity_mb)
first_level_segms_cobjects = pw.get_result(first_futures)
if not isinstance(first_futures, list): first_futures = [first_futures]
append_pywren_stats(first_futures, memory_mb=memory_capacity_mb, cloud_objects_n=len(first_futures) * len(ds_segments_bounds))
def merge_spectra_chunk_segments(segm_cobjects, id, storage):
print(f'Merging segment {id} spectra chunks')
def _merge(ch_i):
segm_spectra_chunk = read_cloud_object_with_retry(storage, segm_cobjects[ch_i], msgpack.load)
return segm_spectra_chunk
with ThreadPoolExecutor(max_workers=128) as pool:
segm = list(pool.map(_merge, range(len(segm_cobjects))))
segm = np.concatenate(segm)
# Alternative in-place sorting (slower) :
# segm.view(f'{ds_segm_dtype},{ds_segm_dtype},{ds_segm_dtype}').sort(order=['f1'], axis=0)
segm = segm[segm[:, 1].argsort()]
bounds_list = ds_segments_bounds[id]
segms_len = []
segms_cobjects = []
for segm_j in range(len(bounds_list)):
l, r = bounds_list[segm_j]
segm_start, segm_end = np.searchsorted(segm[:, 1], (l, r)) # mz expected to be in column 1
sub_segm = segm[segm_start:segm_end]
segms_len.append(len(sub_segm))
base_id = sum([len(bounds) for bounds in ds_segments_bounds[:id]])
segm_i = base_id + segm_j
print(f'Storing dataset segment {segm_i}')
segms_cobjects.append(storage.put_cobject(msgpack.dumps(sub_segm)))
return segms_len, segms_cobjects
second_level_segms_cobjects = np.transpose(first_level_segms_cobjects).tolist()
second_level_segms_cobjects = [[segm_cobjects] for segm_cobjects in second_level_segms_cobjects]
# same memory capacity
second_futures = pw.map(merge_spectra_chunk_segments, second_level_segms_cobjects, runtime_memory=memory_capacity_mb)
ds_segms_len, ds_segms_cobjects = list(zip(*pw.get_result(second_futures)))
ds_segms_len = list(np.concatenate(ds_segms_len))
ds_segms_cobjects = list(np.concatenate(ds_segms_cobjects))
append_pywren_stats(second_futures, memory_mb=memory_capacity_mb, cloud_objects_n=ds_segm_n)
return ds_segms_cobjects, ds_segms_len
def clip_centr_df(pw, bucket, centr_chunks_prefix, mz_min, mz_max):
def clip_centr_df_chunk(obj, storage):
print(f'Clipping centroids dataframe chunk {obj.key}')
centroids_df_chunk = pd.read_msgpack(obj.data_stream._raw_stream).sort_values('mz')
centroids_df_chunk = centroids_df_chunk[centroids_df_chunk.mz > 0]
ds_mz_range_unique_formulas = centroids_df_chunk[(mz_min < centroids_df_chunk.mz) &
(centroids_df_chunk.mz < mz_max)].index.unique()
centr_df_chunk = centroids_df_chunk[centroids_df_chunk.index.isin(ds_mz_range_unique_formulas)].reset_index()
clip_centr_chunk_cobject = storage.put_cobject(centr_df_chunk.to_msgpack())
return clip_centr_chunk_cobject, centr_df_chunk.shape[0]
memory_capacity_mb = 512
futures = pw.map(clip_centr_df_chunk, f'cos://{bucket}/{centr_chunks_prefix}/', runtime_memory=memory_capacity_mb)
clip_centr_chunks_cobjects, centr_n = list(zip(*pw.get_result(futures)))
append_pywren_stats(futures, memory_mb=memory_capacity_mb, cloud_objects_n=len(futures))
clip_centr_chunks_cobjects = list(clip_centr_chunks_cobjects)
centr_n = sum(centr_n)
logger.info(f'Prepared {centr_n} centroids')
return clip_centr_chunks_cobjects, centr_n
def define_centr_segments(pw, clip_centr_chunks_cobjects, centr_n, ds_segm_n, ds_segm_size_mb):
logger.info('Defining centroids segments bounds')
def get_first_peak_mz(cobject, id, storage):
print(f'Extracting first peak mz values from clipped centroids dataframe {id}')
centr_df = read_cloud_object_with_retry(storage, cobject, pd.read_msgpack)
first_peak_df = centr_df[centr_df.peak_i == 0]
return first_peak_df.mz.values
memory_capacity_mb = 512
futures = pw.map(get_first_peak_mz, clip_centr_chunks_cobjects, runtime_memory=memory_capacity_mb)
first_peak_df_mz = np.concatenate(pw.get_result(futures))
append_pywren_stats(futures, memory_mb=memory_capacity_mb)
ds_size_mb = ds_segm_n * ds_segm_size_mb
data_per_centr_segm_mb = 50
peaks_per_centr_segm = 1e4
centr_segm_n = int(max(ds_size_mb // data_per_centr_segm_mb, centr_n // peaks_per_centr_segm, 32))
segm_bounds_q = [i * 1 / centr_segm_n for i in range(0, centr_segm_n)]
centr_segm_lower_bounds = np.quantile(first_peak_df_mz, segm_bounds_q)
logger.info(f'Generated {len(centr_segm_lower_bounds)} centroids bounds: {centr_segm_lower_bounds[0]}...{centr_segm_lower_bounds[-1]}')
return centr_segm_lower_bounds
def segment_centroids(pw, clip_centr_chunks_cobjects, centr_segm_lower_bounds, ds_segms_bounds, ds_segm_size_mb,
max_ds_segms_size_per_db_segm_mb, ppm):
centr_segm_n = len(centr_segm_lower_bounds)
centr_segm_lower_bounds = centr_segm_lower_bounds.copy()
# define first level segmentation and then segment each one into desired number
first_level_centr_segm_n = min(32, len(centr_segm_lower_bounds))
centr_segm_lower_bounds = np.array_split(centr_segm_lower_bounds, first_level_centr_segm_n)
first_level_centr_segm_bounds = np.array([bounds[0] for bounds in centr_segm_lower_bounds])
def segment_centr_df(centr_df, db_segm_lower_bounds):
first_peak_df = centr_df[centr_df.peak_i == 0].copy()
segment_mapping = np.searchsorted(db_segm_lower_bounds, first_peak_df.mz.values, side='right') - 1
first_peak_df['segm_i'] = segment_mapping
centr_segm_df = pd.merge(centr_df, first_peak_df[['formula_i', 'segm_i']], on='formula_i').sort_values('mz')
return centr_segm_df
def segment_centr_chunk(cobject, id, storage):
print(f'Segmenting clipped centroids dataframe chunk {id}')
centr_df = read_cloud_object_with_retry(storage, cobject, pd.read_msgpack)
centr_segm_df = segment_centr_df(centr_df, first_level_centr_segm_bounds)
def _first_level_upload(args):
segm_i, df = args
del df['segm_i']
return segm_i, storage.put_cobject(df.to_msgpack())
with ThreadPoolExecutor(max_workers=128) as pool:
sub_segms = [(segm_i, df) for segm_i, df in centr_segm_df.groupby('segm_i')]
sub_segms_cobjects = list(pool.map(_first_level_upload, sub_segms))
return dict(sub_segms_cobjects)
memory_capacity_mb = 512
first_futures = pw.map(segment_centr_chunk, clip_centr_chunks_cobjects, runtime_memory=memory_capacity_mb)
first_level_segms_cobjects = pw.get_result(first_futures)
append_pywren_stats(first_futures, memory_mb=memory_capacity_mb,
cloud_objects_n=len(first_futures) * len(centr_segm_lower_bounds))
def merge_centr_df_segments(segm_cobjects, id, storage):
print(f'Merging segment {id} clipped centroids chunks')
def _merge(cobject):
segm_centr_df_chunk = read_cloud_object_with_retry(storage, cobject, pd.read_msgpack)
return segm_centr_df_chunk
with ThreadPoolExecutor(max_workers=128) as pool:
segm = pd.concat(list(pool.map(_merge, segm_cobjects)))
def _second_level_segment(segm, sub_segms_n):
segm_bounds_q = [i * 1 / sub_segms_n for i in range(0, sub_segms_n)]
sub_segms_lower_bounds = np.quantile(segm[segm.peak_i == 0].mz.values, segm_bounds_q)
centr_segm_df = segment_centr_df(segm, sub_segms_lower_bounds)
sub_segms = []
for segm_i, df in centr_segm_df.groupby('segm_i'):
del df['segm_i']
sub_segms.append(df)
return sub_segms
init_segms = _second_level_segment(segm, len(centr_segm_lower_bounds[id]))
from annotation_pipeline.image import choose_ds_segments
segms = []
for init_segm in init_segms:
first_ds_segm_i, last_ds_segm_i = choose_ds_segments(ds_segms_bounds, init_segm, ppm)
ds_segms_to_download_n = last_ds_segm_i - first_ds_segm_i + 1
segms.append((ds_segms_to_download_n, init_segm))
segms = sorted(segms, key=lambda x: x[0], reverse=True)
max_ds_segms_to_download_n, max_segm = segms[0]
max_iterations_n = 100
iterations_n = 1
while max_ds_segms_to_download_n * ds_segm_size_mb > max_ds_segms_size_per_db_segm_mb and iterations_n < max_iterations_n:
sub_segms = []
sub_segms_n = math.ceil(max_ds_segms_to_download_n * ds_segm_size_mb / max_ds_segms_size_per_db_segm_mb)
for sub_segm in _second_level_segment(max_segm, sub_segms_n):
first_ds_segm_i, last_ds_segm_i = choose_ds_segments(ds_segms_bounds, sub_segm, ppm)
ds_segms_to_download_n = last_ds_segm_i - first_ds_segm_i + 1
sub_segms.append((ds_segms_to_download_n, sub_segm))
segms = sub_segms + segms[1:]
segms = sorted(segms, key=lambda x: x[0], reverse=True)
iterations_n += 1
max_ds_segms_to_download_n, max_segm = segms[0]
def _second_level_upload(df):
return storage.put_cobject(df.to_msgpack())
print(f'Storing {len(segms)} centroids segments')
with ThreadPoolExecutor(max_workers=128) as pool:
segms = [df for _, df in segms]
segms_cobjects = list(pool.map(_second_level_upload, segms))
return segms_cobjects
from collections import defaultdict
second_level_segms_cobjects = defaultdict(list)
for sub_segms_cobjects in first_level_segms_cobjects:
for first_level_segm_i in sub_segms_cobjects:
second_level_segms_cobjects[first_level_segm_i].append(sub_segms_cobjects[first_level_segm_i])
second_level_segms_cobjects = sorted(second_level_segms_cobjects.items(), key=lambda x: x[0])
second_level_segms_cobjects = [[cobjects] for segm_i, cobjects in second_level_segms_cobjects]
memory_capacity_mb = 2048
second_futures = pw.map(merge_centr_df_segments, second_level_segms_cobjects, runtime_memory=memory_capacity_mb)
db_segms_cobjects = list(np.concatenate(pw.get_result(second_futures)))
append_pywren_stats(second_futures, memory_mb=memory_capacity_mb, cloud_objects_n=centr_segm_n)
return db_segms_cobjects
|
# Mapping of roman numerals to equivalent decimal values.
# Includes each roman numeral and the subtractive value of one significant
# numeral smaller.
MAP = (
('M', 1000),
('CM', 900),
('D', 500),
('CD', 400),
('C', 100),
('XC', 90),
('L', 50),
('XL', 40),
('X', 10),
('IX', 9),
('V', 5),
('IV', 4),
('I', 1),
)
def integer_to_roman(integer):
result_numeral = ''
# Iterate over the roman numerals from most to least significant digit.
for numeral, decimal in MAP:
        # Integer division gives the number of whole times this numeral fits
        # into the remaining value; append the digit that many times.
        times = integer // decimal
        # Add this digit that many times to the result.
        result_numeral += numeral * times
        # Decrement our remaining count by the above value.
        integer -= times * decimal
return result_numeral
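# Example (assumed usage): integer_to_roman(1994) -> 'MCMXCIV' (M + CM + XC + IV)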
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.http import HttpResponse
from django.contrib.auth.models import User
from . import models
from . import serializers
from django.views.decorators.csrf import csrf_exempt
from django.http import JsonResponse
def home(request):
print("home")
return HttpResponse("Home Page")
@csrf_exempt
def createRecipe(request):
if(request.method == 'POST'):
query_username = request.POST['user']
query_recipe_name = request.POST['recipename']
user_obj = User.objects.filter(username=query_username)
        if(len(query_recipe_name.strip()) == 0):
            return HttpResponse("Recipe name can't be empty", status=422)
        if(len(user_obj) != 1):
            return HttpResponse("User doesn't exist", status=422)
        receipe_res = models.RecipeModel.objects.filter(user__username=query_username)
        print(len(receipe_res))
        if(len(receipe_res) >= 1):
            return HttpResponse("Recipe for user already exists", status=422)
elif(len(user_obj) == 1) :
p = models.RecipeModel(name=query_recipe_name, user=user_obj[0])
p.save()
return HttpResponse("Success Recipe Added!", status=200)
else:
content = {'Method Not Allowed': '405 Status'}
return JsonResponse(content, status=405)
def getRecipeOfGivenUser(request, username):
if(request.method == 'GET'):
user_obj = User.objects.filter(username=username)
        if (len(user_obj) != 1):
            return HttpResponse("User doesn't exist", status=422)
        receipe = models.RecipeModel.objects.filter(user__username=user_obj[0].username)
        if(len(receipe) < 1):
            return HttpResponse("No recipes for user", status=200)
recipe_data = serializers.RecipeSerialize(receipe, many=True)
return JsonResponse(recipe_data.data, safe=False, status=200)
else:
content = {'Method Not Allowed': '405 Status'}
return JsonResponse(content, status=405)
@csrf_exempt
def updateRecipe(request):
if(request.method=='POST'):
query_username = request.POST['user']
query_recipe_name = request.POST['recipename']
user_obj = User.objects.filter(username=query_username)
if (len(query_recipe_name.strip()) == 0):
return HttpResponse("Recipe name can't be empty", status=422)
if (len(user_obj) != 1):
return HttpResponse("User doesn't exist", status=422)
receipe_res = models.RecipeModel.objects.filter(user__username=query_username)
if (len(receipe_res) == 1):
receipe_res[0].name = query_recipe_name
receipe_res[0].save()
return HttpResponse("Success Recipe Updated!", status=200)
else:
return HttpResponse("No Recipe to Update!", status=422)
else:
content = {'Method Not Allowed': '405 Status'}
return JsonResponse(content, status=405)
def deleteRecipe(request, recipename):
if (request.method == 'GET'):
receipe = models.RecipeModel.objects.filter(name=recipename)
receipe.delete()
return HttpResponse("Recipe deleted!", status=200)
else:
content = {'Method Not Allowed': '405 Status'}
return JsonResponse(content, status=405)
|
import numpy as np
def batch_gradient_descent(model, eta, max_iterations=1e4, epsilon=1e-5, weights_start=None):
"""
Batch Gradient Descent
============================================================
Parameters::
````````````````````````````````````````````````````````````
+ model : optimization model object
model contains two objects
1. Data
2. Gradient Function to be applied (loss function)
+ eta : learning rate
+ max_iterations : maximum number of gradient iterations
+ epsilon : tolerance for stopping condition
+ weights_start : where to start (otherwise random)
Output::
````````````````````````````````````````````````````````````
+ trained_weights : final weights value
+ weights_history : weight values from each iteration
References:
````````````````````````````````````````````````````````````
Code reference : https://github.com/idc9/optimization_algos/blob/master/opt_algos/gradient_descent.py
Numpy Linalg Norm : https://docs.scipy.org/doc/numpy-1.15.1/reference/generated/numpy.linalg.norm.html
GD stopping Condition : https://stats.stackexchange.com/questions/33136/how-to-define-the-termination-condition-for-gradient-descent
"""
# Get data and Gradient function from the model
# Example: For Least Squares loss function, gradient_of_loss_function is = np.dot(self.X.T, np.dot(self.X, beta) - self.y)
gradient_of_loss_function = model.gradient_of_loss_function
data = model.data
# Check if initial weights are given, otherwise generate random weights
if weights_start:
weights_current = weights_start
else:
weights_current = np.random.normal(loc=0, scale=1, size=data)
# Keep track of how weights are changing over iterations
weights_history = []
for iterator in range(int(max_iterations)):
weights_history.append(weights_current)
# Update the gradient as per the formula or gradient descent
weights_next = weights_current - eta * gradient_of_loss_function(weights_current)
# If relative error is smaller than the epsilon then break the iterations
# Stop when the improvement drops below the tolerance threshold
# We have taken Frobenius norm here
if np.linalg.norm(weights_next - weights_current) <= epsilon * np.linalg.norm(weights_current):
break
        weights_current = weights_next
    print('Gradient Descent finished after ' + str(iterator) + ' iterations')
return {
'trained_weights': weights_current,
'weights_history': weights_history
}
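# A minimal smoke test, assuming `model.data` gives the weight-vector length and
# `gradient_of_loss_function` matches the least-squares form referenced above
# (the LeastSquaresModel class below is an assumption, not part of this module):
#
#   class LeastSquaresModel:
#       def __init__(self, X, y):
#           self.X, self.y = X, y
#           self.data = X.shape[1]
#       def gradient_of_loss_function(self, beta):
#           return np.dot(self.X.T, np.dot(self.X, beta) - self.y)
#
#   X = np.random.randn(100, 3)
#   y = X.dot(np.array([1.0, -2.0, 0.5]))
#   result = batch_gradient_descent(LeastSquaresModel(X, y), eta=1e-3)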
|
numeral = input('Enter a positive integer: ')
while True:
if str(numeral).isdigit():
numeral = int(numeral)
break
else:
        numeral = input('Invalid input, please try again: ')
max_num = 0
while numeral != 0:
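    # numeral % 10 peels off the last digit; numeral // 10 drops it.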
new_num = numeral % 10
if new_num > max_num:
max_num = new_num
numeral = numeral // 10
print(f'The largest digit in the entered number is {max_num}')
|
import os
import tarfile
import urllib.request
from urllib.parse import urlparse
from pathlib import Path
from tqdm import tqdm
class DownloadProgressBar(tqdm):
def update_to(self, b=1, bsize=1, tsize=None):
if tsize is not None:
self.total = tsize
self.update(b * bsize - self.n)
def download(data_file_url, output_dir):
file_name=os.path.basename(urlparse(data_file_url).path)
Path(output_dir).mkdir(parents=True, exist_ok=True)
output_file_path=os.path.join(output_dir, file_name)
if not os.path.isfile(output_file_path):
with DownloadProgressBar(unit='B', unit_scale=True, miniters=1, desc=data_file_url.split('/')[-1]) as t:
urllib.request.urlretrieve(data_file_url, filename=output_file_path, reporthook=t.update_to)
return output_file_path
def extract(targz_file_path):
# extract.
if targz_file_path.endswith(".tar.gz"):
print ("Extracting...")
model_dir = Path(targz_file_path).parent.absolute()
tar = tarfile.open(targz_file_path, "r:gz")
tar.extractall(model_dir)
tar.close()
#Path(output_file_path).unlink()
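# Hypothetical usage (the URL is an assumption):
#   targz = download("https://example.com/model.tar.gz", "models")
#   extract(targz)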
|
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 6 16:09:56 2020
@author: vikaa
"""
#import libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
#Load the data
df=pd.read_csv('Dia.csv') # Use to load data on Google Colab
print(df.head(7))#print the 7 rows of the dataset from the beginning
#Count the number of rows and columns in the data set
print(df.shape)
#Count the empty (NaN, NAN, na) values in each column
print(df.isna().sum()) #since it is a cleaned and preprocessed data we need not clean the data
#Get a count of the number of patients with 1 (predicted to be having diabetes) && 0(not having diabetes)
df['Outcome'].value_counts()
#Visualize this count
sns.countplot(df['Outcome'],label="Count")
#Look at the data types to see which columns need to be transformed / encoded to a number
print(df.dtypes) #since there is no strings to be encoded no need to use label encoder
#A “pairs plot” (scatterplot matrix) plots each variable in the data against every other variable
sns.pairplot(df, hue="Outcome") #plot a sample of the columns
#Get the correlation of the columns
print(df.corr())
#Visualize the correlation
plt.figure(figsize=(20,20)) #This is used to change the size of the figure/ heatmap
sns.heatmap(df.corr(), annot=True, fmt='.0%')
#Split the data into independent 'X' and dependent 'Y' variables
X = df.iloc[:, 0:8].values  # feature columns
Y = df.iloc[:, 8:].values   # outcome column
# Split the dataset into 75% Training set and 25% Testing set
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.25, random_state = 0)
# Scale the data to bring all features to the same level of magnitude
# This means the data will be within a specific range for example 0 -100 or 0 - 1
#Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
#Create a function within many Machine Learning Models
def models(X_train,Y_train):
#Using Logistic Regression Algorithm to the Training Set
from sklearn.linear_model import LogisticRegression
log = LogisticRegression(random_state = 0,solver='lbfgs')
log.fit(X_train, Y_train.ravel())
#Using KNeighborsClassifier Method of neighbors class to use Nearest Neighbor algorithm
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors = 5, metric = 'minkowski', p = 2)
knn.fit(X_train, Y_train.ravel())
#Using SVC method of svm class to use Support Vector Machine Algorithm
from sklearn.svm import SVC
svc_lin = SVC(kernel = 'linear', random_state = 0)
svc_lin.fit(X_train, Y_train.ravel())
#Using SVC method of svm class to use Kernel SVM Algorithm
from sklearn.svm import SVC
svc_rbf = SVC(kernel = 'rbf', random_state = 0)
svc_rbf.fit(X_train, Y_train.ravel())
#Using GaussianNB method of naïve_bayes class to use Naïve Bayes Algorithm
from sklearn.naive_bayes import GaussianNB
gauss = GaussianNB()
gauss.fit(X_train, Y_train.ravel())
#Using DecisionTreeClassifier of tree class to use Decision Tree Algorithm
from sklearn.tree import DecisionTreeClassifier
tree = DecisionTreeClassifier(criterion = 'entropy', random_state = 0)
tree.fit(X_train, Y_train.ravel())
#Using RandomForestClassifier method of ensemble class to use Random Forest Classification algorithm
from sklearn.ensemble import RandomForestClassifier
forest = RandomForestClassifier(n_estimators = 10, criterion = 'entropy', random_state = 0)
forest.fit(X_train, Y_train.ravel())
#print model accuracy on the training data.
print('[0]Logistic Regression Training Accuracy:', log.score(X_train, Y_train))
print('[1]K Nearest Neighbor Training Accuracy:', knn.score(X_train, Y_train))
print('[2]Support Vector Machine (Linear Classifier) Training Accuracy:', svc_lin.score(X_train, Y_train))
print('[3]Support Vector Machine (RBF Classifier) Training Accuracy:', svc_rbf.score(X_train, Y_train))
print('[4]Gaussian Naive Bayes Training Accuracy:', gauss.score(X_train, Y_train))
print('[5]Decision Tree Classifier Training Accuracy:', tree.score(X_train, Y_train))
print('[6]Random Forest Classifier Training Accuracy:', forest.score(X_train, Y_train))
return log, knn, svc_lin, svc_rbf, gauss, tree, forest
model = models(X_train,Y_train)
print(model)
#Show the confusion matrix and accuracy for all of the models on the test data
#Classification accuracy is the ratio of correct predictions to total predictions made.
from sklearn.metrics import confusion_matrix
for i in range(len(model)):
cm = confusion_matrix(Y_test, model[i].predict(X_test))
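    # sklearn's confusion matrix is [[TN, FP], [FN, TP]]: rows are actual
    # labels, columns are predictions.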
TN = cm[0][0]
TP = cm[1][1]
FN = cm[1][0]
FP = cm[0][1]
print(cm)
print('Model[{}] Testing Accuracy = "{}!"'.format(i, (TP + TN) / (TP + TN + FN + FP)))
print()# Print a new line
#Show other ways to get the classification accuracy & other metrics
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
for i in range(len(model)):
print('Model ',i)
# plotting the points
plt.plot(X_test, model[i].predict(X_test))
# naming the x axis
plt.xlabel('x - axis')
# naming the y axis
plt.ylabel('y - axis')
# giving a title to my graph
plt.title('Graphs for accuracy')
# function to show the plot
plt.show()
#Check precision, recall, f1-score
print( classification_report(Y_test, model[i].predict(X_test)) )
#Another way to get the models accuracy on the test data
print( accuracy_score(Y_test, model[i].predict(X_test)))
print()#Print a new line
#Print Prediction of Random Forest Classifier model
pred = model[6].predict(X_test)
print(pred)
#Print a space
print()
#Print the actual values
print(Y_test)
|
from __future__ import division
import natsort
# 1. Sort File Names in a proper way
## Make a list of file names like P_0.png, P_1.png
file_names = [f'P_{index}.png' for index in range(200)]
print('New List\t', file_names)
## Sort by sort()
file_names.sort()
print('Sort by sort\t', file_names)
## Sort by a key function (the first character here, so effectively a no-op)
def takeFirstElement(elem):
return elem[0]
file_names.sort(key=takeFirstElement)
print('Sort by index\t', file_names)
## Sort by sorted
student_tuples = [
('john', 'A', 15),
('jane', 'B', 12),
('dave', 'B', 10),
]
student_tuples = sorted(student_tuples, key=lambda student: student[2])  # sort by age
print('Sort by sorted\t', student_tuples)
## Sort by natsort
file_names = natsort.natsorted(file_names, reverse=False)
print('Sort by natsort\t', file_names)
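# natsorted orders embedded numbers numerically (P_0, P_1, ..., P_9, P_10, ...),
# whereas plain sort() is lexicographic (P_0, P_1, P_10, P_100, ...).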
|
#The Running time is O(n)
def maxset(A):
subarrays = []
length = len(A)
#print length
start = 0
end = 0
flag=0
maxsum=0
globalsum=0
maxarray=[]
globalarray=[]
for i in range(0,length-1):
if (A[i] >= 0 ):
if(A[i+1]>=0):
start = i
#print start
flag=1
maxsum=maxsum+A[i]
else:
end = i
maxsum=maxsum+A[i]
#print "The maxsum is =",maxsum
if(maxsum>=globalsum):
globalsum=maxsum
small_array = A[start-1:end + 1]
maxarray=small_array
else:
maxsum=0
continue
if(flag==1):
end=i+1
maxsum = maxsum + A[i+1]
#print "The maxsum is =", maxsum
if (maxsum >= globalsum):
globalsum = maxsum
small_array = A[start:end + 1]
subarrays.append(small_array)
print (globalsum)
print "The maximum array is=",maxarray
A=[1,2,3,-5,-7,2,3]
maxset(A)
|
import sqlite3
import json
#facebookid to json
conn = sqlite3.connect("users.db")
c = conn.cursor()
def create_table_user_info():
c.execute('CREATE TABLE IF NOT EXISTS accessUser(firstName TEXT, lastName TEXT, facebookID TEXT, smartcarToken TEXT, expDate TEXT)')
def check_new_user(FBid):
c.execute("SELECT facebookID FROM accessUser")
stringList=[]
    for elem in c.fetchall():
        stringList.append(elem[0])
    if FBid in stringList:
return False
else:
return True
def data_entry_user_info(firstname, lastname, facebookid, accesstoken, expdate):
if check_new_user(facebookid) == True:
with conn:
c.execute("INSERT INTO accessUser VALUES(:firstName, :lastName, :facebookID, :smartcarToken, :expDate)", {'firstName': firstname, 'lastName': lastname, 'facebookID': facebookid, 'smartcarToken': accesstoken, 'expDate': expdate})
conn.commit()
def user_info(FBid):
c.execute("SELECT * FROM accessUser")
all_users_list = []
for lists in c.fetchall():
stringList=[]
lists = list(lists)
for elem in lists:
stringList.append(elem.encode('utf-8'))
all_users_list.append(stringList)
for elem in all_users_list:
if elem[2] == FBid:
userDict = {
'firstname': elem[0],
'lastname': elem[1],
'facebookID': elem[2],
'accessToken': elem[3],
'expDate': elem[4]
}
print(userDict)
returnJSON=json.dumps(userDict)
return returnJSON
def create_table_car():
c.execute('CREATE TABLE IF NOT EXISTS userCarInfo(facebookID TEXT,jsonObject JSON)')
def car_data_entry(facebookid, jsonObject):
with conn:
c.execute("INSERT INTO userCarInfo VALUES(:facebookID, :jsonobject)", {'facebookID': facebookid, 'jsonobject': jsonObject})
conn.commit()
def get_user_car_info(FBid):
c.execute("SELECT * FROM accessUser")
for row in c.fetchall():
if 'facebookid' == FBid:
return row[1]
c.close()
conn.close()
|
from django.db import models
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
import time
import django_rq
from decimal import Decimal
import socket
from django.conf import settings
from exchange.audit_logging import AuditLogger
from exchange.thread_local import get_current_logging_info, \
set_current_logging_info
def singleton(cls, *args, **kw):
instances = {}
def _singleton():
if cls not in instances:
instances[cls] = cls(*args, **kw)
return instances[cls]
return _singleton
# Uncomment to switch back to singleton
# @singleton
class Socket():
def __init__(self):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect(
(settings.QUOTE_SERVER_HOST, settings.QUOTE_SERVER_PORT))
class Stock:
def __init__(self, symbol, price):
self.symbol = symbol
self.price = Decimal(price)
def check_sell_trigger(self):
sell_trigger = SellTrigger.objects.filter(
sell__stock_symbol=self.symbol, active=True)
for trigger in sell_trigger:
trigger.check_validity(self.price)
def check_buy_trigger(self):
buy_trigger = BuyTrigger.objects.filter(
buy__stock_symbol=self.symbol, active=True)
for trigger in buy_trigger:
trigger.check_validity(self.price)
def verify_triggers(self, logging_info):
# Set logging info, since this is executed in a different thread than
# the views
set_current_logging_info(logging_info)
self.check_sell_trigger()
self.check_buy_trigger()
def execute_quote_request(self, user_id):
request = "{},{}\r".format(self.symbol, user_id)
socket = Socket()
socket.socket.send(request.encode())
data = socket.socket.recv(1024)
response = data.decode().split(",") # log the timestamp etc from this response
quote_price = response[0]
self.price = Decimal(quote_price)
logging_info = get_current_logging_info()
AuditLogger.log_quote_server_event(logging_info['server'],
logging_info['transaction_num'],
quote_price, self.symbol, user_id,
response[3], response[4])
@classmethod
def quote(cls, symbol, user_id):
stock = cache.get(symbol)
if(stock is None):
stock = cls(symbol=symbol, price=0)
stock.execute_quote_request(user_id)
cache.set(symbol, stock, 60)
logging_info = get_current_logging_info()
django_rq.enqueue(stock.verify_triggers, logging_info)
return stock
class User(models.Model):
user_id = models.CharField(max_length=100, primary_key=True)
name = models.TextField(max_length=100)
balance = models.DecimalField(
max_digits=65, decimal_places=2, default=0)
password = models.CharField(max_length=50)
@classmethod
def get(cls, user_id):
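        # Cache-aside lookup: fall back to the database on a miss and prime
        # the cache with fresh per-session buy/sell stacks.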
user = cache.get(user_id)
if(user is None):
user = cls.objects.get(user_id=user_id)
user.sell_stack = []
user.buy_stack = []
cache.set(user.user_id, user)
if not hasattr(user, 'buy_stack'):
user.buy_stack = []
if not hasattr(user, 'sell_stack'):
user.sell_stack = []
return user
def perform_buy(self, symbol, amount):
buy, err = Buy.create(stock_symbol=symbol,
cash_amount=amount, user=self)
if(err):
return err
if not hasattr(self, 'buy_stack'):
self = User.get(self.user_id)
self.buy_stack.append(buy)
cache.set(self.user_id, self)
def cancel_buy(self):
buy = self.pop_from_buy_stack()
if buy is not None:
buy.cancel(self)
def perform_sell(self, symbol, amount):
user_stock, created = UserStock.objects.get_or_create(
stock_symbol=symbol, user=self)
sell, err = Sell.create(stock_symbol=symbol,
cash_amount=amount, user=self)
if(err):
return err
if not hasattr(self, 'sell_stack'):
self = User.get(self.user_id)
self.sell_stack.append(sell)
cache.set(self.user_id, self)
def cancel_sell(self):
sell = self.pop_from_sell_stack()
if sell is not None:
sell.cancel()
def set_buy_amount(self, symbol, amount):
if(self.balance < amount):
return "user balance too low"
try:
buy_trigger = BuyTrigger.objects.get(
user__user_id=self.user_id,
buy__stock_symbol=symbol
)
err = buy_trigger.update_cash_amount(amount)
if err:
return err
except ObjectDoesNotExist:
buy, err = Buy.create(stock_symbol=symbol,
cash_amount=amount, user=self)
if(err):
return err
buy.save()
            buy_trigger = BuyTrigger.objects.create(
                user=self,
                buy=buy
            )
def set_sell_amount(self, symbol, amount):
try:
sell_trigger = SellTrigger.objects.get(
user__user_id=self.user_id,
sell__stock_symbol=symbol,
)
err = sell_trigger.update_cash_amount(amount)
if err:
return err
except ObjectDoesNotExist:
sell, err = Sell.create(
stock_symbol=symbol, cash_amount=amount, user=self)
if(err):
return err
sell.save()
sell_trigger = SellTrigger.objects.create(
user=self,
sell=sell
)
def set_buy_trigger(self, symbol, price):
try:
buy_trigger = BuyTrigger.objects.get(
buy__stock_symbol=symbol, user__user_id=self.user_id)
buy_trigger.update_trigger_price(price)
except ObjectDoesNotExist:
return "Trigger requires a buy amount first, please make one"
def set_sell_trigger(self, symbol, price):
try:
sell_trigger = SellTrigger.objects.get(
sell__stock_symbol=symbol, user__user_id=self.user_id)
sell_trigger.update_trigger_price(price)
except ObjectDoesNotExist:
return "Trigger requires a sell amount first, please make one"
def cancel_set_buy(self, symbol):
try:
buy_trigger = BuyTrigger.objects.get(
buy__stock_symbol=symbol, user__user_id=self.user_id)
err = None
if not buy_trigger.active:
err = "No active trigger found for set_buy corresponding to {0}".format(
symbol)
else:
err = "Disabling trigger for {0}".format(symbol)
buy_trigger.cancel()
return err
except ObjectDoesNotExist:
return "set buy not found"
def cancel_set_sell(self, symbol):
try:
sell_trigger = SellTrigger.objects.get(
sell__stock_symbol=symbol, user__user_id=self.user_id)
err = None
if not sell_trigger.active:
err = "No active trigger found for set_sell corresponding to {0}".format(
symbol)
else:
err = "Disabling trigger for {0}".format(symbol)
sell_trigger.cancel()
return err
except ObjectDoesNotExist:
return "sell sell not found"
def update_balance(self, change):
self = User.get(self.user_id)
self.balance = Decimal(self.balance) + Decimal(change)
cache.set(self.user_id, self)
self.save()
action = 'add' if change >= 0 else 'remove'
logging_info = get_current_logging_info()
AuditLogger.log_account_transaction(logging_info['server'],
logging_info['transaction_num'],
action, self.user_id, abs(change))
def pop_from_buy_stack(self):
if hasattr(self, 'buy_stack'):
buy = self.buy_stack.pop() if self.buy_stack else None
cache.set(self.user_id, self)
return buy
return None
def pop_from_sell_stack(self):
if hasattr(self, 'sell_stack'):
sell = self.sell_stack.pop() if self.sell_stack else None
cache.set(self.user_id, self)
return sell
return None
class UserStock(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
stock_symbol = models.CharField(max_length=3)
amount = models.PositiveIntegerField(default=0)
def update_amount(self, change):
self.amount += change
self.save()
class Sell(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
stock_symbol = models.CharField(max_length=3)
intended_cash_amount = models.DecimalField(
max_digits=65, decimal_places=2, default=0)
actual_cash_amount = models.DecimalField(
max_digits=65, decimal_places=2, default=0)
stock_sold_amount = models.PositiveIntegerField(default=0)
sell_price = models.DecimalField(
max_digits=65, decimal_places=2, default=0)
@classmethod
def create(cls, stock_symbol, cash_amount, user):
stock = Stock.quote(stock_symbol, user.user_id)
sell = cls(user=user, stock_symbol=stock_symbol)
err = sell.update_cash_amount(cash_amount)
if(err):
return None, err
err = sell.update_price(stock.price)
if(err):
return None, err
return sell, None
def update_price(self, stock_price):
self.cancel()
user_stock, created = UserStock.objects.get_or_create(
user=self.user, stock_symbol=self.stock_symbol)
self.stock_sold_amount = min(
self.intended_cash_amount//stock_price,
user_stock.amount)
if(self.stock_sold_amount <= 0):
return "Update trigger price failed"
self.actual_cash_amount = self.stock_sold_amount*stock_price
self.timestamp = time.time()
self.sell_price = stock_price
user_stock.update_amount(self.stock_sold_amount*-1)
def update_cash_amount(self, amount):
stock = Stock.quote(self.stock_symbol, self.user.user_id)
user_stock, created = UserStock.objects.get_or_create(
user=self.user, stock_symbol=self.stock_symbol)
stock_sold_amount = amount//stock.price
if(stock_sold_amount > user_stock.amount):
return "Not enough stock, have {0} need {1}".format(user_stock.amount, stock_sold_amount)
self.intended_cash_amount = amount
def commit(self, user):
user.update_balance(self.actual_cash_amount)
self.save()
def cancel(self):
user_stock, created = UserStock.objects.get_or_create(
user=self.user, stock_symbol=self.stock_symbol)
user_stock.update_amount(self.stock_sold_amount)
class Buy(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
stock_symbol = models.CharField(max_length=3)
intended_cash_amount = models.DecimalField(
max_digits=65, decimal_places=2, default=0)
actual_cash_amount = models.DecimalField(
max_digits=65, decimal_places=2, default=0)
stock_bought_amount = models.PositiveIntegerField(default=0)
purchase_price = models.DecimalField(
max_digits=65, decimal_places=2, default=0)
@classmethod
def create(cls, stock_symbol, cash_amount, user):
stock = Stock.quote(stock_symbol, user.user_id)
buy = cls(user=user, stock_symbol=stock_symbol,
purchase_price=stock.price)
err = buy.update_cash_amount(cash_amount)
if(err):
return None, err
buy.update_price(stock.price)
buy.timestamp = time.time()
return buy, None
def update_cash_amount(self, amount):
if amount > self.user.balance:
return "Not enough balance, have {0} need {1}".format(self.user.balance, amount)
updated_amount = (self.intended_cash_amount - amount)
self.user.update_balance(updated_amount)
self.intended_cash_amount = abs(updated_amount)
def update_price(self, stock_price):
self.stock_price = stock_price
self.stock_bought_amount = self.intended_cash_amount//self.stock_price
self.actual_cash_amount = self.stock_bought_amount*self.stock_price
def cancel(self, user):
user.update_balance(self.intended_cash_amount)
def commit(self):
user_stock, created = UserStock.objects.get_or_create(
user=self.user, stock_symbol=self.stock_symbol)
user_stock.update_amount(self.stock_bought_amount)
self.save()
class SellTrigger(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
sell = models.ForeignKey(Sell, on_delete=models.CASCADE)
# Cash amount and trigger price are set.
active = models.BooleanField(default=False)
def check_validity(self, price):
sell_object = self.sell
user_object = self.user
if(sell_object.sell_price <= price):
logging_info = get_current_logging_info()
AuditLogger.log_system_event(logging_info['server'],
logging_info['transaction_num'], logging_info['command'],
username=user_object.user_id,
stock_symbol=sell_object.stock_symbol,
funds=user_object.balance)
sell_object.commit(user_object)
self.active = False
self.save()
def update_cash_amount(self, amount):
err = self.sell.update_cash_amount(amount)
if err:
return err
self.save()
def update_trigger_price(self, price):
err = self.sell.update_price(price)
if(err is None):
self.active = True
self.save()
return err
def cancel(self):
self.sell.cancel()
self.active = False
self.save()
class BuyTrigger(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
buy = models.ForeignKey(Buy, on_delete=models.CASCADE)
# Cash amount and trigger price are set.
active = models.BooleanField(default=False)
def check_validity(self, price):
buy_object = self.buy
user_object = self.user
if(buy_object.purchase_price >= price):
logging_info = get_current_logging_info()
AuditLogger.log_system_event(logging_info['server'],
logging_info['transaction_num'], logging_info['command'],
username=user_object.user_id,
stock_symbol=buy_object.stock_symbol,
funds=user_object.balance)
buy_object.update_price(price)
buy_object.commit()
self.active = False
self.save()
def update_cash_amount(self, amount):
err = self.buy.update_cash_amount(amount)
self.save()
return err
def update_trigger_price(self, price):
self.buy.update_price(price)
self.active = True
self.save()
def cancel(self):
self.buy.cancel(self.user)
self.active = False
self.save()
def is_expired(previous_time):
    elapsed_time = time.time() - previous_time
    return elapsed_time > 60
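# Illustrative sketch (added; not part of the original module): the intended
# lifecycle of a buy trigger. Assumes a Django shell with these models loaded;
# the user id and amounts are hypothetical.
#
#   user = User.get('jdoe')
#   err = user.set_buy_amount('ABC', Decimal('100.00'))  # reserves funds, creates a BuyTrigger
#   if err is None:
#       user.set_buy_trigger('ABC', Decimal('25.00'))    # activates the trigger
#   # Quote handling enqueues stock.verify_triggers (see top of file), which calls
#   # BuyTrigger.check_validity() and commits the Buy once a new quote is at or
#   # below the stored price.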
|
from rest_framework import serializers
from progress_analyzer.models import CumulativeSum,\
OverallHealthGradeCumulative, \
NonExerciseStepsCumulative, \
SleepPerNightCumulative, \
MovementConsistencyCumulative, \
ExerciseConsistencyCumulative, \
NutritionCumulative, \
ExerciseStatsCumulative, \
AlcoholCumulative, \
PenaltyCumulative
class OverallHealthGradeCumulativeSerializer(serializers.ModelSerializer):
user_cum = serializers.PrimaryKeyRelatedField(read_only = True)
class Meta:
model = OverallHealthGradeCumulative
fields = ('__all__')
class NonExerciseStepsCumulativeSerializer(serializers.ModelSerializer):
user_cum = serializers.PrimaryKeyRelatedField(read_only = True)
class Meta:
model = NonExerciseStepsCumulative
fields = ('__all__')
class SleepPerNightCumulativeSerializer(serializers.ModelSerializer):
user_cum = serializers.PrimaryKeyRelatedField(read_only = True)
class Meta:
model = SleepPerNightCumulative
fields = ('__all__')
class MovementConsistencyCumulativeSerializer(serializers.ModelSerializer):
user_cum = serializers.PrimaryKeyRelatedField(read_only = True)
class Meta:
model = MovementConsistencyCumulative
fields = ('__all__')
class ExerciseConsistencyCumulativeSerializer(serializers.ModelSerializer):
user_cum = serializers.PrimaryKeyRelatedField(read_only = True)
class Meta:
model = ExerciseConsistencyCumulative
fields = ('__all__')
class NutritionCumulativeSerializer(serializers.ModelSerializer):
user_cum = serializers.PrimaryKeyRelatedField(read_only = True)
class Meta:
model = NutritionCumulative
fields = ('__all__')
class ExerciseStatsCumulativeSerializer(serializers.ModelSerializer):
user_cum = serializers.PrimaryKeyRelatedField(read_only = True)
class Meta:
model = ExerciseStatsCumulative
fields = ('__all__')
class AlcoholCumulativeSerializer(serializers.ModelSerializer):
user_cum = serializers.PrimaryKeyRelatedField(read_only = True)
class Meta:
model = AlcoholCumulative
fields = ('__all__')
class PenaltyCumulativeSerializer(serializers.ModelSerializer):
user_cum = serializers.PrimaryKeyRelatedField(read_only = True)
class Meta:
model = PenaltyCumulative
fields = ('__all__')
class CumulativeSumSerializer(serializers.ModelSerializer):
user = serializers.PrimaryKeyRelatedField(read_only=True)
overall_health_grade_cum = OverallHealthGradeCumulativeSerializer()
non_exercise_steps_cum = NonExerciseStepsCumulativeSerializer()
sleep_per_night_cum = SleepPerNightCumulativeSerializer()
movement_consistency_cum = MovementConsistencyCumulativeSerializer()
exercise_consistency_cum = ExerciseConsistencyCumulativeSerializer()
nutrition_cum = NutritionCumulativeSerializer()
exercise_stats_cum = ExerciseStatsCumulativeSerializer()
alcohol_cum = AlcoholCumulativeSerializer()
penalty_cum = PenaltyCumulativeSerializer()
class Meta:
model = CumulativeSum
fields = ('user','created_at','updated_at','overall_health_grade_cum','non_exercise_steps_cum',
'sleep_per_night_cum','movement_consistency_cum','exercise_consistency_cum','nutrition_cum',
'exercise_stats_cum','alcohol_cum','penalty_cum')
read_only_fields = ('updated_at',)
    def _update_helper(self, instance, validated_data):
        '''
        Iterate over all fields of the given instance and update each field
        with the new value if present in validated_data, otherwise keep the
        old value.
        '''
        fields = [f.name for f in instance._meta.get_fields()]
        for f in fields:
            setattr(instance, f,
                    validated_data.get(f, getattr(instance, f)))
        instance.save()
def create(self, validated_data):
user = self.context['request'].user
overall_health_grade_cum_data = validated_data.pop('overall_health_grade_cum')
non_exercise_steps_cum_data = validated_data.pop('non_exercise_steps_cum')
sleep_per_night_cum_data = validated_data.pop('sleep_per_night_cum')
mc_cum_data = validated_data.pop('movement_consistency_cum')
ec_cum_data = validated_data.pop('exercise_consistency_cum')
nutrition_cum_data = validated_data.pop('nutrition_cum')
exercise_stats_cum_data = validated_data.pop('exercise_stats_cum')
alcohol_cum_data = validated_data.pop('alcohol_cum')
penalty_cum_data = validated_data.pop('penalty_cum')
user_cum = CumulativeSum.objects.create(user=user, **validated_data)
OverallHealthGradeCumulative.objects.create(user_cum=user_cum, **overall_health_grade_cum_data)
NonExerciseStepsCumulative.objects.create(user_cum=user_cum,**non_exercise_steps_cum_data)
SleepPerNightCumulative.objects.create(user_cum=user_cum,**sleep_per_night_cum_data)
MovementConsistencyCumulative.objects.create(user_cum=user_cum,**mc_cum_data)
ExerciseConsistencyCumulative.objects.create(user_cum=user_cum,**ec_cum_data)
NutritionCumulative.objects.create(user_cum=user_cum,**nutrition_cum_data)
ExerciseStatsCumulative.objects.create(user_cum=user_cum,**exercise_stats_cum_data)
AlcoholCumulative.objects.create(user_cum=user_cum,**alcohol_cum_data)
PenaltyCumulative.objects.create(user_cum=user_cum,**penalty_cum_data)
return user_cum
def update(self,instance,validated_data):
overall_health_grade_cum_data = validated_data.pop('overall_health_grade_cum')
non_exercise_steps_cum_data = validated_data.pop('non_exercise_steps_cum')
sleep_per_night_cum_data = validated_data.pop('sleep_per_night_cum')
mc_cum_data = validated_data.pop('movement_consistency_cum')
ec_cum_data = validated_data.pop('exercise_consistency_cum')
nutrition_cum_data = validated_data.pop('nutrition_cum')
exercise_stats_cum_data = validated_data.pop('exercise_stats_cum')
alcohol_cum_data = validated_data.pop('alcohol_cum')
penalty_cum_data = validated_data.pop('penalty_cum')
overall_health_grade_obj = instance.overall_health_grade_cum
self._update_helper(overall_health_grade_obj,overall_health_grade_cum_data)
non_exercise_steps_obj = instance.non_exercise_steps_cum
self._update_helper(non_exercise_steps_obj, non_exercise_steps_cum_data)
sleep_per_night_obj = instance.sleep_per_night_cum
self._update_helper(sleep_per_night_obj,sleep_per_night_cum_data)
mc_obj = instance.movement_consistency_cum
self._update_helper(mc_obj,mc_cum_data)
ec_obj = instance.exercise_consistency_cum
self._update_helper(ec_obj,ec_cum_data)
nutrition_obj = instance.nutrition_cum
self._update_helper(nutrition_obj,nutrition_cum_data)
exercise_stat_obj = instance.exercise_stats_cum
self._update_helper(exercise_stat_obj,exercise_stats_cum_data)
alcohol_obj = instance.alcohol_cum
self._update_helper(alcohol_obj,alcohol_cum_data)
penalty_obj = instance.penalty_cum
self._update_helper(penalty_obj,penalty_cum_data)
return instance
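# Illustrative sketch (added): the nested create()/update() above expect one
# sub-object per cumulative section in the payload. The inner dicts depend on
# the section models, so they are left as placeholders here:
#
#   payload = {
#       "overall_health_grade_cum": {...}, "non_exercise_steps_cum": {...},
#       "sleep_per_night_cum": {...}, "movement_consistency_cum": {...},
#       "exercise_consistency_cum": {...}, "nutrition_cum": {...},
#       "exercise_stats_cum": {...}, "alcohol_cum": {...}, "penalty_cum": {...},
#   }
#   serializer = CumulativeSumSerializer(data=payload, context={'request': request})
#   serializer.is_valid(raise_exception=True)
#   serializer.save()   # dispatches to create() or update()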
|
from collections import defaultdict
import random
class Dice:
""" Representation of the five dice
Attributes:
throws (int): How often the dice were rolled.
faces (list[int]): the current faces of the dice.
        counts (dict[int->int]): occurrences of each face value.
"""
def __init__(self):
self.throws = 0
self.faces = [0, 0, 0, 0, 0]
self.counts = {}
    def roll(self, keep=None):
        # keep nothing by default; verify() would fail on None
        if keep is None:
            keep = []
        else:
            # making sure only existing faces are kept
            keep = self.verify(keep)
        # increase counter
        self.throws += 1
        to_throw = 5 - len(keep)
        # update faces
        self._update(keep + [random.randint(1, 6) for i in range(to_throw)])
def _update(self, values):
""" updates the face values of the dice"""
self.faces = values
self._update_counts()
def _update_counts(self):
""" update face counter """
self.counts = defaultdict(int)
for num in self.faces:
self.counts[num] += 1
def verify(self, keep):
""" verifies that keep only contains existing faces """
verified = []
available = self.faces[:]
for k in keep:
if k in available:
verified.append(k)
available.remove(k)
return verified
def __repr__(self):
""" string representation """
return " ".join([str(d) for d in sorted(self.faces)])
|
from ui.UI import UI
from battle.round.RoundAction import RoundAction
# Contains the processing for fighters and round actions
class Round:
def __init__(self, round_actions):
self.round_actions = round_actions
# processes the round actions one by one...
def process_round_actions(self):
for action in self.round_actions:
if action.targets is None:
UI().show_text(action.battle_effect.get_battle_text())
elif action.battle_effect.source_fighter.current_hp > 0:
action.apply_battle_effect()
|
from ELMo.ELMoNet import ELMoNet
import torch
class sent2elmo():
def __init__(self, char_lexicon, config, device, model_path):
self.char_lexicon = char_lexicon
self.config = config
self.device = device
self.model_path = model_path
checkpoint = torch.load(self.model_path, map_location=lambda storage, loc: storage.cuda(0))
num_embeddings = len(self.char_lexicon)
padding_idx = self.char_lexicon['<pad>']
self.model = ELMoNet(num_embeddings,
self.config['embedding_dim'],
padding_idx,
self.config['filters'],
self.config['n_highways'],
self.config['projection_size'],
checkpoint['vocab_size'])
self.model.to(self.device)
self.model.load_state_dict(checkpoint['model'])
self.model.eval()
def get_feature(self, sentences):
test_data, test_data_reverse = self.create_dataset(sentences)
test_data = torch.tensor(test_data)
test_data = test_data[:,:-1,:]
test_data_reverse = torch.tensor(test_data_reverse)
test_data_reverse = test_data_reverse[:,:-1,:]
with torch.no_grad():
forward_feature, backward_feature = self.model.forward(test_data.to(self.device), test_data_reverse.to(self.device),None,None, False)
forward_feature = forward_feature[:,:-1,:]
backward_feature = backward_feature[:,:-1,:]
feature = torch.cat((forward_feature, backward_feature), dim=2)
return feature
def create_dataset(self, sentences):
max_len = 0
max_word_len = 16
special_word = ['<unk>', '<bos>', '<eos>', '<pad>']
char2id = []
for i, sentence in enumerate(sentences):
""" create a list for every sentence """
char2id.append([])
for word in sentence:
tmp = []
if len(word) > max_word_len:
tmp.append(self.char_lexicon['<unk>'])
elif word not in special_word:
if len(word) > max_len:
max_len = len(word)
for char in word:
if char not in self.char_lexicon:
tmp.append(self.char_lexicon['<unk>'])
else:
tmp.append(self.char_lexicon[char])
else:
tmp.append(self.char_lexicon[word])
char2id[i].append(tmp)
max_len = min(max_word_len, max_len)
""" padding the character of each word """
for i, sentence in enumerate(char2id):
for j, word in enumerate(sentence):
if len(word) < max_len:
for _ in range(max_len-len(word)):
char2id[i][j].append(self.char_lexicon['<pad>'])
char2id_reverse = []
for i, sentence in enumerate(char2id):
char2id_reverse.append([])
for rev in reversed(sentence):
char2id_reverse[i].append(rev)
return char2id, char2id_reverse
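# Illustrative usage sketch (added; char_lexicon, config and the checkpoint path
# are placeholders for artifacts produced by the ELMo training code):
#
#   embedder = sent2elmo(char_lexicon, config, torch.device('cuda:0'), 'elmo.ckpt')
#   features = embedder.get_feature([['hello', 'world'], ['another', 'sentence']])
#   # features: (batch, seq_len, 2 * projection_size), the forward and backward
#   # hidden states concatenated along the last dimension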
|
#VIEWS IS THE HANDLER
# / means the root of the current drive;
# ./ means the current directory;
# ../ means the parent of the current directory.
#Defining what you will show
#gets information from the front-end
#all you are doing here is post
from django.shortcuts import render, HttpResponse, HttpResponseRedirect, redirect
from django.contrib.auth import authenticate, login
from django.template import loader
from django.core.exceptions import *
from django.views import generic
from django.views.generic import View
#import specific models
from .models import Billboard
from .forms import UserForm
#get request
def index(request):
#return HttpResponse("Hello, world. You're at the polls index.")
posts=Billboard.objects.order_by('-created_date')
context={'post_list':posts}
return render(request, 'billboard/billboard.html', context)
#query database
#store in context dictionary
#send that to template (render function)
def newpost(request):
title= request.POST.get('title')
text_area=request.POST.get('text')
author=request.POST.get('author')
new_entry=Billboard(title=title,text=text_area,author=author)
new_entry.save()
    return render(request, 'billboard/newentry.html', {'newentry': new_entry})
#class based views
class UserFormView(View):
form_class=UserForm
template_name='billboard/registration_form.html'
#display blank form
def get(self, request):
form=self.form_class(None)
return render(request,self.template_name, {"form":form})
#process form data
def post(self,request):
form=self.form_class(request.POST)
if form.is_valid():
user=form.save(commit=False)
#cleaned(Normalized) Data
#formatted properly
username=form.cleaned_data['username']
password = form.cleaned_data['password']
user.set_password(password)
user.save()
            #return user object if credentials are correct
            user=authenticate(username=username,password=password)
            if user is not None:
                # log the user in and redirect; the URL name 'index' is an
                # assumption, adjust to the project's urlconf
                login(request, user)
                return redirect('index')
        return render(request, self.template_name, {"form": form})
|
from __future__ import print_function
import numpy as np
from numpy import linalg as la
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cmx
ACTIVATION_THRESHOLD = 4.5
NO_PREDICTION_PENALTY_COST = 2
def preprocess_activations(sigmoid_gradient, things):
#return 1 /(1+np.exp(-sigmoid_gradient*things))
return things
def get_activation_mean_locations(coords, place_cell_activations):
n = place_cell_activations.shape[1]
preprocessed_activations = preprocess_activations(0.05, place_cell_activations)
mean_activation_locations = np.zeros((n,2))
for i in range(n):
mean_activation_locations[i] = np.mean(coords[preprocessed_activations[:,i] > ACTIVATION_THRESHOLD, :], axis=0)
return mean_activation_locations
def average_cost(coords, place_cell_activations, activation_locations):
cost = 0
m = place_cell_activations.shape[0]
preprocessed_activations = preprocess_activations(0.1, place_cell_activations)
predicted_locations = np.zeros((m,2))
prediction_count = 0
for t in range(m):
locations = activation_locations[preprocessed_activations[t,:] > ACTIVATION_THRESHOLD,:]
if len(locations) > 0:
predicted_locations[t,:] = np.mean(locations, axis=0)
cost = cost + la.norm(coords[t,:] - predicted_locations[t,:])
prediction_count = prediction_count + 1
else:
cost = cost + NO_PREDICTION_PENALTY_COST
print('Prediction count: ', prediction_count, ' of ', m)
return cost/m
def calculate_fitness(coords, place_cell_activations, activation_locations):
cost = average_cost(coords, place_cell_activations, activation_locations)
return 1/cost if cost > 0 else 0
def plot_activations(title, trajectory, features, single_plot):
''' Example plot:
plot_activations('Place cells: ', self.coordinate_matrix, test_features, False)
'''
n = features.shape[1]
m = features.shape[0]
if single_plot:
plots_per_fig = 1
place_cells_per_plot = n
else:
plots_per_fig = 9
place_cells_per_plot = 4
fig_dim = np.sqrt(plots_per_fig)
colormap = plt.cm.Set1
colours = [colormap(i) for i in np.linspace(0, 0.9, place_cells_per_plot)]
light_grey = '#DDDDDD'
almost_black = '#262626'
fig_index = 0
for i in range(n):
if (i % (plots_per_fig*place_cells_per_plot)) == 0:
print('fig ' + str(i % plots_per_fig*place_cells_per_plot))
fig = plt.figure()
fig.patch.set_facecolor('black')
if (i % place_cells_per_plot) == 0:
print('subplot ' + str((i // place_cells_per_plot) % plots_per_fig))
ax = fig.add_subplot(fig_dim, fig_dim, \
1+(i // place_cells_per_plot) % plots_per_fig,\
axisbg=almost_black)
plot_text = title + str(i+1) + '-' + str(i+place_cells_per_plot)
ax.text(0.5, 1, plot_text, verticalalignment='top',
horizontalalignment='center', transform=ax.transAxes,
color='white', fontsize=8)
plt.setp( ax.get_xticklabels(), visible=False)
plt.setp( ax.get_yticklabels(), visible=False)
ax.scatter(trajectory[1:m:17,0], trajectory[1:m:17,1], \
edgecolor=light_grey, \
facecolor=light_grey, \
s=0.01, alpha=0.5)
activations = trajectory[features[:,i] > ACTIVATION_THRESHOLD, :]
ax.scatter(activations[:,0], activations[:,1], \
edgecolor=colours[i%place_cells_per_plot], \
facecolor=colours[i%place_cells_per_plot], \
s=30,
alpha=0.2)
plt.show()
print('Press enter to continue.')
raw_input()
return
|
import math
dic = {}
com_dic = {}
white = 0
black = 1
range_x = 25
range_y = 25
def readFile(f,value):
for line in f:
dic[line[0:-1]] = value
def writeFile(fileNameBlack,fileNameWhite):
f1 = open(fileNameBlack, 'w', encoding='utf-8')
count1 = 0
f2 = open(fileNameWhite, 'w', encoding='utf-8')
count2 = 0
for index,value in com_dic.items():
if value == white:
f1.seek(count1)
f1.write(str(index))
f1.write("\n")
count1 = f1.tell()
else:
f2.seek(count2)
f2.write(str(index))
f2.write("\n")
count2 = f2.tell()
def initArray(dict,start,end):
array = []
for i in range(start,end):
array.append(dict[str(i)])
return array
def initDict(array,count):
base = count * range_x
for index,value in enumerate(array):
num = int(base + index)
com_dic[num] = value
def makeValue(array):
    # total of all cell values; use the builtin sum instead of shadowing it
    return sum(array)
def valueArray(array):
res = makeValue(array)
if res >= 3:
return colorArray(array,black)
elif res <= 1:
return colorArray(array,white)
else:
if array[0] == array[2]:
return colorArray(array,white)
else:
return colorArray(array,black)
def colorArray(array,value):
for index,ele in enumerate(array):
array[index] = value
return array
def updateArray(firstLine,secondLine):
array = []
if range_y%2 == 1:
for i in range(0,range_y-1,2):
array.append(firstLine[i])
array.append(firstLine[i+1])
array.append(secondLine[i])
array.append(secondLine[i+1])
array = valueArray(array)
firstLine[i] = array[0]
firstLine[i+1] = array[1]
secondLine[i] = array[2]
secondLine[i+1] = array[3]
array = []
else:
for i in range(0, range_y, 2):
array.append(firstLine[i])
array.append(firstLine[i + 1])
array.append(secondLine[i])
array.append(secondLine[i + 1])
array = valueArray(array)
firstLine[i] = array[0]
firstLine[i + 1] = array[1]
secondLine[i] = array[2]
secondLine[i + 1] = array[3]
array = []
return firstLine,secondLine
def fixArray(x,y):
arr = []
array = []
sumEle = x*y
for i in range(0,sumEle+1,x):
arr.append(i)
print(arr)
if y%2 == 1:
for i in range(0,y-1,2):
arr1 = initArray(dic,arr[i],arr[i+1])
arr2 = initArray(dic,arr[i+1],arr[i+2])
arr1,arr2 = updateArray(arr1,arr2)
array = arr1 +arr2
initDict(array,i)
array = []
array = initArray(dic,arr[-1]-x,arr[-1])
# print(arr[-1]-x,arr[-1])
# print(array)
initDict(array,y-1)
else:
for i in range(0, y, 2):
arr1 = initArray(dic, arr[i], arr[i + 1])
arr2 = initArray(dic, arr[i + 1], arr[i + 2])
arr1, arr2 = updateArray(arr1, arr2)
array = arr1 + arr2
initDict(array, i)
array = []
def main():
f_red = open("D:\\red", encoding='utf-8')
f_green = open("D:\\green", encoding='utf-8')
readFile(f_red,white)
readFile(f_green,black)
fixArray(range_x,range_y)
f1 = "D:\\11"
f2 = "D:\\22"
print(com_dic)
    writeFile(f1,f2)
if __name__ == '__main__':
main()
|
def make_pretty(func):
print("it is outer function")
def thecaller():
print("I got decorated")
func() # calling the passed function
return thecaller
def ordinary():
print("I am ordinary")
# driver code
ordinary()
pretty = make_pretty(ordinary) # pass one function to another: 'make_pretty' is the decorator, 'thecaller' is the decorated wrapper
pretty() # calling the returned function i.e. 'thecaller' function
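# The same wiring can be written with decorator syntax; "@make_pretty" is exactly
# equivalent to "also_ordinary = make_pretty(also_ordinary)":
@make_pretty
def also_ordinary():
    print("I am ordinary too")

also_ordinary()  # runs thecaller(), which prints the extra line and then the body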
|
from dronekit import connect, VehicleMode, LocationGlobalRelative, APIException
import time
import socket
import exceptions
import math
import argparse  # parse values passed on the command line to this script
#####################functions####
def connectMyCopter():
parser=argparse.ArgumentParser(description='commands')
    parser.add_argument('--connect')  # connection string for the vehicle (e.g. ip:port)
args = parser.parse_args()
connection_string=args.connect
if not connection_string:
import dronekit_sitl
sitl = dronekit_sitl.start_default()
connection_string = sitl.connection_string()
    vehicle=connect(connection_string,wait_ready=True)  # connect to the vehicle; wait_ready=True blocks until core attributes are downloaded
return vehicle
def arm_and_takeoff(targetHeight):
while vehicle.is_armable != True:
print ("Waiting for vehicle to become armable")
time.sleep(1)
print("Vehicle is now armable.")
vehicle.mode = VehicleMode("GUIDED")
while vehicle.mode != "GUIDED":
print("wainting for drone to enter GUIDED mode ")
time.sleep(1)
print("Vehicle now in GUIDED mode.")
vehicle.armed = True
while vehicle.armed == False:
print ("Waiting for vehicle to become armed")
time.sleep(1)
print("Look out! props are spinning!!")
vehicle.simple_takeoff(targetHeight) ##meters
while True:
print("Current altitude: %d"%vehicle.location.global_relative_frame.alt)
if vehicle.location.global_relative_frame.alt >= .95*targetHeight:
break
time.sleep(1)
print("Target altitude reached !!")
return None
def get_distance_meters(targetLocation,currentLocation):
dLat = targetLocation.lat - currentLocation.lat
dLon = targetLocation.lon - currentLocation.lon
return math.sqrt((dLon*dLon)+(dLat*dLat))*1.113195e5
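# Note (added): 1.113195e5 is metres per degree at the WGS84 equator
# (2 * pi * 6378137 m / 360 ~= 111319.5 m/deg). Treating lat/lon offsets as a
# flat grid ignores the cos(latitude) shrink of longitude degrees, so this
# distance is only a short-range approximation away from the equator.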
def goto(targetLocation):
distanceToTargetLocation = get_distance_meters(targetLocation, vehicle.location.global_relative_frame)
vehicle.simple_goto(targetLocation)
    while vehicle.mode.name == "GUIDED":
currentDistance = get_distance_meters(targetLocation, vehicle.location.global_relative_frame)
if currentDistance < distanceToTargetLocation*0.01:
print ("Reached target waypoint")
time.sleep(2)
break
time.sleep(1)
return None
###>> pythonconnection_template.py --connect 127.0.0.1:14550
#main execution#
wp1 = LocationGlobalRelative(44.50202, -88.060316, 10)
vehicle =connectMyCopter()
vehicle.wait_ready('autopilot_version')
arm_and_takeoff(10)
goto(wp1)
vehicle.mode = VehicleMode("LAND")
while vehicle.mode != "LAND":
print("wainting for drone to enter LAND mode ")
time.sleep(1)
print("Vehicle in LAND mode")
#This will keep QGC alive
while True:
time.sleep(1)
##This run like: launchSitl location_based_movement.py
|
#!/usr/bin/python27
# coding: utf8
from sklearn import neighbors
import hocmidp
import warnings
import cPickle
warnings.filterwarnings("ignore")
seg1 = hocmidp.hocmidp('sac1.hoc')
seg2 = hocmidp.hocmidp('sac2.hoc')
seg3 = hocmidp.hocmidp('sac3.hoc')
seg4 = hocmidp.hocmidp('sac4.hoc')
seg5 = hocmidp.hocmidp('sac5.hoc')
seg6 = hocmidp.hocmidp('sac6.hoc')
seg7 = hocmidp.hocmidp('sac7.hoc')
tocels = {}
def segname(i):
if i in range(29):
name = 'dend[%d]' % i
elif i in range(29, 96):
name = 'apic[%d]' % (i-29)
elif i in range(96, 262):
name = 'axon[%d]' % (i-96)
else: print 'error'
return name
def bt2(seg, k):
global seg1
global tocels
num = 0
lst = []
for ele in seg1:
j = seg1.index(ele)
        tem = seg[:]  # copy the list: without [:] the original segment list would be mutated below
tem[0:0] = [ele]
tree = neighbors.KDTree(tem)
i = tree.query_radius(tem[0], r=8.2, count_only=True)
# if i > 1:
# if not tocels.has_key(segname(j)):
# tocels[segname(j)] = 1
# else:
# tocels[segname(j)] = tocels[segname(j)] + 1
num = i + num
ind = tree.query_radius(tem[0], r=8.2)
ind = ind[0].tolist()
for i in range(len(ind)):
ind[i] = int(ind[i])-1
del ind[ind.index(-1)] # usable for segname()
for k in ind:
x = (j, k)
lst.append(x)
return lst
# num12 = bt2(seg2[:], 2)
# num13 = bt2(seg3[:], 3)
# num14 = bt2(seg4[:], 4)
# num15 = bt2(seg5[:], 5)
# num16 = bt2(seg6[:], 6)
# num17 = bt2(seg7[:], 7)
# print num12, num13, num14, num15, num16, num17
# print num12+num13+num14+num15+num16+num17
lst12 = bt2(seg2[:], 2)
lst13 = bt2(seg3[:], 3)
lst14 = bt2(seg4[:], 4)
lst15 = bt2(seg5[:], 5)
lst16 = bt2(seg6[:], 6)
lst17 = bt2(seg7[:], 7)
cPickle.dump(lst12, open("E:\\test\\lst12.txt", "wb"))
cPickle.dump(lst13, open("E:\\test\\lst13.txt", "wb"))
cPickle.dump(lst14, open("E:\\test\\lst14.txt", "wb"))
cPickle.dump(lst15, open("E:\\test\\lst15.txt", "wb"))
cPickle.dump(lst16, open("E:\\test\\lst16.txt", "wb"))
cPickle.dump(lst17, open("E:\\test\\lst17.txt", "wb"))
# i = 0
# for k in tocels:
# if tocels[k] > 3 :
# i = i + 1
# print tocels[k]
# print i
# print len(tocels)
if __name__ == "__main__":
print ('This is main of module "countcon.py"')
bt2(seg2[:], 2)
|
import argparse
from pyrosetta import *
from pyrosetta.rosetta.core.select.residue_selector import InterGroupInterfaceByVectorSelector, ChainSelector, ResidueIndexSelector, OrResidueSelector, NotResidueSelector, AndResidueSelector
from pyrosetta.rosetta.core.pack.task import TaskFactory
from pyrosetta.rosetta.core.pack.task.operation import \
IncludeCurrent, ExtraRotamers, OperateOnResidueSubset, \
PreventRepackingRLT, RestrictToRepackingRLT
from pyrosetta.rosetta.protocols.denovo_design.movers import FastDesign
from pyrosetta.rosetta.core.kinematics import FoldTree
from pyrosetta.rosetta.protocols.enzdes import ADD_NEW, AddOrRemoveMatchCsts
from pyrosetta.teaching import SimpleThreadingMover
from pyrosetta.rosetta.protocols.relax import FastRelax
def apply_constraints(pose):
""" Applies enzdes constraints form the input CST file to a pose """
cstm = AddOrRemoveMatchCsts()
cstm.set_cst_action(ADD_NEW)
cstm.apply(pose)
return pose
def make_fold_tree():
"""
Make a fold tree that connects the res 385 to the
substrate terminal residue. More efficient sampling.
Presently hard-coded for Htra1 PDZ
I385 is residue 10.
Last residue of PDZ chain A is 105
Terminal residue of substrate chain B is 112
Substrate chain B is 7 residues
"""
ft = FoldTree()
ft.add_edge(10, 1, -1)
ft.add_edge(10, 105, -1)
ft.add_edge(10, 112, 1)
    ft.add_edge(112, 106, -1)
assert ft.check_fold_tree()
return ft
def thread_to_htra1(sequence, pose):
"""
Uses SimpleThreadingMover to swap out the native substrate and put in a
    new test sequence docked with Htra1 protease. The substrate peptide begins
    at residue 106 of the pose (chain B; see make_fold_tree), based on 3nzi.
"""
assert len(sequence) == 7
# Constructing and applying mover
tm = SimpleThreadingMover(sequence, 106)
threaded_pose = Pose()
threaded_pose.assign(pose)
tm.apply(threaded_pose)
return threaded_pose
def shell_selection(selection_1, selection_2, base_range):
shell_select = InterGroupInterfaceByVectorSelector()
shell_select.group1_selector(selection_1)
shell_select.group2_selector(selection_2)
shell_select.nearby_atom_cut(base_range)
shell_select.vector_dist_cut(base_range + 2)
return AndResidueSelector(shell_select, NotResidueSelector(selection_2))
parser = argparse.ArgumentParser()
parser.add_argument('model', help='Select starting model.')
parser.add_argument('job', help='job from SLURM')
args = parser.parse_args()
#opts = '-use_input_sc -ex1 -ex2 -enzdes::cstfile htra1_pdz.cst -run:preserve_header'
opts = '-enzdes::cstfile htra1_pdz.cst -run:preserve_header'
init(opts)
pdz = ChainSelector('A')
peptide = ChainSelector('B')
designable = shell_selection(pdz, peptide, 6)
packable = shell_selection(pdz, designable, 6)
mobile = OrResidueSelector(designable, packable)
mobile.add_residue_selector(peptide)
static = NotResidueSelector(mobile)
sf = get_fa_scorefxn()
tf = TaskFactory()
tf.push_back(IncludeCurrent())
tf.push_back(ExtraRotamers(0, 1, 1))
tf.push_back(ExtraRotamers(0, 2, 1))
prevent = PreventRepackingRLT() # No repack, no design
repack = RestrictToRepackingRLT() # No design
tf.push_back(OperateOnResidueSubset(prevent, static))
tf.push_back(OperateOnResidueSubset(repack, packable))
tf.push_back(OperateOnResidueSubset(repack, peptide))
fr = FastRelax()
fr.set_scorefxn(sf)
fd = FastDesign()
fd.set_scorefxn(sf)
fd.set_task_factory(tf)
pose = pose_from_pdb(args.model)
pose.fold_tree(make_fold_tree())
pose = apply_constraints(pose)
pose = thread_to_htra1('QDYEPEA', pose)
jnam = 'pdz_designs/{}_designed_{}.pdb'.format(args.model, args.job)
fr.apply(pose)
fd.apply(pose)
pose.dump_pdb(jnam)
|
#!/usr/bin/env python3
import VariablesParser as vp
import Command
import io
class PipeParser:
"""
    Class that processes one segment of a pipeline,
    i.e. everything between "|", invoking the required commands
"""
def __init__(self):
self.var_parser = vp.VarParser()
        # names of the available built-in commands
self.__commands = ['wc', 'echo', 'cat', 'pwd', 'exit', 'ls', 'cd', 'grep']
def parse(self, command, input_stream, arguments=None):
"""
        :param command: list of tokens in this pipe segment
        :param input_stream: incoming stream
        :return: stream with the result of this pipe segment
"""
command = self.var_parser.set_values(command)
command_name = command[0].text
if command_name in self.__commands:
if command_name == 'echo':
new_command = Command.Echo(command[1:], input_stream)
elif command_name == 'pwd':
new_command = Command.Pwd(command[1:], input_stream)
elif command_name == 'exit':
new_command = Command.Exit(command[1:], input_stream)
elif command_name == 'wc':
new_command = Command.Wc(command[1:], input_stream)
elif command_name == 'ls':
new_command = Command.Ls(command[1:], input_stream)
elif command_name == 'cd':
new_command = Command.Cd(command[1:], input_stream)
elif command_name == 'grep':
new_command = Command.Grep(command[1:], input_stream, arguments=arguments)
else:
new_command = Command.Cat(command[1:], input_stream)
try:
return new_command.execute()
except Exception:
                raise  # re-raise the original exception, keeping its traceback
else:
if (command[0].text.find('=') > 0
and command[0].quot == 'NOT_QUOTING'):
input_stream.close()
try:
self.var_parser.parse(command)
except Exception:
                    print(command[0].text + ': command not found')
raise Exception
outstream = io.StringIO()
return outstream
else:
try:
command_name = Command.ShellProcess(command, input_stream)
return command_name.execute()
except Exception:
print("Command or arguments are wrong")
                raise
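# Illustrative sketch (added): a PipeParser handles one "|"-separated segment at
# a time; a surrounding shell loop would chain the returned streams, e.g.
#
#   parser = PipeParser()
#   out1 = parser.parse(tokens_for('echo hello'), io.StringIO())
#   out2 = parser.parse(tokens_for('wc'), out1)
#
# where tokens_for() stands in for the tokenizer that produces the token objects
# (with .text and .quot attributes) this class expects.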
|
import argparse
import logging
import os
import sys
from string import Template
class Reshaper:
""" Reshapes race results with lap data into a Tableau-readable form """
def __init__(self, inFoldName, outFileName, raceName=""):
self.__logger = logging.getLogger(__name__)
self.__inFoldName = inFoldName
self.__outFileName = outFileName
self.__rawFiles = []
self.__NUM_EXPECTED_FILES = 7
self.__SCHEMA_FLAT = ["Category", "Place", "Bib", "Name", "Team name", "Age", "Year of birth", "Time"]
self.__SCHEMA_RESHAPE = ["Lap", "Lap time"]
self.__DEFAULT_VALUE = "-"
self.__IN_DELIM = "\t"
self.__OUT_DELIM = ","
self.__DEFAULT_VALUES = {}
for col in self.__SCHEMA_FLAT:
self.__DEFAULT_VALUES[col] = self.__DEFAULT_VALUE
self.__DEFAULT_VALUES["Category"] = "Cat 1/2 Men"
if raceName:
self.__DEFAULT_VALUES["Race"] = raceName
self.__SCHEMA_FLAT.insert(0, "Race")
def getFiles(self):
self.__logger.info("Searching for files in " + self.__inFoldName)
self.__logger.debug("Contents:")
files = []
for test in os.listdir(self.__inFoldName):
test = os.path.join(self.__inFoldName, test)
if not os.path.isfile(test):
self.__logger.debug(" " + test + " - Not a file")
continue
if test[-4:] != ".txt":
self.__logger.debug(" " + test + " - Skipped (looking for .txt files)")
continue
self.__logger.debug(" " + test)
files.append( test )
if len(files) != self.__NUM_EXPECTED_FILES:
            self.__logger.warning("Expected " + str(self.__NUM_EXPECTED_FILES) + " race files, only " + str(len(files)) + " present.")
self.__rawFiles = files
def outputData(self):
with open(self.__outFileName, "w") as outF:
self.__writeHeader(outF)
self.__logger.debug("Processing:")
for file in self.__rawFiles:
self.__logger.debug(" " + file)
self.__processFile(file, outF)
self.__logger.info("All data written to " + self.__outFileName)
def __writeHeader(self, outF):
outF.write(self.__OUT_DELIM.join(self.__SCHEMA_FLAT))
outF.write(self.__OUT_DELIM)
outF.write(self.__OUT_DELIM.join(self.__SCHEMA_RESHAPE))
outF.write("\n")
def __isHeader(self, line):
return "Place" in line and "Bib" in line
def __isBlank(self, line):
return not line.strip()
    def __getNumLaps(self, line):
        # NOTE: relies on module-level constants (IGNORED_COLS, S_FLAT_S, S_FLAT_E)
        # that are not defined in this file; this helper is unused in the current flow.
        return len(line.split(self.__IN_DELIM)) - IGNORED_COLS - (S_FLAT_E-S_FLAT_S)
def __isDNS(self, line):
return line.startswith("-")
def __isCategory(self, line):
return line.startswith("\t\t")
def __getCategory(self, line):
return line.strip()
def __getFlatRow(self, schema_map):
row = []
for col in self.__SCHEMA_FLAT:
row.append(schema_map[col])
return self.__OUT_DELIM.join(row)
def __writeReshapedData(self, outF, flatRow, schema_map):
lap = 1
col = Template("Lap $lap").substitute(lap=lap)
while col in schema_map:
outF.write(flatRow)
outF.write(self.__OUT_DELIM)
outF.write(self.__OUT_DELIM.join([str(lap), schema_map[col]]))
outF.write("\n")
lap += 1
col = Template("Lap $lap").substitute(lap=lap)
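    # Illustrative example (added): a wide webscorer row such as
    #     Place  Bib  Name  ...  Lap 1  Lap 2  Lap 3
    #     1      42   Jane  ...  10:01  10:05  10:12
    # is emitted as one output row per lap, with the flat columns repeated:
    #     <flat columns for Jane>,1,10:01
    #     <flat columns for Jane>,2,10:05
    #     <flat columns for Jane>,3,10:12
    # (the row values above are hypothetical)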
def __processFile(self, file, outF):
default_values = self.__DEFAULT_VALUES.copy()
schema = self.__SCHEMA_FLAT
num_laps = -1
with open(file, "r") as inF:
for line in inF:
if self.__isHeader(line):
schema = line.split(self.__IN_DELIM)
continue
if self.__isBlank(line):
continue
if self.__isDNS(line):
continue
if self.__isCategory(line):
default_values["Category"] = self.__getCategory(line)
continue
# else it's a racer with a place
vars = line.split(self.__IN_DELIM)
schema_map = default_values.copy()
for i in range(len(schema)):
if "Place" in schema[i]: # Weird case where this gets extra noise in the Cat 1/2 Men single file
schema[i] = "Place"
vars[i] = vars[i].replace("\"","").replace("\'","").replace(",","")
schema_map[schema[i].strip()] = vars[i]
flat_row = self.__getFlatRow(schema_map)
self.__writeReshapedData(outF, flat_row, schema_map)
if __name__ == "__main__":
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler(sys.stdout))
logger.setLevel(logging.INFO)
parser = argparse.ArgumentParser(description="Process MFG race results into a single Tableau-readable\
file")
parser.add_argument('inFoldName', metavar='inputFolder', help='Path to the folder containing the result\
files for each start time, download from webscorer using "Download complete results"/"Tab-delimited\
TXT file"')
parser.add_argument('outFileName', metavar='outputFile', help="Path/name of the csv file to be created\
for feeding into Tableau")
parser.add_argument('-r', dest="raceName", help="Name of the race")
meg = parser.add_mutually_exclusive_group()
meg.add_argument("-v", action="store_true", help="Print verbose information about program process")
args = parser.parse_args()
if args.v:
logger.setLevel(logging.DEBUG)
reshaper = Reshaper(args.inFoldName, args.outFileName, args.raceName)
reshaper.getFiles()
reshaper.outputData()
|
# Write a program which takes 2 digits, X,Y as input and generates a 2-dimensional array.
# The element value in the i-th row and j-th column of the array should be i*j.
# Note: i=0,1,...,X-1; j=0,1,...,Y-1.
# Example
# Suppose the following inputs are given to the program:
# 3,5
# Then, the output of the program should be:
# [[0, 0, 0, 0, 0], [0, 1, 2, 3, 4], [0, 2, 4, 6, 8]]
raw = input('Give me two numbers separated by a comma (X,Y): ')
dimensions = [int(num) for num in raw.split(',')]
x = dimensions[0]
y = dimensions[1]
matrix = list()
for i in range(x):
dimension = list()
for j in range(y):
dimension.append(i*j)
matrix.append(dimension)
print(matrix)
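# The same table as a nested list comprehension (added for comparison):
assert matrix == [[i * j for j in range(y)] for i in range(x)]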
|
from graph import *
import math
import random
def oval_with_angle(x, y, size, angle, color):
"""Function draw the oval with given angle , coordinates, size, color"""
brushColor(color)
penColor(color)
angle = math.radians(angle)
point_massive_for_oval = []
for j in range(361):
a = 2 * size * math.cos(j)
b = size * math.sin(j)
a, b = a * math.cos(angle) - b * math.sin(angle), -1 * a * math.sin(angle) + b * math.cos(angle)
k = (a + x, b + y)
point_massive_for_oval.append(k)
polygon(point_massive_for_oval)
brushColor('black')
def spots(x, y, angle, size):
"""Draw the spots on mushroom. Coordinates mus be the same as in the head function mushroom"""
a = [[0, 0.2], [-0.7, 0.6], [0.65, - 0.4], [-1.7, 0.2], [1.1, 0.2], [-0.65, -0.65]]
if angle == 0:
for j in range(6):
a[j][0], a[j][1] = x + a[j][0] * size, y + a[j][1] * size
oval_with_angle(a[j][0], a[j][1], random.randint(10, 19) / 100 * size, angle, 'white')
elif angle > 0:
for j in range(6):
a[j][0], a[j][1] = x + a[j][0] * size, y + a[j][1] * size
length = math.pow(pow(x - a[j][0], 2) + pow(y - a[j][1], 2), 0.5)
start_angle = math.asin((a[j][0] - x) / length)
delta_x = length * math.sin(start_angle + math.radians(angle))
delta_y = length * math.cos(start_angle + math.radians(angle))
oval_with_angle(x + delta_x, y + delta_y, random.randint(10, 19) / 100 * size, angle, 'white')
else:
for j in range(6):
a[j][0], a[j][1] = x + a[j][0] * size, y + a[j][1] * size
length = math.pow(pow(x - a[j][0], 2) + pow(y - a[j][1], 2), 0.5)
start_angle = math.asin((a[j][0] - x) / length)
delta_x = length * math.sin(start_angle - math.radians(angle))
delta_y = length * math.cos(start_angle + math.radians(angle))
oval_with_angle(x + delta_x, y + delta_y, random.randint(10, 19) / 100 * size, angle, 'white')
def mushroom(x, y, angle, size):
"""Draw the mushroom. Coordinates is the right and left corners of the rectangle, which is the border of oval"""
if angle >= 0:
oval_with_angle(x + 2 * size * math.sin(math.radians(angle)), y + 1.5 * size, size * 0.6, angle + 90, 'white')
else:
oval_with_angle(x + 2 * size * math.sin(math.radians(angle)), y + 1.5 * size, size * 0.6, angle + 90, 'white')
oval_with_angle(x, y, size, angle + math.pi / 2, 'red')
spots(x, y, angle, size)
def first_plan(max_width, width_mushrooms, height_max):
"""Draw the line of mushrooms on the first plane"""
for j in range(5):
x_coord = (max_width - width_mushrooms) / 5 * j + width_mushrooms
mushroom(x_coord, height_max, random.randint(-15, 15), random.randint(15, 50))
def spine(x, y, angle, size):
"""Draw the standalone spine in x, y coordinates with angle and size"""
penColor('black')
brushColor(46, 36, 36)
x1 = (x + size * math.sin(angle), y - size * math.cos(angle))
x2 = (x + 0.1 * size * math.cos(angle), y + math.sin(angle))
points = [(x, y), x1, x2]
polygon(points)
def spines_random(start_x, start_y, element_numbers, size, hedgehog_size):
"""Draw the field of spines on the back of hedgehog"""
for j in range(element_numbers):
x = random.randrange(int(start_x - hedgehog_size), int(start_x + hedgehog_size))
y = pow(math.fabs(1 - pow(x - start_x, 2) / pow(hedgehog_size, 2)), 0.5) * 0.5 * hedgehog_size
y = random.randrange(int(start_y - y - 1), int(start_y + y + 1))
        angle = math.radians(random.randint(-30, 30))
spine(x, y, angle, size)
spine(x - size / element_numbers, y, angle, size)
def hedgehog(x, y, size):
"""Draw the hedgehog"""
brushColor(66, 48, 48)
oval(x - 1.1 * size, y + 0.38 * size, x - 0.7 * size, y + 0.22 * size)
oval(x + 1.1 * size, y + 0.38 * size, x + 0.7 * size, y + 0.22 * size)
oval(x - size, y - 0.5 * size, x + size, y + 0.5 * size)
oval(x - 0.9 * size, y + 0.48 * size, x - 0.5 * size, y + 0.32 * size)
oval(x + 0.9 * size, y + 0.48 * size, x + 0.5 * size, y + 0.32 * size)
spines_random(x, y, 90, 0.8 * size, size)
brushColor(66, 48, 48)
oval(x + 0.8 * size, y + 0.2 * size, x + 1.5 * size, y - 0.2 * size)
brushColor('black')
oval(x + 1.45 * size, y, x + 1.52 * size, y - 0.06 * size)
oval(x + 1.1 * size, y + 0.06 * size, x + 1.2 * size, y - 0.06 * size)
oval(x + 1.25 * size, y - 0.16 * size, x + 1.35 * size, y - 0.04 * size)
mushroom(x + 0.1 * size, y - 1.1 * size, -15, size * 0.2)
brushColor('red')
penColor(66, 48, 48)
penSize(5)
oval(x + 1.2 * size, y - 0.2 * size, x + 0.4 * size, y - size)
brushColor(163, 136, 69)
oval(x - 1.2 * size, y - 0.2 * size, x - 0.4 * size, y - size)
oval(x - 1.1 * size, y - 0.2 * size, x - 0.3 * size, y - size)
penSize(1)
spines_random(x, y, 80, 0.8 * size, size)
def light(x, y, light_width, light_height):
"""Draw the rectangle by coordinates with parameters width and height"""
brushColor('yellow')
rectangle(x, y, x + light_width, y + light_height)
width = 1600
height = 900
windowSize(width, height)
penColor(0, 0, 0)
brushColor(0, 150, 0)
rectangle(0, 0, width, height / 3 * 2)
penColor(0, 0, 0)
brushColor('grey')
rectangle(0, height / 3 * 2, width, height)
light(0, 0, width / 15, height / 3 * 2 + 10)
hedgehog(width * 0.3, height / 3 * 2, 60)
light(0 + width / 7, 0, width / 6, height - 15)
light(0.9 * width, 0, width * 0.09, 0.75 * height)
light(0.75 * width, 0, 0.1 * width, height * 0.85)
first_plan(width, width / 2, height * 0.93)
hedgehog(width / 3 * 2, height * 0.8, 150)
hedgehog(width * 0.01, height / 8 * 7, 60)
hedgehog(width * 0.95, height / 3 * 2, 90)
run()
|
import subprocess
import os
import sys
import re
sys.path.insert(0, os.path.join("tools", "families"))
import fam
import fam_data
import saved_metrics
import run_all_species
import generate_families_with_subsampling
from run_all_species import SpeciesRunFilter
import plot_speciesrax
import generate_families_with_filter as cov_filter
do_run = False
do_plot = True
launch_mode = "normald"
cores = 40
coverages = [1.0, 0.9, 0.7, 0.5, 0.0]
run_filter = SpeciesRunFilter()
run_filter.disable_all()
run_filter.duptree = True
run_filter.njrax = True
run_filter.cherry = True
run_filter.njst = True
run_filter.astralpro = True
run_filter.starting_gene_trees = ["raxml-ng"]
run_filter.speciesrax = True
run_filter.speciesraxprune = True
run_filter.speciesraxperfamily = True
run_filter.stag = True
run_filter.cleanup = True
experiments = []
dna_model = "GTR+G"
experiments.append(("ensembl_98_ncrna_primates", dna_model, coverages))
experiments.append(("ensembl_98_ncrna_lowprimates", dna_model, coverages))
experiments.append(("ensembl_98_ncrna_mammals", dna_model, coverages))
experiments.append(("ensembl_98_ncrna_vertebrates", dna_model, coverages))
experiments.append(("ensembl_98_ncrna_sauropsids", dna_model, coverages))
#experiments.append(("cyano_empirical", "LG+G"))
#experiments.append(("cyano_empirical", "LG+G+I"))
plot_metric_names = ["species_unrooted_rf"]
methods_tuples = []
methods_tuples.append(("speciesrax-dtl-raxml-ng-perfam-hybrid", "SpeciesRax", None))
methods_tuples.append(("speciesrax-prune", "SpeciesRaxPrune", None))
methods_tuples.append(("astralpro-raxml-ng", "Astral-Pro", None))
methods_tuples.append(("njrax-mininj-raxml-ng", "MiniNJ", None))
#methods_tuples.append(("njrax-cherry-raxml-ng", "CherryMerging", None))
#methods_tuples.append(("njrax-cherrypro-raxml-ng", "CherryMergingPro", None))
methods = []
methods_dict = {}
for t in methods_tuples:
methods.append(t[0])
style = None
if ("MCMC" in t[1]):
style = "dashed"
methods_dict[t[0]] = (t[1], style, t[2])
def get_param_coverage(key, dataset_name):
assert(key == "coverage")
print(dataset_name)
if (not "mincov" in dataset_name):
return str(0.0)
return dataset_name.split("mincov")[-1]
def plot_coverage(initial_dataset, subst_model, coverages, metric_names, methods, methods_dict, prefix):
for metric_name in metric_names:
output = prefix + "_" + initial_dataset + "_" + subst_model + ".svg"
grouped_datasets = {}
for coverage in coverages:
datasets = [cov_filter.get_output_dir(initial_dataset, coverage, min_sites = 0)]
grouped_datasets[datasets[0]] = datasets
plot_speciesrax.plot(grouped_datasets, "coverage", methods, methods_dict, subst_model, metric_name, output, get_param_coverage, title = initial_dataset)
def run_with_coverage(input_datadir, subst_model, coverage, min_sites = 0):
output_dir = cov_filter.get_output_dir(input_datadir, coverage, min_sites)
if (not os.path.isdir(output_dir)):
cov_filter.generate(input_datadir, coverage, min_sites)
run_filter.run_reference_methods(output_dir, subst_model, cores, launch_mode)
if (do_run):
  for experiment in experiments:
    datadir = fam.get_datadir(experiment[0])
    subst_model = experiment[1]
    coverages = experiment[2]
    for coverage in coverages:
      run_with_coverage(datadir, subst_model, coverage, min_sites = 0)
if (do_plot):
for experiment in experiments:
dataset, subst_model, coverages = experiment
prefix = "coverage"
plot_coverage(dataset, subst_model, coverages, plot_metric_names, methods, methods_dict, prefix)
|
from pymytools import (
timerun,
systools,
pyio,
basemap,
)
|
import cv2 as cv
import numpy as np
def nothing(x):
pass
img = np.zeros((300,512,3),np.uint8)
cv.namedWindow('image')
cv.createTrackbar("parameterA","image",0,20,nothing)
cv.createTrackbar("parameterB","image",0,20,nothing)
cv.createTrackbar("parameterC","image",0,20,nothing)
switch = "0:OFF\n1:ON"
cv.createTrackbar(switch,"image",0,1,nothing)
while(1):
cv.imshow("image",img)
k = cv.waitKey(1)
if k == ord('q'):
break
a = cv.getTrackbarPos("parameterA","image")
b = cv.getTrackbarPos("parameterB","image")
c = cv.getTrackbarPos("parameterC","image")
s = cv.getTrackbarPos(switch,"image")
if s == 0:
img[:]=0
else:
img[:]=[a*12,b*12,c*12]
cv.destroyAllWindows()
|
# -*- coding: utf-8 -*-
# flake8: noqa: E501
from __future__ import unicode_literals
from kinopoisk.person import Person
from .base import BaseTest
class PersonTest(BaseTest):
def test_person_manager_with_one_result(self):
persons = Person.objects.search('Гуальтиеро Якопетти')
self.assertEqual(len(persons), 1)
p = persons[0]
self.assertEqual(p.id, 351549)
self.assertEqual(p.name, 'Гуалтьеро Якопетти')
self.assertEqual(p.year_birth, 1919)
self.assertEqual(p.name_en, 'Gualtiero Jacopetti')
def test_person_manager_with_many_results(self):
persons = Person.objects.search('malkovich')
self.assertGreater(len(persons), 1)
p = persons[0]
self.assertEqual(p.id, 24508)
self.assertEqual(p.name, 'Джон Малкович')
self.assertEqual(p.year_birth, 1953)
self.assertEqual(p.name_en, 'John Malkovich')
p = persons[4]
self.assertEqual(p.name, 'Др. Марк Малкович III')
self.assertEqual(p.year_birth, 1930)
self.assertEqual(p.year_death, 2010)
def test_person_main_page_source(self):
p = Person(id=6245)
p.get_content('main_page')
self.assertEqual(p.id, 6245)
self.assertEqual(p.name, 'Джонни Депп')
self.assertEqual(p.year_birth, 1963)
self.assertEqual(p.name_en, 'Johnny Depp')
self.assertGreater(len(p.information), 50)
# career
self.assertGreaterEqual(len(p.career['actor']), 86)
self.assertGreaterEqual(len(p.career['producer']), 7)
self.assertGreaterEqual(len(p.career['director']), 3)
self.assertGreaterEqual(len(p.career['writer']), 1)
self.assertGreaterEqual(len(p.career['hrono_titr_male']), 11)
self.assertGreaterEqual(len(p.career['himself']), 124)
self.assertEqual(p.career['actor'][0].movie.title, 'Человек-невидимка')
self.assertEqual(p.career['actor'][0].movie.title_en, 'The Invisible Man')
self.assertEqual(p.career['actor'][0].name, 'Dr. Griffin')
self.assertEqual(p.career['actor'][1].movie.title, 'Ричард прощается')
self.assertEqual(p.career['actor'][1].movie.year, 2018)
self.assertEqual(p.career['actor'][1].movie.title_en, 'Richard Says Goodbye')
self.assertEqual(p.career['actor'][4].movie.title, 'Шерлок Гномс')
self.assertEqual(p.career['actor'][4].movie.title_en, 'Sherlock Gnomes')
self.assertEqual(p.career['actor'][4].movie.year, 2018)
self.assertEqual(p.career['actor'][4].name, 'Sherlock Gnomes') # voice
self.assertEqual(p.career['actor'][5].movie.title_en, 'Murder on the Orient Express')
self.assertAlmostEqual(p.career['actor'][5].movie.rating, 6.68)
self.assertGreaterEqual(p.career['actor'][5].movie.votes, 64162)
self.assertAlmostEqual(p.career['actor'][5].movie.imdb_rating, 6.6)
self.assertGreaterEqual(p.career['actor'][5].movie.imdb_votes, 70581)
self.assertEqual(p.career['actor'][6].name, 'Abel') # short
def test_person_photos_page_source(self):
p = Person(id=8217)
p.get_content('photos')
self.assertGreaterEqual(len(p.photos), 11)
def test_person_repr(self):
instance = Person(name='Чарльз Чаплин', name_en='Charles Chaplin', year_birth='1950')
self.assertEqual(instance.__repr__(), 'Чарльз Чаплин (Charles Chaplin), 1950')
|
#!/usr/bin/env python
"""
kmer extraction: a script to create clr kmer profiles from 4mers
usage: kmer_extraction.py infile outfile ksize
"""
import os
import sys
import csv
from Bio import SeqIO
from Bio.Seq import Seq
import skbio.stats.composition
import vica
def _write_kmers_as_csv(infile, outfile, ksize, kmers, ):
"""Calculate centered log ratio transformed transformed kmer compositions for sequences in a fasta
file.
Takes a multi-sequence fasta file and a list of kmers and calculates the
centered log-ratio transformed kmer composition for each sequence,
writing a CSV file with the data.
Args:
infile (str): a Fasta file
outfile (str): a path to a CSV output file
ksize (int): the kmer size, 4-8
kmers (list): A list of the kmers to count
Returns:
None
"""
    # The length of the non-redundant kmer vector for each k
try:
with open(infile, 'r') as f1:
with open(outfile, 'w', buffering=16777216) as csvfile:
mywriter = csv.writer(csvfile, lineterminator='\n')
header = ["id"]
header.extend(kmers)
recnum = 0
for record in SeqIO.parse(f1, 'fasta'):
rl = [record.id]
kmer_frequency = vica.khmer_features.get_composition(ksize=int(ksize),
seq=str(record.seq).upper(),
kmers=kmers)
                    kmer_z = skbio.stats.composition.multiplicative_replacement(kmer_frequency)
                    kmer_clr = skbio.stats.composition.clr(kmer_z)
                    rl.extend(kmer_clr)
mywriter.writerow(rl)
recnum += 1
print("Wrote {} kmer records to {}.".format(recnum, outfile))
except Exception as err:
print("Could not write kmer profiles to file")
raise err
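# Background note (added): the centered log-ratio transform used above is
#     clr(x)_i = ln(x_i / g(x)),   g(x) = (x_1 * x_2 * ... * x_D) ** (1 / D)
# i.e. each component is logged against the geometric mean of the composition,
# which is why zero counts must first be replaced (multiplicative_replacement)
# before clr() is applied.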
def main(infile, outfile, ksize):
kmers = vica.khmer_features.itrate_kmer(k=ksize, rc=True)
_write_kmers_as_csv(infile=infile, outfile=outfile, ksize=ksize, kmers=kmers)
if __name__ == '__main__':
main(infile=sys.argv[1], outfile=sys.argv[2], ksize=sys.argv[3])
|
#%% [markdown]
# Load XML File:
#%%
import os
import re
from lxml import etree, objectify
#%% [markdown]
# This is a testing comment
#%%
#xml_file = open(os.getcwd()+'/combined_output.xml')
# print('TYPE: ' + str(type(xml_file)))
all_xml_data = None
xml_file_path = os.getcwd()+'/combined_output.xml'
with open(os.getcwd()+'/combined_output.xml', 'r') as xml_file:
all_xml_data = xml_file.read()
print('String Length: %s' % len(all_xml_data))
#print('String Length:', len(all_xml_data))
#print('String Length:' + len(all_xml_data)) # Does not work
#%%
#tags = []
# for line in all_xml_data:
# #tags.append(re.search('<(.*)>', line))
# tags = re.split('<|>',line)
# #tags.append(line.find)
#Works somewhat
tags = re.split('<|>',all_xml_data)
print('Length: %i' % len(tags))
print('First Tag Length: %i' % len(tags[0]))
#print('Tags: %s' % tags)
#New Try
actual_tag = []
potential_tag = None
leading_tag = re.split('<',all_xml_data)
delimiter = '>'
potential_tag = [tag for tag in leading_tag if delimiter in tag]
#actual_tag = re.split(' |=|>', potential_tag)
#tag_list = tag
print('Leading Tags Length: %i' % len(leading_tag))
print('First Leading Tag: %s' % (leading_tag[0]))
print('2nd Leading Tag: %s' % (leading_tag[1]))
print('List of Potential Tags Length: %i' % len(potential_tag))
print('List of Actual Tags Length: %i' % len(actual_tag))
#print("\n".join(potential_tag))
#print("\n".join(leading_tag))
#print('List of Actual Tags: %s' % (actual_tag))
#print(tags)
#%% [markdown]
# Testing
parser = etree.XMLParser(remove_comments=True, recover=True)
#parser = objectify.makeparser(remove_comments=True)
tree = objectify.parse(xml_file_path, parser=parser)
#print(tree)
|
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
ana/pmt/ncsgconverter.py
===========================
Translation of detdesc XML defined analytic PMT into NCSG input serialization.
This probably predated GDML parsing, and is not much more than a curiosity,
although it may allow direct comparison of the same PMT geometry
using different generations of analytic approaches.
::
    tboolean-dd(){ TESTCONFIG=$(tboolean-dd- 2>/dev/null) tboolean-- $* ; }
    tboolean-dd-()
    {
        python $(tboolean-dir)/tboolean_dd.py \
              --csgpath $TMP/$FUNCNAME \
              --container $(tboolean-container) \
              --testobject $(tboolean-testobject)

        # got too long for here-string so broke out into script
    }
    tboolean-dd-check(){ tboolean-dd- 2> /dev/null ; }
    tboolean-dd-edit(){ vi $(tboolean-dir)/tboolean_dd.py ; }
    tboolean-dd-scan(){ SCAN="0,0,127.9,0,0,1,0,0.1,0.01" NCSGScanTest $TMP/tboolean-dd-/1 ; }
2658
::
delta:pmt blyth$ opticks-find NCSGConverter
./ana/pmt/ncsgconverter.py:class NCSGConverter(object):
./ana/pmt/ncsgconverter.py: cn = NCSGConverter.ConvertLV( tr.root.lv )
./tests/tboolean_dd.py:from opticks.ana.pmt.ncsgconverter import NCSGConverter
./tests/tboolean_dd.py: obj = NCSGConverter.ConvertLV( lv )
delta:opticks blyth$
"""
import os, logging, sys, math, numpy as np
log = logging.getLogger(__name__)
from opticks.ana.base import opticks_main
from opticks.analytic.csg import CSG
from ddbase import Dddb, Sphere, Tubs, Intersection, Union, Subtraction
from opticks.analytic.treebase import Tree
class NCSGConverter(object):
"""
Translate single volume detdesc primitives and CSG operations
into an NCSG style node tree
"""
@classmethod
def ConvertLV(cls, lv ):
"""
:param lv: Elem
:return cn: CSG node instance
"""
lvgeom = lv.geometry()
assert len(lvgeom) == 1, "expecting single CSG operator or primitive Elem within LV"
cn = cls.convert(lvgeom[0])
if lv.posXYZ is not None:
assert cn.transform is None, cn.transform
translate = "%s,%s,%s" % (lv.xyz[0], lv.xyz[1], lv.xyz[2])
cn.translate = translate
log.info("TranslateLV posXYZ:%r -> translate %s " % (lv.posXYZ, translate) )
pass
return cn
@classmethod
def convert(cls, node):
"""
:param node: instance of ddbase.Elem subclass
:return cn: CSG node
"""
assert node.is_operator ^ node.is_primitive, "node must either be operator or primitive "
cn = cls.convert_primitive(node) if node.is_primitive else cls.convert_operator(node)
return cn
@classmethod
def convert_Sphere(cls, en, only_inner=False):
"""
:param en: source element node
        :param only_inner: used to control/distinguish the internal recursive call handling the inner sphere
Prior to implementing zsphere with caps, tried using infinite slab
in boolean intersection with the sphere to effect the zslice.
But this approach unavoidably yields a cap, as switching off the
slab caps causes the intersection with the slab to yield nothing.
Hence proceeded to implement zsphere with cap handling.
* z-slice sphere primitive OR intersect with a slab ?
* r-range sphere primitive OR difference two spheres ?
* doing z and r both at once is problematic for param layout
"""
outerRadius = en.outerRadius.value
innerRadius = en.innerRadius.value
x = en.xyz[0]
y = en.xyz[1]
z = en.xyz[2]
has_inner = not only_inner and innerRadius is not None
if has_inner:
inner = cls.convert_Sphere(en, only_inner=True) # recursive call to make inner sphere
pass
radius = innerRadius if only_inner else outerRadius
assert radius, (radius, innerRadius, outerRadius, only_inner)
startThetaAngle = en.startThetaAngle.value
deltaThetaAngle = en.deltaThetaAngle.value
log.info("convert_Sphere outerRadius:%s innerRadius:%s radius:%s only_inner:%s has_inner:%s " % (outerRadius,innerRadius,radius, only_inner, has_inner))
zslice = startThetaAngle is not None or deltaThetaAngle is not None
if zslice:
if startThetaAngle is None:
startThetaAngle = 0.
if deltaThetaAngle is None:
deltaThetaAngle = 180.
# z to the right, theta 0 -> z=r, theta 180 -> z=-r
rTheta = startThetaAngle
lTheta = startThetaAngle + deltaThetaAngle
assert rTheta >= 0. and rTheta <= 180.
assert lTheta >= 0. and lTheta <= 180.
zmin = radius*math.cos(lTheta*math.pi/180.)
zmax = radius*math.cos(rTheta*math.pi/180.)
assert zmax > zmin, (startThetaAngle, deltaThetaAngle, rTheta, lTheta, zmin, zmax )
log.info("convert_Sphere rTheta:%5.2f lTheta:%5.2f zmin:%5.2f zmax:%5.2f azmin:%5.2f azmax:%5.2f " % (rTheta, lTheta, zmin, zmax, z+zmin, z+zmax ))
cn = CSG("zsphere", name=en.name, param=[x,y,z,radius], param1=[zmin,zmax,0,0], param2=[0,0,0,0] )
ZSPHERE_QCAP = 0x1 << 1 # zmax
ZSPHERE_PCAP = 0x1 << 0 # zmin
flags = ZSPHERE_QCAP | ZSPHERE_PCAP
cn.param2.view(np.uint32)[0] = flags
pass
else:
cn = CSG("sphere", name=en.name)
cn.param[0] = x
cn.param[1] = y
cn.param[2] = z
cn.param[3] = radius
pass
if has_inner:
ret = CSG("difference", left=cn, right=inner )
#ret = inner
#ret = cn
else:
ret = cn
pass
return ret
@classmethod
def convert_Tubs(cls, en):
cn = CSG("cylinder", name=en.name)
cn.param[0] = en.xyz[0]
cn.param[1] = en.xyz[1]
cn.param[2] = 0 ## hmm this z is ignored in NCylinder
cn.param[3] = en.outerRadius.value
zoff = en.xyz[2]
hz = en.sizeZ.value/2.
cn.param1[0] = -hz + zoff
cn.param1[1] = hz + zoff # direct offsetting or use transform ?
PCAP = 0x1 << 0
QCAP = 0x1 << 1
flags = PCAP | QCAP
cn.param1.view(np.uint32)[1] = flags
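        # PCAP|QCAP == 0b11 requests both end caps, packed into param1 via a
        # uint32 view (note: this overwrites the float written to param1[1]
        # above; the slot choice is assumed to match the NCylinder layout).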
return cn
@classmethod
def convert_primitive(cls, en):
convert_method_name = "convert_%s" % en.__class__.__name__
convert_method = getattr(cls, convert_method_name, None )
assert convert_method, "missing convert method: %s " % convert_method_name
#log.info("convert_primitive with %s " % convert_method_name )
cn = convert_method(en)
cn.elem = en # <-- temporary during dev, not used downstream
return cn
@classmethod
def convert_operator(cls, en):
"""
Source Elem xml tree CSG operator nodes with three children
have to be divided up to fit into binary CSG tree::
1
/ \
10 11
/ \
100 101
"""
op = en.__class__.__name__.lower()
assert op in ["intersection", "union", "difference"]
children = en.geometry()
nchild = len(children)
if nchild == 2:
cn = CSG(op, name=en.name)
cn.left = cls.convert(children[0])
cn.right = cls.convert(children[1])
elif nchild == 3:
cn = CSG(op, name=en.name)
ln = CSG(op, name=en.name + "_split3")
ln.left = cls.convert(children[0])
ln.right = cls.convert(children[1])
cn.left = ln
cn.right = cls.convert(children[2])
else:
assert 0, "CSG operator nodes must have 2 or 3 children"
pass
return cn
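    # Rebalancing sketch for the three-child case handled above:
    #   op(a, b, c)  ->  op( op(a, b), c )
    # children[0] and children[1] form the "_split3" subtree, which becomes
    # the left child of the node that also takes children[2] on the right.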
if __name__ == '__main__':
args = opticks_main(apmtidx=2)
g = Dddb.parse(args.apmtddpath)
lvn = "lvPmtHemi"
#lvn = "lvPmtHemiwPmtHolder"
lv = g.logvol_(lvn)
tr = Tree(lv)
cn = NCSGConverter.ConvertLV( tr.root.lv )
cn.dump()
|
"""Analog exceptions."""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
class AnalogError(RuntimeError):
"""Exception base class for all Analog errors."""
class MissingFormatError(AnalogError):
"""Error raised when ``Analyzer`` is called without format."""
class InvalidFormatExpressionError(AnalogError):
"""Error raised for invalid format regex patterns."""
class UnknownRendererError(AnalogError):
"""Error raised for unknown output format names (to select renderer)."""
|
# -*- encoding: utf-8 -*-
from debauto.remessa import Remessa
from debauto.utils import formata_data, formata_valor
class Caixa(Remessa):
"""
Caixa
"""
__a = "A{:1}{:20}{:20}{:3}{:20}{:8}{:6}{:2}{:17}{:45}{:0>7}\r\n"
__e = "E{:0>25}{:0<4}{:14}{:8}{:0<15}{:2}{:60}{:6}{:8}{:0>6}{:1}\r\n"
__z = "Z{:0>6}{:0>17}{:119}{:0>6}{:1}"
def __init__(self, *args, **kwargs):
super(Caixa, self).__init__(*args, **kwargs)
self.__cod_remessa = 1
self.__banco = "CAIXA"
self.__codigo = "104"
self.__versao = '04'
self.__identificacao = "DEB AUTOMAT"
@property
def banco(self):
return "%s" % self.__banco
def get_header(self):
""" retorna o header do arquivo """
cfg = self.configuracao
return self.__a.format(
            self.__cod_remessa,            # 1  - remittance code
            cfg.convenio,                  # 20 - agreement (convênio) code
            cfg.empresa,                   # 20 - company name
            self.__codigo,                 # 3  - bank code
            self.__banco,                  # 20 - bank name
            formata_data(cfg.vencimento),  # 8  - movement date
            cfg.sequencial,                # 6  - file sequence number
            self.__versao,                 # 2  - layout version
            self.__identificacao,          # 17 - service identification
            '', '0'
)
def get_debitos(self):
""" retorna as linhas e do arquivo """
linhas = []
for n, x in enumerate(self.debitos, 1):
linhas.append(self.__e.format(
x.identificacao,
x.agencia,
x.conta,
formata_data(x.vencimento),
formata_valor(x.valor),
x.moeda,
x.livre,
"",
"",
n,
x.tipo
))
return linhas
def get_trailler(self):
""" retorna o trailler do arquivo """
return self.__z.format(
self.quantidade() + 2,
formata_valor(self.valor_total()),
'',
self.quantidade() + 1,
''
)
def gerar_txt(self, path):
cfg = self.configuracao
nome = "%s_%s_%s.txt" % (self.banco, formata_data(cfg.vencimento), cfg.sequencial)
with open('%s%s' % (path, nome), 'w+') as f:
f.write(self.get_header())
            for linha in self.get_debitos():
                f.write(linha)
f.write(self.get_trailler())
def __repr__(self):
""" representação do objeto """
return "<Remessa: %s>" % self.banco
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import Matern
from mpl_toolkits.mplot3d import Axes3D
def target(x, y):
z = np.exp(-(x-2)**2)+np.exp(-(x-6)**2/5)+1/(x**2+1)+0.1*np.sin(5*x)-0.5
z += np.exp(-(y-2)**2)+np.exp(-(y-6)**2/5)+1/(y**2+1)+0.1*np.sin(5*y)-0.5
return z
def gp_fit(X, Y, x,y):
gp = GaussianProcessRegressor(kernel=Matern(nu=2.5),n_restarts_optimizer=25)
gp.fit(X, Y)
mu, sigma = gp.predict(x, return_std=True)
    print(mu.shape)
'''
fig = plt.figure(figsize=(16,10))
#gs = gridspec.GridSpec(2,1,height_ratios=[3,1])
#axis = plt.subplot(gs[0])
#acq = plt.subplot(gs[1])
plt.plot(x, y, linewidth=3, label='Target')
plt.plot(X.flatten(), Y, 'D', markersize=8, color='r', label='Observation')
plt.plot(x, mu, '--', color='k', label='Prediction')
plt.plot(x, np.zeros(x.shape[0]), linewidth=3, color='r', label='Prediction')
#axis.fill(np.concatenate([x, x[::-1]]), np.concatenate([mu-1.96*sigma, (mu+1.96*sigma)[::-1]]), alpha=0.6, fc='c', ec='None')
plt.show()
'''
def main():
X = ((np.random.random((100, 2))-0.5)*15)+2.5
Y = target(X[:,0], X[:,1])
    print(X.shape)
    print(Y.shape)
gp = GaussianProcessRegressor(kernel=Matern(nu=2.5),n_restarts_optimizer=25)
gp.fit(X, Y)
a = np.arange(-5, 10, 0.2)
b = np.arange(-5, 10, 0.2)
x = []
for i in a:
for j in b:
x.append([i,j])
    x = np.array(x)
    print(x.shape)
mu, sigma = gp.predict(x, return_std=True)
mu = mu.reshape((a.shape[0], b.shape[0]))
fig = plt.figure()
ax = Axes3D(fig)
ax.scatter(X[:,0], X[:,1], Y,s=40, c='r')
x = np.arange(-5, 10,0.2)
y = np.arange(-5, 10,0.2)
x,y = np.meshgrid(x,y)
    z = target(x, y)  # ground-truth surface, printed for shape comparison only
    print(z.shape)
    ax.plot_surface(x, y, mu, rstride=1, cstride=1, cmap='rainbow', alpha=0.5)
plt.show()
if __name__ == "__main__":
main()
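# A possible extension (sketch): reshape sigma onto the same grid and plot
# mu +/- 1.96*sigma surfaces to visualize the predictive uncertainty:
#   sigma_grid = sigma.reshape(mu.shape)
#   ax.plot_surface(x, y, mu + 1.96 * sigma_grid, alpha=0.2)
#   ax.plot_surface(x, y, mu - 1.96 * sigma_grid, alpha=0.2)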
|
class Author:
    def __init__(self, name, books, published_time):
        self.name = name
        self.books = books
        self.publishedTime = published_time
    def info(self):
        print("Writer name :", self.name.title())
        print("Published Year :", self.publishedTime.title())
    def show(self):
        # Print the book count, drop the last book, then print the new count.
        print(len(self.books))
        self.books.pop()
        print(len(self.books))
bookG = ['Mastery', 'Laws of power', 'art of Seduction']
greene = Author('Robert Greene', bookG, '1998')
greene.info()
greene.show()
print("\n")
bookM = ['Outliers', 'Blink', 'Tipping point']
malcolm = Author("Malcolm Gladwell", bookM, '2010')
malcolm.info()
malcolm.show()
|
import os
import random
import Tkinter
import Tkconstants
import tkFileDialog
import tkMessageBox
import DES
import RSA
def gen_primes(n):
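    # Naive trial division: keep each x in [2, n) that has no divisor in [2, x).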
return filter(lambda x: all(map(lambda p: x % p != 0, range(2, x))), range(2, n))
def num_to_binstr(num):
num = bin(num)[2:]
return '0' * (max(0, 128 - len(num))) + num
def binstr_to_num(binstr):
return int('0b' + binstr, 2)
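# Quick examples of the helpers above:
#   num_to_binstr(5)     -> '0...0101' (128 chars, zero-padded)
#   binstr_to_num('101') -> 5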
class UI(object):
def __init__(self):
self.ui_init()
self.logic_init()
def ui_init(self):
self.root = Tkinter.Tk()
self.root.title('DES+RSA Messaging System [by Linghao Zhang]')
# Row 0
Tkinter.Button(self.root, text='Generate RSA Keys', command=self.gen_rsa_key).grid(row=0, column=0, columnspan=8)
Tkinter.Label(self.root, text='Public Key').grid(row=1, column=0, columnspan=4)
Tkinter.Label(self.root, text='Private Key').grid(row=1, column=4, columnspan=4)
self.rsa_pubkey_text = Tkinter.Text(self.root, width=45, height=3)
self.rsa_pubkey_text.grid(row=2, column=0, columnspan=4)
self.rsa_prikey_text = Tkinter.Text(self.root, width=45, height=3)
self.rsa_prikey_text.grid(row=2, column=4, columnspan=4)
# Row 1
# Plain Text
Tkinter.Label(self.root, text='Plain Text').grid(row=3, column=0)
Tkinter.Button(self.root, text='Load', command=self.load_pt).grid(row=3, column=1)
self.pt_text = Tkinter.Text(self.root, width=30, height=10)
self.pt_text.grid(row=4, column=0, columnspan=2)
# Cipher Text
Tkinter.Label(self.root, text='Cipher Text').grid(row=3, column=3, columnspan=2)
self.ct_text = Tkinter.Text(self.root, width=30, height=10)
self.ct_text.grid(row=4, column=3, columnspan=2)
# Decrypted Text
Tkinter.Label(self.root, text='Decrypted Text').grid(row=3, column=6)
Tkinter.Button(self.root, text='Save', command=self.save_dt).grid(row=3, column=7)
self.dt_text = Tkinter.Text(self.root, width=30, height=10)
self.dt_text.grid(row=4, column=6, columnspan=2)
# Row 2
# DES Key
Tkinter.Label(self.root, text='DES Key').grid(row=6, column=0, columnspan=2)
self.des_key_text = Tkinter.Text(self.root, width=30, height=10)
self.des_key_text.grid(row=7, column=0, columnspan=2)
# Encrypted DES Key
Tkinter.Label(self.root, text='Encrypted DES Key').grid(row=6, column=3, columnspan=2)
self.enc_des_key_text = Tkinter.Text(self.root, width=30, height=10)
self.enc_des_key_text.grid(row=7, column=3, columnspan=2)
# Decrypted DES Key
Tkinter.Label(self.root, text='Decrypted DES Key').grid(row=6, column=6, columnspan=2)
self.dec_des_key_text = Tkinter.Text(self.root, width=30, height=10)
self.dec_des_key_text.grid(row=7, column=6, columnspan=2)
# Row 3
# Controls
Tkinter.Button(self.root, text='Generate DES Key', command=self.gen_des_key).grid(row=9, column=0, columnspan=2)
Tkinter.Button(self.root, text='Send Message', command=self.send_msg).grid(row=9, column=3, columnspan=2)
Tkinter.Button(self.root, text='Decrypt Message', command=self.decrypt_msg).grid(row=9, column=6, columnspan=2)
# Row 4
# Instructions
instructions = """
Instructions:
1. Press 'Generate RSA Keys'
2. Press 'Generate DES Key'
3. Load plain text file (MUST BE BINARY STRING)
        4. Press 'Send Message'
5. Press 'Decrypt Message'
6. You can save the decrypted message to file
"""
Tkinter.Label(self.root, text=instructions).grid(row=10, column=0, columnspan=8)
def logic_init(self):
self.open_opt = {}
self.open_opt['defaultextension'] = '.txt'
self.open_opt['filetypes'] = [('all files', '.*'), ('text files', '.txt')]
self.open_opt['initialdir'] = os.getcwd()
self.open_opt['parent'] = self.root
self.save_opt = self.open_opt.copy()
self.save_opt.update({'initialfile': 'message.txt'})
def read_from_file(self):
file = tkFileDialog.askopenfile(mode='r', **self.open_opt)
try:
lines = file.readlines()
        except Exception:
return None
return lines[0].strip()
def write_to_file(self, data):
file = tkFileDialog.asksaveasfile(mode='w', **self.save_opt)
try:
file.write(data + '\n')
        except Exception:
return None
def update_display(self, target, text):
target.delete(1.0, Tkinter.END)
target.insert(1.0, text)
def error(self, text):
tkMessageBox.showwarning('Error', text)
def load_pt(self):
pt = self.read_from_file()
if not pt:
return
try:
assert all(map(lambda ch: ch in ['0', '1'], pt))
except AssertionError:
self.error('Input must be a binary string.')
return
self.pt = pt
self.update_display(self.pt_text, self.pt)
def save_dt(self):
try:
assert self.dt
        except (AssertionError, AttributeError):
self.error('Nothing to save.')
return
self.write_to_file(self.dt)
def init_rsa(self):
self.rsa = RSA.RSA()
return True
def gen_rsa_key(self):
if not self.init_rsa():
self.error('RSA init failed.')
return
self.modulus, self.e = self.rsa.get_public_key()
self.modulus, self.d = self.rsa.get_private_key()
pubkey_str = 'e = ' + str(int('0b'+self.e, 2)) + '\n' + 'n = ' + str(int('0b'+self.modulus, 2))
prikey_str = 'd = ' + str(int('0b'+self.d, 2)) + '\n' + 'n = ' + str(int('0b'+self.modulus, 2))
self.update_display(self.rsa_pubkey_text, pubkey_str)
self.update_display(self.rsa_prikey_text, prikey_str)
def init_des(self, key):
self.des = DES.DES(key=key)
return True
def gen_des_key(self):
self.des_key = num_to_binstr(random.getrandbits(64))
self.init_des(self.des_key)
self.update_display(self.des_key_text, self.des_key)
def send_msg(self):
try:
assert self.pt
        except (AssertionError, AttributeError):
self.error('Plain Text not loaded.')
return
try:
assert self.rsa_pubkey_text
        except (AssertionError, AttributeError):
self.error('RSA Public Key not found.')
return
self.enc_des_key = self.rsa.encrypt(self.des_key, self.rsa.get_public_key())
self.update_display(self.enc_des_key_text, self.enc_des_key)
self.ct = self.des.encrypt(self.pt)
self.update_display(self.ct_text, self.ct)
def decrypt_msg(self):
try:
assert self.ct
        except (AssertionError, AttributeError):
self.error('Cipher Text not found.')
return
try:
assert self.enc_des_key
        except (AssertionError, AttributeError):
self.error('Encrypted DES Key not found.')
return
self.dec_des_key = self.rsa.decrypt(self.enc_des_key, self.rsa.get_private_key())
self.update_display(self.dec_des_key_text, self.dec_des_key)
self.dt = self.des.decrypt(self.ct)
self.update_display(self.dt_text, self.dt)
if __name__ == '__main__':
ui = UI()
ui.root.mainloop()
|
#!/usr/bin/env python
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A keytar webdriver test."""
import json
import logging
import signal
import subprocess
import time
import os
from selenium import webdriver
import unittest
import urllib2
import environment
class TestKeytarWeb(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.driver = environment.create_webdriver()
port = environment.reserve_ports(1)
keytar_folder = os.path.join(environment.vttop, 'test/cluster/keytar')
cls.flask_process = subprocess.Popen(
[os.path.join(keytar_folder, 'keytar.py'),
'--config_file=%s' % os.path.join(keytar_folder, 'test_config.yaml'),
'--port=%d' % port, '--password=foo'],
preexec_fn=os.setsid)
cls.flask_addr = 'http://localhost:%d' % port
@classmethod
def tearDownClass(cls):
os.killpg(cls.flask_process.pid, signal.SIGTERM)
cls.driver.quit()
def _wait_for_complete_status(self, timeout_s=180):
start_time = time.time()
while time.time() - start_time < timeout_s:
if 'Complete' in self.driver.find_element_by_id('results').text:
return
self.driver.refresh()
time.sleep(5)
self.fail('Timed out waiting for test to finish.')
def test_keytar_web(self):
self.driver.get(self.flask_addr)
req = urllib2.Request('%s/test_request?password=foo' % self.flask_addr)
req.add_header('Content-Type', 'application/json')
urllib2.urlopen(
req, json.dumps({'repository': {'repo_name': 'test/image'}}))
self._wait_for_complete_status()
logging.info('Dummy test complete.')
self.driver.find_element_by_partial_link_text('PASSED').click()
self.assertIn('Dummy output.',
self.driver.find_element_by_tag_name('body').text)
if __name__ == '__main__':
unittest.main()
|
import cvrp.const as const
import cvrp.learnHeuristic as LH
import os
startFolder = 'toExecute2'
arrivalFolder = 'resultats'
dFile = 'GoldenEye'
allinstances = os.listdir(startFolder)
allinstances.sort()
for fileInstance in allinstances:
    instance, demand, capacity = const.define(fileInstance, startFolder, arrivalFolder, dFile)
    LH.learning_heuristic()
|
"""
A vendor at a food court is in the process of automating his order management system.
The vendor serves the following menu – Veg Roll, Noodles, Fried Rice and Soup and also maintains the quantity available for each item. The customer can order any combination of items. The customer is provided the item if the requested quantity of item is available with the vendor.
Write a python program which implements the following functions.
place_order(*item_tuple): This function accepts the order placed by the customer. Consider it to be a variable length argument as each customer may have a different order.
The function should check whether the items requested are present in the vendor’s menu and if so, it should check whether the requested quantity is available for each by invoking the check_quantity_available() method.
The function should display appropriate messages for each item in the order for the below scenarios:
When the requested item is not available in vendor’s menu, display <Item Name> is not available
When the quantity requested by the customer is not available, display <Item Name> stock is over
When the requested quantity of the item is available with the vendor, display <Item Name> is available
check_quantity_available(index,quantity_requested): This function should check whether the requested quantity of the specified item is available. If so, it should reduce the quantity requested from the quantity available for that item and return True. Otherwise, it should return False.
Test your code by using the given sample inputs.
Verify your code by using the 2nd sample input(highlighted) given below:
+-----------------------------------------+---------------+------------------------+
| Menu and quantity available             | Items Ordered | Expected Output        |
+-----------------------------------------+---------------+------------------------+
| (Veg Roll, Noodles, Fried Rice, Soup)   | Veg Roll,2    | Veg Roll is available  |
| [2,200,250,3]                           | Noodles,2     | Noodles is available   |
+-----------------------------------------+---------------+------------------------+
| (Veg Roll, Noodles, Fried Rice, Soup)   | Fried Rice,2  |                        |
| [2,200,3,0]                             | Soup,1        |                        |
+-----------------------------------------+---------------+------------------------+
"""
#PF-Assgn-39
#This verification is based on string match.
#Global variables
menu=('Veg Roll','Noodles','Fried Rice','Soup')
quantity_available=[2,200,3,0]
'''This method accepts the item followed by the quantity required by a customer in the format item1, quantity_required, item2, quantity_required etc.'''
def place_order(*item_tuple):
item=[]
quant=[]
for i in range(0,len(item_tuple)):
if i%2==0:
item.append(item_tuple[i])
else:
quant.append(item_tuple[i])
for i,j in enumerate(item):
if j not in menu:
print(j + " is not available")
else:
i1=menu.index(j)
if check_quantity_available(i1,quant[i]):
print (j + " is available")
else:
print(j + " stock is over")
'''This method accepts the index position of the item requested by the customer in the quantity_available list, and the requested quantity of the item.'''
def check_quantity_available(index,quantity_requested):
if quantity_available[index]>=quantity_requested:
return True
else:
return False
#Provide different values for items ordered and test your program
#place_order("Veg Roll",2,"Noodles",2)
place_order("Fried Rice",2,"Soup",1)
|
def scoreboard(who_ate_what):
    scores = {'chickenwings': 5, 'hamburgers': 3, 'hotdogs': 2}
    return sorted((
        {'name': a.pop('name'),
         'score': sum(scores.get(k, 0) * v for k, v in a.items())}
        for a in who_ate_what), key=lambda b: (-b['score'], b['name']))
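# Example (a quick check):
#   scoreboard([{'name': 'Bob', 'chickenwings': 2, 'hotdogs': 1}])
#   -> [{'name': 'Bob', 'score': 12}]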
|
import sys
import torch
import utils
import dataloader
# generate submissions.csv file
def inference():
device = "cuda:0" if torch.cuda.is_available() else "cpu"
save_file = input("save model name : ")
try:
if torch.cuda.is_available():
model = torch.load(save_file, map_location={"cpu": "cuda:0"})
else:
model = torch.load(save_file, map_location={"cuda:0": "cpu"})
print("Success loading model")
except IOError:
print("Couldn't find model")
sys.exit(0)
print("best epoch was {}".format(model.info_dict['epoch']))
# 1783 : length of test data set
test_data_loader = dataloader.DataLoader(1783, test=True)
model.eval()
with torch.no_grad():
X, _ = test_data_loader.get_batch()
X = X.to(device)
output = model(X)
utils.generate_csv(output)
if __name__ == "__main__":
inference()
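# Note: torch.load's map_location dict remaps storages from the key device
# tag to the value device, so {"cuda:0": "cpu"} loads GPU-saved weights on
# a CPU-only machine, and {"cpu": "cuda:0"} moves CPU-saved weights to GPU.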
|
import pytest
from unittest import mock
import builtins
import numpy
def inner_numpy():
    a = input('')
    b = input('')
A = numpy.array(list(map(int, a.split())))
B = numpy.array(list(map(int, b.split())))
return numpy.inner(A, B)
def outer_numpy():
    a = input('')
b = input('')
A = numpy.array(list(map(int, a.split())))
B = numpy.array(list(map(int, b.split())))
return numpy.outer(A, B)
def test_inner():
    # side_effect feeds the two prompts in order: first a, then b.
    with mock.patch.object(builtins, 'input', side_effect=["0 1", "2 3"]):
        assert inner_numpy() == 3
def test_outer():
    with mock.patch.object(builtins, 'input', side_effect=["4 1", "2 3"]):
        assert (outer_numpy() == numpy.array([[8, 12], [2, 3]])).all()
|
from flask import Flask
from app.settings import DEBUG
app = Flask(__name__)
app.debug = DEBUG
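# imported last on purpose: the urls module needs the `app` object above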
from . import urls
|