text stringlengths 8 6.05M |
|---|
"""
l=0
for word in open('Book2.txt').read().split():
longg = ''
if len(word)>len(longg):
longg = word
# for w in open('book2.txt').read().split():
# if
print(longg)
"""
def Words():
    """Print and return the longest word found across Book1-3.txt.

    Returns:
        str: the longest whitespace-delimited word ('' if all files are empty).

    BUG FIX: the original scanned file3 with ``l.split()`` — the loop
    variable left over from the *file2* loop — so file3's words were never
    examined.  It also leaked all three file handles.
    """
    longest = ''
    for path in ('Book1.txt', 'Book2.txt', 'Book3.txt'):
        with open(path, 'r') as fh:
            for line in fh:
                for word in line.split():
                    if len(word) > len(longest):
                        longest = word
    print(longest)
    return longest


if __name__ == '__main__':
    Words()
|
class TreeNode:
    """Binary-tree node: a value plus optional left/right child links."""

    def __init__(self, x=0):
        # Children start detached; callers wire them up directly.
        self.left = None
        self.right = None
        self.val = x
def depth(root):
    """Return the height of the tree rooted at *root* (0 for an empty tree)."""
    if root is None:  # was `== None`; identity test is the idiom
        return 0
    return max(depth(root.left), depth(root.right)) + 1


def isBalanced(root):
    """Return True when every node's child subtree heights differ by <= 1.

    NOTE: recomputing depth() at every node makes this O(n^2) worst case;
    kept to preserve the original recursive structure.
    """
    if root is None:
        return True
    # abs() reads clearer than the original `(n1-n2) in range(-1, 2)`.
    if abs(depth(root.left) - depth(root.right)) > 1:
        return False
    return isBalanced(root.left) and isBalanced(root.right)
# Build the sample tree and report whether it is height-balanced.
t = TreeNode(1)
t.left = TreeNode(2)
t.right = TreeNode(3)
t.right.left = TreeNode()
t.right.right = TreeNode()
t.right.right.right = TreeNode()
t.left.left = TreeNode()
# BUG FIX: the original used Python-2 `print isBalanced(t)` statement
# syntax, a SyntaxError under Python 3.
print(isBalanced(t))
|
#!/usr/bin/env python3
import sys
def fizzbuzz(length):
    """Return the FizzBuzz sequence for the integers 0 .. length-1.

    Multiples of both 3 and 5 map to 'FizzBuzz', of 3 alone to 'Fizz',
    of 5 alone to 'Buzz'; every other index stays an int.  (Index 0 is a
    multiple of both, so it maps to 'FizzBuzz'.)
    """
    def label(n):
        # Check 15 first so it takes precedence over 3 and 5.
        if n % 15 == 0:
            return 'FizzBuzz'
        if n % 3 == 0:
            return 'Fizz'
        if n % 5 == 0:
            return 'Buzz'
        return n

    return [label(n) for n in range(length)]
def main(argv):
    """Build and print a FizzBuzz list; argv[0] is the element count."""
    count = argv[0]
    print('Creating FizzBuzz list of %s elements...' % count)
    print(fizzbuzz(int(count)))


if __name__ == "__main__":
    main(sys.argv[1:])
|
# Copyright 2014 Symantec.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib2
import json
import uuid
from nova.openstack.common import log as logging
from oslo.config import cfg
LOG = logging.getLogger(__name__)
designate_restopts = [
cfg.StrOpt('contrail_endpoint',
default='',
help='Contrail API service endpoint url'),
]
CONF = cfg.CONF
CONF.register_opts(designate_restopts)
class ContrailRest():
    """Minimal REST client for the Contrail API (Python 2 codebase).

    Given a tenant id (and optionally a keystone token), it resolves the
    tenant's project network info, derives the tenant's virtual-DNS
    fully-qualified name and UUID, and finally returns the DNS zone
    (domain name) configured for that tenant.  All methods return None
    on any failure and log the error instead of raising.
    """

    def __init__(self, tenant_id, tenant_token=None):
        # Normalise to canonical UUID string form; raises ValueError for
        # malformed tenant ids.
        self.tenant_id = str(uuid.UUID(tenant_id))
        # Optional auth token, forwarded as X-Auth-Token on every request.
        self.token = tenant_token

    def _get_project_nw_info_by_tenant(self):
        """GET <endpoint>/project/<tenant_id>; return the 'project' dict or None."""
        try:
            req_headers = {"Content-Type": "application/json"}
            if self.token:
                req_headers["X-Auth-Token"] = str(self.token)
            contrail_ep = CONF.contrail_endpoint
            if contrail_ep:
                uri = contrail_ep + '/project/' + self.tenant_id
                LOG.debug("Contrail RESTAPI URI - %s" % uri)
            else:
                # Endpoint not configured (default is '') — nothing to call.
                LOG.error("Contrail RESTAPI please provide a valid "
                          "contrail API endpoint")
                return None
            h = httplib2.Http()
            resp, content = h.request(uri, "GET", headers=req_headers)
            LOG.debug("Contrail RESTAPI Resp - %s" % str(resp))
            LOG.debug("Contrail RESTAPI Content - %s" % str(content))
            if content:
                content = json.loads(content)
            # httplib2 reports status as a string, hence the ['200'] check.
            if 'status' in resp and resp['status'] in ['200']:
                return content["project"]
            else:
                LOG.error("Contrail RESTAPI generating project nw info from "
                          "Contrail failed")
        except Exception, e:
            error_dict = {'error': str(e), 'tenantID': str(self.tenant_id)}
            LOG.error("Contrail RESTAPI Error generating project nw info from "
                      "Contrail for tenant:%(tenantID)s . Error: %(error)s" % error_dict)
        return None

    def _generate_vDNS_fqname(self):
        """Derive the tenant's virtual-DNS fq-name as [domain, '<project>-virtual-DNS'].

        Returns the two-element list, or None if project info is missing
        or lacks the expected 'fq_name' structure.
        """
        project_nw_info = self._get_project_nw_info_by_tenant()
        if project_nw_info:
            try:
                # fq_name[0] is the domain, fq_name[1] the project name.
                project_vDNS = str(project_nw_info['fq_name'][1]) + "-virtual-DNS"
                fq_name = [project_nw_info['fq_name'][0], project_vDNS]
                return fq_name
            except Exception, e:
                error_dict = {'error': str(e), 'tenantID': str(self.tenant_id)}
                LOG.error("Contrail RESTAPI Error generating vDNS fq name for tenant: "
                          "%(tenantID)s . Error: %(error)s" % error_dict)
        else:
            LOG.error("Contrail RESTAPI Project network info is required for generating"
                      "fq name.")
        return None

    def _generate_vDNS_id_by_fqname(self):
        """POST the fq-name to <endpoint>/fqname-to-id; return the vDNS UUID or None."""
        vDNS_fqname = self._generate_vDNS_fqname()
        if vDNS_fqname:
            try:
                req_headers = {"Content-Type": "application/json"}
                if self.token:
                    req_headers["X-Auth-Token"] = str(self.token)
                contrail_ep = CONF.contrail_endpoint
                if contrail_ep:
                    uri = contrail_ep + '/fqname-to-id'
                    LOG.debug("Contrail RESTAPI URI - %s" % uri)
                else:
                    LOG.error("Contrail RESTAPI please provide a valid "
                              "contrail API endpoint")
                    return None
                req_body = {
                    "type": "virtual-DNS",
                    "fq_name": vDNS_fqname
                }
                req_body = json.dumps(req_body)
                h = httplib2.Http()
                resp, content = h.request(uri, "POST", headers=req_headers,
                                          body=req_body)
                LOG.debug("Contrail RESTAPI Resp - %s" % str(resp))
                LOG.debug("Contrail RESTAPI Content - %s" % str(content))
                if content:
                    content = json.loads(content)
                if 'status' in resp and resp['status'] in ['200']:
                    return content["uuid"]
                else:
                    LOG.error("Contrail RESTAPI generating vDNS ID from "
                              "Contrail failed")
            except Exception, e:
                error_dict = {'error': str(e), 'tenantID': str(self.tenant_id)}
                LOG.error("Contrail RESTAPI Error generating vDNS ID for tenant: "
                          "%(tenantID)s . Error: %(error)s" % error_dict)
        else:
            LOG.error("Contrail RESTAPI vDNS fqname is required for generating"
                      "vDNS ID.")
        return None

    def generate_tenant_dns_zone(self):
        """Return the tenant's DNS zone (virtual-DNS domain name) or None.

        Public entry point: chains project-info -> fq-name -> vDNS id,
        then GETs <endpoint>/virtual-DNS/<id> and extracts the configured
        domain name.
        """
        vDNS_id = self._generate_vDNS_id_by_fqname()
        if vDNS_id:
            try:
                req_headers = {"Content-Type": "application/json"}
                if self.token:
                    req_headers["X-Auth-Token"] = str(self.token)
                contrail_ep = CONF.contrail_endpoint
                if contrail_ep:
                    uri = contrail_ep + '/virtual-DNS/' + vDNS_id
                    LOG.debug("Contrail RESTAPI URI - %s" % uri)
                else:
                    LOG.error("Contrail RESTAPI please provide a valid "
                              "contrail API endpoint")
                    return None
                h = httplib2.Http()
                resp, content = h.request(uri, "GET", headers=req_headers)
                LOG.debug("Contrail RESTAPI Resp - %s" % str(resp))
                LOG.debug("Contrail RESTAPI Content - %s" % str(content))
                if content:
                    content = json.loads(content)
                if 'status' in resp and resp['status'] in ['200']:
                    return str(content["virtual-DNS"]["virtual_DNS_data"]["domain_name"])
                else:
                    LOG.error("Contrail RESTAPI generating vDNS ID from "
                              "Contrail failed")
            except Exception, e:
                error_dict = {'error': str(e), 'tenantID': str(self.tenant_id)}
                LOG.error("Contrail RESTAPI Error generating dns zone for tenant: "
                          "%(tenantID)s . Error: %(error)s" % error_dict)
        else:
            LOG.error("Contrail RESTAPI vDNS ID is required for generating tenant "
                      "dns zone.")
        return None
|
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Read data/labels and build x_train / y_train.
df = pd.read_csv("../datas/dot.csv")
x_data = np.array(df[['x1', 'x2']])
y_data = np.array(df['y_c'])
x_train = np.vstack(x_data).reshape(-1, 2)
y_train = np.vstack(y_data).reshape(-1, 1)
# Colour labels for the scatter plot: class 1 -> red, class 0 -> blue.
Y_c = [['red' if y else 'blue'] for y in y_train]
# Cast features/labels to float32 for TensorFlow.
x_train = tf.cast(x_train, tf.float32)
y_train = tf.cast(y_train, tf.float32)
# from_tensor_slices pairs each feature row with its label; batches of 32.
train_db = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(32)
# Network parameters: 2 inputs -> one hidden layer of 11 neurons -> 1 output.
# tf.Variable makes them trainable.
w1 = tf.Variable(tf.random.normal([2, 11]), dtype=tf.float32)
b1 = tf.Variable(tf.constant(0.01, shape=[11]))
w2 = tf.Variable(tf.random.normal([11, 1]), dtype=tf.float32)
b2 = tf.Variable(tf.constant(0.01, shape=[1]))
lr = 0.005
epoch = 1000
# Training loop.
# NOTE(review): `for epoch in range(epoch)` reuses/shadows the epoch-count
# variable as the loop index — works, but confusing.
for epoch in range(epoch):
    for step, (x_train, y_train) in enumerate(train_db):
        with tf.GradientTape() as tape:
            h1 = tf.matmul(x_train, w1) + b1
            h1 = tf.nn.relu(h1)
            y = tf.matmul(h1, w2) + b2
            # Mean-squared-error loss: mse = mean(sum(y-out)^2)
            loss = tf.reduce_mean(tf.square(y_train - y))
        # Gradients of the loss w.r.t. every parameter.
        variables = [w1, b1, w2, b2]
        grads = tape.gradient(loss, variables)
        # Gradient-descent update, e.g. w1 = w1 - lr * w1_grad;
        # grads[0..3] line up with [w1, b1, w2, b2].
        w1.assign_sub(lr * grads[0])
        b1.assign_sub(lr * grads[1])
        w2.assign_sub(lr * grads[2])
        b2.assign_sub(lr * grads[3])
    # Print loss every 200 epochs (the original comment said 20).
    if epoch % 200 == 0:
        print('epoch:', epoch, 'loss', float(loss))
# Prediction over a grid of points.
print("********predict**************")
# xx and yy cover [-3, 3] with step 0.1 (the original comment said 0.01).
xx, yy = np.mgrid[-3:3:.1, -3:3:.1]
# Flatten xx/yy and pair them into 2-D coordinate points.
grid = np.c_[xx.ravel(), yy.ravel()]
grid = tf.cast(grid, tf.float32)
# Feed every grid point through the trained network; probs holds outputs.
probs = []
for x_test in grid:
    # Predict with the trained parameters.
    h1 = tf.matmul([x_test], w1) + b1
    h1 = tf.nn.relu(h1)
    y = tf.matmul(h1, w2) + b2
    probs.append(y)
# Column 0 -> x1, column 1 -> x2.
x1 = x_data[:, 0]
x2 = x_data[:, 1]
# Reshape probs to match xx for contouring.
probs = np.array(probs).reshape(xx.shape)
plt.scatter(x1, x2, color=np.squeeze(Y_c))  # squeeze drops the length-1 dims: [['red'],['blue']] -> ['red','blue']
# Contour the grid at probs == 0.5 to draw the decision boundary.
plt.contour(xx, yy, probs, levels=[.5])
plt.show()
|
import math  # NOTE(review): unused in this script
# Advent of Code 2020 day 7 input; path is machine-specific.
inputFile = open("/Users/samuelcordano/Documents/adventOfCode/Day7_HandyHaversacks/inputFile.txt","r")
Lines = inputFile.readlines()
class bag:
    """Graph node: a bag colour with (name, count) child links and parent names."""

    def __init__(self, name, childBags, parentBags) -> None:
        self.name = name
        self.childBags = childBags      # list of (child name, count) tuples
        self.parentBags = parentBags    # list of parent bag names
        self.visited = False            # traversal flag; unused by the count below

    def __str__(self):
        return (f"name: {self.name} | childBags: {self.childBags}"
                f"| parentBags: {self.parentBags}| visited: {self.visited}")


# Global registry mapping bag name -> bag node.
listOfBags = {}


def createGraph(lines=None):
    """Parse Handy-Haversacks rules into the global ``listOfBags`` graph.

    Each rule looks like:
        "light red bags contain 1 bright white bag, 2 muted yellow bags."

    :param lines: iterable of rule strings; defaults to the module-level
        ``Lines`` read from the input file (backward compatible).
    """
    if lines is None:
        lines = Lines
    for line in lines:
        currentInput = line.strip()
        currentBag = currentInput.split(" bags")[0]
        currentChildBags = currentInput.split(" contain ", 1)[1]
        # Normalise singular "bag" to "bags" so one split pattern works.
        currentChildBags = currentChildBags.replace(" bag.", " bags.")
        currentChildBags = currentChildBags.split(" bags.")[0]
        currentChildBags = currentChildBags.replace(" bag, ", " bags, ")
        currentChildBags = currentChildBags.split(" bags, ")
        if currentChildBags == ['no other']:
            currentChildBags = []
        else:
            # Each element looks like "3 dotted black": digit, space, name.
            # NOTE(review): element[0] only handles single-digit counts.
            currentChildBagsNumbers = [int(element[0]) for element in currentChildBags]
            currentChildBagsNames = [element[2:] for element in currentChildBags]
            currentChildBags = list(zip(currentChildBagsNames, currentChildBagsNumbers))
        # Create or refresh the node for the current bag.
        if currentBag in listOfBags:
            listOfBags[currentBag].childBags = currentChildBags
        else:
            listOfBags[currentBag] = bag(currentBag, currentChildBags, [])
        # Register the current bag as parent of each child.
        # BUG FIX: the original keyed listOfBags by the whole (name, count)
        # tuple here, polluting the dict with tuple keys and dropping the
        # parent links from the real name-keyed nodes.
        for childName, _count in currentChildBags:
            if childName not in listOfBags:
                listOfBags[childName] = bag(childName, [], [currentBag])
            else:
                listOfBags[childName].parentBags.append(currentBag)
# Accumulator left over from an earlier approach; nothing appends to it now.
numberOfBagsContained = []


def findAllBagsContained(originalBag):
    """Recursively count how many bags a single *originalBag* must contain.

    Looks the bag up in the global ``listOfBags`` graph; a bag with no
    children contributes 0, otherwise each child contributes its count
    plus count times its own contents.
    """
    node = listOfBags.get(originalBag)
    children = node.childBags
    if not children:
        print(f"The originalBag is: {originalBag}. His Child Bags are: {children}")
        print(f" ")
        return 0
    resultArray = [
        int(count) + int(count) * int(findAllBagsContained(name))
        for name, count in children
    ]
    result = sum(resultArray)
    print(f"The originalBag is: {originalBag}. His Child Bags are: {children}")
    print(f"Number of bags in 1 {originalBag} is: {result}.")
    print(f"resultArray: {resultArray}")
    print(f" ")
    return result
# Build the bag graph from the input, then count the bags inside "shiny gold".
createGraph()
print("TESTING")
#for individualBag in listOfBags:
#    currentBagObject = listOfBags.get(individualBag)
#    print(currentBagObject)
finalResult = findAllBagsContained("shiny gold")
print(f"finalResult is: {finalResult}")
# NOTE(review): numberOfBagsContained is never appended to, so this loop
# does nothing and testResult stays 1.
testResult = 1
for x in numberOfBagsContained:
    x = int(x)
    testResult = testResult * x
print(f"testResult is: {testResult}")
|
# For each test case: read three equal-length strings a, b, c.  At every
# index, swap c[i] into whichever of a/b it matches so that a[i] == b[i];
# if c[i] matches neither, equality at i is impossible.  Answer YES when
# a == b after processing every index.
test_case = int(input())
for _ in range(test_case):
    a, b, c = [list(input()) for i in range(3)]
    # print(a, b, c)
    for i in range(len(a)):
        if a[i] == c[i]:
            # c already matches a: copy that char into b via the swap.
            b[i], c[i] = c[i], b[i]
        elif b[i] == c[i]:
            # c matches b: copy it into a.
            a[i], c[i] = c[i], a[i]
        else:
            # c matches neither; after this swap a[i] != b[i], forcing NO.
            a[i], c[i] = c[i], a[i]
    # print(''.join(a), ''.join(b), ''.join(c))
    print('YES' if (a == b) else 'NO')
|
import os
import sys
import fam
def extract(datadir, subst_model, index):
    """Copy the index-th ML tree of every family into its own gene-tree file.

    :param datadir: dataset directory understood by the ``fam`` helpers
    :param subst_model: substitution model name used to locate tree files
    :param index: 0-based line index of the tree to extract
    :raises IndexError: if a family's tree file has fewer than index+1 lines
    """
    for family in fam.get_families_list(datadir):
        trees_file = fam.get_raxml_multiple_trees(datadir, subst_model, family)
        # Use context managers — the original leaked both file handles.
        with open(trees_file) as fh:
            tree = fh.readlines()[index]
        output = fam.build_gene_tree_path(datadir, subst_model, family,
                                          "raxml-ng-" + str(index))
        with open(output, "w") as out:
            out.write(tree)
if __name__ == "__main__":
    # Require exactly three CLI arguments: datadir, subst_model, index.
    if len(sys.argv) != 4:
        print("Syntax python " + os.path.basename(__file__) + " datadir subst_model index")
        sys.exit(1)
    datadir, subst_model = sys.argv[1], sys.argv[2]
    index = int(sys.argv[3])
    extract(datadir, subst_model, index)
|
import gym
import math
import random
import numpy as np
import tensorflow as tf
import matplotlib.pylab as plt
# Epsilon-greedy exploration bounds: epsilon decays from MAX to MIN.
MAX_EPSILON = 1
MIN_EPSILON = 0.01
# Decay rate of the exponential epsilon schedule (see GameRunner.run).
LAMBDA = 0.0001
# Discount factor applied to future rewards in the Q-target.
GAMMA = 0.99
# Minibatch size drawn from replay memory per training step.
BATCH_SIZE = 50
class Model:
    """Feed-forward Q-network: state in, one Q-value per action out.

    NOTE: built on the TensorFlow 1.x graph API (tf.placeholder,
    tf.layers, tf.train); every call needs an active tf.Session.
    """

    def __init__(self, num_states, num_actions, batch_size):
        '''
        Model definition is the number of states of the environment (2 in this game),
        the number of possible actions (3 in this game) and the batch size.
        '''
        self._num_states = num_states
        self._num_actions = num_actions
        self._batch_size = batch_size
        # place holders (filled in by _define_model)
        self._states = None
        self._actions = None
        # output operations
        self._logits = None
        self._optimizer = None
        self._var_init = None
        # setting up the model
        self._define_model()

    def _define_model(self):
        """Build the graph: two 50-unit ReLU layers, MSE loss, Adam optimizer."""
        # Placeholders for the state data and the Q(s,a) training targets.
        self._states = tf.placeholder(shape =[None, self._num_states], dtype = tf.float32)
        self._q_s_a = tf.placeholder(shape =[None, self._num_actions], dtype = tf.float32)
        # fully connected hidden layers
        fc1 = tf.layers.dense(self._states, 50, activation=tf.nn.relu)
        fc2 = tf.layers.dense(fc1, 50, activation=tf.nn.relu)
        # Linear output layer: one Q-value per action.
        self._logits = tf.layers.dense(fc2, self._num_actions)
        loss = tf.losses.mean_squared_error(self._q_s_a, self._logits)
        self._optimizer = tf.train.AdamOptimizer().minimize(loss)
        self._var_init = tf.global_variables_initializer()

    def predict_one(self, state, sess):
        """Return the Q-values for a single state (reshaped to a 1-row batch)."""
        return sess.run(self._logits, feed_dict = {self._states: state.reshape(1, self.num_states)})

    def predict_batch(self, states, sess):
        """Return the Q-values for a batch of states."""
        return sess.run(self._logits, feed_dict = {self._states: states})

    def train_batch(self, sess, x_batch, y_batch):
        """Run one optimizer step on (states, Q-target) pairs."""
        sess.run(self._optimizer, feed_dict = {self._states: x_batch, self._q_s_a: y_batch})

    @property
    def num_states(self):
        return self._num_states

    @property
    def num_actions(self):
        return self._num_actions

    @property
    def batch_size(self):
        return self._batch_size

    @property
    def var_init(self):
        # Variable-initializer op; run once per session before training.
        return self._var_init
"""
control the maximum number of (state, action, reward, next_state) tuples the _samples list can hold.
it ensures better random mixing of the samples, but you have to make sure you don’t run into memory errors.
> add_sample(): takes an individual (state, action, reward, next_state) tuple and appends it to the _samples list.
- a check is made – if the number of samples is now larger than the allowable memory size,
the first element in _samples is removed using the Python .pop() list functionality.
> sample(): returns a random selection of no_samples in length.
"""
class Memory:
    """Fixed-capacity FIFO replay buffer of experience tuples."""

    def __init__(self, max_memory):
        # Oldest samples are evicted first once capacity is exceeded.
        self._max_memory = max_memory
        self._samples = []

    def add_sample(self, sample):
        """Append *sample*; evict the oldest entry when over capacity."""
        self._samples.append(sample)
        while len(self._samples) > self._max_memory:
            del self._samples[0]

    def sample(self, no_samples):
        """Return up to *no_samples* entries drawn uniformly without replacement."""
        count = min(no_samples, len(self._samples))
        return random.sample(self._samples, count)
class GameRunner:
    """Runs episodes of the gym environment, filling replay memory and
    training the model after every step (DQN-style loop)."""

    def __init__(self, sess, model, env, memory, max_eps, min_eps, decay, render = True):
        """
        max_eps and min_eps dictate the maximum and minimum epsilon values
        respectively - during training the actual epsilon decays from the
        maximum to the minimum based on the decay argument.
        render is a boolean which determines whether the game environment
        is rendered to the screen.
        """
        self._sess = sess
        self._env = env
        self._model = model
        self._memory = memory
        self._render = render
        self._max_eps = max_eps
        self._min_eps = min_eps
        self._decay = decay  # NOTE(review): unused; run() decays with the global LAMBDA
        self._eps = self._max_eps
        self._steps = 0
        # Per-episode diagnostics, exposed via the properties below.
        self._reward_store = []
        self._max_x_store = []

    def run(self):
        """Play one episode: act, store the transition, train, decay epsilon."""
        state = self._env.reset()  # call open AI gym and reset it
        tot_reward = 0
        max_x = -100
        while True:
            if self._render:
                self._env.render()
            action = self._choose_action(state)
            # env.step returns the new state, the reward for the action,
            # a done flag, and an info object (unused here).
            next_state, reward, done, info = self._env.step(action)
            # Reward shaping by cart position.
            # NOTE(review): since any position >= 0.25 also satisfies
            # >= 0.1, the first branch always wins and the +20/+100
            # branches are unreachable — the comparisons are likely meant
            # to run from largest threshold to smallest.
            if next_state[0] >= 0.1:
                reward += 10
            elif next_state[0] >= 0.25:
                reward += 20
            elif next_state[0] >= 0.5:
                reward += 100
            if next_state[0] >= max_x:
                max_x = next_state[0]
            # Terminal transitions are stored with next_state = None.
            if done:
                next_state = None
            self._memory.add_sample((state, action, reward, next_state))
            self._replay()
            # exponentially decay the eps value
            self._steps += 1
            self._eps = MIN_EPSILON + (MAX_EPSILON - MIN_EPSILON) * math.exp(-LAMBDA * self._steps)
            # move the agent to the next state and accumulate the reward
            state = next_state
            tot_reward += reward
            # if the game is done, break the loop
            if done:
                self._reward_store.append(tot_reward)
                self._max_x_store.append(max_x)
                break
        print("Step {}, Total reward: {}, Eps: {}".format(self._steps, tot_reward, self._eps))

    def _choose_action(self, state):
        """
        Epsilon-greedy policy: with probability _eps pick a uniformly
        random action; otherwise pick the argmax of the network's Q-values
        (the action with the highest expected current + future discounted
        reward).
        """
        if random.random() < self._eps:
            return random.randint(0, self._model.num_actions - 1)
        else:
            return np.argmax(self._model.predict_one(state, self._sess))

    def _replay(self):
        """Sample a minibatch from memory and fit Q-targets to it."""
        batch = self._memory.sample(self._model.batch_size)
        states = np.array([val[0] for val in batch])
        # Terminal transitions (next_state None) are replaced by zeros.
        next_states = np.array([(np.zeros(self._model.num_states) if val[3] is None else val[3]) for val in batch])
        # predict Q(s,a) given the batch of states
        q_s_a = self._model.predict_batch(states, self._sess)
        # predict Q(s',a') - so that we can do gamma * max(Q(s'a')) below
        q_s_a_d = self._model.predict_batch(next_states, self._sess)
        # setup training arrays
        x = np.zeros((len(batch), self._model.num_states))
        y = np.zeros((len(batch), self._model.num_actions))
        for i, b in enumerate(batch):
            state, action, reward, next_state = b[0], b[1], b[2], b[3]
            # get the current q values for all actions in state
            current_q = q_s_a[i]
            # update the q value for action
            if next_state is None:
                # in this case, the game completed after action, so there is no max Q(s',a')
                # prediction possible
                current_q[action] = reward
            else:
                current_q[action] = reward + GAMMA * np.amax(q_s_a_d[i])
            x[i] = state
            y[i] = current_q
        self._model.train_batch(self._sess, x, y)

    @property
    def reward_store(self):
        # Total shaped reward per completed episode.
        return self._reward_store

    @property
    def max_x_store(self):
        # Furthest position reached per completed episode.
        return self._max_x_store
if __name__ == '__main__':
    env_name = 'MountainCar-v0'
    env = gym.make(env_name)
    # Environment dimensions drive the network topology.
    num_states = env.env.observation_space.shape[0]
    num_actions = env.env.action_space.n
    model = Model(num_states, num_actions, BATCH_SIZE)
    mem = Memory(50000)
    with tf.Session() as sess:
        sess.run(model.var_init)
        gr = GameRunner(sess, model, env, mem, MAX_EPSILON, MIN_EPSILON, LAMBDA)
        num_episodes = 300
        cnt = 0
        while cnt < num_episodes:
            if cnt % 10 == 0:
                print('Episode {} of {}'.format(cnt+1, num_episodes))
            gr.run()
            cnt += 1
        # BUG FIX: was `gr.reeward_store` — an AttributeError at runtime.
        plt.plot(gr.reward_store)
        plt.show()
        plt.close("all")
        plt.plot(gr.max_x_store)
        plt.show()
|
from flask import Flask,jsonify
import datetime as dt
# Python SQL toolkit and Object Relational Mapper
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from sqlalchemy.pool import StaticPool
# SQLite engine; check_same_thread disabled + StaticPool so the single
# module-level session can be shared across Flask request threads.
engine = create_engine("sqlite:///Resources/hawaii.sqlite", connect_args={'check_same_thread': False}, poolclass=StaticPool, echo=True)
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# Save references to each table
Measurement = Base.classes.measurement
Station = Base.classes.station
# Create our session (link) from Python to the DB
session = Session(engine)
# One year before the dataset's last observation date (2017-08-23).
query_date = dt.date(2017, 8, 23) - dt.timedelta(days=365)
# Queries below run once at import time; routes serve the cached results.
prcp = session.query(Measurement.prcp, Measurement.date).filter(Measurement.date>query_date).order_by\
    (Measurement.date).all()
station = session.query(Measurement.station).all()
USC00519281_temps = session.query(Measurement.date, Measurement.tobs).filter(Measurement.station == 'USC00519281').\
    filter(Measurement.date>query_date).order_by(Measurement.date).all()
# Flask application exposing the pre-computed climate queries above.
app = Flask(__name__)


@app.route("/")
def home():
    """List every route this API serves (implicitly-concatenated f-strings)."""
    return (
        f"Routes in this api:<br/><br/>"
        f"/api/v1.0/precipitation<br/>"
        f"/api/v1.0/stations<br/>"
        f"/api/v1.0/tobs<br/>"
        f"/api/v1.0/start<br/>"
        f"/api/v1.0/start/end"
    )
@app.route("/api/v1.0/precipitation")
def precipitation():
    """Return the cached last-year (prcp, date) rows as JSON."""
    return jsonify(prcp)
@app.route("/api/v1.0/stations")
def stations():
    """Return every station id recorded in the measurement table as JSON."""
    return jsonify(station)
@app.route("/api/v1.0/tobs")
def tobs():
    """Return the cached last-year (date, tobs) rows for station USC00519281."""
    return jsonify(USC00519281_temps)
@app.route("/api/v1.0/<start>")
def start(start):
    """Return min/avg/max temperature from a YYYY-MM-DD start date onward.

    NOTE(review): 365 days are subtracted from the *user-supplied* start
    date before filtering — this mirrors the query_date pattern above but
    looks unintended for an arbitrary start parameter; confirm.
    """
    start_date = dt.datetime.strptime (start, "%Y-%m-%d") - dt.timedelta(days=365)
    temp = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
        filter(Measurement.date >= start_date).all()
    return jsonify(temp)
@app.route("/api/v1.0/<start>/<end>")
def startEnd(start, end):
    """Return min/avg/max temperature between two YYYY-MM-DD dates.

    NOTE(review): as in start(), 365 days are subtracted from both
    user-supplied dates before filtering — verify this is intended.
    """
    start_date = dt.datetime.strptime (start, "%Y-%m-%d") - dt.timedelta(days=365)
    end_date = dt.datetime.strptime (end, "%Y-%m-%d") - dt.timedelta(days=365)
    temp = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
        filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()
    return jsonify(temp)
if __name__ == "__main__":
    # Debug mode: auto-reload and interactive traceback page.
    # (A stray trailing "|" artifact was removed from this line.)
    app.run(debug=True)
# Demonstrate class methods (comment translated from Chinese).
class student:
    company = 'saic'

    @classmethod
    def clsmethod(cls):
        """Print the class-level company attribute.

        The first parameter was renamed from ``cl`` to the conventional
        ``cls``; a stray trailing "|" artifact was removed after the call.
        """
        print(cls.company)


student.clsmethod()
import smtplib
from smtplib import SMTPException, SMTPAuthenticationError,\
SMTPSenderRefused, SMTPRecipientsRefused
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email.MIMEText import MIMEText
from email import Encoders
from .models import User
from timesheet import session
import os
import getpass
import click
def sendmail(to, subject, text, attach):
    """Send an email via Gmail SMTP, optionally with one attachment.

    Python 2 code (email.MIME* / Encoders imports).  Credentials come
    from the first User row in the DB; if none exists the user is
    prompted and the credentials are persisted.

    :param to: recipient address
    :param subject: message subject line
    :param text: plain-text body
    :param attach: path of a file to attach, or None for no attachment
    """
    user = session.query(User).first()
    if user is None:
        email = click.prompt('Please enter your email address', type=str)
        pwd = getpass.getpass('Password:')
        # NOTE(review): the password is persisted in plain text.
        user = User(email=email, password=pwd)
        session.add(user)
        session.commit()
    msg = MIMEMultipart()
    msg['From'] = user.email
    msg['To'] = to
    msg['Subject'] = subject
    msg.attach(MIMEText(text))
    part = MIMEBase('application', 'octet-stream')
    if attach is not None:
        try:
            # Base64-encode the file and attach it with its basename.
            with open(attach, 'rb') as fh:
                data = fh.read()
            part.set_payload(data)
            Encoders.encode_base64(part)
            part.add_header('Content-Disposition',
                            'attachment; filename="%s"' % os.path.basename(attach))
            msg.attach(part)
        except IOError:
            # Unreadable attachment: warn and send the message without it.
            click.echo(click.style('Error opening attachment file %s' % attach,
                                   fg='red'))
    try:
        # STARTTLS handshake against Gmail's submission port.
        mailServer = smtplib.SMTP("smtp.gmail.com", 587)
        mailServer.ehlo()
        mailServer.starttls()
        mailServer.ehlo()
        mailServer.login(user.email, user.password)
        mailServer.sendmail(user.email, to, msg.as_string())
        click.echo(click.style('email sent!', fg='yellow'))
    except SMTPException:
        click.echo(click.style('Failed sending the email.', fg='red'))
    # NOTE(review): if smtplib.SMTP(...) itself raises, mailServer is
    # unbound here and this line raises NameError — confirm intent.
    mailServer.close()
|
# coding: utf-8
"""
Self-check exercises (docstrings translated from Russian).
"""
# 1
"""
Build an ascending list of the even numbers from 2 to 10 inclusive.
"""
int_list = []
int_list2 = []  # NOTE(review): never used afterwards
n = 10
# First attempt: appends every val with index >= 2 (odds included);
# its result is discarded — int_list is reassigned right below.
for i, val in enumerate(range(n+1)):
    if i // 2:
        int_list.append(val)
int_list = [x for i, x in enumerate(range(2, n+1)) if i % 2 == 0]
# test
assert int_list == [2, 4, 6, 8, 10]
# Simplest form: range with step 2.
int_list = [x for x in range(2, n+1, 2)]
# test
assert int_list == [2, 4, 6, 8, 10]
# 2
"""
Build a descending list of the numbers from 10 down to 3.
"""
int_list = [x for x in range(n, 2, -1)]
# test
assert int_list == [10, 9, 8, 7, 6, 5, 4, 3]
# 3
"""
Build a list of only the even elements from 10 down to 2.
Try both a comprehension and a loop.
"""
int_list = [x for x in range(n, 1, -2)]
# test
assert int_list == [10, 8, 6, 4, 2]
# 4
"""
Build an n x m matrix of zeros.
Try both a comprehension and a loop.
"""
n = 10
m = 5
# Loop version: append a fresh row list per x so rows are distinct objects.
matrix = []
for x in range(n):
    matrix.append([])
    for y in range(m):
        matrix[x].append(0)
def test_matrix(matrix):
    """Assert *matrix* is n x m with independent rows.

    NOTE: mutates matrix[0][1] to 1 to prove the rows are not the same
    shared list object.
    """
    assert len(matrix) == n
    for x in matrix:
        assert len(x) == m, 'Wrong matrix dimension'
    matrix[0][1] = 1
    assert matrix[0][1] == 1 and matrix[0][0] == 0, \
        'All items are the same object'


test_matrix(matrix)
# comprehension
matrix_c = [[0 for y in range(m)] for x in range(n)]
test_matrix(matrix_c)
# 5
"""
Given a list, print/return its maximum element.
"""
lst = [1, 2, -4, 222, 4, 150]


def get_max(l):
    """Return the largest element of the non-empty list *l* (hand-rolled max)."""
    assert l, 'Empty list is not supported'
    best = l[0]
    for item in l[1:]:
        if item > best:
            best = item
    return best


assert max(lst) == get_max(lst)
# 6
"""
Compute the sum 2/3 + 3/4 + 4/5 + ... + 9/10
"""
# NOTE: bare `reduce` implies Python 2 (py3 needs functools.reduce).
summ = reduce(lambda x, y: x + y/float(y+1), range(2, 10), 0.0)
# Both sides add the same float terms left-to-right, so exact equality holds.
assert sum([n/float(n+1) for n in range(2, 10)]) == summ
# 7
"""
For an arbitrary string, produce a string of its unique characters,
preserving first-occurrence order.
"""
string = 'AAbbBBca'
# order doesn't matter
assert set('AbBca') == set(string)
def parse_uniques(string):
    """Return the characters of *string* in first-seen order, deduplicated."""
    res_str = []
    for char in string:
        if char not in res_str:
            res_str.append(char)
    return res_str
assert 'AbBca' == ''.join(parse_uniques(string))
# Same dedup via reduce: append char only when not already present.
assert 'AbBca' == reduce(lambda res, ch: res if ch in res else res+ch, string)
# 8
"""
For an arbitrary string, count the occurrences of each character.
Try a dict.
"""
search_string = 'BAAAvvvBBBdaA'
valid_result = {
    'A': 4,
    'B': 4,
    'a': 1,
    'd': 1,
    'v': 3,
}
import collections
char_count = collections.defaultdict(int)
for char in search_string:
    char_count[char] += 1
assert valid_result == char_count
# 9
"""
Remove all digits from a string.
"""
various_string = 'ABC123C3d9zzz'
result1 = ''.join([char for char in various_string if char.isalpha()])
result2 = reduce(lambda x, y: x+y if y.isalpha() else x, various_string)
assert result1 == result2
# 10
"""
Implement a decimal counter class that can increment or decrement its
value by one step within a given range.  Support initialisation with
default as well as arbitrary values.  The counter has three methods:
increment, decrement, and one returning its current state.  Write a
program demonstrating all capabilities of the class.
"""
class Integer(object):
    """Bounded integer counter / iterator (Python 2 iterator style).

    NOTE(review): __iter__ returns the generator produced by next(), and
    next() ends with an explicit ``raise StopIteration()`` — redundant in
    a generator and a RuntimeError on Python 3.7+ (PEP 479); this code
    targets Python 2.
    """

    def __init__(self, *args, **kwargs):
        """ Create integer counter from min to max with given step
        :param min: starting point for values, default is 0
        :param max: maximum element for counter, default is endless count
        :param step: step for counter, default is 1
        """
        super(Integer, self).__init__()
        self._min, self._max, self._step = self.parse_args(*args, **kwargs)
        # Current position; starts at the lower bound.
        self.__cur = self._min

    def parse_args(self, *args, **kwargs):
        """Resolve (min, max, step) from positional/keyword arguments.

        Mirrors range(): 1 positional arg is max; 2 are (min, max);
        3 are (min, max, step).  Duplicated positional+keyword values
        raise ValueError.
        """
        len_args = len(args)
        len_kwargs = len(kwargs)
        if len_args == 0:
            _max = kwargs.pop('max', None)
            _min = kwargs.pop('min', 0)
            _step = kwargs.pop('step', 1)
        elif len_args == 1:
            _max = args[0]
            if 'max' in kwargs:
                raise ValueError('Wrong input arguments')
            _min = kwargs.pop('min', 0)
            _step = kwargs.pop('step', 1)
        elif len_args == 2:
            _min, _max = args
            if 'min' in kwargs or 'max' in kwargs:
                raise ValueError('Wrong input arguments')
            _step = kwargs.pop('step', 1)
        elif len_args == 3 and len_kwargs == 0:
            _min, _max, _step = args
        else:
            raise ValueError('Wrong input arguments')
        return _min, _max, _step

    def __iter__(self):
        return self.next()

    def next(self):
        """Generator yielding values from the current position up to max.

        A max of None means count endlessly.
        """
        while self._max is None or self.__cur < self._max:
            yield self.__cur
            self.__cur += self._step
        raise StopIteration()

    def previous(self):
        """Step the counter back once and return the new value.

        Raises StopIteration when the decrement would pass below min.
        """
        self.__cur -= self._step
        if self.__cur < self._min:
            raise StopIteration()
        return self.__cur

    def current_value(self):
        """Return the counter's current position without moving it."""
        return self.__cur
# Arguments test — every supported argument spelling resolves correctly.
i = Integer()
assert i._max is None and i._step == 1 and i._min == 0
i = Integer(10)
assert i._max == 10 and i._step == 1 and i._min == 0
i = Integer(1, 10)
assert i._max == 10 and i._step == 1 and i._min == 1
i = Integer(1, 10, 2)
assert i._max == 10 and i._step == 2 and i._min == 1
i = Integer(min=1, max=10, step=2)
assert i._max == 10 and i._step == 2 and i._min == 1
i = Integer(10, min=1, step=2)
assert i._max == 10 and i._step == 2 and i._min == 1
i = Integer(1, 10, step=2)
assert i._max == 10 and i._step == 2 and i._min == 1
def test_value_error(*args, **kwargs):
    """Assert that Integer(*args, **kwargs) raises ValueError."""
    try:
        i = Integer(*args, **kwargs)
    except ValueError:
        pass
    else:
        raise AssertionError
test_value_error(10, max=1)
test_value_error(1, 10, min=1)
test_value_error(1, 10, 2, step=1)
test_value_error(1, 10, 2, step=1, min=1, max=1)
# Iteration test.
# NOTE: `range(...) == list` comparisons below only hold on Python 2,
# where range() returns a list.
to_ten = Integer(10)
all_values = [x for x in to_ten]
assert x == 9
assert all_values == range(10)
odd_num = Integer(1, 11, 2)
assert range(1, 11, 2) == [x for x in odd_num]
even_num = Integer(0, 11, 2)
assert range(0, 11, 2) == list(even_num)
# Endless counter: break manually at max_, then walk back with previous().
endless_num = Integer()
max_ = 100
for x in endless_num:
    if x >= max_:
        break
assert x == max_
for i in range(max_):
    x = endless_num.previous()
assert x == 0 and i == (max_-1)
|
# Read the test-case count, then consume one input line per case and
# answer 'Y' unconditionally.  (A stray trailing "|" artifact was
# removed after the print call.)
t = int(input())
for c in range(t):
    input()
    print('Y')
# coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class ClusterEmailExtended(object):
    """
    Swagger model for a cluster's extended email-notification settings.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    def __init__(self):
        """
        ClusterEmailExtended - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
            and the value is attribute type.
        :param dict attributeMap: The key is attribute name
            and the value is json key in definition.
        """
        # Attribute name -> Swagger type; drives to_dict() traversal.
        self.swagger_types = {
            'user_template': 'str',
            'settings': 'ClusterEmailSettings',
            'smtp_port': 'int',
            'mail_subject': 'str',
            'smtp_auth_username': 'str',
            'mail_sender': 'str',
            'batch_mode': 'str',
            'mail_relay': 'str',
            'smtp_auth_security': 'str',
            'use_smtp_auth': 'bool',
            'smtp_auth_passwd': 'str'
        }

        # Attribute name -> JSON key in the API definition.
        self.attribute_map = {
            'user_template': 'user_template',
            'settings': 'settings',
            'smtp_port': 'smtp_port',
            'mail_subject': 'mail_subject',
            'smtp_auth_username': 'smtp_auth_username',
            'mail_sender': 'mail_sender',
            'batch_mode': 'batch_mode',
            'mail_relay': 'mail_relay',
            'smtp_auth_security': 'smtp_auth_security',
            'use_smtp_auth': 'use_smtp_auth',
            'smtp_auth_passwd': 'smtp_auth_passwd'
        }

        # Backing fields for the properties below; all start unset.
        self._user_template = None
        self._settings = None
        self._smtp_port = None
        self._mail_subject = None
        self._smtp_auth_username = None
        self._mail_sender = None
        self._batch_mode = None
        self._mail_relay = None
        self._smtp_auth_security = None
        self._use_smtp_auth = None
        self._smtp_auth_passwd = None

    @property
    def user_template(self):
        """Location of a custom template file that can be used to specify
        the layout of the notification emails (str)."""
        return self._user_template

    @user_template.setter
    def user_template(self, user_template):
        """Sets the user_template of this ClusterEmailExtended (str)."""
        self._user_template = user_template

    @property
    def settings(self):
        """Cluster email notification settings (ClusterEmailSettings)."""
        return self._settings

    @settings.setter
    def settings(self, settings):
        """Sets the settings of this ClusterEmailExtended
        (ClusterEmailSettings)."""
        self._settings = settings

    @property
    def smtp_port(self):
        """The port on the SMTP server to be used for relaying the
        notification messages (int)."""
        return self._smtp_port

    @smtp_port.setter
    def smtp_port(self, smtp_port):
        """Sets the smtp_port of this ClusterEmailExtended (int)."""
        self._smtp_port = smtp_port

    @property
    def mail_subject(self):
        """The subject line for notification messages from this cluster
        (str)."""
        return self._mail_subject

    @mail_subject.setter
    def mail_subject(self, mail_subject):
        """Sets the mail_subject of this ClusterEmailExtended (str)."""
        self._mail_subject = mail_subject

    @property
    def smtp_auth_username(self):
        """Username to authenticate with if SMTP authentication is being
        used (str)."""
        return self._smtp_auth_username

    @smtp_auth_username.setter
    def smtp_auth_username(self, smtp_auth_username):
        """Sets the smtp_auth_username of this ClusterEmailExtended (str)."""
        self._smtp_auth_username = smtp_auth_username

    @property
    def mail_sender(self):
        """The full email address that will appear as the sender of
        notification messages (str)."""
        return self._mail_sender

    @mail_sender.setter
    def mail_sender(self, mail_sender):
        """Sets the mail_sender of this ClusterEmailExtended (str)."""
        self._mail_sender = mail_sender

    @property
    def batch_mode(self):
        """How notifications are batched into emails: 'none' (each sent
        separately), 'severity', 'category', or 'all' (single email)."""
        return self._batch_mode

    @batch_mode.setter
    def batch_mode(self, batch_mode):
        """Sets the batch_mode of this ClusterEmailExtended.

        :raises ValueError: if batch_mode is not an allowed value.
        """
        allowed_values = ["all", "severity", "category", "none"]
        if batch_mode not in allowed_values:
            raise ValueError(
                "Invalid value for `batch_mode`, must be one of {0}"
                .format(allowed_values)
            )
        self._batch_mode = batch_mode

    @property
    def mail_relay(self):
        """Address of the SMTP relay server; if empty, no emails are sent
        (str)."""
        return self._mail_relay

    @mail_relay.setter
    def mail_relay(self, mail_relay):
        """Sets the mail_relay of this ClusterEmailExtended (str)."""
        self._mail_relay = mail_relay

    @property
    def smtp_auth_security(self):
        """Secure-communication protocol for SMTP: 'none' (plain text) or
        'starttls' (encrypted STARTTLS)."""
        return self._smtp_auth_security

    @smtp_auth_security.setter
    def smtp_auth_security(self, smtp_auth_security):
        """Sets the smtp_auth_security of this ClusterEmailExtended.

        :raises ValueError: if smtp_auth_security is not an allowed value.
        """
        allowed_values = ["none", "starttls"]
        if smtp_auth_security not in allowed_values:
            raise ValueError(
                "Invalid value for `smtp_auth_security`, must be one of {0}"
                .format(allowed_values)
            )
        self._smtp_auth_security = smtp_auth_security

    @property
    def use_smtp_auth(self):
        """Whether SMTP authentication credentials are sent to the relay
        server (bool)."""
        return self._use_smtp_auth

    @use_smtp_auth.setter
    def use_smtp_auth(self, use_smtp_auth):
        """Sets the use_smtp_auth of this ClusterEmailExtended (bool)."""
        self._use_smtp_auth = use_smtp_auth

    @property
    def smtp_auth_passwd(self):
        """Password to authenticate with if SMTP authentication is being
        used (str)."""
        return self._smtp_auth_passwd

    @smtp_auth_passwd.setter
    def smtp_auth_passwd(self, smtp_auth_passwd):
        """Sets the smtp_auth_passwd of this ClusterEmailExtended (str)."""
        self._smtp_auth_passwd = smtp_auth_passwd

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        # dict.items() works on Python 2 and 3; this drops the six.iteritems
        # dependency with identical behavior.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recurse into nested model objects held inside lists.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            else:
                result[attr] = value
        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # Guard against unrelated types: the previous implementation accessed
        # other.__dict__ unconditionally and raised AttributeError for
        # objects without one (e.g. `model == 5`).
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
|
import pytest
@pytest.fixture
def client():
    """Flask test client for the application, with TESTING mode enabled."""
    from app import app  # imported lazily so the app is configured per use
    app.config.update(TESTING=True)
    return app.test_client()
# Imports
from django.shortcuts import render
from django.views import generic
# Importing Models
from .models import Post, BlogAuthor, Configuration, PostComment
# Imports for Authentication Views
from django.shortcuts import render, redirect
from .forms import NewUserForm, UserLoginForm
from django.contrib.auth import login, authenticate, logout
from django.contrib import messages
from django.contrib.auth.forms import AuthenticationForm, UserCreationForm, PasswordResetForm, \
PasswordChangeForm # add this
#
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic.edit import CreateView
from django.urls import reverse, reverse_lazy
# Create your views here.
# Home View
def index(request):
    """
    View function for home page of site.
    """
    all_posts = Post.objects.all()
    site_config = Configuration.objects.all()[0]
    # Per-session visit counter, stored in the Django session.
    visit_count = request.session.get('num_visits', 0)
    request.session['num_visits'] = visit_count + 1
    template_context = {
        'posts': all_posts,
        'num_visits': visit_count,
        'index': site_config,
    }
    # Render the HTML template index.html
    return render(request, 'index.html', context=template_context)
# About Page View
def about(request):
    """
    View function for the about page of the site.
    """
    site_config = Configuration.objects.all()[0]
    return render(request, 'about.html', context={'about': site_config})
# Contact Page View
def contact(request):
    """
    View function for the contact page of the site.
    """
    site_config = Configuration.objects.all()[0]
    return render(request, 'contact.html', context={'contact': site_config})
# Authentication Views
def register_request(request):
    """Handle user sign-up: create the account, log the user in, and
    redirect home; re-render the bound form on validation failure."""
    bg = Configuration.objects.all()[0]
    if request.method != "POST":
        form = NewUserForm()
    else:
        form = NewUserForm(request.POST)
        if form.is_valid():
            new_user = form.save()
            login(request, new_user)
            messages.success(request, "Registration successful.")
            return redirect("/")
        # Invalid submission falls through and re-renders the bound form.
    return render(request=request,
                  template_name="registration/register.html",
                  context={
                      "register_form": form,
                      "bg": bg
                  })
def login_request(request):
    """Authenticate and log a user in; on failure re-render the login form."""
    bg = Configuration.objects.all()[0]
    if request.method != "POST":
        form = UserLoginForm()
    else:
        form = UserLoginForm(request, data=request.POST)
        if form.is_valid():
            username = form.cleaned_data.get('username')
            user = authenticate(username=username,
                                password=form.cleaned_data.get('password'))
            if user is not None:
                login(request, user)
                messages.info(request, f"You are now logged in as {username}.")
                return redirect("/")
            messages.error(request, "Invalid username or password.")
    return render(request=request,
                  template_name="registration/login.html",
                  context={
                      "login_form": form,
                      "bg": bg
                  })
def logout_request(request):
    """Log the current user out, flash a confirmation, and go home."""
    logout(request)
    messages.success(request, "You have successfully logged out.")
    return redirect("/")
from django.contrib.auth.views import PasswordChangeView
class PasswordsChangeView(PasswordChangeView):
    """Django password-change view with a custom template; redirects to the
    home page on success."""
    form_class = PasswordChangeForm
    success_url = reverse_lazy('index')
    template_name = "registration/change_password.html"
# --------
# Post Detail View
class PostDetailView(generic.DetailView):
    """
    Generic class-based detail view for a single blog Post.
    """
    model = Post
# Post by Author View
from django.shortcuts import get_object_or_404
class PostListbyAuthorView(generic.ListView):
    """
    Generic class-based view for a list of blogs posted by a particular
    BlogAuthor.
    """
    model = Post
    paginate_by = 5
    template_name = 'blog/post/post_list_by_author.html'

    def get_queryset(self):
        """Return the Post objects created by the BlogAuthor whose pk is
        given in the URL (404 if the author does not exist)."""
        author = get_object_or_404(BlogAuthor, pk=self.kwargs['pk'])
        return Post.objects.filter(author=author)

    def get_context_data(self, **kwargs):
        """Expose the BlogAuthor to the template as ``blogger``."""
        context = super().get_context_data(**kwargs)
        context['blogger'] = get_object_or_404(BlogAuthor, pk=self.kwargs['pk'])
        return context
# Post Comment Create View
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic.edit import CreateView
class PostCommentCreate(LoginRequiredMixin, CreateView):
    """
    Form for adding a blog comment. Requires login.
    """
    model = PostComment
    fields = ['comment']

    def get_context_data(self, **kwargs):
        """Add the target Post to the context so the template can show its
        title."""
        context = super().get_context_data(**kwargs)
        context['post'] = get_object_or_404(Post, pk=self.kwargs['pk'])
        return context

    def form_valid(self, form):
        """Attach the logged-in user as author and bind the comment to the
        Post from the URL before validation completes."""
        form.instance.author = self.request.user
        form.instance.post = get_object_or_404(Post, pk=self.kwargs['pk'])
        return super().form_valid(form)

    def get_success_url(self):
        """After posting a comment, return to the associated post."""
        return reverse('post-detail', kwargs={'pk': self.kwargs['pk']})
|
from rest_framework import serializers
from .models import FindClosingBracket
class FindClosingBracketSerializer(serializers.ModelSerializer):
    """DRF model serializer exposing every field of FindClosingBracket."""
    class Meta:
        model = FindClosingBracket
        fields = '__all__'
|
# adapted from mtbatchgen by Zahoor
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from modisco.visualization import viz_sequence
from collections import OrderedDict
import modisco.visualization
import deepdish
import h5py
import numpy as np
import modisco
import argparse
import os
def fetch_modisco_args():
    """Parse the command-line options for a TF-MoDISco run."""
    cli = argparse.ArgumentParser()
    cli.add_argument("-s", "--scores_prefix", type=str, required=True, help="Prefix to counts/profile h5 files. Will use prefix.{profile,counts}_scores.h5")
    cli.add_argument("-p", "--profile_or_counts", type=str, required=True, help="Scoring method to use, profile or counts scores")
    cli.add_argument("-o", "--output_dir", type=str, required=True, help="Output directory")
    cli.add_argument("-c", "--crop", type=int, default=1000, help="Crop scores to this width from the center for each example")
    cli.add_argument("-m", "--max_seqlets", type=int, default=50000, help="Max number of seqlets per metacluster for modisco")
    return cli.parse_args()
def save_plot(weights, dst_fname):
    """
    Render a sequence logo for `weights` and save it to `dst_fname`.

    # assumes weights is a (positions, 4) array in A/C/G/T order — TODO confirm
    """
    print(dst_fname)
    # Per-base colors and glyph-drawing functions, indexed A=0, C=1, G=2, T=3.
    colors = {0:'green', 1:'blue', 2:'orange', 3:'red'}
    plot_funcs = {0: viz_sequence.plot_a, 1: viz_sequence.plot_c,
                  2: viz_sequence.plot_g, 3: viz_sequence.plot_t}
    fig = plt.figure(figsize=(20, 2))
    ax = fig.add_subplot(111)
    # NOTE(review): the figure is never closed; calling this in a long loop
    # accumulates open matplotlib figures — consider plt.close(fig).
    viz_sequence.plot_weights_given_ax(ax=ax, array=weights,
                                       height_padding_factor=0.2,
                                       length_padding=1.0,
                                       subticks_frequency=1.0,
                                       colors=colors, plot_funcs=plot_funcs,
                                       highlight={}, ylabel="")
    plt.savefig(dst_fname)
def main():
    """
    Run TF-MoDISco over saved importance scores and write the results HDF5,
    a seqlet FASTA-like dump, and per-pattern logo images to the output dir.
    """
    args = fetch_modisco_args()

    # Fail fast if the output directory does not exist. (The original built
    # a FileNotFoundError via the `errno` module without importing it, which
    # raised NameError instead of the intended error.)
    if not os.path.exists(args.output_dir):
        raise FileNotFoundError(
            "Output directory does not exist: {}".format(args.output_dir))

    # Resolve the scores file from the chosen scoring method.
    scoring_type = args.profile_or_counts
    if scoring_type == 'profile':
        scores_path = args.scores_prefix + '.profile_scores.h5'
    elif scoring_type == 'counts':
        scores_path = args.scores_prefix + '.counts_scores.h5'
    else:
        # The original only printed here and then crashed later on the
        # unbound `scores_path`; raise a clear error instead.
        raise ValueError("Enter a valid scoring type: counts or profile")
    print(" Scores path is {}".format(scores_path))
    assert os.path.exists(scores_path)

    if scoring_type == 'profile':
        save_path = os.path.join(args.output_dir, 'modisco_results_allChroms_profile.hdf5')
        seqlet_path = os.path.join(args.output_dir, 'seqlets_profile.txt')
    else:
        save_path = os.path.join(args.output_dir, 'modisco_results_allChroms_counts.hdf5')
        seqlet_path = os.path.join(args.output_dir, 'seqlets_counts.txt')

    # Directory for the untrimmed logo PNGs generated at the end.
    outdirname = os.path.join(args.output_dir, "untrimmed_logos_" + scoring_type)
    if not os.path.exists(outdirname):
        os.mkdir(outdirname)

    # Load the scores and crop each example to args.crop bp around the center.
    scores = deepdish.io.load(scores_path)
    shap_scores_seq = []
    proj_shap_scores_seq = []
    one_hot_seqs = []
    center = scores['shap']['seq'].shape[-1] // 2
    start = center - args.crop // 2
    end = center + args.crop // 2
    for arr in scores['shap']['seq']:
        shap_scores_seq.append(arr[:, start:end].transpose())
    for arr in scores['projected_shap']['seq']:
        proj_shap_scores_seq.append(arr[:, start:end].transpose())
    for arr in scores['raw']['seq']:
        one_hot_seqs.append(arr[:, start:end].transpose())

    # Single-task setup expected by the modisco workflow.
    task_to_scores = OrderedDict()
    task_to_hyp_scores = OrderedDict()
    onehot_data = one_hot_seqs
    task_to_scores['task0'] = proj_shap_scores_seq
    task_to_hyp_scores['task0'] = shap_scores_seq

    tfmodisco_results = modisco.tfmodisco_workflow.workflow.TfModiscoWorkflow(
        min_metacluster_size_frac=0.0001,
        max_seqlets_per_metacluster=args.max_seqlets,
        sliding_window_size=20,
        flank_size=5,
        target_seqlet_fdr=0.05,
        seqlets_to_patterns_factory=modisco.tfmodisco_workflow.seqlets_to_patterns.TfModiscoSeqletsToPatternsFactory(
            n_cores=10,
            trim_to_window_size=20,
            initial_flank_to_add=5,
            final_min_cluster_size=20))(task_names=["task0"],
                                        contrib_scores=task_to_scores,
                                        hypothetical_contribs=task_to_hyp_scores,
                                        one_hot=onehot_data)

    # Refuse to clobber existing results.
    if os.path.exists(save_path):
        raise OSError('File {} already exists'.format(save_path))
    # Context manager guarantees the HDF5 file is closed (was leaked before).
    with h5py.File(save_path, "w") as grp:
        tfmodisco_results.save_hdf5(grp)
    print("Saved modisco results to file {}".format(str(save_path)))

    # Dump every seqlet as a FASTA-like record.
    print("Saving seqlets to %s" % seqlet_path)
    seqlets = \
        tfmodisco_results.metacluster_idx_to_submetacluster_results[0].seqlets
    bases = np.array(["A", "C", "G", "T"])
    with open(seqlet_path, "w") as f:
        for seqlet in seqlets:
            sequence = "".join(
                bases[np.argmax(seqlet["sequence"].fwd, axis=-1)]
            )
            example_index = seqlet.coor.example_idx
            start, end = seqlet.coor.start, seqlet.coor.end
            f.write(">example%d:%d-%d\n" % (example_index, start, end))
            f.write(sequence + "\n")

    # One contribution-score logo and one sequence logo per pattern.
    print("Saving pattern visualizations")
    patterns = (tfmodisco_results
                .metacluster_idx_to_submetacluster_results[0]
                .seqlets_to_patterns_result.patterns)
    for idx, pattern in enumerate(patterns):
        print(pattern)
        print("pattern idx", idx)
        print(len(pattern.seqlets))
        save_plot(pattern["task0_contrib_scores"].fwd,
                  os.path.join(outdirname, 'contrib_{}.png'.format(idx)))
        save_plot(pattern["sequence"].fwd,
                  os.path.join(outdirname, 'sequence_{}.png'.format(idx)))
# Script entry point.
if __name__=="__main__":
    main()
|
import curses

# Minimal curses demo: draw a bordered window with a title and wait for a key.
win = curses.initscr()
try:
    win.border(0)
    win.addstr(0, 1, "Here be the title")
    win.refresh()
    win.getch()
finally:
    # Always restore the terminal, even if drawing raises — otherwise the
    # shell is left in curses mode (no echo, raw keys).
    curses.endwin()
|
class Node:
    """Doubly-linked-list node bucketing the set of keys that share a count."""

    def __init__(self, key="", count=0):
        self.prev = None
        self.next = None
        self.keys = {key}
        self.count = count

    def insert(self, node: 'Node') -> 'Node':
        """Splice *node* into the list immediately after self; return it."""
        successor = self.next
        node.prev = self
        node.next = successor
        self.next = node
        successor.prev = node
        return node

    def remove(self):
        """Unlink self from the list (self's own pointers are left as-is)."""
        self.prev.next = self.next
        self.next.prev = self.prev
class AllOne:
    """All-O(1) key-count structure: a circular doubly linked list of
    count-buckets in ascending count order, plus a key -> bucket map."""

    def __init__(self):
        self.root = Node()
        self.root.prev = self.root
        self.root.next = self.root  # Sentinel init: a node whose next is self.root has no successor (same for prev)
        self.nodes = {}

    def inc(self, key: str) -> None:
        """Increment key's count, moving it to the correct bucket in O(1)."""
        if key not in self.nodes:  # key is not in the list yet
            if self.root.next is self.root or self.root.next.count > 1:
                self.nodes[key] = self.root.insert(Node(key, 1))
            else:
                self.root.next.keys.add(key)
                self.nodes[key] = self.root.next
        else:
            cur = self.nodes[key]
            nxt = cur.next
            if nxt is self.root or nxt.count > cur.count + 1:
                # No bucket for count+1 exists; create one right after cur.
                self.nodes[key] = cur.insert(Node(key, cur.count + 1))
            else:
                nxt.keys.add(key)
                self.nodes[key] = nxt
            cur.keys.remove(key)
            if len(cur.keys) == 0:
                cur.remove()

    def dec(self, key: str) -> None:
        """Decrement key's count; drop the key entirely when it reaches 0."""
        cur = self.nodes[key]
        if cur.count == 1:  # key occurred only once: remove it from nodes
            del self.nodes[key]
        else:
            pre = cur.prev
            if pre is self.root or pre.count < cur.count - 1:
                # No bucket for count-1 exists; create one right before cur.
                self.nodes[key] = cur.prev.insert(Node(key, cur.count - 1))
            else:
                pre.keys.add(key)
                self.nodes[key] = pre
            cur.keys.remove(key)
            if len(cur.keys) == 0:
                cur.remove()

    def getMaxKey(self) -> str:
        """Return any key with the maximal count, or "" when empty."""
        return next(iter(self.root.prev.keys)) if self.root.prev is not self.root else ""

    def getMinKey(self) -> str:
        """Return any key with the minimal count, or "" when empty."""
        return next(iter(self.root.next.keys)) if self.root.next is not self.root else ""
# Author: LeetCode-Solution
# Link: https://leetcode-cn.com/problems/all-oone-data-structure/solution/quan-o1-de-shu-ju-jie-gou-by-leetcode-so-7gdv/
# Source: LeetCode (leetcode-cn.com)
# Copyright belongs to the author. Obtain the author's permission before commercial reuse; credit the source for non-commercial reuse. |
def frange(x, y, jump=1.0):
    '''
    Range for floats.

    Parameters:
        x: range starting value, will be included.
        y: range ending value, will be excluded
        jump: the step value. Only positive steps are supported.

    Return:
        a generator that yields floats

    Usage:
    >>> list(frange(0, 1, 0.2))
    [0.0, 0.2, 0.4, 0.6000000000000001, 0.8]
    >>> list(frange(1, 0, 0.2))
    [1.0]
    >>> list(frange(0.0, 0.05, 0.1))
    [0.0]
    >>> list(frange(0.0, 0.15, 0.1))
    [0.0, 0.1]
    '''
    x = float(x)  # Prevent yielding integers.
    y = float(y)
    x0 = x
    # Tiny relative tolerance: a candidate landing within epsilon below y
    # (pure floating-point error) is treated as equal to y and excluded.
    # Bug fix: the original used epsilon = jump / 2 with `while x + epsilon
    # < y`, which yielded y itself — e.g. frange(0, 1, 0.2) produced a
    # trailing 1.0, contradicting the doctests above.
    epsilon = jump * 1e-9
    yield x  # yield always first value
    i = 1
    while x0 + i * jump < y - epsilon:
        yield x0 + i * jump
        i += 1
# -*- coding: utf-8 -*-
from __future__ import division, print_function
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pdb
import seaborn as sns
if __name__ == '__main__':
    # Global plot style for every figure below.
    sns.set(style="ticks")
    # Only used by the commented-out hatch styling in the loops below.
    hatches = ['----', '/', 'xxx', '///', '---']
    # Edge colors cycled across the four rec_type bars within each group.
    colors = ['#FFA500', '#FF0000', '#0000FF', '#05FF05']
    df = pd.read_pickle('df.obj')
    df = df[df['strategy'] == 'title']
    # Figure 1: grouped bars over data sets for randomly chosen items.
    fg = sns.factorplot(x='data_set', y='val', hue='rec_type', size=10,
                        data=df[df['is_random'] == True],
                        kind='bar',
                        palette=['#FFA500', '#FF0000', '#0000FF',
                                 '#05FF05'],
                        hue_order=['rbar', 'rb', 'rbiw', 'rbmf'],
                        )
    for bidx, bar in enumerate(fg.ax.patches):
        # bar.set_fill(False)
        # bar.set_hatch(hatches[bidx % 4])
        bar.set_edgecolor(colors[int(bidx % 4)])
        bar.set_alpha(0.75)
    plt.title('random')
    # NOTE(review): this second figure is titled 'rating-based' but still
    # filters is_random == True (with N == 5); the filter may have been meant
    # to be is_random == False — confirm against the data pipeline.
    fg = sns.factorplot(x='data_set', y='val', hue='rec_type', size=10,
                        data=df[(df['is_random'] == True) & (df['N'] == 5)],
                        kind='bar',
                        palette=['#FFA500', '#FF0000', '#0000FF',
                                 '#05FF05'],
                        hue_order=['rbar', 'rb', 'rbiw', 'rbmf'],
                        )
    for bidx, bar in enumerate(fg.ax.patches):
        # bar.set_fill(False)
        # bar.set_hatch(hatches[bidx % 4])
        bar.set_edgecolor(colors[int(bidx % 4)])
        bar.set_alpha(0.75)
    plt.title('rating-based')
    # pdb.set_trace()
    plt.show()
|
#!/usr/bin/env python
"""Test suite for aospy.io module."""
import sys
import unittest
import aospy.utils.io as io
class AospyIOTestCase(unittest.TestCase):
    """Base TestCase for aospy.utils.io tests; no shared fixtures needed."""
    def setUp(self):
        pass

    def tearDown(self):
        pass
class TestIO(AospyIOTestCase):
    def test_dmget(self):
        """Smoke-test io.dmget with both a list of paths and a bare string.

        NOTE(review): depends on site-specific archive paths being
        reachable — this is an integration test, not a unit test.
        """
        io.dmget(['/home/Spencer.Clark/archive/imr_skc/control/'
                  'gfdl.ncrc3-default-repro/1/history/'
                  '00010101.atmos_month.nc'])
        io.dmget('/home/Spencer.Clark/archive/imr_skc/control/'
                 'gfdl.ncrc3-default-repro/1/history/'
                 '00010101.atmos_month.nc')
# Run the suite and propagate unittest's exit status to the shell.
if __name__ == '__main__':
    sys.exit(unittest.main())
|
from datetime import datetime
import orm
from .database import database, metadata
class BaseModel(orm.Model):
__abstract__ = True
id = orm.Integer(primary_key=True)
created_at = orm.DateTime(allow_null=True, default=datetime.now())
updated_at = orm.DateTime(allow_null=True)
deleted_at = orm.DateTime(allow_null=True)
|
# Read an employee record from stdin and print the id and gross pay.
n = int(input())    # employee number
h = int(input())    # hours worked (whole hours)
s = float(input())  # hourly wage
# NOTE(review): assumes pay = hours * wage with no overtime rule — confirm.
print('NUMBER = {}\nSALARY = U$ {:.2f}'.format(n, (h * s)))
# This file is part of beets.
# Copyright 2016, Stig Inge Lea Bjornsen.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Tests for the `importadded` plugin."""
import os
import unittest
from test.test_importer import ImportHelper, AutotagStub
from beets import importer
from beets.util import displayable_path, syspath
from beetsplug.importadded import ImportAddedPlugin
# Snapshot of the plugin's event listeners taken at import time so they can
# be restored between test runs.
_listeners = ImportAddedPlugin.listeners

def preserve_plugin_listeners():
    """Preserve the initial plugin listeners as they would otherwise be
    deleted after the first setup / tear down cycle.
    """
    if not ImportAddedPlugin.listeners:
        ImportAddedPlugin.listeners = _listeners
def modify_mtimes(paths, offset=-60000):
    """Shift each path's mtime by a distinct multiple of *offset* seconds
    (1x, 2x, 3x, ...) so every file ends up with a different mtime."""
    multiplier = 1
    for path in paths:
        stat_result = os.stat(path)
        os.utime(syspath(path),
                 (stat_result.st_atime, stat_result.st_mtime + offset * multiplier))
        multiplier += 1
class ImportAddedTest(unittest.TestCase, ImportHelper):
    """Tests for the importadded plugin: `added` dates must be derived from
    pre-import file mtimes for album and singleton imports, and must be
    preserved across reimports."""

    # The minimum mtime of the files to be imported
    min_mtime = None

    def setUp(self):
        preserve_plugin_listeners()
        self.setup_beets()
        self.load_plugins('importadded')
        self._create_import_dir(2)
        # Different mtimes on the files to be imported in order to test the
        # plugin
        modify_mtimes(mfile.path for mfile in self.media_files)
        self.min_mtime = min(os.path.getmtime(mfile.path)
                             for mfile in self.media_files)
        self.matcher = AutotagStub().install()
        # Bug fix: this was `self.matcher.macthin = ...`, a typo that created
        # an unused attribute instead of configuring the stub's match quality.
        self.matcher.matching = AutotagStub.GOOD
        self._setup_import_session()
        self.importer.add_choice(importer.action.APPLY)

    def tearDown(self):
        self.unload_plugins()
        self.teardown_beets()
        self.matcher.restore()

    def find_media_file(self, item):
        """Find the pre-import MediaFile for an Item"""
        for m in self.media_files:
            if m.title.replace('Tag', 'Applied') == item.title:
                return m
        raise AssertionError("No MediaFile found for Item " +
                             displayable_path(item.path))

    def assertEqualTimes(self, first, second, msg=None):  # noqa
        """For comparing file modification times at a sufficient precision"""
        self.assertAlmostEqual(first, second, places=4, msg=msg)

    def assertAlbumImport(self):  # noqa
        """Import the fixture album and check that every `added` date equals
        the minimum pre-import mtime."""
        self.importer.run()
        album = self.lib.albums().get()
        self.assertEqual(album.added, self.min_mtime)
        for item in album.items():
            self.assertEqual(item.added, self.min_mtime)

    def test_import_album_with_added_dates(self):
        self.assertAlbumImport()

    def test_import_album_inplace_with_added_dates(self):
        # In-place import: no copy/move/link/hardlink of the files.
        self.config['import']['copy'] = False
        self.config['import']['move'] = False
        self.config['import']['link'] = False
        self.config['import']['hardlink'] = False
        self.assertAlbumImport()

    def test_import_album_with_preserved_mtimes(self):
        self.config['importadded']['preserve_mtimes'] = True
        self.importer.run()
        album = self.lib.albums().get()
        self.assertEqual(album.added, self.min_mtime)
        for item in album.items():
            self.assertEqualTimes(item.added, self.min_mtime)
            mediafile_mtime = os.path.getmtime(self.find_media_file(item).path)
            self.assertEqualTimes(item.mtime, mediafile_mtime)
            self.assertEqualTimes(os.path.getmtime(item.path),
                                  mediafile_mtime)

    def test_reimported_album_skipped(self):
        # Import and record the original added dates
        self.importer.run()
        album = self.lib.albums().get()
        album_added_before = album.added
        items_added_before = {item.path: item.added
                              for item in album.items()}
        # Newer Item path mtimes as if Beets had modified them
        modify_mtimes(items_added_before.keys(), offset=10000)
        # Reimport
        self._setup_import_session(import_dir=album.path)
        self.importer.run()
        # Verify the reimported items
        album = self.lib.albums().get()
        self.assertEqualTimes(album.added, album_added_before)
        items_added_after = {item.path: item.added
                             for item in album.items()}
        for item_path, added_after in items_added_after.items():
            self.assertEqualTimes(items_added_before[item_path], added_after,
                                  "reimport modified Item.added for " +
                                  displayable_path(item_path))

    def test_import_singletons_with_added_dates(self):
        self.config['import']['singletons'] = True
        self.importer.run()
        for item in self.lib.items():
            mfile = self.find_media_file(item)
            self.assertEqualTimes(item.added, os.path.getmtime(mfile.path))

    def test_import_singletons_with_preserved_mtimes(self):
        self.config['import']['singletons'] = True
        self.config['importadded']['preserve_mtimes'] = True
        self.importer.run()
        for item in self.lib.items():
            mediafile_mtime = os.path.getmtime(self.find_media_file(item).path)
            self.assertEqualTimes(item.added, mediafile_mtime)
            self.assertEqualTimes(item.mtime, mediafile_mtime)
            self.assertEqualTimes(os.path.getmtime(item.path),
                                  mediafile_mtime)

    def test_reimported_singletons_skipped(self):
        self.config['import']['singletons'] = True
        # Import and record the original added dates
        self.importer.run()
        items_added_before = {item.path: item.added
                              for item in self.lib.items()}
        # Newer Item path mtimes as if Beets had modified them
        modify_mtimes(items_added_before.keys(), offset=10000)
        # Reimport
        import_dir = os.path.dirname(list(items_added_before.keys())[0])
        self._setup_import_session(import_dir=import_dir, singletons=True)
        self.importer.run()
        # Verify the reimported items
        items_added_after = {item.path: item.added
                             for item in self.lib.items()}
        for item_path, added_after in items_added_after.items():
            self.assertEqualTimes(items_added_before[item_path], added_after,
                                  "reimport modified Item.added for " +
                                  displayable_path(item_path))
def suite():
    """Build a test suite from every test defined in this module."""
    return unittest.TestLoader().loadTestsFromName(__name__)

if __name__ == '__main__':
    unittest.main(defaultTest='suite')
|
import maya.cmds as cmds
import random
class randoValuez():
def __init__(self):
    # Window name placeholder; replaced by the actual window handle in
    # RandomValues().
    self.MaxAndMinWin = "Randomizer"
    self.RandomValues()
def RandomValues(self):
    """Build (or rebuild) the randomizer window UI and show it."""
    # Tear down any previous instance of the window first.
    self.delete()
    self.MaxAndMinWin = cmds.window('MaxAndMinWin')
    self.colLayout = cmds.columnLayout()
    # Float fields bounding the random translate values.
    cmds.floatFieldGrp('MaximumField', label='Max', parent=self.colLayout)
    cmds.floatFieldGrp('MinimumField', label='Min', parent=self.colLayout)
    cmds.button(label="Randomize Translate All Axis'", parent=self.colLayout, command=lambda x: self.randomizer())
    cmds.button(label="Randomize Translate X", parent=self.colLayout, command=lambda x: self.randomizerX())
    cmds.button(label="Randomize Translate Y", parent=self.colLayout, command=lambda x: self.randomizerY())
    cmds.button(label="Randomize Translate Z", parent=self.colLayout, command=lambda x: self.randomizerZ())
    # NOTE(review): randomizerRotate/resetRotate are referenced here but not
    # visible in this part of the class — confirm they exist.
    cmds.button(label="Randomize Rotation", parent=self.colLayout, command=lambda x: self.randomizerRotate())
    cmds.button(label="Reset Rotation", parent=self.colLayout, command=lambda x: self.resetRotate())
    # Integer fields bounding the random uniform scale.
    cmds.intFieldGrp('ScaleMaxNumber', label='Max Scale', parent=self.colLayout)
    cmds.intFieldGrp('ScaleMinNumber', label='Min Scale', parent=self.colLayout)
    cmds.button(label="Randomize Scale", parent=self.colLayout, command=lambda x: self.randomizerScale())
    cmds.button(label="Reset Scale", parent=self.colLayout, command=lambda x: self.resetScale())
    cmds.showWindow(self.MaxAndMinWin)
def delete(self):
    # Close any existing randomizer window before (re)building the UI.
    if (cmds.window('MaxAndMinWin', exists=True)): cmds.deleteUI('MaxAndMinWin')
def randomizer(self):
    """Set an independent random translate on each axis of every selected object."""
    # make the Selection an array
    self.sel = cmds.ls(sl=True)
    print self.sel
    # the fields from the window entered as these.
    self.MaxNumEnter = cmds.floatFieldGrp('MaximumField', q=True, v=True)[0]
    self.MinNumEnter = cmds.floatFieldGrp('MinimumField', q=True, v=True)[0]
    #self.numRandom = random.randrange(self.MinNumEnter, self.MaxNumEnter)
    #print self.numRandom
    # NOTE(review): random.randrange expects integer bounds but these are
    # float field values; this relies on Python 2 leniency — random.uniform
    # is likely what was intended. Confirm before changing behaviour.
    for Object in self.sel:
        self.numRandom = random.randrange(self.MinNumEnter, self.MaxNumEnter)
        cmds.setAttr (Object + '.translateX', self.numRandom)
        self.numRandom = random.randrange(self.MinNumEnter, self.MaxNumEnter)
        cmds.setAttr(Object + '.translateY', self.numRandom)
        self.numRandom = random.randrange(self.MinNumEnter, self.MaxNumEnter)
        cmds.setAttr(Object + '.translateZ', self.numRandom)
def randomizerX(self):
self.sel = cmds.ls(sl=True)
self.MaxNumEnter = cmds.floatFieldGrp('MaximumField', q=True, v=True)[0]
self.MinNumEnter = cmds.floatFieldGrp('MinimumField', q=True, v=True)[0]
for Object in self.sel:
self.numRandom = random.randrange(self.MinNumEnter, self.MaxNumEnter)
cmds.setAttr(Object + '.translateX', self.numRandom)
def randomizerY(self):
self.sel = cmds.ls(sl=True)
self.MaxNumEnter = cmds.floatFieldGrp('MaximumField', q=True, v=True)[0]
self.MinNumEnter = cmds.floatFieldGrp('MinimumField', q=True, v=True)[0]
for Object in self.sel:
self.numRandom = random.randrange(self.MinNumEnter, self.MaxNumEnter)
cmds.setAttr(Object + '.translateY', self.numRandom)
def randomizerZ(self):
self.sel = cmds.ls(sl=True)
self.MaxNumEnter = cmds.floatFieldGrp('MaximumField', q=True, v=True)[0]
self.MinNumEnter = cmds.floatFieldGrp('MinimumField', q=True, v=True)[0]
for Object in self.sel:
self.numRandom = random.randrange(self.MinNumEnter, self.MaxNumEnter)
cmds.setAttr(Object + '.translateZ', self.numRandom)
def randomizerScale(self):
self.sel = cmds.ls(sl=True)
self.MaxNumEnter = cmds.intFieldGrp('ScaleMaxNumber', q=True, v=True)[0]
self.MinNumEnter = cmds.intFieldGrp('ScaleMinNumber', q=True, v=True)[0]
for Object in self.sel:
self.numRandom = random.randrange(self.MinNumEnter, self.MaxNumEnter)
cmds.setAttr (Object + '.scaleX', self.numRandom)
cmds.setAttr (Object + '.scaleY', self.numRandom)
cmds.setAttr (Object + '.scaleZ', self.numRandom)
def resetScale(self):
self.sel = cmds.ls(sl=True)
for Object in self.sel:
self.numRandom = random.randrange(self.MinNumEnter, self.MaxNumEnter)
cmds.setAttr (Object + '.scaleX', 1)
cmds.setAttr (Object + '.scaleY', 1)
cmds.setAttr (Object + '.scaleZ', 1)
def randomizerRotate(self):
self.sel = cmds.ls(sl=True)
self.MaxNumEnter = 359
self.MinNumEnter = 0
for Object in self.sel:
self.numRandom = random.randrange(self.MinNumEnter, self.MaxNumEnter)
cmds.setAttr(Object + '.rotateX', self.numRandom)
self.numRandom = random.randrange(self.MinNumEnter, self.MaxNumEnter)
cmds.setAttr(Object + '.rotateY', self.numRandom)
self.numRandom = random.randrange(self.MinNumEnter, self.MaxNumEnter)
cmds.setAttr(Object + '.rotateZ', self.numRandom)
def resetRotate(self):
self.sel = cmds.ls(sl=True)
for Object in self.sel:
cmds.setAttr(Object + '.rotateX', 0)
cmds.setAttr(Object + '.rotateY', 0)
cmds.setAttr(Object + '.rotateZ', 0)
randoValuez() |
#!/usr/bin/python
#from pylab import plot,show,norm
#from pylab import plot,show,norm
#import numpy
import sys
from csv import reader, writer
from decimal import *
#from Carbon.Aliases import true
def load_csv(filename):
    """Read a CSV file and return its rows as a list of lists of strings.

    BUG FIX: the file handle is now closed deterministically via a context
    manager; the original left it open.
    """
    with open(filename, 'r') as fd:
        samples = [row for row in reader(fd)]
    return samples
# pos 0 has feature 1
# pos 1 has feature 2
# pos 2 has true label
def convert_to_float(samples, column):
    """In place: strip and cast the given column of every row to float."""
    for sample in samples:
        sample[column] = float(sample[column].strip())
def mean(rows):
    """Arithmetic mean of a non-empty sequence of numbers."""
    return sum(rows) / len(rows)
def funct(rows):
    """Sum of squared deviations of rows from their mean."""
    center = sum(rows) / len(rows)
    total = 0
    for value in rows:
        total += (value - center) ** 2
    return total
def stdev(rows):
    """Calculates the population standard deviation"""
    center = sum(rows) / len(rows)
    variance = sum((value - center) ** 2 for value in rows) / len(rows)
    return variance ** 0.5
def scale(samples):
    """Return samples as rows of [bias, z1, z2, label].

    NOTE(review): each row is standardized against the mean/stdev of its OWN
    two feature values rather than the feature column across all samples, and
    a row whose two features are equal yields stdev 0 and divides by zero —
    confirm this is the intended scaling.
    """
    trainset = []
    for row in samples:
        features = [row[0], row[1]]
        mu = mean(features)
        sigma = stdev(features)
        scaled_row = [
            1.0,                       # bias
            (row[0] - mu) / sigma,     # feature 1 scaled
            (row[1] - mu) / sigma,     # feature 2 scaled
            row[2],                    # label
        ]
        trainset.append(scaled_row)
    return trainset
def f(betas, features):
    """Receives the betas (betas) and one row of features, plus label (features).

    Returns the dot product of betas with the feature values, ignoring the
    trailing label element.
    """
    result = 0
    for idx, x in enumerate(features[:-1]):
        result += betas[idx] * x
    return result
def calculate_sigma(alpha, betas, features):
    """Receives the betas (betas) and one row of features, plus label (features).

    Returns sum((f(x) - label) * x_j) over the feature values of the row.
    `alpha` is unused here; it is kept for interface compatibility.
    """
    f_x = f(betas, features)
    tlabel = features[-1]
    summa = 0
    for x in features[:-1]:
        # BUG FIX: accumulate the per-feature terms; the original plain
        # assignment overwrote `summa` each iteration, keeping only the
        # contribution of the last feature.
        summa += (f_x - tlabel) * x
    return summa
def gradient_descent(alpha, betas, features):
    """Receives the learning rate (alpha), the betas (betas) and one row of
    features, plus label (features).

    Returns the gradient step: alpha * sigma / n, where n is the number of
    feature values in the row.
    """
    sigma = calculate_sigma(alpha, betas, features)
    n_features = len(features) - 1
    return sigma / n_features * alpha
def risk(betas, features):
    """Receives the betas (betas) and one row of features, plus label (features).

    Returns a scaled squared error for this row's prediction.
    """
    # Model prediction for this row.
    f_x = f(betas, features)
    # The label is stored as the last element of the row.
    tlabel = features[len(features)-1]
    summa = 0
    for x in features[:-1]:
        # NOTE(review): `partial` does not depend on `x`, and `summa` is
        # overwritten (not accumulated) each iteration, so this loop is
        # equivalent to a single assignment — `summa +=` was likely intended.
        # NOTE(review): Decimal(value, 4) passes 4 where a decimal.Context is
        # expected; it is NOT a precision argument — confirm intent.
        partial = Decimal((f_x - tlabel), 4)
        summa = partial**2 # minus label
    n = (len(features) - 1)
    result = float(summa * 1 / (2*n))
    return result
def main(script, *args):
    """Fit linear-regression betas by gradient descent for several learning
    rates and report the best observed risk.

    Usage: script <train.csv> <output.csv>. The output file is currently
    unused; the writing code is commented out at the bottom.
    """
    if len(sys.argv) != 3:
        print "Error in arguments!"
        sys.exit()
    trainset = load_csv(sys.argv[1])
    columns = len(trainset[0])
    # Every CSV column (features and label) arrives as a string; cast all.
    for i in range(columns):
        convert_to_float(trainset, i)
    scaled_trainset = scale(trainset) # bias, features plus label
    #learning_rates = [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1.0, 5.0, 10.0]
    learning_rates = [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1.0, 5.0, 10.0, 0.4]
    min_risk = Decimal(9999.0)
    for alpha in learning_rates:
        iterations = 0
        risk_betas = 0
        ant_risk = Decimal(9999.0)
        # One beta per element of a scaled row, minus the label.
        betas = [0 for _ in range(len(scaled_trainset[0])-1)]
        convergence = False
        while iterations < 100 and not convergence:
            iterations += 1
            # Converged when the risk did not change since the last sweep.
            if risk_betas == ant_risk:
                convergence = True
                print "convergence with alpha=", alpha, "iterations=", iterations - 1, "risk=", risk_betas
            else:
                ant_risk = risk_betas
                # NOTE(review): the same scalar gradient is subtracted from
                # all three betas; per-coefficient gradients may have been
                # intended — confirm against the derivation.
                for row in scaled_trainset:
                    risk_betas = risk(betas, row)
                    gradient = gradient_descent(alpha, betas, row)
                    betas[0] = betas[0] - gradient
                    betas[1] = betas[1] - gradient
                    betas[2] = betas[2] - gradient
        # Track the best (lowest) risk seen across learning rates.
        if risk_betas < min_risk:
            print "***alpha=", alpha, " risk=", risk_betas
            min_risk = risk_betas
    #fd = open(sys.argv[2],'w')
    #output = writer(fd)
    #perceptron.calculate(trainset, output)
    #fd.close()
    #plot_convergence(trainset, perceptron.w)
    #plot_trainset(trainset)
    #show()
if __name__ == '__main__':
    main(*sys.argv)
|
#!/usr/bin/env python
# coding=utf-8
def request(func):
    """Decorator: print a greeting before and a goodbye after calling *func*."""
    from functools import wraps

    # BUG FIX: without functools.wraps, stacking @request twice makes the
    # outer wrapper see func.__name__ == 'wrapper' instead of the real name.
    @wraps(func)
    def wrapper():
        print("hello, %s" % func.__name__)
        func()
        print("goodby, %s" % func.__name__)
    return wrapper
def hello():
    print("hi hulk")


# Apply the decorator twice by hand — equivalent to two stacked @request lines.
hello = request(request(hello))
hello()
from django.shortcuts import render
from django.template import loader
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, render
from django.http import HttpResponseRedirect
from django.urls import reverse
from .models import Place , Type
def index(request):
    """Landing page: every Place image plus the Type list for navigation."""
    context = {
        'image': Place.objects.all(),
        'the_type': Type.objects.all(),
    }
    return render(request, 'myplace/index.html', context)
def details(request, select_id):
    """Detail page for one Place: its image and three description blocks read
    from static text files.

    Raises Http404 (via get_object_or_404) when the Place does not exist.
    """
    picture = get_object_or_404(Place, pk=select_id)
    the_type = Type.objects.all()
    base = "myplace/static/myplace/" + str(picture.place_name)
    blocks = {}
    for i in (1, 2, 3):
        # BUG FIX: the original never closed the three file handles; a
        # context manager closes each deterministically.
        with open(base + "/p%d.txt" % i, "r") as fh:
            blocks['block%d' % i] = fh.read()
    context = {'picture': picture, 'the_type': the_type}
    context.update(blocks)
    return render(request, 'myplace/details.html', context)
def catalog(request):
    """Catalog page listing every Place, with the Type list for navigation."""
    context = {
        'image': Place.objects.all(),
        'the_type': Type.objects.all(),
    }
    return render(request, 'myplace/catalog.html', context)
def thefilter(request, typenaja):
    """Catalog page restricted to Places of the given type."""
    context = {
        'image': Place.objects.filter(place_type=typenaja),
        'the_type': Type.objects.all(),
    }
    return render(request, 'myplace/catalog.html', context)
def about(request):
    """Static about page; only needs the Type list for navigation."""
    return render(request, 'myplace/about.html',
                  {'the_type': Type.objects.all()})
|
import pygame as pg
from os import path
from settings import *
from sprites import *
from tilemap import *
from pytmx import TiledObjectGroup
from platform import system
from sys import exit
from pygame.locals import *
class Game:
    """Top-level pygame application: owns the window, assets, sprite groups
    and the main loop."""

    def __init__(self):
        pg.init()
        flags = FULLSCREEN | DOUBLEBUF
        self.screen = pg.display.set_mode((WIDTH, HEIGHT), flags)
        pg.display.set_caption(TITLE)
        self.clock = pg.time.Clock()
        self.font_name = pg.font.match_font(FONT_NAME)
        # BUG FIX: load_data() already joins this value onto the img folder
        # (and wait_for_key() resets it to a bare filename), so the previous
        # default of 'img/FirstMap.tmx' resolved to img/img/FirstMap.tmx.
        self.gamemap = 'FirstMap.tmx'
        self.attack = False
        pg.mouse.set_visible(False)

    def load_data(self):
        """Load the map, audio and every sprite-sheet frame used in-game."""
        game_folder = path.dirname(__file__)
        img_folder = path.join(game_folder, 'img')
        snd_folder = path.join(game_folder, 'snd')
        # Audio is skipped on macOS ('Darwin'); the sound attributes only
        # exist on other platforms, so all uses below must be guarded too.
        if system() != 'Darwin':
            pg.mixer.music.load(path.join(snd_folder, 'background.mp3'))
            pg.mixer.music.set_volume(0.15)
            pg.mixer.music.play(-1, 0)
            self.walk_sound = pg.mixer.Sound('snd/walk.mp3')
            self.victory_sound = pg.mixer.Sound('snd/victory.mp3')
            self.walk_sound.set_volume(0.03)
            self.victory_sound.set_volume(0.5)
        self.map = Map(path.join(img_folder, self.gamemap))
        self.playerspritesheet = SpriteSheet(path.join(img_folder, SPRITESHEETPLAYER))
        self.worldspritesheet = SpriteSheet(path.join(img_folder, SPRITESHEETWORLD))
        self.swordspritesheet = SpriteSheet(path.join(img_folder, 'sword.png'))
        self.player_img = self.playerspritesheet.get_image(*PLAYER_IMG_NORMAL).convert()
        self.woodensword = self.swordspritesheet.get_image(*WOODEN_SWORD).convert()
        self.metalsword = self.swordspritesheet.get_image(*METAL_SWORD).convert()
        self.epicsword = self.swordspritesheet.get_image(*EPIC_SWORD).convert()
        # Four-frame walk cycles, one list per facing direction.  The
        # individual frame attributes are kept because other modules may
        # reference them directly.
        self.walkdown1 = self.playerspritesheet.get_image(*WALKDOWN1).convert()
        self.walkdown2 = self.playerspritesheet.get_image(*WALKDOWN2).convert()
        self.walkdown3 = self.playerspritesheet.get_image(*WALKDOWN3).convert()
        self.walkdown4 = self.playerspritesheet.get_image(*WALKDOWN4).convert()
        self.walkdown = [self.walkdown1, self.walkdown2, self.walkdown3, self.walkdown4]
        self.walkright1 = self.playerspritesheet.get_image(*WALKRIGHT1).convert()
        self.walkright2 = self.playerspritesheet.get_image(*WALKRIGHT2).convert()
        self.walkright3 = self.playerspritesheet.get_image(*WALKRIGHT3).convert()
        self.walkright4 = self.playerspritesheet.get_image(*WALKRIGHT4).convert()
        self.walkright = [self.walkright1, self.walkright2, self.walkright3, self.walkright4]
        self.walkleft1 = self.playerspritesheet.get_image(*WALKLEFT1).convert()
        self.walkleft2 = self.playerspritesheet.get_image(*WALKLEFT2).convert()
        self.walkleft3 = self.playerspritesheet.get_image(*WALKLEFT3).convert()
        self.walkleft4 = self.playerspritesheet.get_image(*WALKLEFT4).convert()
        self.walkleft = [self.walkleft1, self.walkleft2, self.walkleft3, self.walkleft4]
        self.walkup1 = self.playerspritesheet.get_image(*WALKUP1).convert()
        self.walkup2 = self.playerspritesheet.get_image(*WALKUP2).convert()
        self.walkup3 = self.playerspritesheet.get_image(*WALKUP3).convert()
        self.walkup4 = self.playerspritesheet.get_image(*WALKUP4).convert()
        self.walkup = [self.walkup1, self.walkup2, self.walkup3, self.walkup4]
        self.attackdown4 = self.playerspritesheet.get_image(*ATTACKDOWN4).convert()
        self.attackleft2 = self.playerspritesheet.get_image(*ATTACKLEFT2).convert()
        self.attackright2 = self.playerspritesheet.get_image(*ATTACKRIGHT2).convert()

    def new(self):
        """Start a fresh session: rebuild sprite groups from the tile map."""
        self.load_data()
        self.sword = self.woodensword
        self.cursor = pg.transform.rotate(self.sword, 135)
        self.all_sprites = pg.sprite.Group()
        self.walls = pg.sprite.Group()
        self.ground = pg.sprite.Group()
        self.swords = pg.sprite.Group()
        layer_index = 0
        for layer in self.map.txmdata.visible_layers:
            if isinstance(layer, TiledTileLayer):
                if layer.name == "PlayerLayer":
                    self.player = Player(self, 4, 4)
                # Instantiate a MapTile for every non-empty cell of the layer.
                for i in range(self.map.txmdata.height):
                    for b in range(self.map.txmdata.width):
                        if self.map.txmdata.get_tile_image(b, i, layer_index):
                            MapTile(self, b, i, self.map.txmdata.get_tile_image(b, i, layer_index))
                layer_index += 1
            if isinstance(layer, TiledObjectGroup):
                for obj in layer:
                    Obstacle(self, obj.x, obj.y, obj.width, obj.height)
        self.camera = Camera(self.map.width, self.map.height)

    def run(self):
        """Main loop: tick the clock, process events, update and redraw."""
        self.playing = True
        while self.playing:
            self.dt = self.clock.tick(FPS) / 1000
            self.events()
            self.update()
            self.draw()

    def quit(self):
        """Shut pygame down and exit the process."""
        pg.quit()
        exit()

    def update(self):
        self.player.update()
        self.camera.update(self.player)
        print(self.clock.get_fps())  # debug FPS readout

    def draw(self):
        """Blit all sprites through the camera, then the cursor on top."""
        for sprite in self.all_sprites:
            self.screen.blit(sprite.image, self.camera.apply(sprite))
        self.screen.blit(self.cursor, pg.mouse.get_pos())
        pg.display.update()

    def events(self):
        """Handle quit, sword attacks (space) and sword swaps (4/5/6)."""
        for event in pg.event.get():
            if event.type == pg.QUIT:
                self.quit()
            if event.type == pg.KEYDOWN:
                if event.key == pg.K_SPACE and not self.attack:
                    self.attack = True
                    # Spawn the sword sprite offset for the facing direction.
                    if self.player.direction == 'down':
                        self.playersword = Sword(self, self.player.rect.centerx+2, self.player.rect.bottom-11, self.player, -90)
                        self.player_img = self.attackdown4
                    if self.player.direction == 'left':
                        self.playersword = Sword(self, self.player.rect.x-27, self.player.rect.y+20, self.player, -180)
                        self.player_img = self.attackleft2
                    if self.player.direction == 'up':
                        self.playersword = Sword(self, self.player.rect.centerx-9, self.player.rect.top-26, self.player, -270)
                        self.player_img = self.walkup2
                    if self.player.direction == 'right':
                        self.playersword = Sword(self, self.player.rect.right-6, self.player.rect.centery-2, self.player, -0)
                        self.player_img = self.attackright2
                    # Cancel the attack immediately if the swing hits a wall.
                    if pg.sprite.spritecollide(self.playersword, self.walls, False):
                        self.playersword.kill()
                        self.attack = False
                if event.key == pg.K_5:
                    self.sword = self.metalsword
                if event.key == pg.K_4:
                    self.sword = self.woodensword
                if event.key == pg.K_6:
                    self.sword = self.epicsword
            if event.type == pg.KEYUP and self.attack:
                if event.key == pg.K_SPACE:
                    self.playersword.kill()
                    self.attack = False

    def draw_text(self, text, size, color, x, y):
        """Render *text* centered horizontally at (x, y)."""
        font = pg.font.Font(self.font_name, size)
        text_surface = font.render(text, True, color)
        text_rect = text_surface.get_rect()
        text_rect.midtop = (int(x), int(y))
        self.screen.blit(text_surface, text_rect)

    def show_start_screen(self):
        """Title screen shown once at startup."""
        self.screen.fill(GREEN)
        self.draw_text("Trial of the Sword", 30, WHITE, WIDTH/2, HEIGHT/3)
        self.draw_text("Search the maze for the sword in the stone!", 20, WHITE, WIDTH/2, HEIGHT/2)
        self.draw_text("Press 1 for easy, 2 for moderate, 3 for hard.", 20, WHITE, WIDTH/2, HEIGHT* 3/4)
        pg.display.flip()
        self.wait_for_key()

    def wait_for_key(self):
        """Block until a difficulty key is pressed (or the window closes).

        BUG FIX: `waiting` was initialized to False, so the loop body never
        ran and the key selection below was dead code; it must start True.
        """
        self.gamemap = 'FirstMap.tmx'
        waiting = True
        while waiting:
            keys = pg.key.get_pressed()
            self.clock.tick(FPS)
            for event in pg.event.get():
                if event.type == pg.QUIT:
                    waiting = False
                    self.playing = False
            if keys[pg.K_1]:
                # Key 1 keeps the default (easy) map.
                waiting = False
            if keys[pg.K_2]:
                # TODO(review): other maps use the .tmx extension — confirm
                # these filenames exist as .txt.
                self.gamemap = 'ModerateMap.txt'
                waiting = False
            if keys[pg.K_3]:
                self.gamemap = 'HardMap.txt'
                waiting = False

    def show_go_screen(self):
        """Victory screen shown after the sword is found."""
        if system() != 'Darwin':
            pg.mixer.music.fadeout(2000)
            pg.mixer.music.stop()
            self.walk_sound.stop()
        self.screen.fill(ORANGE)
        self.draw_text("You found the Sword!!", 36, WHITE, WIDTH/2, HEIGHT/3)
        self.draw_text("Play again!", 20, WHITE, WIDTH/2, HEIGHT/2)
        self.draw_text("Press 1 for moderate, 2 for hard, 3 for inasane.", 20, WHITE, WIDTH/2, HEIGHT* 3/4)
        pg.display.flip()
        # BUG FIX: the sounds only exist off-Darwin (see load_data), so the
        # victory jingle must be guarded the same way or it raises
        # AttributeError on macOS.
        if system() != 'Darwin':
            self.victory_sound.play()
        self.wait_for_key()
# Entry point: show the title screen once, then loop forever creating and
# running fresh sessions (run() returns when the current session ends).
g = Game()
g.show_start_screen()
while True:
    g.new()
    g.run()
|
from gevent import monkey; monkey.patch_all()
from gevent.pywsgi import WSGIServer
import web
web.config.debug = False
import gevent
import yaml
import sys
from ezbake.configuration.EzConfiguration import EzConfiguration
from ezbake.configuration.helpers import ZookeeperConfiguration, SystemConfiguration
from ezbake.configuration.loaders.PropertiesConfigurationLoader import PropertiesConfigurationLoader
from ezbake.configuration.loaders.DirectoryConfigurationLoader import DirectoryConfigurationLoader
from ezbake.configuration.security.CryptoImplementations import SharedSecretTextCryptoImplementation
from ezbake.discovery import ServiceDiscoveryClient
from ezbake.base.thriftapi import EzBakeBaseService
from ezbake.reverseproxy.thriftapi import EzReverseProxy
from ezbake.reverseproxy.thriftapi.ttypes import *
from ezbake.reverseproxy.thriftapi.constants import SERVICE_NAME as EzFrontendServiceName
from ezbake.frontend.thriftapi import EzFrontendService
from ezbake.frontend.thriftapi.ttypes import ServerCertInfo, EzFrontendCertException
from ezbake.thrift.transport.EzSSLSocket import TSSLSocket
from thrift import Thrift
from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol
import urllib
from netifaces import interfaces, ifaddresses, AF_INET
import os
import re
import signal
import socket
from random import choice, shuffle
import logging
import time
from socketio import socketio_manage
from socketio.server import SocketIOServer
from socketio.namespace import BaseNamespace
from socketio.mixins import BroadcastMixin
import json
from collections import (defaultdict)
from base64 import urlsafe_b64encode, urlsafe_b64decode
# Module-wide logger; a stream handler with a timestamped format is attached
# below so INFO output reaches the console.
logger = logging.getLogger('efe-ui_control')
logger.setLevel(logging.INFO)
# Folder holding the web.py templates used by render() calls in the pages.
TEMPLATES_DIR = './html'
# Populated at startup: zookeeper configuration and process-wide state.
zkConfig = None
gState = None
# CN pattern expected in the EFE's SSL certificate (thrift verify_pattern).
tt_verify = r"_Ez_EFE"
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
sh = logging.StreamHandler()
sh.setLevel(logging.INFO)
sh.setFormatter(formatter)
logger.addHandler(sh)
# Small helpers over registrations, timestamps and HTML snippets (kept as
# lambdas to match the original style).
current_milli_time = lambda: int(round(time.time() * 1000))
get_registration_server = lambda reg: reg.UserFacingUrlPrefix.split('/')[0].strip()
# NOTE(review): this derives a "location" from UpstreamHostAndPort, not from
# the user-facing prefix as the name might suggest — confirm intent.
get_registration_location = lambda reg: '/' + reg.UpstreamHostAndPort.partition('/')[2].strip()
get_registration_upstream = lambda reg: tuple(reg.UpstreamHostAndPort.split(':'))
get_registration_upstream_path = lambda reg: reg.UpstreamPath.strip()
is_path_registered = lambda reglist, path: len([ reg.UserFacingUrlPrefix for reg in reglist if re.search(path, reg.UserFacingUrlPrefix)])
space = lambda : ' '
ellipsis = lambda: '<div class="login-icon icon-rotate"><span aria-hidden="true" class="ezbicon-ellipsis"></span></div>'
ellipsis_link = lambda div: """<a href="javascript:ToggleContent('{div}')">{ellipsis}</a>""".format(div=div, ellipsis=ellipsis())
def buildUrlPrefix(config, base):
    """Compose the user-facing URL prefix for a registration config.

    A non-empty 'FullUserFacingUrl' wins (a trailing '/' is appended when the
    value contains no '/' at all); otherwise the result is
    '<UserFacingUrlPrefix>.<base>/<UserFacingUrlSuffix>' with the leading
    '<prefix>.' part omitted when the prefix is empty.
    """
    full_url = config['FullUserFacingUrl']
    if full_url:
        return full_url if '/' in full_url else full_url + '/'
    prefix = config['UserFacingUrlPrefix']
    lead = prefix + '.' if prefix else ''
    return lead + base + '/' + config['UserFacingUrlSuffix']
class GlobalState(object):
    """Process-wide state: SSL material paths, server registries and URLs.

    Reads 'ezconfiguration.yaml' at construction time to locate the SSL
    directory and the configuration override directory; raises RuntimeError
    when that file cannot be loaded.
    """

    def __init__(self):
        self.url = ''
        self.runningServerConfigurations = {}
        self.mainserver = None
        self.internalhostname = None
        self.socket_resource_path = []
        self.serverConfigurations = {}
        self.testServerConfigurations = {}
        try:
            with open('ezconfiguration.yaml') as ssl_config_file:
                ssl_config = yaml.safe_load(ssl_config_file)
                ssl_dir = ssl_config['ssldir']
                self.configOverrideDir = ssl_config['override_props_dir']
                self.keyfile = os.path.join(ssl_dir, 'application.priv')
                self.certfile = os.path.join(ssl_dir, 'application.crt')
                self.ca_certs = os.path.join(ssl_dir, 'ezbakeca.crt')
                self.ezbakesecurityservice_pub = os.path.join(ssl_dir, 'ezbakesecurityservice.pub')
        except Exception:
            logger.exception("Exception in loading ezconfiguration.yaml")
            raise RuntimeError("Unable to load ezconfiguration.yaml file")
class WSGILog(object):
    """File-like adapter letting a WSGI server write its access log through
    the module logger."""

    def __init__(self):
        self.log = logger

    def write(self, string):
        """Strip the trailing newline and forward the line at INFO level."""
        self.log.info(string.rstrip("\n"))
def getFirstExposedInterface():
    """Return the first non-loopback IPv4 address of this host, else None."""
    for iface in interfaces():
        entries = ifaddresses(iface).setdefault(AF_INET, [{'addr': 'No IP addr'}])
        for entry in entries:
            candidate = entry['addr']
            if not candidate.startswith('127'):
                return candidate
def getEfeServers():
    """Discover all registered EFE thrift endpoints as (host, port) tuples."""
    discovery = ServiceDiscoveryClient(zkConfig.getZookeeperConnectionString())
    endpoints = discovery.get_endpoints('EzBakeFrontend', EzFrontendServiceName)
    return [tuple(endpoint.split(':', 1)) for endpoint in endpoints]
def getEfeClient():
    """Open an SSL thrift client to any reachable EFE, trying servers in a
    random order.

    Raises RuntimeError when every discovered server fails.
    """
    servers = getEfeServers()
    shuffle(servers)
    # Try each discovered server until one accepts the connection.
    for host, port in servers:
        try:
            # Local name 'sock' avoids shadowing the imported socket module.
            sock = TSSLSocket(host=host, port=int(port), ca_certs=gState.ca_certs,
                              cert=gState.certfile, key=gState.keyfile,
                              verify_pattern=tt_verify)
            client = EzFrontendService.Client(TBinaryProtocol.TBinaryProtocol(TTransport.TBufferedTransport(sock)))
            client._iprot.trans.open()
            logger.info('Connected to Efe: %s:%d' % (host, int(port)))
            return client
        except Exception as ex:
            logger.warn('Error in connecting to %s:%d - %s' % (host, int(port), str(ex)))
    raise RuntimeError('Could not establish a thrift connection to Efe client. Exhausted all available servers %s' % str(servers))
def returnEfeClient(client):
    """Close and discard a client previously obtained from getEfeClient()."""
    if not isinstance(client, EzBakeBaseService.Client):
        return
    try:
        client._iprot.trans.close()
    except Exception as ex:
        logger.warn('Exception in closing returned Efe client: %s', str(ex))
    del client
def htmlprint(dictObj, indent=0):
    """Render a (possibly nested) dict as nested HTML <ul> lists.

    `indent` is currently unused; it is kept for interface compatibility.
    """
    p = []
    p.append('<ul>\n')
    for k, v in dictObj.iteritems():
        if isinstance(v, dict):
            p.append('<li>' + str(k) + ':')
            # BUG FIX: the original called the undefined name `printitems`
            # (NameError at runtime); recursing into htmlprint itself renders
            # nested dicts as intended.
            p.append(htmlprint(v, indent + 1))
            p.append('</li>')
        else:
            p.append('<li>' + str(k) + ': ' + str(v) + '</li>')
    p.append('</ul>\n')
    return '\n'.join(p)
def startServer(configurationNumber):
    """Start (and block inside) a test upstream server for the given
    configuration number, unless one is already running.

    The server is registered in gState.runningServerConfigurations for the
    duration of serve_forever() and removed once it stops.
    """
    if configurationNumber not in gState.runningServerConfigurations:
        current = gState.testServerConfigurations[configurationNumber]
        resource=[]
        # WebSocket configurations get extra socket.io routes and record
        # their upstream path so the socket.io resource can be matched later.
        if current.get('isWebSocket'):
            urls = ('/'+current['UpstreamPath']+'/','hello',
                    '/'+current['UpstreamPath']+'/upload','upload',
                    '/'+current['UpstreamPath']+'/wstest','loadWSClientPage',
                    '/'+current['UpstreamPath']+'/socket.io/(.*)','webSocket',
                    '/'+current['UpstreamPath']+'/socket.io.js','sendjs'
                    )
            resource.append(current['UpstreamPath']+'/')
            gState.socket_resource_path.append(current['UpstreamPath'])
        else:
            urls = ('/'+current['UpstreamPath']+'/','hello',
                    '/'+current['UpstreamPath']+'/upload','upload',)
            resource.append('socket.io')
        socket_io_resource = ''.join(resource)
        app = web.application(urls, globals()).wsgifunc()
        wsgifunc = app
        # Demand a client certificate only when the configuration asks for
        # upstream connection validation.
        if current.get('validateUpstreamConnection'):
            cert_regs = gevent.ssl.CERT_REQUIRED
        else:
            cert_regs = gevent.ssl.CERT_OPTIONAL
        logger.info('Starting server with configuration {config_number} for AppName "{app_name}" with full SSL validation'.format(config_number=str(configurationNumber), app_name= current.get('AppName')))
        runningserver = SocketIOServer((gState.internalhostname, current['UpstreamPort']), wsgifunc,
                                       keyfile=gState.keyfile, certfile=gState.certfile, ca_certs=gState.ca_certs, cert_reqs=cert_regs,
                                       log=WSGILog(), resource=socket_io_resource, policy_server=False)
        gState.runningServerConfigurations[configurationNumber] = runningserver
        # serve_forever() blocks until stopServer() stops this instance.
        runningserver.serve_forever()
        gState.runningServerConfigurations.pop(configurationNumber, None)
        # NOTE(review): this logs only after the server has stopped, despite
        # saying "starting" — confirm placement/intent.
        logger.info('starting test server with configuration{0}'.format(str(configurationNumber)))
def stopServer(configurationNumber):
    """Stop a running test server and release its websocket resource path."""
    if configurationNumber not in gState.runningServerConfigurations:
        return
    config = gState.testServerConfigurations[configurationNumber]
    if gState.socket_resource_path and config['UpstreamPath'] in gState.socket_resource_path:
        gState.socket_resource_path.remove(config['UpstreamPath'])
    gState.runningServerConfigurations[configurationNumber].stop()
def buildRegistration(configuration):
    """Translate a configuration dict into an UpstreamServerRegistration.

    Falls back to this host's internal hostname when 'UpstreamHost' is absent.
    """
    host = configuration.get('UpstreamHost', gState.internalhostname)
    uhp = host + ':' + str(configuration['UpstreamPort'])
    return UpstreamServerRegistration(
        UserFacingUrlPrefix=buildUrlPrefix(configuration, gState.url),
        AppName=configuration.get('AppName'),
        UpstreamHostAndPort=uhp,
        UpstreamPath=configuration.get('UpstreamPath') + '/',
        timeout=configuration.get('timeout', 10),
        timeoutTries=configuration.get('timeoutTries', 3),
        uploadFileSize=configuration.get('uploadFileSize', 2),
        sticky=configuration.get('sticky', False),
        disableChunkedTransferEncoding=configuration.get('disableChunkedTransferEncoding', False),
        validateUpstreamConnection=configuration.get('validateUpstreamConnection', False),
        contentServiceType=configuration.get('ContentService', ContentServiceType.DYNAMIC_ONLY))
def registerServer(configuration, host, port):
    """Register the upstream described by *configuration* with the EFE at
    host:port; failures are logged, not raised."""
    try:
        sock = TSSLSocket(host=host, port=port, ca_certs=gState.ca_certs,
                          cert=gState.certfile, key=gState.keyfile,
                          verify_pattern=tt_verify)
        transport = TTransport.TBufferedTransport(sock)
        client = EzReverseProxy.Client(TBinaryProtocol.TBinaryProtocol(transport))
        transport.open()
        client.addUpstreamServerRegistration(buildRegistration(configuration))
        transport.close()
    except Exception as e:
        logger.exception('Exception in registering server: {0}'.format(str(e)))
def deregisterServer(deregistration, host, port):
    """Remove an existing registration from the EFE at host:port; failures
    are logged, not raised."""
    try:
        sock = TSSLSocket(host=host, port=port, ca_certs=gState.ca_certs,
                          cert=gState.certfile, key=gState.keyfile,
                          verify_pattern=tt_verify)
        transport = TTransport.TBufferedTransport(sock)
        client = EzReverseProxy.Client(TBinaryProtocol.TBinaryProtocol(transport))
        transport.open()
        client.removeUpstreamServerRegistration(deregistration)
        transport.close()
    except Exception as e:
        logger.exception('Exception in deregistering server: {0}'.format(str(e)))
def registerSelf():
    """Register this control UI's own endpoint with the EFE front end."""
    logger.info("registering with Efe")
    client = getEfeClient()
    registration = UpstreamServerRegistration(
        UserFacingUrlPrefix=gState.url + '/ezfrontend/',
        AppName='ezfrontend',
        UpstreamHostAndPort=gState.internalhostname + ':' + str(gState.port),
        UpstreamPath="ezfrontend/",
        timeout=10,
        timeoutTries=3,
        uploadFileSize=256,
        sticky=True,
        authOperations=set([AuthorizationOperation.USER_INFO, AuthorizationOperation.USER_JSON]),
        validateUpstreamConnection=False)
    client.addUpstreamServerRegistration(registration)
    returnEfeClient(client)
    logger.info("registered with Efe")
def deregisterSelf():
    """Remove this control UI's own endpoint registration from the EFE."""
    logger.info("deregistering with Efe")
    client = getEfeClient()
    registration = UpstreamServerRegistration(
        UserFacingUrlPrefix=gState.url + '/ezfrontend/',
        AppName='ezfrontend',
        UpstreamHostAndPort=gState.internalhostname + ':' + str(gState.port),
        UpstreamPath="ezfrontend/",
        timeout=10,
        timeoutTries=3,
        uploadFileSize=256,
        sticky=True,
        authOperations=set([AuthorizationOperation.USER_INFO, AuthorizationOperation.USER_JSON]),
        validateUpstreamConnection=False)
    client.removeUpstreamServerRegistration(registration)
    returnEfeClient(client)
    logger.info("deregistered with Efe")
def deregisterServerFromConfig(config, host, port):
    """Build a registration from *config* and remove it from the EFE."""
    deregisterServer(buildRegistration(config), host, port)
def removeReverseProxiedPath(path, host, port):
    """Ask the EFE at host:port to drop every registration for *path*."""
    sock = TSSLSocket(host=host, port=port, ca_certs=gState.ca_certs,
                      cert=gState.certfile, key=gState.keyfile,
                      verify_pattern=tt_verify)
    transport = TTransport.TBufferedTransport(sock)
    client = EzReverseProxy.Client(TBinaryProtocol.TBinaryProtocol(transport))
    transport.open()
    client.removeReverseProxiedPath(path)
    transport.close()
def removeReverseProxiedPathFromConfig(config, host, port):
    """Resolve the user-facing path for *config*, then drop it from the EFE."""
    removeReverseProxiedPath(buildUrlPrefix(config, gState.url), host, port)
def isUpstreamServerRegistered(config, host, port):
    """True when the registration built from *config* is known to the EFE."""
    sock = TSSLSocket(host=host, port=int(port), ca_certs=gState.ca_certs,
                      cert=gState.certfile, key=gState.keyfile,
                      verify_pattern=tt_verify)
    transport = TTransport.TBufferedTransport(sock)
    client = EzReverseProxy.Client(TBinaryProtocol.TBinaryProtocol(transport))
    transport.open()
    registered = client.isUpstreamServerRegistered(buildRegistration(config))
    transport.close()
    return registered
def isReverseProxiedPathRegistered(config, host, port):
    """True when the user-facing path derived from *config* is registered."""
    sock = TSSLSocket(host=host, port=int(port), ca_certs=gState.ca_certs,
                      cert=gState.certfile, key=gState.keyfile,
                      verify_pattern=tt_verify)
    transport = TTransport.TBufferedTransport(sock)
    client = EzReverseProxy.Client(TBinaryProtocol.TBinaryProtocol(transport))
    transport.open()
    registered = client.isReverseProxiedPathRegistered(buildUrlPrefix(config, gState.url))
    transport.close()
    return registered
def isEfeHealthy(host, port):
    """Ping the EFE at host:port over SSL; returns False when unreachable.

    BUG FIX: the transport is now closed in a finally block; the original
    leaked the connection on every call (success or failure).
    """
    transport = None
    try:
        sock = TSSLSocket(host=host, port=int(port), ca_certs=gState.ca_certs,
                          cert=gState.certfile, key=gState.keyfile,
                          verify_pattern=tt_verify)
        transport = TTransport.TBufferedTransport(sock)
        client = EzReverseProxy.Client(TBinaryProtocol.TBinaryProtocol(transport))
        transport.open()
        return client.ping()
    except TTransport.TTransportException:
        return False
    finally:
        if transport is not None and transport.isOpen():
            transport.close()
def pingNoSSL(host, port):
    """Sanity check: a plaintext ping against the SSL-only EFE should fail."""
    sock = TSocket.TSocket(host, port)
    transport = TTransport.TBufferedTransport(sock)
    client = EzReverseProxy.Client(TBinaryProtocol.TBinaryProtocol(transport))
    transport.open()
    try:
        client.ping()
    # BUG FIX: the bare `except:` also swallowed SystemExit/KeyboardInterrupt;
    # catching Exception keeps the intended "expected failure" behavior.
    except Exception:
        print("ping w/out ssl failed as expected")
    finally:
        # BUG FIX: the original never closed the transport.
        transport.close()
def getAllUpstreamServerRegistrations(host, port):
    """Fetch every upstream registration known to the EFE at host:port."""
    sock = TSSLSocket(host=host, port=int(port), ca_certs=gState.ca_certs,
                      cert=gState.certfile, key=gState.keyfile,
                      verify_pattern=tt_verify)
    transport = TTransport.TBufferedTransport(sock)
    client = EzReverseProxy.Client(TBinaryProtocol.TBinaryProtocol(transport))
    transport.open()
    registrations = client.getAllUpstreamServerRegistrations()
    transport.close()
    return registrations
def removeCertsForHosts(hosts):
    """Delete the stored server cert/key for every host in *hosts*.

    Best-effort: per-host failures are logged and the loop continues.
    """
    if not hosts:
        return
    feClient = getEfeClient()
    for host in hosts:
        if not feClient.isServerCertPresent(host):
            continue
        try:
            feClient.removeServerCerts(host)
            logger.info("Deleted Cert and Key hostname=%s", host)
        except EzFrontendCertException:
            logger.exception("Exception in deleting server cert for host %s", host)
    returnEfeClient(feClient)
class BasePage(object):
"""
Base class for all pages
"""
def __init__(self):
self.title = "Frontend"
self.cssfiles = []
self.userDN = '--'
self.userCN = 'Unknown'
self.userJson = ''
self.content = []
def render(self):
return web.template.render(TEMPLATES_DIR).base_page(''.join(self.content),
self.title,
self.cssfiles,
self.userCN)
def getUserInfoHeader(self):
user_info = web.ctx.env.get('HTTP_EZB_VERIFIED_USER_INFO')
user_info_signature = web.ctx.env.get('HTTP_EZB_VERIFIED_SIGNATURE')
if not user_info or not user_info_signature:
logger.error("unauthorized access attempt. EZB User Info headers not sent")
raise ValueError('EZB User Info headers not sent')
if not verify_sign(gState.ezbakesecurityservice_pub, user_info_signature, user_info):
logger.error("unauthorized access (EZB User Info signature mismatch)")
raise ValueError('EZB User Info signature mismatch')
return user_info
def getUserJsonHeader(self):
user_json = web.ctx.env.get('HTTP_EZB_USER_INFO_JSON')
user_json_signature = web.ctx.env.get('HTTP_EZB_USER_INFO_JSON_SIGNATURE')
if not user_json or not user_json_signature:
logger.error("unauthorized access attempt. EZB User Json headers not sent")
raise ValueError('EZB User Json headers not sent')
if not verify_sign(gState.ezbakesecurityservice_pub, user_json_signature, user_json):
logger.error("unauthorized access (EZB User Json signature mismatch)")
raise ValueError('EZB User Json signature mismatch')
return user_json
def validateUser(self, authorize=True):
try:
proxyToken = json.loads(self.getUserInfoHeader())
tokenExpiration = int(proxyToken.get('notAfter'))
if current_milli_time() > tokenExpiration:
raise ValueError('ezb proxy token token has expired: %i' % tokenExpiration)
self.userDN = proxyToken.get('x509').get('subject')
self.userCN = getCn(self.userDN)
if authorize:
self.userJson = self.getUserJsonHeader()
if not (isUserAuthroized(self.userCN, self.userJson) or validateCn(self.userCN)):
raise ValueError('attempt by user with CN: %s\nUserJson: %s', self.userCN, self.userJson)
except AttributeError:
raise ValueError('unable to parse proxy token json string')
except ValueError as ex:
logger.error('Unauthorized access - %s', str(ex))
raise web.unauthorized()
def GET(self, _=None):
        """Handle GET: log the request URL, then require a validated EZB user."""
        logger.debug('GET request: %s%s', web.ctx.home, web.ctx.query)
        self.validateUser()
def POST(self, _=None):
        """Handle POST: log the request URL, then require a validated EZB user."""
        logger.debug('POST request: %s%s', web.ctx.home, web.ctx.query)
        self.validateUser()
class ActionPage(BasePage):
    """Base for action pages: validates the user and prepends shared chrome
    (hidden user JSON, a link back to the control page, a divider)."""
    def __init__(self):
        super(ActionPage, self).__init__()
    def GET(self, name=None):
        # BUG FIX: was super(...).POST(name), which made every GET be
        # logged as a "POST request". Validation behavior is unchanged.
        super(ActionPage, self).GET(name)
        self.content.append('<div id="UserJson" style="display:none;">' + self.userJson + '</div>')
        self.content.append('<a href="/ezfrontend/control/"><div class="btn blue-btn">main page</div></a>')
        self.content.append('<div class="divider"></div>')
    def POST(self, name=None):
        super(ActionPage, self).POST(name)
def WANotFound():
    """
    Not found handler: authenticate the caller (no authorization check)
    and render the not-found template with their CN.
    """
    page = BasePage()
    page.validateUser(authorize=False)
    renderer = web.template.render(TEMPLATES_DIR)
    return web.notfound(renderer.notfound(page.userCN))
class efestate(ActionPage):
    """Per-EFE state page: lists every upstream registration held by one EFE
    ('host:port') and offers deregistration actions."""
    def __init__(self):
        super(efestate, self).__init__()
    def GET(self, name=None):
        """Render all upstream registrations on EFE *name* ('host:port')."""
        super(efestate, self).GET(name)
        logger.info('%s accessed by {%s}', self.__class__.__name__, self.userDN)
        self.title = self.title + ' - {0} State'.format(name)
        host, port = name.split(':', 1)
        if (host, port) not in getEfeServers():
            self.content.append('<div class="message error-message">EFE %s:%s not found</div>' % (host, port))
        else:
            registrations = []
            try:
                registrations = getAllUpstreamServerRegistrations(host, int(port))
            except RuntimeError as ex:
                logger.warn('Unable to get all upstream server registrations from %s:%d - %s', host, int(port), str(ex))
                self.content.append('<div class="message error-message">Unable to get all upstream server registrations from %s:%d</div>' % (host, int(port)))
                return self.render()
            for registration in registrations:
                url = registration.UserFacingUrlPrefix
                # BUG FIX (markup): the URL anchor was missing its closing
                # </a>, and both action links contained a garbled, doubly-
                # opened <a href="javascript:<a href=...> tag.
                registration_var = ['<div class="card shadow1">',
                                    '<ul>',
                                    '   <li>URL: <a href="https://{url}">https://{url}</a></li>'.format(url=url),
                                    '   <li>UpstreamHostAndPort: {ushp}</li>'.format(ushp=registration.UpstreamHostAndPort),
                                    '   <li>AppName: {appname}</li>'.format(appname=registration.AppName),
                                    '   <li>timeout: {timeout}</li>'.format(timeout=registration.timeout),
                                    '   <li>timeoutTries: {tries}</li>'.format(tries=registration.timeoutTries),
                                    '   <li>uploadFileSize: {filesize}</li>'.format(filesize=registration.uploadFileSize),
                                    '   <li>sticky: {sticky}</li>'.format(sticky=str(registration.sticky)),
                                    '   <li>disableChunkedTransferEncoding: {chunked}</li>'.format(chunked=str(registration.disableChunkedTransferEncoding)),
                                    '   <li>UpstreamPath: {usp}</li>'.format(usp=registration.UpstreamPath),
                                    '</ul>',
                                    '<form action="/ezfrontend/efestate/" method="post">',
                                    '  <a href="javascript:;" onclick="parentNode.submit();">',
                                    '  <div class="btn red-btn">Deregister Instance</div> </a>',
                                    '  <input type="hidden" name="deregister"',
                                    '   value="{host}|{port}|{url}|{appname}|{ushp}|{uspath}"/></form>'.format(host=host, port=port, url=url, appname=registration.AppName, ushp=registration.UpstreamHostAndPort, uspath=registration.UpstreamPath),
                                    '<form action="/ezfrontend/efestate/" method="post">',
                                    '  <a href="javascript:;" onclick="parentNode.submit();">',
                                    '  <div class="btn red-btn">Remove Path / All Instances</div> </a>',
                                    '  <input type="hidden" name="removeReverseProxiedPath"',
                                    '   value="{host}|{port}|{url}"/></form>'.format(host=host, port=port, url=url),
                                    '</div>']
                self.content.extend(registration_var)
        return self.render()
    def POST(self, name=None):
        """Handle deregister / removeReverseProxiedPath actions, then redirect
        back to the referring page."""
        # BUG FIX: was super(...).GET(name), which logged the POST as a GET
        # and appended page content that the redirect below discards anyway.
        super(efestate, self).POST(name)
        logger.info('%s accessed by {%s}', self.__class__.__name__, self.userDN)
        referer = web.ctx.env.get('HTTP_REFERER')
        data = urllib.unquote(web.data()).decode('utf8')
        action, tmp = data.split('=', 1)
        if '|' not in tmp:
            logger.error("ERROR - bad post data from user {%s} to page efestate. Data logged above", self.userDN)
            raise web.BadRequest()
        if action == 'deregister':
            logger.info("{%s} Deregistering %s", self.userDN, data)
            host, port, ufup, an, uhp, up = tmp.split('|', 5)
            registration = UpstreamServerRegistration(UserFacingUrlPrefix=ufup, AppName=an, UpstreamHostAndPort=uhp, UpstreamPath=up, timeout=10, timeoutTries=3, uploadFileSize=256, sticky=True)
            deregisterServer(registration, host, int(port))
        elif action == 'removeReverseProxiedPath':
            logger.info("{%s} Deregistering reverse proxied path %s", self.userDN, data)
            host, port, path = tmp.split('|', 2)
            removeReverseProxiedPath(path, host, int(port))
        else:
            logger.info('{%s} - bad post. Reason: unsupported action (%s)', self.userDN, str(action))
            raise web.BadRequest()
        raise web.seeother(referer)
def isEfeServersSynchronized(serverlist=None):
    """
    Report whether every EFE in *serverlist* holds an identical set of
    upstream registrations.

    serverlist is a list of (host, port) tuples. None/empty is considered
    not synchronized; a single healthy server is trivially synchronized.
    """
    if not serverlist:
        return False
    if len(serverlist) == 1:
        # One healthy registered server is synchronized by definition.
        return True
    def registration_key(reg):
        return (str(reg.UserFacingUrlPrefix), str(reg.UpstreamPath), str(reg.UpstreamHostAndPort))
    # Use the first EFE's registrations as the baseline ...
    baseline_host, baseline_port = serverlist[0]
    baseline = sorted(getAllUpstreamServerRegistrations(baseline_host, baseline_port), key=registration_key)
    # ... and compare every remaining EFE against it.
    for host, port in serverlist[1:]:
        current = sorted(getAllUpstreamServerRegistrations(host, port), key=registration_key)
        if len(current) != len(baseline):
            # Different registration counts: definitely out of sync.
            return False
        for expected, actual in zip(baseline, current):
            # Compare registration contents via their instance dictionaries.
            if expected.__dict__ != actual.__dict__:
                return False
    return True
def add_elllipsis(html_var, count, div_name, msg_type, message):
    """Append a collapsible message header (ellipsis toggle + count + text)
    and the opening tag of its hidden detail div to *html_var*."""
    header = '<div class="message {msg_type}">{ellipsis}{count} {msg}'.format(
        msg_type=msg_type, count=count, ellipsis=ellipsis_link(div_name), msg=message)
    html_var.append(header)
    html_var.append('<div id="{div}" style="display:none;">'.format(div=div_name))
class control(BasePage):
    """Main control page: shows EFE health/sync status and navigation buttons."""
    def __init__(self):
        super(control, self).__init__()
        self.title = self.title + ' - Control'
    def _appendEfeSection(self, html_var, servers, div_name, msg_type, message, link=True):
        """Append one collapsible group of EFE servers, four columns per row.

        When *link* is True each server cell links to its efestate page
        (unreachable servers are rendered without links).
        """
        add_elllipsis(html_var, len(servers), div_name, msg_type, message)
        for count, (host, port) in enumerate(servers):
            if count % 4 == 0:
                html_var.append('<div class="col4-set">')
            if link:
                html_var.append('<div class="col-{count}"><a href="/ezfrontend/efestate/{host}:{port}">{host}:{port}</a></div>'.format(count=(count + 1) % 4, host=host, port=port))
            else:
                html_var.append('<div class="col-{count}">{host}:{port}</div>'.format(count=(count + 1) % 4, host=host, port=port))
            if (count + 1) % 4 == 0 or (count + 1) == len(servers):
                html_var.append('</div>')
        html_var.append('</div>')  # closes the hidden detail div
        html_var.append('</div>')  # closes the message div
    def GET(self, name=None):
        """Render the control page listing healthy/unhealthy EFE servers."""
        super(control, self).GET(name)
        logger.info('%s accessed by {%s}', self.__class__.__name__, self.userDN)
        html_var = ['<div id="UserJson" style="display:none;">',
                    self.userJson,
                    '</div>',
                    '<div>',
                    '<h3>Front Ends:</h3>'
                    ]
        # Create a list of registered efes
        registered_efe = [(host, int(port)) for host, port in getEfeServers()]
        # Partition by ping health
        healthy_efe = []
        nhealthy_efe = []
        for host, port in registered_efe:
            if isEfeHealthy(host, port):
                healthy_efe.append((host, port))
            else:
                nhealthy_efe.append((host, port))
        # Check if the healthy servers hold identical registrations
        synced = isEfeServersSynchronized(serverlist=healthy_efe)
        # REFACTOR: the three column-rendering sections below used to be three
        # near-identical copy-pasted loops; they now share _appendEfeSection.
        if healthy_efe:
            if synced:
                # Display in GREEN
                self._appendEfeSection(html_var, healthy_efe, 'div_sync', 'success-message', 'EFE Healthy', link=True)
            else:
                # Display in RED
                self._appendEfeSection(html_var, healthy_efe, 'div_notsync', 'error-message', 'EFE not Syncronized', link=True)
        if nhealthy_efe:
            # Display in ORANGE, without efestate links
            self._appendEfeSection(html_var, nhealthy_efe, 'div_uhealthy', 'system-message', 'EFE not Healthy', link=False)
        links_to_pages = ['<div class="col4-set">',
                          '<div class="col-1"><a href="/ezfrontend/manage/"><div class="btn blue-btn">Manage Certificates</div></a></div>',
                          '<div class="col-2"><a href="#"><div class="btn gray-btn">Manage Static Contents</div></a></div>',
                          '<div class="col-3"><a href="/ezfrontend/upstreams/"><div class="btn blue-btn">View Registrations</div></a></div>',
                          '<div class="col-4"><a href="/ezfrontend/register/"><div class="btn blue-btn">Register an instance</div></a></div>',
                          '</div>'
                          ]
        divider = ['<div class="divider"></div>']
        self.content.append(''.join(html_var + divider + links_to_pages))
        return self.render()
'''
Class to manage certificates
'''
class manage(ActionPage):
    '''
    Page to upload or delete per-host TLS certificates on the front ends.
    '''
    def __init__(self):
        super(manage, self).__init__()
        self.title = self.title + ' - Manage Certificates'
    def GET(self, name=None):
        """Render upload forms for hosts lacking certs and delete forms for
        hosts that already have them.

        *name*, when present, is a urlsafe-base64 'OK|...'/'NOK|...' status
        string left by a previous upload/delete redirect.
        """
        super(manage, self).GET(name)
        logger.info('%s accessed by {%s}', self.__class__.__name__, self.userDN)
        message = ''
        if name:
            msg = urlsafe_b64decode(str(name))
            status = dict()
            status[str(msg.split('|')[0])] = msg.split('|')[1:]
            if 'NOK' in status:
                message = '<div class="message error-message">{errmsg}</div>'.format(errmsg=''.join(status['NOK']))
            if 'OK' in status:
                message = '<div class="message success-message">{errmsg}</div>'.format(errmsg=''.join(status['OK']))
        rtn_upl = []
        rtn_del = []
        upload_servers = defaultdict(list)
        delete_servers = defaultdict(list)
        efeClient = getEfeClient()
        registrations = efeClient.getAllUpstreamServerRegistrations()
        for reg in registrations:
            #map registrations to host_name
            serverName = get_registration_server(reg)
            host_name, _ = get_registration_upstream(reg)
            if serverName:
                #if we have a specified user facing server name, use that instead
                host_name = serverName
            if efeClient.isServerCertPresent(host_name):
                delete_servers[host_name].append(reg)
            else:
                upload_servers[host_name].append(reg)
        if upload_servers:
            card_start = ['<div class="card shadow3">',
                          '<form action="/ezfrontend/uploadcert" method="post" enctype="multipart/form-data">'
                          ]
            for server, registrations in upload_servers.iteritems():
                server_name = 'Server: {server}'.format(server=server)
                divider = '<div class="divider"></div>'
                loc_lst = []
                # Get all the locations for this server
                for reg in registrations:
                    path = get_registration_upstream_path(reg)
                    upstream_host, upstream_port = get_registration_upstream(reg)
                    loc_lst.append('{path} => {upstream_host}:{upstream_port}'.format(path=path, upstream_host=upstream_host, upstream_port=upstream_port))
                more = '<div id="div_more_upload" style="display:none;">{{{usinfo}}}</div>'.format(usinfo=', '.join(loc_lst))
                file_upload_button = ['<label for="type">Certificate</label>',
                                      '<span>',
                                      '  <div class="fileUpload btn file-btn">',
                                      '    <span>Choose File</span>',
                                      '    <input id="certBtn" type="file" class="upload" name="upload_cert:{server}">'.format(server=server),
                                      '  </div>',
                                      '  <input id="uploadCert" placeholder="Choose Certificate file" disabled="disabled" class="transparent half inline">',
                                      '</span>',
                                      '<label for="type">Key</label>',
                                      '<span>',
                                      '  <div class="fileUpload btn file-btn">',
                                      '    <span>Choose File</span>',
                                      '    <input id="keyBtn" type="file" class="upload" name="upload_key:{server}">'.format(server=server),
                                      '  </div>',
                                      '  <input id="uploadKey" placeholder="Choose Key file" disabled="disabled" class="transparent half inline">',
                                      '</span>',
                                      '<script type="text/javascript">',
                                      '  document.getElementById("certBtn").onchange = function () {',
                                      '      document.getElementById("uploadCert").value = this.value;',
                                      '  };',
                                      '  document.getElementById("keyBtn").onchange = function () {',
                                      '      document.getElementById("uploadKey").value = this.value;',
                                      '  };',
                                      '</script>'
                                      ]
                upload_button = ['<div><input type="submit" class="btn file-upload-btn" value="Upload"></div>',
                                 '</form>'
                                 ]
                # BUG FIX: this used to be rtn_upl.extend(<str>), which added the
                # HTML one CHARACTER at a time (only harmless because the content
                # is ''.join()ed at render time).
                rtn_upl.append('{card_start}{ellipsis}{server}{more}{divider}{cert}{card_end}{upload}'.format(card_start=''.join(card_start),
                                                                                                             ellipsis=ellipsis_link('div_more_upload'),
                                                                                                             server=server_name,
                                                                                                             more=more,
                                                                                                             cert=''.join(file_upload_button),
                                                                                                             upload=''.join(upload_button),
                                                                                                             divider=divider,
                                                                                                             card_end='</div>'))
        if delete_servers:
            card_start = ['<div class="card shadow3">',
                          '<form action="/ezfrontend/deletecert" method="post" enctype="multipart/form-data">',
                          ]
            for server, registrations in delete_servers.iteritems():
                server_name = 'Server: {server}'.format(server=server)
                divider = '<div class="divider"></div>'
                loc_lst = []
                for reg in registrations:
                    path = get_registration_upstream_path(reg)
                    upstream_host, upstream_port = get_registration_upstream(reg)
                    loc_lst.append('{path} => {upstream_host}:{upstream_port}'.format(path=path, upstream_host=upstream_host, upstream_port=upstream_port))
                more = '<div id="div_more_delete" style="display:none;">{{{usinfo}}}</div>'.format(usinfo=', '.join(loc_lst))
                delete_list = ['<span>',
                               '  <input type="checkbox" class="regular-checkbox" id="checkboxdel" name="delete_:{server}" value="True"><label for="checkboxdel"></label>'.format(server=server),
                               '  <div class="tag">Select to delete Certificate and key</div>',
                               '</span>'
                               ]
                delete_button = ['<div><input type="submit" class="btn file-delete-btn" value="Delete"></div>',
                                 '</form>'
                                 ]
                # BUG FIX: append, not extend — see the upload section above.
                rtn_del.append('{card_start}{ellipsis}{server}{more}{divider}{delist}{card_end}{delete}'.format(card_start=''.join(card_start),
                                                                                                               ellipsis=ellipsis_link('div_more_delete'),
                                                                                                               server=server_name,
                                                                                                               more=more,
                                                                                                               delist=''.join(delete_list),
                                                                                                               delete=''.join(delete_button),
                                                                                                               divider=divider,
                                                                                                               card_end='</div>'))
        returnEfeClient(efeClient)
        # Display status message if any
        # BUG FIX: message is a single string — append it, don't extend
        # character-by-character.
        self.content.append(message)
        if rtn_upl:
            self.content.append('<h3>Update Certificates:</h3>')
            self.content.extend(rtn_upl)
        if rtn_del:
            # divider between Update and delete (duplicate 'if rtn_del' merged)
            self.content.append('<div class="divider"></div>')
            self.content.append('<h3>Remove Certificates:</h3>')
            self.content.extend(rtn_del)
        return self.render()
    def POST(self, name=None):
        super(manage, self).POST(name)
        return
class uploadCerts:
    '''
    Upload cert and key of host(s)
    '''
    def POST(self):
        """Accept cert/key file uploads from the manage page and push them to
        the front ends; redirect back with a base64-encoded status string."""
        redirect = '/ezfrontend/manage/'
        form = web.input()
        serverCerts = defaultdict(dict)
        for k, v in form.iteritems():
            # only consider populated upload_* fields ('upload_cert:<host>' / 'upload_key:<host>')
            if not (k.startswith('upload_') and len(str(v)) > 0):
                continue
            cert_type, host_name = k.split(':')[0], k.split(':')[1]
            if 'cert' in cert_type:
                serverCerts[host_name]['crt'] = str(v)
            else:
                serverCerts[host_name]['key'] = str(v)
        # BUG FIX: status/rtn used to be initialized only inside the
        # 'if serverCerts:' branch, so posting the form with no files
        # crashed with a NameError at the redirect below.
        status = 'OK'
        rtn = []
        if serverCerts:
            feClient = getEfeClient()
            #save certs to database and update zookeeper
            for server, certData in serverCerts.iteritems():
                try:
                    feClient.addServerCerts(server, ServerCertInfo(certificateContents=certData['crt'], keyContents=certData['key']))
                    logger.info("Uploaded Cert and Key for %s", server)
                    rtn.append("Uploaded Certificate and Key for host: %s" % server)
                except EzFrontendCertException as e:
                    status = 'NOK'
                    logger.error("Error in Uploading Cert and Key for %s: %s", server, str(e))
                    rtn.append("Error in Uploading Certificate and Key for host: %s" % server)
            returnEfeClient(feClient)
        if 'NOK' in status:
            value = 'NOK|{0}'.format(''.join(rtn))
        else:
            value = 'OK|{0}'.format(''.join(rtn))
        raise web.seeother('{redirect}/{status}'.format(redirect=redirect, status=urlsafe_b64encode(value)))
class deleteCerts:
    '''
    delete cert and key of host(s)
    '''
    def POST(self):
        """Delete the cert/key pair for every host flagged in the posted form,
        then redirect back to the manage page with a status string."""
        redirect = '/ezfrontend/manage/'
        form = web.input()
        hostsToDelete = set()
        rtn = []
        # Form fields look like 'delete_:<host>'.
        for field, _ in form.iteritems():
            if field.startswith('delete_'):
                hostsToDelete.add(field.split(':')[1])
        if hostsToDelete:
            removeCertsForHosts(hostsToDelete)
            rtn = ["Deleted Certificate and Key for host: %s <br>" % (h) for h in hostsToDelete]
        value = 'OK|{0}'.format(''.join(rtn))
        raise web.seeother('{redirect}/{status}'.format(redirect=redirect, status=urlsafe_b64encode(value)))
def isServerListening(host, port):
    """Return True if a TCP connection to (host, port) succeeds, else False.

    The socket is closed on every exit path (the old code only closed it on
    the two explicitly-handled outcomes, leaking it on unexpected errors).
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.connect((host, port))
        return True
    except socket.error:
        return False
    finally:
        s.close()
class RegisterWebPage(ActionPage):
    """Registration form page: register a new upstream instance and
    add/remove the built-in EFE UI test apps."""
    def __init__(self):
        super(RegisterWebPage, self).__init__()
        # BUG FIX: title typo 'Registeration' -> 'Registration'
        self.title = self.title + ' - App Registration'
    def GET(self, name=None):
        """Render the registration form.

        *name*, when present, is a urlsafe-base64 'OK|...'/'NOK|...' status
        string left by a RegisterInstance redirect.
        """
        super(RegisterWebPage, self).GET(name)
        logger.info('%s accessed by {%s}', self.__class__.__name__, self.userDN)
        message = ''
        if name:
            msg = urlsafe_b64decode(str(name))
            status = dict()
            status[str(msg.split('|')[0])] = msg.split('|')[1:]
            if 'NOK' in status:
                message = '<div class="message error-message">{errmsg}</div>'.format(errmsg=''.join(status['NOK']))
            if 'OK' in status:
                # BUG FIX: split(':', 1) — registration values may themselves
                # contain ':' (URLs, host:port), which made plain split(':')
                # blow up dict() with 3-element sequences.
                reg = dict(item.split(':', 1) for value in status['OK'] for item in str(value).split(','))
                message_without_end_div = '<div class="message success-message">{ellipsis} {msg} {app}'.format(msg='Registered ', app=reg['AppName'], ellipsis=ellipsis_link('div_more'))
                reginfo_in_card = '<div class="card shadow1"> <p style="color:black">{msg}</p> </div>'.format(msg=str(reg))
                more = '<div id="div_more" style="display:none;">{reginfo} </div>'.format(reginfo=reginfo_in_card)
                message = '{msg} {more} </div> '.format(msg=message_without_end_div, more=more)
        html_var = [message,
                    '<h3>Register an Instance:</h3>',
                    '<div class="card shadow3">',
                    '<form action="/ezfrontend/reginstance" method="post">',
                    '<fieldset>',
                    #ROW1
                    '<div class="col3-set">',
                    ' <div class="col-1"><label>URL</label><input type="text" name="UserFacingUrlPrefix" > </div>',
                    ' <div class="col-2"> </div>',
                    ' <div class="col-3"> </div>',
                    '</div>',
                    #ROW2
                    '<div class="col3-set">',
                    ' <div class="col-1"><label>App Name</label><input type="text" name="AppName" > </div>',
                    ' <div class="col-2"><label>Upstream Path</label><input type="text" name="UpstreamPath" > </div>',
                    ' <div class="col-3"> </div>',
                    '</div>',
                    #ROW3
                    '<div class="col3-set">',
                    ' <div class="col-1"><label>Upstream Host</label><input type="text" name="UpstreamHost" > </div>',
                    ' <div class="col-2"><label>Upstream Port</label><input type="text" name="UpstreamPort" > </div>',
                    ' <div class="col-3"> </div>',
                    '</div>',
                    #ROW4
                    '<div class="col3-set">',
                    ' <div class="col-1"><label>Timeout</label><input type="text" name="timeout" value=10>[1-120]s </div>',
                    ' <div class="col-2"><label>Timeout Tries</label><input type="text" name="timeoutTries" value=1>[1-10] </div>',
                    ' <div class="col-3"><label>Upload File Size</label><input type="text" name="uploadFileSize" value=2>[1-{max}]MB </div>'.format(max=100),
                    '</div>',
                    #Checkboxes
                    '<div class="mar-t30"></div>',
                    '<h3 class="green"><font color="black">Flags</font></h3>',
                    '<ul>',
                    '<li>',
                    '<input type="checkbox" class="regular-checkbox" id="checkbox1" name="sticky" value="True"><label for="checkbox1"></label>',
                    '<div class="tag">Sticky session</div>',
                    '</li>',
                    '<li>',
                    '<input type="checkbox" class="regular-checkbox" id="checkbox2" name="disableChunkedTransferEncoding" value="True"><label for="checkbox2"></label>',
                    '<div class="tag">Chunk Transfer</div>',
                    '</li>',
                    '</ul>',
                    #submit button
                    '<input type="submit" class="green-btn mar-t20" name="submit" id="submit" value="Submit" >',
                    '</fieldset>',
                    '</form>',
                    '</div>']
        # Get current registrations
        efeClient = getEfeClient()
        registrations = efeClient.getAllUpstreamServerRegistrations()
        test_app_name = 'EFE_ui_test_app'
        app = list()
        # Offer add/remove buttons for the two built-in test apps.
        for i in range(1, 3):
            if is_path_registered(registrations, '{test_app}_{number}'.format(test_app=test_app_name, number=i)):
                app.append('<input type="submit" class="red-btn mar-t20" name="{test_app}_{number}" id="submit" value="Remove Test App {number}" >'.format(test_app=test_app_name, number=i))
            else:
                app.append('<input type="submit" class="green-btn mar-t20" name="{test_app}_{number}" id="submit" value="Add Test App {number}" >'.format(test_app=test_app_name, number=i))
        # BUG FIX (markup): the test-app card's <form> was never closed.
        test_app_var = ['<div class="card shadow3">',
                        '  <form action="/ezfrontend/reginstance" method="post">',
                        '    <div class="col3-set">',
                        '      <div class="col-1">{test_app}</div>'.format(test_app=app[0]),
                        '      <div class="col-2">{test_app}</div>'.format(test_app=app[1]),
                        '      <div class="col-3"> </div>',
                        '    </div>',
                        '  </form>',
                        '</div>']
        self.content.append(''.join(html_var + test_app_var))
        return self.render()
class RegisterInstance:
    '''
    Register an instance.
    Register/Deregister Test App, start/stop Test server
    '''
    def POST(self):
        """Register or deregister an upstream instance (or one of the two
        built-in test apps), then redirect back with a status string."""
        redirect = '/ezfrontend/register/'
        logger.info("accessed RegisterInstance")
        form = web.input()
        us_registration = dict()
        deregister = False
        # Stays None for ordinary (non test-app) registrations.
        configuration_number = None
        test_app_1 = "EFE_ui_test_app_1"
        test_app_2 = "EFE_ui_test_app_2"
        if test_app_1 in form:
            us_registration['UpstreamPort'] = 31210
            us_registration['AppName'] = "TestApp1"
            us_registration['UpstreamPath'] = test_app_1
            us_registration['UserFacingUrlPrefix'] = ''
            us_registration['UserFacingUrlSuffix'] = test_app_1 + '/'
            us_registration['FullUserFacingUrl'] = ''
            us_registration['isWebSocket'] = False
            configuration_number = 0
            if 'Remove' in form[test_app_1]:
                deregister = True
        elif test_app_2 in form:
            us_registration['UpstreamPort'] = 31211
            us_registration['AppName'] = "TestApp2"
            us_registration['UpstreamPath'] = test_app_2
            us_registration['UserFacingUrlPrefix'] = ''
            us_registration['UserFacingUrlSuffix'] = test_app_2 + '/'
            us_registration['FullUserFacingUrl'] = ''
            us_registration['isWebSocket'] = True
            configuration_number = 1
            if 'Remove' in form[test_app_2]:
                deregister = True
        else:
            # Not ezFrontend UI test apps
            try:
                if len(form['AppName']) == 0 or len(form['UserFacingUrlPrefix']) == 0:
                    raise ValueError
                us_registration['AppName'] = str(form['AppName'])
                # Remove xxx:// prefix if any
                us_registration['UserFacingUrlPrefix'] = str(form['UserFacingUrlPrefix']).split('//')[-1]
                us_registration['FullUserFacingUrl'] = us_registration['UserFacingUrlPrefix']
            except ValueError:
                value = 'NOK|Needs AppName and URL for registration'
                raise web.seeother('{redirect}/{nok}'.format(redirect=redirect, nok=urlsafe_b64encode(value)))
            us_registration['UpstreamPort'] = form['UpstreamPort']
            us_registration['UpstreamHost'] = form['UpstreamHost']
            us_registration['UpstreamPath'] = form['UpstreamPath']
            us_registration['UserFacingUrlSuffix'] = ''
        # Common for all
        us_registration['timeout'] = int(form.get('timeout', 10))
        us_registration['timeoutTries'] = int(form.get('timeoutTries', 1))
        us_registration['uploadFileSize'] = int(form.get('uploadFileSize', 2))
        us_registration['sticky'] = bool(form.get('sticky'))
        us_registration['disableChunkedTransferEncoding'] = bool(form.get('disableChunkedTransferEncoding'))
        efeServers = getEfeServers()
        host, port = choice(efeServers)
        port = int(port)
        # If registered/deregistered Test App, remember its configuration.
        # BUG FIX: tests used 'configuration_number >= 0', relying on the
        # Python 2 quirk that None < any int; the explicit None check is
        # equivalent here and survives Python 3.
        if configuration_number is not None:
            gState.testServerConfigurations[configuration_number] = us_registration
        status = 'NOK|Unable to register'
        if not deregister:
            try:
                registerServer(us_registration, host, port)
                if configuration_number is not None and not isServerListening(host, us_registration['UpstreamPort']):
                    gevent.spawn(startServer, configuration_number)
                status = 'OK'
            except Exception as e:
                status = 'NOK|{0}'.format(str(e))
        else:
            try:
                deregisterServerFromConfig(us_registration, host, port)
                if configuration_number is not None:
                    stopServer(configuration_number)
                status = 'OK'
            except Exception as e:
                status = 'NOK|{0}'.format(str(e))
        if 'NOK' in status:
            value = '{0}'.format(status)
        else:
            # Loop variables renamed so the comprehension no longer shadows
            # 'value' (a Python 2 listcomp scope leak waiting to bite).
            value = 'OK|{0}'.format(','.join(['{k}:{v}'.format(k=str(reg_key), v=str(reg_val)) for (reg_key, reg_val) in us_registration.items()]))
        # Ignore Test App deregistration
        if deregister:
            value = 'IGNORE|Ignore'
        raise web.seeother('{redirect}/{ok}'.format(redirect=redirect, ok=urlsafe_b64encode(value)))
class ViewRegistrations(ActionPage):
    """Page listing, per application, every upstream registration held by an
    EFE (a named one, or a randomly chosen one)."""
    def __init__(self):
        super(ViewRegistrations, self).__init__()
    def GET(self, name=None):
        """Render registrations from EFE *name* ('host:port') or a random EFE."""
        super(ViewRegistrations, self).GET(name)
        logger.info('%s accessed by {%s}', self.__class__.__name__, self.userDN)
        efeServers = getEfeServers()
        if name:
            hp_match = re.match('^(.*?):(.*?)$', name)
            if not hp_match:
                # BUG FIX: this branch said 'View Registerations' while the
                # no-name branch below said 'View Registrations'; now
                # consistent (and spelled correctly).
                self.title = self.title + ' - View Registrations'
                self.content.append('<div class="message error-message">Invalid EFE host: {0}</div>'.format(name))
                return self.render()
            self.title = self.title + ' - {0} Registrations'.format(name)
            host, port = hp_match.group(1), hp_match.group(2)
            if (host, port) not in efeServers:
                self.content.append('<div class="message error-message">EFE %s:%s not found</div>' % (host, port))
                return self.render()
        else:
            #choose a random EFE host
            host, port = choice(efeServers)
            self.title = self.title + ' - View Registrations'
        registrations = []
        try:
            registrations = getAllUpstreamServerRegistrations(host, int(port))
        except RuntimeError as ex:
            logger.warn('Unable to get all upstream server registrations from %s:%d - %s', host, int(port), str(ex))
            self.content.append('<div class="message error-message">Unable to get all upstream server registrations from %s:%d</div>' % (host, int(port)))
            return self.render()
        # Sort the registrations by AppName
        reg_dict = defaultdict(list)
        for reg in registrations:
            reg_dict[reg.AppName].append(reg)
        # For all the App registered in alphabetical order
        for app in sorted(reg_dict, key=lambda app_name: str(app_name).upper()):
            registration = reg_dict[app][0]
            url = registration.UserFacingUrlPrefix
            registration_var = ['<div class="card shadow1">',
                                '<h3>{appname}</h3>'.format(appname=registration.AppName),
                                'URL: <a href="https://{url}">https://{url}</a>'.format(url=url),
                                '<div class="col3-set">',
                                ' <div class="col-1">UserFacingUrlPrefix: {url}</div>'.format(url=url),
                                ' <div class="col-2">sticky: {sticky}</div>'.format(sticky=str(registration.sticky)),
                                ' <div class="col-3">disableChunkedTransferEncoding: {chunked}</div>'.format(chunked=str(registration.disableChunkedTransferEncoding)),
                                '</div>']
            # For all the registrations under this App name
            upfz_list, usp_list, to_list, tot_list, ushp_list = list(), list(), list(), list(), list()
            number_of_col = 4
            for registration in reg_dict[app]:
                # For all the registered instances, create a list of registered parameters
                upfz_list.append(('uploadFileSize: {filesize}'.format(filesize=registration.uploadFileSize), registration.uploadFileSize))
                usp_list.append(('UpstreamPath: {usp}'.format(usp=registration.UpstreamPath), str(registration.UpstreamPath)))
                ushp_list.append(('UpstreamHostAndPort: {ushp}'.format(ushp=registration.UpstreamHostAndPort), str(registration.UpstreamHostAndPort)))
                to_list.append(('timeout: {timeout}'.format(timeout=registration.timeout), registration.timeout))
                tot_list.append(('timeoutTries: {tries}'.format(tries=registration.timeoutTries), registration.timeoutTries))
            # BUG FIX (markup): both action links below contained a garbled,
            # doubly-opened <a href="javascript:<a href=...> tag.
            remove_instance_button = ['<form action="/ezfrontend/efestate/" method="post">',
                                      '  <a href="javascript:;" onclick="parentNode.submit();">',
                                      '  <div class="btn red-btn">Deregister Instance</div> </a>',
                                      '  <input type="hidden" name="deregister"']
            remove_app_button = ['<form action="/ezfrontend/efestate/" method="post">',
                                 '  <a href="javascript:;" onclick="parentNode.submit();">',
                                 '  <div class="btn red-btn">Remove App</div> </a>',
                                 '  <input type="hidden" name="removeReverseProxiedPath"',
                                 '   value="{host}|{port}|{url}"/></form>'.format(host=host, port=port, url=url)]
            # Build the html page
            border_style = ''
            self.content.extend(registration_var)
            # populate the columns, number_of_col instances per row
            for i in range(0, len(ushp_list), number_of_col):
                self.content.append('<div class="divider"></div>')
                self.content.append('<div class="col4-set">')
                for count, _ in enumerate(ushp_list[i:i + number_of_col]):
                    self.content.append('<div class="col-{count}"{border_style}>{value}</div>'.format(count=(count % number_of_col) + 1, value=ushp_list[i + count][0], border_style=border_style))
                self.content.append('</div>')
                self.content.append('<div class="col4-set">')
                for count, _ in enumerate(usp_list[i:i + number_of_col]):
                    self.content.append('<div class="col-{count}"{border_style}>{value}</div>'.format(count=(count % number_of_col) + 1, value=usp_list[i + count][0], border_style=border_style))
                self.content.append('</div>')
                self.content.append('<div class="col4-set">')
                for count, _ in enumerate(upfz_list[i:i + number_of_col]):
                    self.content.append('<div class="col-{count}"{border_style}>{value}</div>'.format(count=(count % number_of_col) + 1, value=upfz_list[i + count][0], border_style=border_style))
                self.content.append('</div>')
                self.content.append('<div class="col4-set">')
                for count, _ in enumerate(to_list[i:i + number_of_col]):
                    self.content.append('<div class="col-{count}"{border_style}>{value}</div>'.format(count=(count % number_of_col) + 1, value=to_list[i + count][0], border_style=border_style))
                self.content.append('</div>')
                self.content.append('<div class="col4-set">')
                for count, _ in enumerate(tot_list[i:i + number_of_col]):
                    self.content.append('<div class="col-{count}"{border_style}>{value}</div>'.format(count=(count % number_of_col) + 1, value=tot_list[i + count][0], border_style=border_style))
                self.content.append('</div>')
                # Add Deregister instance button if there is multiple instances
                if len(upfz_list) > 1:
                    self.content.append('<div class="col4-set">')
                    for count, _ in enumerate(upfz_list[i:i + number_of_col]):
                        self.content.append('<div class="col-{count}"{border_style}>{button}value="{host}|{port}|{url}|{appname}|{ushp}|{uspath}"/></form></div>'.format(button=''.join(remove_instance_button),
                                            count=(count % number_of_col) + 1, host=host, port=port, url=url, appname=str(app), ushp=ushp_list[i + count][1], uspath=usp_list[i + count][1], border_style=border_style))
                    self.content.append('</div>')
            self.content.append('<div class="divider"></div>')
            self.content.extend(remove_app_button)
            self.content.append('</div>')  # for card shadow1
        return self.render()
class hello(BasePage):
    """Simple echo/test application page used to exercise the proxy: dumps
    headers and query parameters, offers upload and websocket test links."""
    def __init__(self):
        super(hello, self).__init__()
        self.title = self.title + ' - TestApp'
    def GET(self, _=None):
        """Render the test page (authentication only, no authorization)."""
        self.validateUser(authorize=False)
        logger.info('%s accessed by {%s}', self.__class__.__name__, self.userDN)
        headers = []
        for key in web.ctx.env:
            headers.append('<li>'+str(key)+': '+str(web.ctx.env[key])+'</li>')
        qp = web.input()
        queryParameters = []
        for key in qp:
            queryParameters.append(key + ":\t" + qp[key])
        rtn = []
        # BUG FIX (markup): a stray '<html>' used to be emitted BEFORE the
        # doctype, opening <html> twice; the doctype now comes first.
        rtn.append('<!doctype html>')
        rtn.append('<html lang="en">')
        rtn.append('<head>')
        rtn.append('<script src="/ezbstatic/components/platform/platform.js" type="text/javascript"></script>')
        rtn.append('<link rel="import" href="/ezbstatic/components/classification-banner/classification-banner.html">')
        rtn.append('</head>')
        rtn.append('<body>')
        rtn.append('<classification-banner class="banner-component"></classification-banner>')
        rtn.append('<br/><h3>port</h3>'+''+'<h3>headers</h3><ul>'+"\n".join(headers)+'</ul><h3>query parameters</h3><ul>'+"\n".join(queryParameters)+'</ul>')
        path = web.ctx.env['PATH_INFO'].strip('/')
        # Upload form
        rtn.append('<h3>Upload</h3>')
        rtn.append('<form action="/' + path + '/upload" method="post" enctype="multipart/form-data">')
        rtn.append('<input type="file" name="upload_file">')
        rtn.append('<input type="submit" value="Upload">')
        rtn.append('</form>')
        if path in gState.socket_resource_path:
            rtn.append('<h3>WebSocket</h3>')
            web_socket_url = 'https://'+ str(web.ctx.env.get('HTTP_X_ORIGINAL_HOST'))+'/'+ path + '/wstest'
            rtn.append('<a href = "' + web_socket_url + '">WebSocket Test </a>')
        rtn.append('</body></html>')
        return ''.join(rtn)
class loadWSClientPage:
    '''
    Load WebSocket Test page
    '''
    def GET(self):
        """Serve the websocket test template for a registered resource path."""
        resource = web.ctx.env['PATH_INFO'].strip('/').split('/')[0]
        if resource in gState.socket_resource_path:
            original_host = web.ctx.env.get('HTTP_X_ORIGINAL_HOST', 'localhost')
            return web.template.render(TEMPLATES_DIR).websocket_test_page(original_host, resource)
        # This should not happen
        logger.error('Web Socket Test Page called without enabling SocketIOServer')
        raise web.internalerror('Web Socket Test Page called without enabling SocketIOServer')
class sendjs:
    '''
    Send node.io.js file for the client
    '''

    def GET(self):
        try:
            # FIX: use a context manager so the file handle is always closed
            # (it was previously leaked), and catch only I/O errors rather
            # than a bare except that would also hide programming errors.
            with open('{0}/socket.io.js'.format(TEMPLATES_DIR)) as js:
                return js.read()
        except IOError:
            logger.info('socket.io.js Not Found')
            return "<h1>Not Found</h1>"
class webSocket:
    '''
    Service Websocket request
    '''

    def GET(self, name):
        # Extract the resource path (first path component)
        full_path = web.ctx.env['PATH_INFO'].strip('/')
        resource = full_path.split('/')[0]
        if resource in gState.socket_resource_path:
            # FIX: the old code used str.strip('/'+path+'/'), but strip()
            # removes any of the given *characters* from both ends, not the
            # prefix string — it could therefore also eat leading characters
            # of "socket.io" and break the dispatch. Slice off the resource
            # prefix explicitly instead.
            remainder = full_path[len(resource):].lstrip('/')
            if remainder.startswith("socket.io"):
                socketio_manage(web.ctx.env, {
                    '/test': dateNamespace,
                }, request=name)
            else:
                logger.info('socket.io not in PATH_INFO')
                return "<h1>Not Found</h1>"
class dateNamespace(BaseNamespace, BroadcastMixin):
    '''
    Updates epoch time, ip address and
    DN constantly. Echos back received message.
    '''

    def recv_connect(self):
        # Client certificate DN and peer address, read from the WSGI environ.
        dn = str(self.environ['HTTP_X_CLIENT_CERT_S_DN'])
        ip = str(self.environ.get('REMOTE_ADDR', 'UNKNOWN'))

        def sendTime():
            # Push a time/identity snapshot to the client roughly 10x per
            # second; gevent.sleep yields so other greenlets keep running.
            while True:
                dtime = time.time()
                self.emit('time_data', {
                    'time': int(dtime),
                    'ipaddr': ip,
                    'DN': dn
                })
                gevent.sleep(0.1)

        # Run the sender as a greenlet owned by this namespace.
        self.spawn(sendTime)

    def on_msg(self, message):
        '''
        Echo back the received message
        '''
        self.emit('msg', {'message': message})
class upload:
    '''
    Upload file into /tmp dir
    '''

    def POST(self):
        length = web.ctx.env['CONTENT_LENGTH']
        # Note: upload_file is the 'name' in form
        form = web.input(upload_file={})
        upload_dir = '/tmp'
        if 'upload_file' in form:
            # Normalize Windows path separators, then keep only the last
            # component so a crafted filename cannot traverse out of
            # upload_dir.
            filepath = form.upload_file.filename.replace('\\', '/')
            filename = filepath.split('/')[-1]
            # FIX: write in binary mode (uploads are arbitrary bytes, not
            # text) and use a context manager so the handle is closed even
            # if the write raises.
            with open(upload_dir + '/' + filename, 'wb') as fout:
                fout.write(form.upload_file.file.read())
            logger.info('uploaded ' + length + ' bytes into file:' + upload_dir + '/' + filename)
            htmlText = '<!doctype html><html lang="en"><head>'
            htmlText += '<script src="/ezbstatic/components/platform/platform.js" type="text/javascript"></script>'
            htmlText += '<link rel="import" href="/ezbstatic/components/classification-banner/classification-banner.html">'
            htmlText += '</head><body><classification-banner class="banner-component"></classification-banner>'
            htmlText += "<h1>Upload</h1>Uploaded file: \"" + filename + "\" of length: " + length + " bytes into " + upload_dir
            return htmlText
        return ("<h1> Could not read file </h1>")
def verify_sign(public_key_loc, signature, data):
    '''
    Verifies with a public key from whom the data came that it was indeed
    signed by their private key
    param: public_key_loc Path to public key
    param: signature String signature to be verified
    return: Boolean. True if the signature is valid; False otherwise.
    '''
    from Crypto.PublicKey import RSA
    from Crypto.Signature import PKCS1_v1_5
    from Crypto.Hash import SHA256
    from base64 import b64decode

    # FIX: close the key file deterministically (was open(...).read(), which
    # leaked the handle); also dropped the unused b64encode import.
    with open(public_key_loc, "r") as key_file:
        rsakey = RSA.importKey(key_file.read())
    signer = PKCS1_v1_5.new(rsakey)
    digest = SHA256.new()
    # The payload itself is hashed as-is; only the signature is base64.
    digest.update(data)
    if signer.verify(digest, b64decode(signature)):
        logger.info("SIGNATURE VERIFIED FOR: %s" % str(digest))
        return True
    logger.error("Signature not verified for:%s " % data)
    return False
def getCn(subject):
    """Return the CN value from a comma-separated DN-style subject string.

    Raises web.unauthorized when no CN component is present. A component
    that contains no '=' raises ValueError, as before.
    """
    for component in subject.split(','):
        cleaned = component.strip().rstrip()
        key, value = cleaned.split('=', 1)
        if key == 'CN':
            return value
    logger.error("no CN in subject: %s" % subject)
    raise web.unauthorized
def validateCn(cn):
    """Check whether a certificate CN is granted access.

    CNs with the special '_Ez_' prefix are always allowed; otherwise the CN
    must appear in the authorized_users.yaml file. Returns False when the
    file is missing/unreadable or the CN is not listed.
    """
    if cn.startswith('_Ez_'):
        logger.info("access granted to special cert with prefix _Ez_: %s", cn)
        return True
    try:
        with open('authorized_users.yaml') as userfile:
            allowed = yaml.safe_load(userfile)
            if allowed is not None and cn in allowed:
                logger.info("validated access for user (%s) using authorization file", cn)
                return True
    except IOError as ex:
        logger.info("Unable to validate CN against authorized_users file: %s", str(ex))
    return False
def isUserAuthroized(cn, userJsonInfo):
    """Return True if the user's JSON credentials place them in the EzBake
    administrator group of the internal admin project.

    :param cn: certificate common name, used only for logging.
    :param userJsonInfo: JSON string with an optional 'projects' list of
        {'projectName': ..., 'groups': [...]} dicts.

    FIX: a missing/null 'projects' key or a matching project without a
    'groups' list used to raise TypeError; both are now treated as "not
    authorized".
    """
    ezAdminProject = '_Ez_internal_project_'
    ezAdminGroup = '_Ez_administrator'
    userCreds = json.loads(userJsonInfo)
    for project in (userCreds.get('projects') or []):
        if project.get('projectName') != ezAdminProject:
            continue
        if ezAdminGroup in (project.get('groups') or []):
            logger.info("validated access for user (%s) using json header", cn)
            return True
    return False
def getEzProperties():
    """Load the EzBake configuration: defaults, then directory overrides,
    then return the properties decrypted with the configured text-crypto
    implementation."""
    # load default configurations
    config = EzConfiguration()
    logger.info("loaded default ezbake configuration properties")
    # load configuration overrides from the override directory
    overrideLoader = DirectoryConfigurationLoader(gState.configOverrideDir)
    config = EzConfiguration(PropertiesConfigurationLoader(config.getProperties()), overrideLoader)
    logger.info("loaded property overrides")
    # load cryptoImpl used to decrypt encrypted property values
    cryptoImpl = SystemConfiguration(config.getProperties()).getTextCryptoImplementer()
    if not isinstance(cryptoImpl, SharedSecretTextCryptoImplementation):
        # Not fatal, but encrypted values will not decrypt correctly.
        logger.warn("Couldn't get a SharedSecretTextCryptoImplementation. Is the EZB shared secret set properly?")
    return config.getProperties(cryptoImpl)
def handler(signalnum, frame):
    # SIGTERM handler: stop the main WSGI server so main() can deregister
    # and exit cleanly.
    gState.mainserver.stop()
def main():
    """Configure file logging, load EzBake configuration, register with the
    frontend, and serve the web.py application over mutual TLS until the
    server is stopped (SIGTERM) or fails."""
    import logging.handlers

    # WatchedFileHandler plays nicely with external log rotation.
    wfh = logging.handlers.WatchedFileHandler('/opt/ezfrontend-ui/ezfrontend-ui.log')
    wfh.setLevel(logging.INFO)
    wfh.setFormatter(formatter)
    logger.addHandler(wfh)
    # comment the next line to also send the log to the terminal
    logger.removeHandler(sh)

    logger.info('. starting.')

    global gState
    gState = GlobalState()
    signal.signal(signal.SIGTERM, handler)

    ezProps = getEzProperties()
    global zkConfig
    zkConfig = ZookeeperConfiguration(ezProps)
    gState.port = int(ezProps.get('efe.tester.port', -1))
    gState.url = ezProps.get('web.application.external.domain')
    gState.internalhostname = ezProps.get('internal_hostname')

    # web.py URL routing table: (url-pattern, handler-class-name) pairs.
    urls = ('/ezfrontend/control/', 'control',
            '/ezfrontend/efestate/(.*)', 'efestate',
            '/ezfrontend/manage/(.*)', 'manage',
            '/ezfrontend/uploadcert', 'uploadCerts',
            '/ezfrontend/deletecert', 'deleteCerts',
            '/ezfrontend/register/(.*)', 'RegisterWebPage',
            '/ezfrontend/reginstance', 'RegisterInstance',
            '/ezfrontend/upstreams/(.*)', 'ViewRegistrations',
            )
    app = web.application(urls, globals())
    app.notfound = WANotFound
    wsgifunc = app.wsgifunc()

    try:
        registerSelf()
    except Exception:
        # Without a successful registration there is nothing to serve.
        logger.exception('Error in registering with Efe')
        return

    try:
        # TLS WSGI server; client certificates are requested but optional.
        gState.mainserver = WSGIServer((gState.internalhostname, gState.port), wsgifunc,
                                       keyfile=gState.keyfile, certfile=gState.certfile,
                                       ca_certs=gState.ca_certs, cert_reqs=gevent.ssl.CERT_OPTIONAL,
                                       log=WSGILog())
        gState.mainserver.serve_forever()
    except Exception:
        logger.exception('Exception raised while running server')

    # Best-effort deregistration on the way out.
    try:
        deregisterSelf()
    except Exception:
        logger.exception('Error in deregistering with Efe')

    logger.info('done. exiting.')


if __name__ == '__main__':
    main()
|
import pytest
import os
import numpy as np
from .. import utils
from .. import templates
from .. import filters
from .. import photoz
from .. import filters
from . import test_filters
from . import test_templates
# Shared PhotoZ instance: populated by test_full_photoz and reused by the
# later tests (the tests in this module are order-dependent).
ez = None

# Single redshift for testing
z_spec = 1.0

# Additional catalog objects with random noise
NRND = 16
def make_fake_catalog(SN=20):
    """
    Make a fake photometric catalog

    Synthesizes photometry for a single template at z_spec in 10 bands,
    normalized to 1 uJy in F160W, with Gaussian noise at the requested
    signal-to-noise SN. Writes an ASCII catalog, a FITS catalog and a
    translate file into the test data directory and returns
    (tab, cat_file, translate_file).

    NOTE: changes the process working directory to the test data path.
    """
    data_path = test_filters.test_data_path()
    os.chdir(data_path)

    #### Generate data
    res = filters.FilterFile('filters/FILTER.RES.latest')
    templ = test_templates.test_read_template_ascii()

    ### WFC3 SED + K + IRAC filter numbers in FILTER.RES
    f_numbers = [209, 211, 214, 217, 202, 203, 205, 269, 18, 19]
    f_list = [res[f] for f in f_numbers]

    ### Photometry from a single template
    fnu = templ.integrate_filter(f_list, z=z_spec)

    ### Norm to F160W
    i_f160 = -4
    flux_f160 = 1.  # microJy
    fnu *= flux_f160/fnu[i_f160]

    ### Add noise
    #SN = 20
    efnu = fnu/SN

    ### Make table
    tab = photoz.Table()
    tab['id'] = np.arange(NRND+1, dtype=int)+1
    tab['z_spec'] = z_spec
    tab['ra'] = 150.1
    tab['dec'] = 2.5

    ### Simpler filter names for catalog
    f_names = []
    for f in f_list:
        f_name = f.name.split(' ')[0].split('/')[-1].split('.dat')[0]
        f_name = f_name.replace('irac_tr', 'ch')
        f_name = f_name.replace('hawki_k', 'k').split('_')[0]
        f_names.append(f_name)

    ### Translate file
    translate_file = 'zphot.translate.test'

    # Fixed seed so the noisy fluxes are reproducible across test runs.
    np.random.seed(0)

    with open(translate_file, 'w') as fp:
        for i, f in enumerate(f_names):
            # Object 0 is noise-free; the NRND others get Gaussian noise.
            tab[f'f_{f}'] = fnu[i] + np.append(0, np.random.normal(size=NRND)*efnu[i])
            tab[f'e_{f}'] = efnu[i]
            fp.write(f'f_{f} F{f_numbers[i]}\n')
            fp.write(f'e_{f} E{f_numbers[i]}\n')

    ### ASCII catalog
    cat_file = 'eazy_test.cat'
    tab.write(cat_file, overwrite=True, format='ascii.commented_header')
    tab.write(cat_file+'.fits', overwrite=True, format='fits')

    return tab, cat_file, translate_file
def test_full_photoz():
    """
    End-to-end test

    Builds the fake catalog and initializes PhotoZ twice, once from the
    ASCII catalog and once from the FITS catalog; the second instance is
    stored in the module-global `ez` for the subsequent tests.
    """
    global ez

    tab, cat_file, translate_file = make_fake_catalog(SN=20)

    data_path = test_filters.test_data_path()
    os.chdir(data_path)

    ### Parameters
    params = {}
    params['CATALOG_FILE'] = cat_file
    params['MAIN_OUTPUT_FILE'] = 'eazy_test'

    # Galactic extinction
    params['MW_EBV'] = 0.0

    params['Z_STEP'] = 0.01
    params['Z_MIN'] = 0.5
    params['Z_MAX'] = 2.1

    params['SYS_ERR'] = 0.02
    params['PRIOR_ABZP'] = 23.9  # uJy
    params['PRIOR_FILTER'] = 205  # f160W
    params['PRIOR_FILE'] = 'templates/prior_F160W_TAO.dat'

    params['FILTERS_RES'] = 'filters/FILTER.RES.latest'
    params['TEMPLATES_FILE'] = 'templates/fsps_full/fsps_QSF_12_v3.param'
    params['VERBOSITY'] = 1
    params['FIX_ZSPEC'] = False

    ### Initialize object from the ASCII catalog
    self = photoz.PhotoZ(param_file=None, translate_file=translate_file,
                         zeropoint_file=None, params=params, load_prior=True,
                         load_products=False)

    # FITS catalog
    params['CATALOG_FILE'] = cat_file+'.fits'

    ez = photoz.PhotoZ(param_file=None, translate_file=translate_file,
                       zeropoint_file=None, params=params, load_prior=True,
                       load_products=False)
def test_photoz_methods():
    """Exercise the fitting entry points: subset fits, the three fitters,
    explicit/default zbest, and the prior/beta_prior combinations."""
    global ez

    ### Catalog subset
    ez.fit_parallel(idx=np.where(ez.cat['id'] < 2)[0], fitter='nnls')

    ### Full catalog, fitting methods
    for fitter in ['lstsq', 'bounded', 'nnls']:
        ez.fit_parallel(fitter=fitter)

    ###### Methods
    # Specified zbest
    ez.fit_at_zbest(zbest=np.full(NRND+1, z_spec),
                    prior=False, beta_prior=False,
                    get_err=False, clip_wavelength=1100, fitter='nnls',
                    selection=None, n_proc=0, par_skip=10000)

    # default zbest
    ez.fit_at_zbest(zbest=None, prior=False, beta_prior=False,
                    get_err=False, clip_wavelength=1100, fitter='nnls',
                    selection=None, n_proc=0, par_skip=10000)

    # priors: all four prior/beta_prior combinations
    for prior in [True, False]:
        for beta_prior in [True, False]:
            ez.fit_at_zbest(zbest=None, prior=prior, beta_prior=beta_prior,
                            get_err=False, clip_wavelength=1100, fitter='nnls',
                            selection=None, n_proc=0, par_skip=10000)

    # NOTE(review): pytest test functions should not return values; kept for
    # backward compatibility with any direct callers.
    return ez
def test_sps_parameters():
    """
    Derived parameters

    Runs the full photo-z fit, produces the standard output table, and
    compares a frozen snapshot of the first object's derived quantities
    (10% relative tolerance) against the current results.
    """
    global ez

    ### Run all photo-zs
    ez.fit_parallel(fitter='nnls')

    ### SPS parameters
    zout, hdu = ez.standard_output(zbest=None, rf_pad_width=0.5, rf_max_err=2,
                                   prior=True, beta_prior=True, simple=True)

    # Recovered redshift should be close to the input z_spec.
    assert(np.allclose(zout['z_phot'][0], z_spec, atol=0.1*(1+z_spec)))

    # The noise-free object should be dominated by the generating template.
    coeffs_norm = ez.coeffs_best[0, :]/ez.coeffs_best[0, :].max()
    assert(np.argmax(coeffs_norm) == 0)
    assert(np.sum(coeffs_norm) < 1.1)

    ### All zout data: frozen snapshot of object 0, generated with
    # zdict = {}
    # for k in zout.colnames:
    #     zdict[k] = zout[k][0]
    zdict = {
        'nusefilt': 10,
        'lc_min': 3353.6304006459895,
        'lc_max': 45020.33785230743,
        'z_phot': 0.99673086,
        'z_phot_chi2': 0.0034560256,
        'z_phot_risk': 0.035717614,
        'z_min_risk': 0.9649467,
        'min_risk': 0.030646123,
        'z_raw_chi2': 1.0046412,
        'raw_chi2': 0.026701305,
        'z025': 0.8389708,
        'z160': 0.92368615,
        'z500': 0.9808423,
        'z840': 1.0210177,
        'z975': 1.0537113,
        'restU': 0.41497922,
        'restU_err': 0.017939776,
        'restB': 0.8218043,
        'restB_err': 0.03606099,
        'restV': 0.9209713,
        'restV_err': 0.035820752,
        'restJ': 1.0253503,
        'restJ_err': 0.023321927,
        'dL': 6580.476033271505,
        'mass': 1338116524.1430814,
        'sfr': 0.016536130604908845,
        'Lv': 3424873079.787285,
        'LIR': 390855097.57019836,
        'MLv': 0.39070543432406274,
        'Av': 0.06089298262931584,
        'rest270': 0.11237647,
        'rest270_err': 0.011739388,
        'rest274': 0.23368931,
        'rest274_err': 0.012826629,
        'rest120': 0.12594292,
        'rest120_err': 0.008697912,
        'rest121': 0.18348014,
        'rest121_err': 0.005728759,
        'rest156': 0.37412244,
        'rest156_err': 0.0184125,
        'rest157': 0.8652829,
        'rest157_err': 0.031125754,
        'rest158': 0.9475169,
        'rest158_err': 0.018318474,
        'rest159': 0.9954984,
        'rest159_err': 0.030312747,
        'rest160': 1.0260057,
        'rest160_err': 0.021030843,
        'rest161': 1.0253503,
        'rest161_err': 0.023321927,
        'rest162': 1.012053,
        'rest162_err': 0.025552988,
        'rest163': 0.7521126,
        'rest163_err': 0.02862215,
        'DISTMOD': 43.340373559176065}

    for k in zdict:
        assert(np.allclose(zout[k][0], zdict[k], rtol=0.1))

    ### user-specified zbest
    z2, _ = ez.standard_output(zbest=np.full(NRND+1, z_spec),
                               rf_pad_width=0.5, rf_max_err=2,
                               prior=True, beta_prior=True, simple=True)

    assert(np.allclose(z2['z_phot'], z_spec, rtol=1.e-2))

    # confirm that z2 has both 'z_pdf' and 'z_phot' columns and that they're different
    assert( np.any(z2['z_pdf'] != z2['z_phot']) )

    # confirm that z2['z_pdf'] == zout['z_phot']
    assert( np.all(z2['z_pdf'] == zout['z_phot']) )

    # confirm that zout['z_phot'] == zout['z_pdf']
    assert( np.all(zout['z_pdf'] == zout['z_phot']) )

    ### Check that sps parameters are different...
def test_fit_stars():
    """
    Fit phoenix star library for Star/Galaxy separation

    Checks a frozen chi2 value for the first object / first stellar template.
    """
    global ez
    ez.fit_phoenix_stars()
    assert(np.allclose(ez.star_chi2[0, 0], 1930.887))
def test_photoz_figures():
    """
    Figures generated with PhotoZ object

    Smoke-tests the plotting entry points (SED fits with the various
    show_fnu modes, and zphot-zspec comparisons) and writes two PNGs.
    """
    import matplotlib.pyplot as plt

    global ez

    ### SED figure
    fig, data = ez.show_fit(id=0, id_is_idx=True, show_fnu=False)
    fig.savefig('eazy_test.sed.png', dpi=72)

    assert(isinstance(fig, plt.Figure))
    assert(isinstance(data, dict))

    # Exercise the other display modes; return values intentionally unused.
    fig = ez.show_fit(id=1, show_fnu=False)
    fig = ez.show_fit(id=1, show_fnu=True)
    fig = ez.show_fit(id=1, show_fnu=2)

    fig = ez.show_fit(id=1, show_components=True)

    fig = ez.zphot_zspec()
    fig = ez.zphot_zspec(zmin=0, zmax=2)
    fig.savefig('eazy_test.zphot_zspec.png', dpi=72)

    plt.close('all')
|
'''
This module defines :class:`ChannelIndex`, a container for multiple
data channels.
:class:`ChannelIndex` derives from :class:`Container`,
from :module:`neo.core.container`.
'''
import numpy as np
import quantities as pq
from neo.core.container import Container
class ChannelIndex(Container):
    '''
    A container for indexing/grouping data channels.

    This container has several purposes:

      * Grouping all :class:`AnalogSignal`\\s and
        :class:`IrregularlySampledSignal`\\s inside a :class:`Block` across
        :class:`Segment`\\s;
      * Indexing a subset of the channels within an :class:`AnalogSignal` and
        :class:`IrregularlySampledSignal`\\s;
      * Container of :class:`Unit`\\s. Discharges of multiple neurons
        (:class:`Unit`\'s) can be seen on the same channel.

    *Usage 1* providing channel IDs across multiple :class:`Segment`::

        * Recording with 2 electrode arrays across 3 segments
        * Each array has 64 channels and is data is represented in a single
          :class:`AnalogSignal` object per electrode array
        * channel ids range from 0 to 127 with the first half covering
          electrode 0 and second half covering electrode 1

        >>> from neo.core import (Block, Segment, ChannelIndex,
        ...                       AnalogSignal)
        >>> from quantities import nA, kHz
        >>> import numpy as np
        ...
        >>> # create a Block with 3 Segment and 2 ChannelIndex objects
        >>> blk = Block()
        >>> for ind in range(3):
        ...     seg = Segment(name='segment %d' % ind, index=ind)
        ...     blk.segments.append(seg)
        ...
        >>> for ind in range(2):
        ...     channel_ids=np.arange(64)+ind
        ...     chx = ChannelIndex(name='Array probe %d' % ind,
        ...                        index=np.arange(64),
        ...                        channel_ids=channel_ids,
        ...                        channel_names=['Channel %i' % chid
        ...                                       for chid in channel_ids])
        ...     blk.channel_indexes.append(chx)
        ...
        >>> # Populate the Block with AnalogSignal objects
        >>> for seg in blk.segments:
        ...     for chx in blk.channel_indexes:
        ...         a = AnalogSignal(np.random.randn(10000, 64)*nA,
        ...                          sampling_rate=10*kHz)
        ...         # link AnalogSignal and ID providing channel_index
        ...         a.channel_index = chx
        ...         chx.analogsignals.append(a)
        ...         seg.analogsignals.append(a)

    *Usage 2* grouping channels::

        * Recording with a single probe with 8 channels, 4 of which belong to
          a Tetrode
        * Global channel IDs range from 0 to 8
        * An additional ChannelIndex is used to group subset of Tetrode
          channels

        >>> from neo.core import Block, ChannelIndex
        >>> import numpy as np
        >>> from quantities import mV, kHz
        ...
        >>> # Create a Block
        >>> blk = Block()
        >>> blk.segments.append(Segment())
        ...
        >>> # Create a signal with 8 channels and a ChannelIndex handling the
        >>> # channel IDs (see usage case 1)
        >>> sig = AnalogSignal(np.random.randn(1000, 8)*mV, sampling_rate=10*kHz)
        >>> chx = ChannelIndex(name='Probe 0', index=range(8),
        ...                    channel_ids=range(8),
        ...                    channel_names=['Channel %i' % chid
        ...                                   for chid in range(8)])
        >>> chx.analogsignals.append(sig)
        >>> sig.channel_index=chx
        >>> blk.segments[0].analogsignals.append(sig)
        ...
        >>> # Create a new ChannelIndex which groups four channels from the
        >>> # analogsignal and provides a second ID scheme
        >>> chx = ChannelIndex(name='Tetrode 0',
        ...                    channel_names=np.array(['Tetrode ch1',
        ...                                            'Tetrode ch4',
        ...                                            'Tetrode ch6',
        ...                                            'Tetrode ch7']),
        ...                    index=np.array([0, 3, 5, 6]))
        >>> # Attach the ChannelIndex to the the Block,
        >>> # but not the to the AnalogSignal, since sig.channel_index is
        >>> # already linked to the global ChannelIndex of Probe 0 created
        >>> # above
        >>> chx.analogsignals.append(sig)
        >>> blk.channel_indexes.append(chx)

    *Usage 3* dealing with :class:`Unit` objects::

        * Group 5 unit objects in a single :class:`ChannelIndex` object

        >>> from neo.core import Block, ChannelIndex, Unit
        ...
        >>> # Create a Block
        >>> blk = Block()
        ...
        >>> # Create a new ChannelIndex and add it to the Block
        >>> chx = ChannelIndex(index=None, name='octotrode A')
        >>> blk.channel_indexes.append(chx)
        ...
        >>> # create several Unit objects and add them to the
        >>> # ChannelIndex
        >>> for ind in range(5):
        ...     unit = Unit(name = 'unit %d' % ind,
        ...                 description='after a long and hard spike sorting')
        ...     chx.units.append(unit)

    *Required attributes/properties*:
        :index: (numpy.array 1D dtype='i')
            Index of each channel in the attached signals (AnalogSignals and
            IrregularlySampledSignals). The order of the channel IDs needs to
            be consistent across attached signals.

    *Recommended attributes/properties*:
        :name: (str) A label for the dataset.
        :description: (str) Text description.
        :file_origin: (str) Filesystem path or URL of the original data file.
        :channel_names: (numpy.array 1D dtype='S')
            Names for each recording channel.
        :channel_ids: (numpy.array 1D dtype='int')
            IDs of the corresponding channels referenced by 'index'.
        :coordinates: (quantity array 2D (x, y, z))
            Physical or logical coordinates of all channels.

    Note: Any other additional arguments are assumed to be user-specific
    metadata and stored in :attr:`annotations`.

    *Container of*:
        :class:`AnalogSignal`
        :class:`IrregularlySampledSignal`
        :class:`Unit`
    '''

    # Container-relationship declarations consumed by neo.core.container.
    _container_child_objects = ('Unit',)
    _data_child_objects = ('AnalogSignal', 'IrregularlySampledSignal')
    _single_parent_objects = ('Block',)
    # (attribute name, type, ndim[, dtype]) tuples used for validation.
    _necessary_attrs = (('index', np.ndarray, 1, np.dtype('i')),)
    _recommended_attrs = ((('channel_names', np.ndarray, 1, np.dtype('S')),
                           ('channel_ids', np.ndarray, 1, np.dtype('i')),
                           ('coordinates', pq.Quantity, 2)) +
                          Container._recommended_attrs)

    def __init__(self, index, channel_names=None, channel_ids=None,
                 name=None, description=None, file_origin=None,
                 coordinates=None, **annotations):
        '''
        Initialize a new :class:`ChannelIndex` instance.
        '''
        # Inherited initialization
        # Sets universally recommended attributes, and places all others
        # in annotations
        super().__init__(name=name,
                         description=description,
                         file_origin=file_origin,
                         **annotations)

        # Defaults: empty arrays (bytes dtype for names, int for ids).
        if channel_names is None:
            channel_names = np.array([], dtype='S')
        if channel_ids is None:
            channel_ids = np.array([], dtype='i')

        # Store recommended attributes; inputs are coerced to ndarrays.
        self.channel_names = np.array(channel_names)
        self.channel_ids = np.array(channel_ids)
        self.index = np.array(index)
        self.coordinates = coordinates

    def __getitem__(self, i):
        '''
        Get the item or slice :attr:`i`.

        Returns a new :class:`ChannelIndex` covering the selected channels.
        The attached signal lists are shared with (not copied from) this
        instance; units are deliberately not carried over.
        '''
        index = self.index.__getitem__(i)
        if self.channel_names.size > 0:
            channel_names = self.channel_names[index]
            if not channel_names.shape:
                # Scalar selection: wrap so the constructor gets a sequence.
                channel_names = [channel_names]
        else:
            channel_names = None
        if self.channel_ids.size > 0:
            channel_ids = self.channel_ids[index]
            if not channel_ids.shape:
                channel_ids = [channel_ids]
        else:
            channel_ids = None
        # The new object re-indexes the selection from 0..n-1.
        obj = ChannelIndex(index=np.arange(index.size),
                           channel_names=channel_names,
                           channel_ids=channel_ids)
        obj.block = self.block
        obj.analogsignals = self.analogsignals
        obj.irregularlysampledsignals = self.irregularlysampledsignals
        # we do not copy the list of units, since these are related to
        # the entire set of channels in the parent ChannelIndex
        return obj
|
from django.shortcuts import render,redirect
from .models import NewUser
from django.contrib import messages
from django.views.generic.base import View
from django.views.generic.base import TemplateView
#renders index page
class indexPage(TemplateView):
    # Static landing page: TemplateView renders index.html on GET.
    template_name = 'index.html'
#view for user registration
class userReg(View):
    """Register a new user from the registration form."""

    def post(self, request):
        # SECURITY NOTE(review): no input validation, and the password is
        # stored in plaintext. Should use Django forms plus
        # django.contrib.auth.hashers; loginPage compares plaintext, so
        # both views would need to change together.
        firstname = request.POST['firstname']
        lastname = request.POST['lastname']
        username = request.POST['username']
        password = request.POST['password']
        NewUser(firstname=firstname, lastname=lastname, username=username, password=password).save()
        messages.success(request, "user : " + firstname + " added successfully... ")
        return render(request, 'registration.html')

    def get(self, request):
        # Show the empty registration form.
        return render(request, 'registration.html')
#view for login
class loginPage(View):
    """Log a user in by matching username/password and stashing their
    details in the session; on failure, redisplay the login form."""

    def post(self, request):
        # SECURITY NOTE(review): passwords are compared (and stored by
        # userReg) in plaintext — should use django.contrib.auth hashing.
        # Flagged only, since the stored data would also need migrating.
        try:
            user = NewUser.objects.get(username=request.POST['username'],
                                       password=request.POST['password'])
            # FIX: a second, redundant query used to re-fetch the same row
            # just to read firstname/lastname; reuse the matched object.
            request.session['username'] = user.username
            request.session['firstname'] = user.firstname
            request.session['lastname'] = user.lastname
            return render(request, 'quiz.html')
        except NewUser.DoesNotExist:
            # FIX: this was messages.success — an invalid login is an error.
            messages.error(request, "username / password is invalid...! ")
            return render(request, 'login.html')

    def get(self, request):
        return render(request, 'login.html')
#logout view
class logout(View):
    """Log the user out by removing their session entry, then show index."""

    def get(self, request, *args, **kwargs):
        # FIX: 'request.sesssion' (typo) always raised AttributeError, so
        # the session entry was never actually deleted.
        try:
            del request.session['username']
        except KeyError:
            # Not logged in — nothing to clear.
            pass
        # FIX: the success path previously fell through and returned None,
        # which Django rejects; always return the rendered index page.
        return render(request, 'index.html')
#rendering back to quiz page
class back(TemplateView):
    # Simply re-renders the quiz page.
    template_name = 'quiz.html'
from helper.my_type import MyType
class Manufacture:
    """Simple holder for a manufacturer's name with type-checked assignment."""

    def __init__(self):
        # Name is unset until set_name() is called.
        self._name = None

    def set_name(self, name: str) -> None:
        # MyType.check raises if name is not a str.
        MyType.check("manufacturer name", name, str)
        self._name = name

    def get_name(self) -> str:
        # NOTE(review): returns None (not str) when no name was ever set —
        # the annotation is optimistic.
        return self._name
|
#!/usr/bin/python
"""Console stopwatch: press Enter to start, Enter again for each lap,
Ctrl-C to quit. Prints lap number, total elapsed time and lap time."""
import time

input()  # wait for the user to start the clock
print('started')
start = time.time()
end = start  # timestamp of the previous lap
lap = 1
try:
    while True:
        input()  # each Enter press records a lap
        now = time.time()
        laptime = round(now - end, 2)
        tottime = round(now - start, 2)
        print('Lap %s: %s %s' % (lap, tottime, laptime))
        lap += 1
        # FIX: the old code assigned to an unused variable 'last', so 'end'
        # never advanced and the lap time always equaled the total time.
        end = now
except KeyboardInterrupt:
    print('\nDone')
|
from entity import Entity
# effects act like normal entities, but do not experience collision detection
class Effect(Entity):
    """An Entity that moves with a constant velocity; per the module note
    above, effects do not experience collision detection."""

    def __init__(self, x, y, w, h, dx, dy):
        # Parent gets zero velocity; this class applies dx/dy itself in
        # _step.
        Entity.__init__(self, x, y, w, h, 0, 0)
        self.dx = dx
        self.dy = dy
        self.debug_color = (0, 100, 0)
        # presumably Entity._feel_gravity honors this flag — verify in Entity
        self.feels_gravity = False

    def _step(self, room):
        # Advance by the stored velocity each tick; no collision checks.
        self._feel_gravity()
        self.x += self.dx
        self.y += self.dy
        # presumably (still_alive, spawned_entities) — verify against Entity
        return (True, [])
|
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Aiming for this to be generated, so keep it simple
"""
import numpy as np
from opticks.ana.shape import X, SEllipsoid, STubs, STorus, SCons, SSubtractionSolid, SUnionSolid, SIntersectionSolid
class x019(X):
    """
    Python transcription of the Geant4 CSG solid below (JUNO 20-inch PMT
    inner solid), built from 2D shape profiles.

    G4VSolid* make_solid()
    {
        G4VSolid* d = new G4Ellipsoid("PMT_20inch_inner_solid_1_Ellipsoid0x4c91130", 249.000000, 249.000000, 179.000000, -179.000000, 179.000000) ; // 3
        G4VSolid* g = new G4Tubs("PMT_20inch_inner_solid_2_Tube0x4c91210", 0.000000, 75.951247, 23.782510, 0.000000, CLHEP::twopi) ; // 4
        G4VSolid* i = new G4Torus("PMT_20inch_inner_solid_2_Torus0x4c91340", 0.000000, 52.010000, 97.000000, -0.000175, CLHEP::twopi) ; // 4
        G4ThreeVector A(0.000000,0.000000,-23.772510);
        G4VSolid* f = new G4SubtractionSolid("PMT_20inch_inner_solid_part20x4cb2d80", g, i, NULL, A) ; // 3
        G4ThreeVector B(0.000000,0.000000,-195.227490);
        G4VSolid* c = new G4UnionSolid("PMT_20inch_inner_solid_1_20x4cb30f0", d, f, NULL, B) ; // 2
        G4VSolid* k = new G4Tubs("PMT_20inch_inner_solid_3_EndTube0x4cb2fc0", 0.000000, 45.010000, 57.510000, 0.000000, CLHEP::twopi) ; // 2
        G4ThreeVector C(0.000000,0.000000,-276.500000);
        G4VSolid* b = new G4UnionSolid("PMT_20inch_inner_solid0x4cb32e0", c, k, NULL, C) ; // 1
        G4VSolid* m = new G4Tubs("Inner_Separator0x4cb3530", 0.000000, 254.000000, 92.000000, 0.000000, CLHEP::twopi) ; // 1
        G4ThreeVector D(0.000000,0.000000,92.000000);
        G4VSolid* a = new G4SubtractionSolid("PMT_20inch_inner2_solid0x4cb3870", b, m, NULL, D) ; // 0
        return a ;
    }

    CSG tree (translations of the second operand in parentheses):

        a
          b                m(D)
          c       k(C)
          bulb+neck endtube
          d    f(B)
          bulb neck
               g(B) i(B+A)
               tubs torus
    """

    def __init__(self):
        # Shapes take [radii/half-heights]; translations are (x, z) offsets
        # applied to the second operand of each boolean node (see docstring).
        d = SEllipsoid("d", [249.000, 179.000])
        g = STubs("g", [75.951247, 23.782510])
        i = STorus("i", [52.010000, 97.000000])
        A = np.array([0, -23.772510])
        f = SSubtractionSolid("f", [g, i, A])  # neck: tube minus torus
        B = np.array([0, -195.227490])
        c = SUnionSolid("c", [d, f, B])        # bulb + neck
        k = STubs("k", [45.010000, 57.510000])
        C = np.array([0, -276.500000])
        b = SUnionSolid("b", [c, k, C])        # + end tube
        m = STubs("m", [254.000000, 92.000000])
        D = np.array([0, 92.000000])
        a = SSubtractionSolid("a", [b, m, D])  # cut by inner separator
        X.__init__(self, a)  # sets root
if __name__ == '__main__':
    # Build the solid and print its CSG tree representation.
    x = x019()
    print(repr(x))
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure environment files can be suppressed.
"""
import TestGyp
import os
import sys
if sys.platform == 'win32':
    # Only meaningful on Windows, where the ninja generator normally writes
    # environment.x86 / environment.x64 files.
    test = TestGyp.TestGyp(formats=['ninja'])

    # The -G flag tells the generator to use custom environment files, so
    # the default ones should be suppressed.
    test.run_gyp('use-custom-environment-files.gyp',
                 '-G', 'ninja_use_custom_environment_files')

    # Make sure environment files do not exist.
    if os.path.exists(test.built_file_path('environment.x86')):
        test.fail_test()
    if os.path.exists(test.built_file_path('environment.x64')):
        test.fail_test()

    test.pass_test()
|
import csv
def readCsv():
    """Read test.csv row by row and print each row as a list of strings."""
    with open('test.csv', 'r') as f:  # open the csv file with a context manager
        reader = csv.reader(f)  # create a csv reader object
        # next(reader)
        # Option 1: collect the rows with a list comprehension
        # db = [item for item in reader]  # put the rows into a list
        # print(type(db))
        # Option 2: iterate over the file's rows directly
        for item in reader:
            print(item)
def readCsvDict():
    """Read test.csv like a dict: the first (header) row supplies the keys."""
    with open('test.csv', 'r') as f:
        reader = csv.DictReader(f)
        # next(reader)
        for item in reader:
            print(item)
def writeCsv():
    """Append rows to test1.csv; demonstrates writer/DictWriter usage."""
    with open('test1.csv', 'a', newline='') as f:  # newline='' prevents blank lines between rows
        row = ['3', 'www.sofu.com']
        # write = csv.writer(f)  # create a csv writer object
        # Option 1: write one row at a time
        # write.writerow(row)
        # Option 2: write several rows in a loop
        # data = [['4', 'www.12306.com'],
        #         ['5', 'www.jd.com']]
        # for row in data:
        #     write.writerow(row)
        # Option 3: use DictWriter to write the content as dicts
        headers = ['caseID', 'URL']  # column headers
        datas = [{'caseID': '6', 'URL': 'www.albb.com'},
                 {'caseID': '7', 'URL': 'www.vip.com'}]
        writer = csv.DictWriter(f, headers)  # headers become the first row
        writer.writeheader()
        # for row in datas:
        #     writer.writerow(row)
        writer.writerows(datas)  # write multiple rows at once
        print("写入完成!")
# Demo entry point: only the DictWriter demo runs on import.
# readCsvDict()
# readCsv()
writeCsv()
import player_info
def count():
    """Print the integers 0 through 9, one per line."""
    value = 0
    while value < 10:
        print(value)
        value += 1


count()
def capitalize(s, ind):
    """Return s with the characters at the indices listed in ind uppercased.

    Indices outside the string are silently ignored.
    """
    # FIX: the original swapped the conventional names (its 'i' was the
    # character, 'c' the index) and did an O(len(ind)) list-membership test
    # per character; a set makes each test O(1).
    targets = set(ind)
    return "".join(ch.upper() if pos in targets else ch
                   for pos, ch in enumerate(s))
'''
Given a string and an array of integers representing indices,
capitalize all letters at the given indices.
For example:
capitalize("abcdef",[1,2,5]) = "aBCdeF"
capitalize("abcdef",[1,2,5,100]) = "aBCdeF". There is no index 100.
The input will be a lowercase string with no spaces and an array of digits.
'''
|
#!/usr/bin/env python3
# coding: utf-8
import math
# Bidirectional (forward + backward) maximum-match segmentation
class BiWardNgram():
def __init__(self,word_dic_path,trans_dic_path):
self.word_dict = {} #词语频次词典
self.trans_dict = {} #每个词后接词的出现个数
self.word_counts = 0 #语料库中词总数
self.word_types = 0 #语料库中词种数
wordict_path = word_dic_path
transdict_path = trans_dic_path
self.init(wordict_path, transdict_path)
'''初始化模型'''
def init(self, wordict_path, transdict_path):
self.word_dict = self.load_model(wordict_path)
self.trans_dict = self.load_model(transdict_path)
self.word_types = len(self.word_dict)
self.word_counts = sum(self.word_dict.values())
'''加载模型'''
def load_model(self, model_path):
f = open(model_path, 'r')
a = f.read()
word_dict = eval(a)
f.close()
return word_dict
#计算基于ngram的句子生成概率
def compute_likelihood(self, seg_list):
p = 0
# 由于概率很小,对连乘做了取对数处理转化为加法
for pos, words in enumerate(seg_list):
if pos < len(seg_list)-1:
# 乘以后面词的条件概率
word1, word2 = words, seg_list[pos+1]
if word1 not in self.trans_dict.keys():
# 加1平滑, 让该词至少出现1次
p += math.log(1.0 / self.word_counts)
else:
# 加1平滑
fenzi, fenmu = 1.0, self.word_counts
#转移概率 p(y|x) = p(yi/y) = count(w)/(count(w1)+ count(w2) + count(w3)+ ..
for key in self.trans_dict[word1]:
if key == word2:
fenzi += self.trans_dict[word1][word2]
fenmu += self.trans_dict[word1][key]
# log(p(w0)*p(w1|w0)*p(w2|w1)*p(w3|w2)) == log(w0)+ log(p(w1|w0))+ log(p(w2|w1)) + log(p(w3|w2))
p += math.log(fenzi / fenmu)
# 乘以第一个词的概率
if ( pos == 0 and words != '<BEG>' ) or ( pos == 1 and seg_list[0] == '<BEG>'):
if words in self.word_dict.keys():
p += math.log((float(self.word_dict[words]) + 1.0) / (self.word_types + self.word_counts))
else:
# 加1平滑
p += math.log(1.0/ (self.word_types + self.word_counts))
return p
#最大前向分词
def max_forward(self, sentence):
# 1.从左向右取待切分汉语句的m个字符作为匹配字段,m为大机器词典中最长词条个数。
# 2.查找大机器词典并进行匹配。若匹配成功,则将这个匹配字段作为一个词切分出来。
window_size = 5
cutlist = []
index = 0
while index < len(sentence):
matched = False
for i in range(window_size, 0, -1):
cand_word = sentence[index: index + i]
if cand_word in self.word_dict.keys():
cutlist.append(cand_word)
matched = True
break
# 如果没有匹配上,则按字符切分
if not matched:
i = 1
cutlist.append(sentence[index])
index += i
return cutlist
#最大后向分词
def max_backward(self, sentence):
# 1.从右向左取待切分汉语句的m个字符作为匹配字段,m为大机器词典中最长词条个数。
# 2.查找大机器词典并进行匹配。若匹配成功,则将这个匹配字段作为一个词切分出来。
window_size = 5
cutlist = []
index = len(sentence)
while index > 0:
matched = False
for i in range(window_size, 0, -1):
tmp = (i + 1)
cand_word = sentence[index - tmp: index]
# 如果匹配上,则将字典中的字符加入到切分字符中
if cand_word in self.word_dict.keys():
cutlist.append(cand_word)
matched = True
break
# 如果没有匹配上,则按字符切分
if not matched:
tmp = 1
cutlist.append(sentence[index - 1])
index -= tmp
return cutlist[::-1]
'''分词'''
def cut_main(self, sentence):
seg_list1 = self.max_forward(sentence)
seg_list2 = self.max_backward(sentence)
seg_list = []
# differ_list1和differ_list2分别记录两个句子词序列不同的部分,用于消除歧义
differ_list1 = []
differ_list2 = []
# pos1和pos2记录两个句子的当前字的位置,cur1和cur2记录两个句子的第几个词
pos1 = pos2 = 0
cur1 = cur2 = 0
while 1:
if cur1 == len(seg_list1) and cur2 == len(seg_list2):
break
if pos1 == pos2:
if len(seg_list1[cur1]) == len(seg_list2[cur2]):
pos1 += len(seg_list1[cur1])
pos2 += len(seg_list2[cur2])
# 说明此时得到两个不同的词序列,根据bigram选择概率大的
# 注意算不同的时候要考虑加上前面一个词和后面一个词,拼接的时候再去掉即可
if len(differ_list1) > 0:
differ_list1.insert(0, seg_list[-1])
differ_list2.insert(0, seg_list[-1])
if cur1 < len(seg_list1) - 1:
differ_list1.append(seg_list1[cur1])
differ_list2.append(seg_list2[cur2])
p1 = self.compute_likelihood(differ_list1)
p2 = self.compute_likelihood(differ_list2)
if p1 > p2:
differ_list = differ_list1
else:
differ_list = differ_list2
differ_list.remove(differ_list[0])
if cur1 < len(seg_list1) - 1:
differ_list.remove(seg_list1[cur1])
for words in differ_list:
seg_list.append(words)
differ_list1 = []
differ_list2 = []
seg_list.append(seg_list1[cur1])
cur1 += 1
cur2 += 1
# pos1相同,len(seg_list1[cur1])不同,向后滑动,不同的添加到list中
elif len(seg_list1[cur1]) > len(seg_list2[cur2]):
differ_list2.append(seg_list2[cur2])
pos2 += len(seg_list2[cur2])
cur2 += 1
else:
differ_list1.append(seg_list1[cur1])
pos1 += len(seg_list1[cur1])
cur1 += 1
else:
# pos1不同,而结束的位置相同,两个同时向后滑动
if pos1 + len(seg_list1[cur1]) == pos2 + len(seg_list2[cur2]):
differ_list1.append(seg_list1[cur1])
differ_list2.append(seg_list2[cur2])
pos1 += len(seg_list1[cur1])
pos2 += len(seg_list2[cur2])
cur1 += 1
cur2 += 1
elif pos1 + len(seg_list1[cur1]) > pos2 + len(seg_list2[cur2]):
differ_list2.append(seg_list2[cur2])
pos2 += len(seg_list2[cur2])
cur2 += 1
else:
differ_list1.append(seg_list1[cur1])
pos1 += len(seg_list1[cur1])
cur1 += 1
return seg_list
'''分词主函数'''
    def cut(self, sentence):
        """Public segmentation entry point; delegates to cut_main()."""
        return self.cut_main(sentence)
if __name__ == "__main__":
    # Demo: load the word and transition dictionaries, then segment one
    # sample sentence and show the result next to the original text.
    segmenter = BiWardNgram('./word_dict.model', './trans_dict.model')
    sentence = "习近平在慰问电中表示,惊悉贵国克麦罗沃市发生火灾,造成重大人员伤亡和财产损失。我谨代表中国政府和中国人民,并以我个人的名义,对所有遇难者表示沉痛的哀悼,向受伤者和遇难者家属致以深切的同情和诚挚的慰问。"
    segmented = segmenter.cut(sentence)
    print("original sentence: ", sentence)
    print("segment result: ", segmented)
|
# Small list demo: build a list, inspect it, drop one element, extend it.
listas = [1, 2, 3, 4, 5, 6, -5]
print(listas)
print(listas[1])
# Drop the element at index 1 (pop's return value is discarded).
listas.pop(1)
print(listas)
# Append a value at the end of the list.
listas.append('string')
print(listas)
#!/usr/bin/python3
def no_c(my_string):
    """Return a copy of my_string with every 'c' and 'C' removed.

    my_string: the string to filter.
    Returns: a new string with the remaining characters in original order.
    """
    # str.join over a generator replaces the index-based loop with manual
    # ord() comparisons (67/99) and avoids the quadratic += accumulation.
    return "".join(ch for ch in my_string if ch not in "cC")
|
#!/usr/bin/env python
# encoding: utf-8
# @author: Zhipeng Ye
# @contact: Zhipeng.ye19@xjtlu.edu.cn
# @file: calculate_ngram3.py
# @time: 2020-01-14 01:27
# @desc:
import codecs
import math
import os
import re
import sys
import traceback
# Re-wrap the raw stdout buffer so everything printed by this script is
# encoded as UTF-8, independent of the environment's locale settings.
sys.stdout = codecs.getwriter('utf-8')(sys.stdout.detach())
class LanguageModelContent:
    """One language-model entry: a (log-)probability, the n-gram words,
    and an optional punishment/back-off field (empty by default)."""

    def __init__(self, possibility, words, punishment=''):
        self.possibility = possibility
        self.words = words
        self.punishment = punishment

    def __str__(self):
        # Tab-separated layout: <possibility>\t<words>\t<punishment>
        return '\t'.join((str(self.possibility), self.words, self.punishment))
if __name__ == "__main__":
    # Bigram counts keyed by the concatenated words ("<w1><w2>"); used below
    # as denominators when turning trigram counts into conditional log10
    # probabilities.
    ngram2 = {}
    dir = sorted(os.listdir('/Data_SSD/zhipengye/zhipengye/LM/processed_data/small_full_chinese_filtered'))
    # Shard names are zero-padded, so lexicographic comparison selects the
    # bigram shards (00001-00029) and the trigram shards (00030-00132).
    ngram2_files = [file_name for file_name in dir if file_name>= 'ngrams-00001-of-00394' and file_name <= 'ngrams-00029-of-00394']
    ngram3_files = [file_name for file_name in dir if file_name>= 'ngrams-00030-of-00394' and file_name <= 'ngrams-00132-of-00394']
    # Pass 1: load every bigram count into memory.
    for file_name in ngram2_files:
        with open('/Data_SSD/zhipengye/zhipengye/LM/processed_data/small_full_chinese_filtered/'+file_name,encoding='utf-8') as file:
            for line in file:
                # Each line: "<w1> <w2>\t<count>...".
                segments = line.split('\t')
                words = segments[0].replace(' ','')
                count = float(segments[1])
                ngram2[words] = count
    # Pass 2: score each trigram against its bigram prefix and write one
    # output file per input shard.
    for file_name in ngram3_files:
        model_list = []
        with open('/Data_SSD/zhipengye/zhipengye/LM/processed_data/small_full_chinese_filtered/'+file_name,encoding='utf-8') as file:
            for line in file:
                segments = line.split('\t')
                count = float(segments[1])
                words = segments[0].split(' ')
                first_word = words[0]
                second_word = words[1]
                third_word = words[2]
                # log10 P(w3 | w1 w2) = log10(count(w1 w2 w3)/count(w1 w2));
                # trigrams whose bigram prefix is unknown are skipped.
                gram2_count = ngram2.get(first_word+second_word)
                if gram2_count is not None:
                    possibility = math.log10(count/gram2_count)
                    words = segments[0]
                    model = LanguageModelContent(possibility, segments[0])
                    model_list.append(model)
        # NOTE(review): opened in append ('a') mode, so re-running the
        # script duplicates output lines — confirm this is intended.
        with open('/Data_SSD/zhipengye/zhipengye/LM/completed.data/data/gram3/'+file_name, 'a',encoding='utf-8') as file:
            for model in model_list:
                file.write(str(model) + '\n')
import pandas as pd
import numpy as np

# Build a Series labelled 'a'..'d' with values 0..3, then display it as a
# two-column DataFrame (the index promoted to a regular column).
labels = list('abcd')
values = np.arange(4)
my_serie = pd.Series(dict(zip(labels, values)))
print(my_serie.to_frame().reset_index())
import configparser
import os
import tkinter
from enum import Enum
from tkinter.filedialog import askopenfilename
class CalculationType(Enum):
    """The two kinds of calculation a settings file can describe."""

    # NOTE(review): the MEDALS value is literally "meals" — this looks like
    # a typo for "medals", but it is preserved because persisted data or
    # configs may already depend on it; confirm before changing.
    MEDALS = "meals"
    SHARDS = "shards"

    def isMedalCalc(self):
        """True when this member is the medal-calculation type."""
        return self is CalculationType.MEDALS

    def isShardCalc(self):
        """True when this member is the shard-calculation type."""
        return self is CalculationType.SHARDS
class MainSettings:
    """Static holder for the loaded .ini configuration and the calculation
    type derived from it; all state lives on the class, not instances."""

    # Hard-coded settings path used while debugging (see the commented-out
    # alternatives in init()/__readConfig()).
    debugFile = 'medalcalc/settings.ini'
    # Set by init(): MEDALS when the file has a [dungeons] section,
    # SHARDS otherwise.
    calculationType: CalculationType
    config: configparser.ConfigParser = None

    @staticmethod
    def init():
        """Load a settings file via a file dialog and derive the
        calculation type; raises when no file could be loaded."""
        file = MainSettings.__readConfig(fileDialog=True)
        # file = MainSettings.__readConfig(fileName=MainSettings.debugFile)
        # file = MainSettings.__readConfig()
        if not file:
            # input() pauses so the user sees the message before the
            # exception terminates the program.
            input("A settings file could not be loaded.")
            raise Exception("Settings file could not be loaded")
        print(f"Using settings file: {file}")
        # The presence of a [dungeons] section marks a medal-calculation file.
        isMedalCalc = "dungeons" in MainSettings.config.sections()
        if isMedalCalc:
            MainSettings.calculationType = CalculationType.MEDALS
        else:
            MainSettings.calculationType = CalculationType.SHARDS

    @staticmethod
    def __readConfig(fileName: str = "", fileDialog: bool = False) -> str:
        """Parse *fileName* (or a dialog-chosen file when *fileDialog* is
        True) into MainSettings.config.

        Returns the file's basename, or "" when nothing could be read.
        """
        path = fileName
        # path = MainSettings.debugFile
        # config = configparser.ConfigParser()
        # readInput = config.read(path)
        # if len(readInput) == 0:
        # path = MainSettings.__readFile()
        # config = configparser.ConfigParser()
        # readInput = config.read(path)
        if fileDialog:
            path = MainSettings.__readFile()
        config = configparser.ConfigParser()
        # config.read() returns the list of files successfully parsed;
        # empty means the path was missing/unreadable.
        readInput = config.read(path)
        if len(readInput) == 0:
            return ""
        MainSettings.config = config
        return os.path.basename(path)

    @staticmethod
    def __readFile():
        """Show an open-file dialog filtered to .ini files; returns the
        chosen path (empty string when cancelled)."""
        # withdraw() hides the root window so only the dialog appears.
        tkinter.Tk().withdraw()
        return tkinter.filedialog.askopenfilename(
            filetypes=[("Settings file", "*.ini")],
            title="Choose a settings file."
        )

    @staticmethod
    def isShardCalc():
        """True when the loaded settings describe a shard calculation."""
        return MainSettings.calculationType.isShardCalc()

    @staticmethod
    def isMedalCalc():
        """True when the loaded settings describe a medal calculation."""
        return MainSettings.calculationType.isMedalCalc()
|
from django.shortcuts import render
import numpy as np
import pandas as pd
from django.views.generic import ListView, View
from django.views import View
# Data files are loaded once at module import and shared by the views below.
m_df = pd.read_csv('ipl_analysis/matches.csv')
d_df = pd.read_csv('ipl_analysis/deliveries.csv')
class Index(View):
    """Landing page: renders the template with the list of IPL seasons."""

    template_name = "index.html"

    def get(self, request):
        """Return the index page with all distinct seasons for selection."""
        seasons_list = m_df['season'].unique()
        return render(request, self.template_name, context={'seasons':seasons_list })
class IPLMatchStats(View):
    """Render per-season IPL match statistics.

    NOTE: ``df_by_season`` starts as a class-level default (season 2017)
    and is overwritten on the instance inside ``get`` whenever a season
    is supplied.
    """
    template_name = "index.html"
    df_by_season = m_df[m_df['season']==2017]

    def loc_won_loss_percent(self):
        """Win/loss percentages per team in the city that hosted the most
        matches of the selected season.

        Returns a list of ``{'team', 'win', 'loss'}`` dicts (percentages).
        """
        # City that hosted the most matches this season.
        max_played_loc = self.df_by_season['city'].value_counts().keys()[0]
        win_loss_df = self.df_by_season[self.df_by_season[
            'city']==max_played_loc][['team1','team2','winner']]
        # The loser is whichever of team1/team2 is not the winner.
        win_loss_df['loser'] = np.where(
            win_loss_df['team1']==win_loss_df['winner'],
            win_loss_df['team2'],win_loss_df['team1'])
        win_loss_df = win_loss_df[['winner','loser']]
        win_in_city = win_loss_df.winner.value_counts().to_dict()
        loss_in_city = win_loss_df.loser.value_counts().to_dict()
        win_loss_per = pd.DataFrame(columns=[
            'teams','win_percent','loss_percent'])
        win_loss_per['teams'] = np.unique(
            win_loss_df[['winner','loser']].values)
        # BUG FIX: the original used chained indexing
        # (``win_loss_per.loc[i]['win_percent'] = value``), which assigns
        # into a temporary object and can silently leave the frame
        # unchanged; single ``.loc[row, col] = value`` writes in place.
        for key, value in win_in_city.items():
            for i in range(len(win_loss_per['teams'])):
                if win_loss_per.loc[i, 'teams'] == key:
                    win_loss_per.loc[i, 'win_percent'] = value
        for key, value in loss_in_city.items():
            for i in range(len(win_loss_per['teams'])):
                if win_loss_per.loc[i, 'teams'] == key:
                    win_loss_per.loc[i, 'loss_percent'] = value
        # BUG FIX: ``df[col].fillna(..., inplace=True)`` mutates a column
        # view (chained assignment); assign the filled column back instead.
        win_loss_per['win_percent'] = win_loss_per['win_percent'].fillna(0)
        win_loss_per['loss_percent'] = win_loss_per['loss_percent'].fillna(0)
        # Convert raw counts to percentages; the tuple assignment evaluates
        # both right-hand sides before either column is replaced.
        win_loss_per['win_percent'], win_loss_per['loss_percent'] =\
            win_loss_per['win_percent']*100/(
                win_loss_per['win_percent']+win_loss_per['loss_percent']),\
            win_loss_per['loss_percent']*100/(
                win_loss_per['win_percent']+win_loss_per['loss_percent'])
        data = []
        for i in range(len(win_loss_per['teams'])):
            data.append({
                'team': win_loss_per.loc[i, 'teams'],
                'win': win_loss_per.loc[i, 'win_percent'],
                'loss': win_loss_per.loc[i, 'loss_percent']
            })
        return data

    def get(self, request, season=None):
        """Render the statistics page for *season*; without a season, fall
        back to just the season list (same shape as Index.get)."""
        seasons_list = m_df['season'].unique()
        if season:
            # Filter all matches down to the requested season.
            self.df_by_season = m_df[m_df['season']==season]
            # 1. top four winner teams of the season
            top_four_teams = self.df_by_season['winner'].value_counts()[:4]
            # 2. team that won the most tosses in the season
            top_toss_winners = self.df_by_season['toss_winner'].value_counts()
            # 3. player with the most Player of the Match awards
            top_player_of_match = self.df_by_season[
                'player_of_match'].value_counts()[:1]
            # 4. team that won the most matches in the season
            team_won_max_matches = top_four_teams[:1]
            # 5. location with the most wins for the top team
            max_played_location = self.df_by_season[self.df_by_season[
                'winner']==team_won_max_matches.keys()[0]][
                'city'].value_counts()
            # 6. percentage of toss winners that chose to bat
            total_toss_winners = self.df_by_season['toss_winner'].count()
            toss_winner_bat_first = self.df_by_season['toss_winner'][
                self.df_by_season['toss_decision']=='bat'].count()
            percentage_of_toss_winner_bat_first =\
                (toss_winner_bat_first*100)/total_toss_winners
            # 7. most-used location and each team's win/loss % there
            loc_won_loss_percent = self.loc_won_loss_percent()
            # 8. team that won by the highest margin of runs
            run_margin = self.df_by_season[self.df_by_season.win_by_runs==\
                self.df_by_season.win_by_runs.max()][['winner','win_by_runs']]
            # 9. team that won by the highest number of wickets
            team_won_high_num = self.df_by_season[['winner', 'win_by_wickets']
                ].groupby('winner').sum()
            team_won_high_num = team_won_high_num.sort_values(
                by='win_by_wickets', ascending=False)
            # 10. how often each team won both the toss and the match
            team_win_toss_and_match = self.df_by_season[
                self.df_by_season['toss_winner']==self.df_by_season['winner']]\
                ['toss_winner'].value_counts().to_dict()
            context = {
                'seasons': seasons_list,
                'is_season': season,
                'top_four_teams': top_four_teams.keys(),
                'top_toss_winner': top_toss_winners.keys()[0],
                'top_player_of_match': top_player_of_match.keys()[0],
                'team_won_max_matches': team_won_max_matches.keys()[0],
                'max_played_location': max_played_location[:1].keys()[0],
                'percentage_of_toss_winner_bat_first':
                    percentage_of_toss_winner_bat_first,
                'win_loss_per': loc_won_loss_percent,
                'run_margin': run_margin.values,
                'team_won_high_num': team_won_high_num.iloc[0],
                'team_win_toss_and_match': team_win_toss_and_match,
            }
            return render(request, self.template_name, context)
        # ROBUSTNESS FIX: always return an HttpResponse — without a season
        # the original could fall through without returning a response.
        return render(request, self.template_name, {'seasons': seasons_list})
|
# ============LICENSE_START=======================================================
# Copyright (c) 2017-2021 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============LICENSE_END=========================================================
import argparse
import array
import asyncio
import collections
import datetime
import errno
import inspect
import json
import logging
import logging.handlers
import os
import pprint
import re
import requests
import signal
import socket
import string
import sys
import time
import traceback
import trapd_settings
import trapd_settings as tds
import unicodedata
import uuid as uuid_mod
from collections import Counter
from onap_dcae_cbs_docker_client.client import get_config
from pysnmp.carrier.asyncio.dgram import udp, udp6
# from pysnmp.carrier.asyncore.dgram import udp
from pysnmp.entity import engine, config
from pysnmp.entity.rfc3413 import ntfrcv
from pysnmp.proto.api import v2c
from trapd_exit import cleanup_and_exit
from trapd_file_utils import roll_all_logs, open_eelf_logs, roll_file, open_file, close_file
from trapd_get_cbs_config import get_cbs_config
from trapd_http_session import init_session_obj
from trapd_logging import ecomp_logger, stdout_logger
from trapd_logging import stdout_logger
from trapd_runtime_pid import save_pid, rm_pid
# NOTE(review): this is a setup.py fragment appended after the trap-receiver
# imports. `parse_requirements`, `PipSession`, `setup` and `find_packages`
# are never imported in this file (they come from pip's internal API and
# setuptools), so executing this module raises NameError here — confirm
# whether this block belongs in a separate setup.py.
install_reqs = parse_requirements("requirements.txt", session=PipSession())
reqs = [str(ir.req) for ir in install_reqs]
setup(
    name="dcaegen2-collectors-snmptrap",
    description="snmp trap receiver for ONAP docker image",
    version="1.4.0",
    packages=find_packages(),
    author="Dave L",
    author_email="dl3158@att.com",
    license="Apache 2",
    keywords="",
    url="",
    install_requires=reqs,
)
|
class WaitlistNames:
    """String constants used as waitlist category identifiers."""

    logi = "logi"
    dps = "dps"
    sniper = "sniper"
    # NOTE: attribute name and value differ here ("xup_queue" vs "queue").
    xup_queue = "queue"
    other = "other"
    DEFAULT_PREFIX = "default"
|
import re
def find_episode_titles(series):
    """Placeholder: look up the episode titles for *series* (not implemented)."""
    raise NotImplementedError()
def best_movie_from_year(year, minimum_count = 10000):
    """Placeholder: find the best movie of *year* with at least
    *minimum_count* votes (not implemented)."""
    raise NotImplementedError()
def episode_count():
    """Placeholder: count episodes (not implemented)."""
    raise NotImplementedError()
def series_average_ratings():
    """Placeholder: compute per-series average ratings (not implemented)."""
    raise NotImplementedError()
|
# -*- coding: utf-8 -*-
"""Application configuration."""
import os
class Config(object):
    """Base configuration; secrets and connection settings are read from
    environment variables at import time."""

    SECRET_KEY = os.environ.get('FLASKAPP_SECRET')
    # Directory containing this module, and the repository root above it.
    APP_DIR = os.path.abspath(os.path.dirname(__file__))
    PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir))
    ASSETS_DEBUG = False
    DEBUG_TB_ENABLED = False
    DEBUG_TB_INTERCEPT_REDIRECTS = False
    CACHE_TYPE = 'simple'
    # SQLAlchemy settings
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL')
    # SMTP Settings for sending mails
    SMTP_SERVER = os.environ.get('MAILGUN_SMTP_SERVER')
    SMTP_PORT = os.environ.get('MAILGUN_SMTP_PORT')
    SMTP_LOGIN = os.environ.get('MAILGUN_SMTP_LOGIN')
    SMTP_PASSWORD = os.environ.get('MAILGUN_SMTP_PASSWORD')
    SMTP_TARGET = os.environ.get('SMTP_TARGET')
|
"""add type check utils"""
def type_check(obj: object, type_name: str) -> bool:
    """Compare *obj*'s class name against *type_name*.

    Lets a caller check a value's type without importing the type itself —
    useful when a direct ``isinstance(msg, Contact)`` would require an
    import that creates a circular dependency (the type may only be
    available under ``TYPE_CHECKING``): write ``type_check(msg, 'Contact')``
    instead.

    :param obj: any object
    :param type_name: the expected class name
    :return: True iff obj's class is literally named *type_name*
    """
    cls = getattr(obj, '__class__', None)
    return getattr(cls, '__name__', None) == type_name
|
# https://www.hackerrank.com/challenges/py-collections-deque
'''collections.deque()
A deque is a double-ended queue.
It can be used to add or remove elements from both ends.
Deques support thread safe, memory efficient appends and pops
from either side of the deque with approximately
the same performance in either direction.
Click on the link to learn more about deque() methods.
Click on the link to learn more about various approaches
to working with deques: Deque Recipes.
Example
>>> from collections import deque
>>> d = deque()
>>> d.append(1)
>>> print d
deque([1])
>>> d.appendleft(2)
>>> print d
deque([2, 1])
>>> d.clear()
>>> print d
deque([])
>>> d.extend('1')
>>> print d
deque(['1'])
>>> d.extendleft('234')
>>> print d
deque(['4', '3', '2', '1'])
>>> d.count('1')
1
>>> d.pop()
'1'
>>> print d
deque(['4', '3', '2'])
>>> d.popleft()
'4'
>>> print d
deque(['3', '2'])
>>> d.extend('7896')
>>> print d
deque(['3', '2', '7', '8', '9', '6'])
>>> d.remove('2')
>>> print d
deque(['3', '7', '8', '9', '6'])
>>> d.reverse()
>>> print d
deque(['6', '9', '8', '7', '3'])
>>> d.rotate(3)
>>> print d
deque(['8', '7', '3', '6', '9'])
'''
from collections import deque

d = deque()
# First input line: the number of operations to perform.
N = int(input().strip())
for _ in range(N):
    # Each following line is "<method>" or "<method> <int-arg>".
    data = input().strip().split(' ')
    if len(data) == 2:
        med, val = data
        # Look the named method up on the deque class and invoke it with
        # the deque and the integer argument (e.g. append/appendleft).
        result = getattr(deque, med)(d, int(val))
    else:
        med = data[0]
        # Zero-argument methods such as pop/popleft.
        result = getattr(deque, med)(d)
# Print the final deque contents, space-separated on one line.
for i in d:
    print(i, end=' ')
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from textwrap import dedent
import pytest
from pants.backend.kotlin.dependency_inference import kotlin_parser, symbol_mapper
from pants.backend.kotlin.dependency_inference.rules import (
InferKotlinSourceDependencies,
KotlinSourceDependenciesInferenceFieldSet,
)
from pants.backend.kotlin.dependency_inference.rules import rules as dep_inference_rules
from pants.backend.kotlin.target_types import KotlinSourcesGeneratorTarget
from pants.backend.kotlin.target_types import rules as kotlin_target_type_rules
from pants.build_graph.address import Address
from pants.core.util_rules import config_files, source_files
from pants.engine.addresses import Addresses, UnparsedAddressInputs
from pants.engine.internals.parametrize import Parametrize
from pants.engine.rules import QueryRule
from pants.engine.target import (
DependenciesRequest,
ExplicitlyProvidedDependencies,
InferredDependencies,
Targets,
)
from pants.jvm import jdk_rules
from pants.jvm.dependency_inference import artifact_mapper
from pants.jvm.dependency_inference import symbol_mapper as jvm_symbol_mapper
from pants.jvm.resolve import jvm_tool
from pants.jvm.testutil import maybe_skip_jdk_test
from pants.jvm.util_rules import rules as jvm_util_rules
from pants.testutil.rule_runner import PYTHON_BOOTSTRAP_ENV, RuleRunner
@pytest.fixture
def rule_runner() -> RuleRunner:
    """Build a RuleRunner wired with every rule set needed for Kotlin
    dependency inference (parsing, symbol mapping, JVM plumbing)."""
    rule_runner = RuleRunner(
        rules=[
            *config_files.rules(),
            *jvm_tool.rules(),
            *dep_inference_rules(),
            *kotlin_parser.rules(),
            *symbol_mapper.rules(),
            *kotlin_target_type_rules(),
            *source_files.rules(),
            *jvm_util_rules(),
            *jdk_rules.rules(),
            *artifact_mapper.rules(),
            *jvm_symbol_mapper.rules(),
            QueryRule(Addresses, [DependenciesRequest]),
            QueryRule(ExplicitlyProvidedDependencies, [DependenciesRequest]),
            QueryRule(InferredDependencies, [InferKotlinSourceDependencies]),
            QueryRule(Targets, [UnparsedAddressInputs]),
        ],
        target_types=[KotlinSourcesGeneratorTarget],
        objects={"parametrize": Parametrize},
    )
    rule_runner.set_options(args=[], env_inherit=PYTHON_BOOTSTRAP_ENV)
    return rule_runner
@maybe_skip_jdk_test
def test_infer_kotlin_imports_same_target(rule_runner: RuleRunner) -> None:
    """Two sources in one target with no imports infer no dependencies."""
    rule_runner.write_files(
        {
            "BUILD": dedent(
                """\
                kotlin_sources(name = 't')
                """
            ),
            "A.kt": dedent(
                """\
                package org.pantsbuild.a
                class A {}
                """
            ),
            "B.kt": dedent(
                """\
                package org.pantsbuild.b
                class B {}
                """
            ),
        }
    )
    target_a = rule_runner.get_target(Address("", target_name="t", relative_file_path="A.kt"))
    target_b = rule_runner.get_target(Address("", target_name="t", relative_file_path="B.kt"))
    assert rule_runner.request(
        InferredDependencies,
        [InferKotlinSourceDependencies(KotlinSourceDependenciesInferenceFieldSet.create(target_a))],
    ) == InferredDependencies([])
    assert rule_runner.request(
        InferredDependencies,
        [InferKotlinSourceDependencies(KotlinSourceDependenciesInferenceFieldSet.create(target_b))],
    ) == InferredDependencies([])
@maybe_skip_jdk_test
def test_infer_kotlin_imports_with_cycle(rule_runner: RuleRunner) -> None:
    """Mutual imports across two targets infer a dependency each way."""
    rule_runner.write_files(
        {
            "BUILD": dedent(
                """\
                kotlin_sources(name = 'a')
                """
            ),
            "A.kt": dedent(
                """\
                package org.pantsbuild.a
                import org.pantsbuild.b.B
                class A {}
                """
            ),
            "sub/BUILD": dedent(
                """\
                kotlin_sources(name = 'b',)
                """
            ),
            "sub/B.kt": dedent(
                """\
                package org.pantsbuild.b
                import org.pantsbuild.a.A
                class B {}
                """
            ),
        }
    )
    target_a = rule_runner.get_target(Address("", target_name="a", relative_file_path="A.kt"))
    target_b = rule_runner.get_target(Address("sub", target_name="b", relative_file_path="B.kt"))
    assert rule_runner.request(
        InferredDependencies,
        [InferKotlinSourceDependencies(KotlinSourceDependenciesInferenceFieldSet.create(target_a))],
    ) == InferredDependencies([target_b.address])
    assert rule_runner.request(
        InferredDependencies,
        [InferKotlinSourceDependencies(KotlinSourceDependenciesInferenceFieldSet.create(target_b))],
    ) == InferredDependencies([target_a.address])
@maybe_skip_jdk_test
def test_infer_kotlin_imports_ambiguous(rule_runner: RuleRunner, caplog) -> None:
    """An ambiguously-provided symbol is not inferred (with a warning); an
    explicit `!` exclusion disambiguates and restores inference."""
    ambiguous_source = dedent(
        """\
        package org.pantsbuild.a
        class A {}
        """
    )
    rule_runner.write_files(
        {
            "a_one/BUILD": "kotlin_sources()",
            "a_one/A.kt": ambiguous_source,
            "a_two/BUILD": "kotlin_sources()",
            "a_two/A.kt": ambiguous_source,
            "b/BUILD": "kotlin_sources()",
            "b/B.kt": dedent(
                """\
                package org.pantsbuild.b
                import org.pantsbuild.a.A
                class B {}
                """
            ),
            "c/BUILD": dedent(
                """\
                kotlin_sources(
                  dependencies=["!a_two/A.kt"],
                )
                """
            ),
            "c/C.kt": dedent(
                """\
                package org.pantsbuild.c
                import org.pantsbuild.a.A
                class C {}
                """
            ),
        }
    )
    target_b = rule_runner.get_target(Address("b", relative_file_path="B.kt"))
    target_c = rule_runner.get_target(Address("c", relative_file_path="C.kt"))
    # Because there are two sources of `org.pantsbuild.a.A`, neither should be inferred for B. But C
    # disambiguates with a `!`, and so gets the appropriate version.
    caplog.clear()
    assert rule_runner.request(
        InferredDependencies,
        [InferKotlinSourceDependencies(KotlinSourceDependenciesInferenceFieldSet.create(target_b))],
    ) == InferredDependencies([])
    assert len(caplog.records) == 1
    assert "The target b/B.kt imports `org.pantsbuild.a.A`, but Pants cannot safely" in caplog.text
    assert rule_runner.request(
        InferredDependencies,
        [InferKotlinSourceDependencies(KotlinSourceDependenciesInferenceFieldSet.create(target_c))],
    ) == InferredDependencies([Address("a_one", relative_file_path="A.kt")])
@maybe_skip_jdk_test
def test_infer_same_package_via_consumed_symbol(rule_runner: RuleRunner) -> None:
    """Consuming a symbol from the same package (no import statement) still
    infers a dependency on the file that provides it."""
    rule_runner.write_files(
        {
            "BUILD": dedent(
                """\
                kotlin_sources(name = 'a')
                """
            ),
            "A.kt": dedent(
                """\
                package org.pantsbuild.kotlin.example
                class A {
                  def grok() {}
                }
                """
            ),
            "Main.kt": dedent(
                """\
                package org.pantsbuild.kotlin.example
                def main(args: Array<String>) {
                  val a = A()
                  a.grok()
                }
                """
            ),
        }
    )
    target_a = rule_runner.get_target(Address("", target_name="a", relative_file_path="A.kt"))
    target_main = rule_runner.get_target(Address("", target_name="a", relative_file_path="Main.kt"))
    assert rule_runner.request(
        InferredDependencies,
        [InferKotlinSourceDependencies(KotlinSourceDependenciesInferenceFieldSet.create(target_a))],
    ) == InferredDependencies([])
    assert rule_runner.request(
        InferredDependencies,
        [
            InferKotlinSourceDependencies(
                KotlinSourceDependenciesInferenceFieldSet.create(target_main)
            )
        ],
    ) == InferredDependencies([target_a.address])
|
# BUG FIX: input() returns a string, and applying ``%`` to a raw string
# attempts printf-style formatting and raises TypeError. Convert both
# inputs to int before the parity checks.
a = int(input("정수 입력(a) :"))
b = int(input("정수 입력(b) :"))
# Both even?
if (a % 2 == 0) and (b % 2 == 0):
    print("두 수 모두 짝수입니다.")
# At least one even?
if (a % 2 == 0) or (b % 2 == 0):
    print("두 수 중 하나 이상이 짝수입니다.")
import pandas as pd
import matplotlib.pyplot as plt

# Load the benchmark results and split them into dense (sparsity == 1)
# and sparse series for a horizontal, log-scaled bar chart.
df = pd.read_csv('value-benchmarks-2019-01.csv')

dense_names, dense_ns = [], []
sparse_names, sparse_ns = [], []
for _, row in df.iterrows():
    label = '%s-%s-%s' % (row['func'], row['depth'], row['sparsity'])
    if row['sparsity'] == 1:
        dense_names.append(label)
        dense_ns.append(row['ns'])
    else:
        sparse_names.append(label)
        sparse_ns.append(row['ns'])

plt.barh(dense_names, dense_ns, label='dense')
plt.barh(sparse_names, sparse_ns, label='sparse')
ax = plt.gca()
ax.set_xscale("log")
plt.legend()
plt.show()
# l: an initial integer; r: a percentage read as an int, scaled to a fraction.
l, r = int(input()), int(input()) / 100
count = 1
result = 0
while True:
    # Shrink l by the ratio each round (truncated to an int).
    l = int(l*r)
    # Stop once the shrunken value drops to 5 or below.
    if l <= 5:
        break
    # Each surviving round contributes 2**count copies of the shrunken value.
    result += (2**count)*l
    count += 1
print(result)
# -*- coding: utf-8 -*-
# netos/urls.py
from django.conf.urls import url
from django.contrib import admin
from django.urls import path
from . import views # import of application views
app_name = 'netos' # application namespace
# URL routes: site index plus login/logout pages; the per-room route is
# kept disabled (commented out) below.
urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^login/$', views.loginPage, name='login'),
    url(r'^logout/$', views.logoutPage, name='logout'),
    # url(r'^<str:room_name>/$', views.room, name='room'),
]
|
# Read the two grid dimensions; swap if needed so rlen >= r, making r the
# number of rows read below (each row has rlen values).
rlen,r=map(int,input().split())
if rlen<r:
    rlen,r=r,rlen
l=[]
# Read each row and sort it in place.
for m in range(r):
    temp=list(map(int,input().split()))
    temp.sort()
    l.append(temp)
# Sort each column as well: extract column i, sort it, write it back.
for i in range(rlen):
    t=[]
    for j in range(r):
        t.append(l[j][i])
    t.sort()
    for j in range(r):
        l[j][i]=t[j]
# Print the resulting grid row by row, space-separated.
for i in range(r):
    print(*l[i])
print("布尔表达式")
print(True,False)
# result: True,False
print(True == 1)
# result: True
print(True + 2)
# result: 3
print(True + False*3)
# result: 1
print(3 > 2)
# result :True
print((1 < 3)*10)
# result: 10
print('-'*70)
print("条件分支")
# Example 1: choose an activity based on the weather
weather = 'sunny'
if weather =='sunny':
    print("shopping")
elif weather =='cloudy':
    print("playing football")
else:
    print("learning python")
# result: shopping
# Example 2: pick the larger of two numbers
import math
a = math.pi
b = math.sqrt(9.5)
if a>b:
    print(a)
else:
    print(b)
# result: 3.14159265359
# Example 3: simple sort of three numbers
first = 1
second = 2
third = 3
if second<third:
    t = second
    second = third
    third = t
if first<second:
    t = first
    first = second
    second = t
if second<third:
    t = second
    second = third
    third = t
print(first,'>',second,'>',third)
#result: 3 > 2 > 1
print('-'*70)
print('''while循环''')
# Example 1: sum the integers 1..1000
a = 1000
s = 0
while a:
    s+=a
    a-=1
print(s)
# result: 500500
# Example 2: simple arithmetic quiz
# NOTE(review): eval() on raw input executes arbitrary code — fine for a
# local tutorial, never for untrusted input.
while True:
    s = eval(input('1+2='))
    if s ==3:
        print('答案正确')
        break
    if s>=0 and s<=9:
        continue
    print('答案是个位数')
print('-'*70)
print('''简单for循环''')
# Iterate over a list and over a string
for a in ['e','f','g']:
    print(a, end=' ')
# result:e f g
print()
for a in 'string':
    print(a, end=' ')
# result:s t r i n g
print()
print('-'*70)
print('''range()函数''')
print(list(range(2,9)))
# result: [2, 3, 4, 5, 6, 7, 8]
print(list(range(2,9,3))) # step of 3 between neighbouring elements
# result: [2, 5, 8]
print('-'*70)
# A plain for loop cannot modify the elements of a sequence
L = [1,2,3]
for a in L:
    a+=1 # a is a copy, not a reference; the element in L is unchanged
print(L)
# result: [1,2,3]
# range() with len() walks the sequence and modifies elements via the index
for i in range(len(L)):
    L[i]+=1 # access by index
print(L)
# result: [2,3,4]
print('-'*70)
print('''循环中的else语句''')
# Naive prime search: the for/else branch runs only when no break occurred.
for n in range(2,10):
    for x in range(2,n):
        if n%x ==0:# n has a non-trivial factor, so it is composite
            print(n,'equals',x,'*',n/x)
            break
    else:
        print(n,'是一个质数')# loop finished without break: n is prime
from django.apps import AppConfig
class LoggingDbConfig(AppConfig):
    """Django application configuration for the logging_db app."""

    name = 'logging_db'
|
from django.http import HttpResponse
from django.shortcuts import render
import operator
def home(request):
    """Render the word-count input page."""
    return render(request,'index.html')
def count(request):
    """Count word frequencies in the submitted text and render the result
    page with the words sorted by descending frequency."""
    fullname = request.GET['fullname']
    words = fullname.split()
    # dict.get collapses the add/update branches into a single line.
    frequencies = {}
    for token in words:
        frequencies[token] = frequencies.get(token, 0) + 1
    sortedword = sorted(frequencies.items(), key=operator.itemgetter(1), reverse=True)
    return render(request, 'count.html',
                  {'fullname': fullname,
                   'count': len(words),
                   'worddictionarys': sortedword})
def about(request):
    """Render the static about page."""
    return render(request,'about.html')
#!/usr/bin/python
# -*- coding: latin-1
###############################################################################
###############################################################################
## Title: brain_inspect.py #
## Author: Jose Etxeberria Mendez #
## Release date: 19/02/2018 #
## Brief description: Analize brain slices, locate and characterize #
## temporal lobule. #
## Language version: Python 2.7.12 #
## Tested over OS: Ubuntu 16.04 LTS #
## Tested over processor: Intel(R) Core(TM) i7-3632QM CPU (x86_64) #
###############################################################################
###############################################################################
## Large description: #
## This file executes secuencially the different commands that together #
## form an 3D images inspection and the treatement of their corresponding#
## 2D slices to extract images information. Brain temporal lobule must be#
## located and it must be analized the necesity of including domain #
## knowledge in the different vision process description levels. #
## #
## Main steps: #
## 1 - Obtain 2D slices from 3D images #
## 2 - Locate blobs in 2D #
## 3 - Identify objects in 2D #
## 4 - Characterize turns and grooves of gray matter (GM) #
###############################################################################
## Notification: The current program is expanded in a Jupyter Notebook that#
## allows the user to interact with the code in a easy way #
## to achieve a better comprehension. The steps to follow to #
## properly execute the notebook, having access to the #
## associated files, are explained in the README.md file of #
## the repository that contains such files and the notebook: #
## https://github.com/jetxeberria/computer_vision.git #
###############################################################################
###############################################################################
## Descripción extendida: #
## Este archivo se encarga de la ejecución secuencial de las distintas #
## órdenes que en conjunto conforman la inspección de una serie de #
## imágenes 3D y el procesamiento de sus cortes 2D correspondientes para #
## extraer información de las imágenes. Se debe localizar el lobulo #
## temporal del cerebro y analizar la necesidad de aporte de conocimiento#
## del dominio en los distintos niveles de descripción del proceso de #
## visión. #
## #
## Pasos principales: #
## 1 - Obtención de cortes 2D a partir de imágenes 3D #
## 2 - Localización de blobs en 2D #
## 3 - Identificación de objetos en 2D #
## 4 - Caracterización de giros y surcos de la materia gris (GM) #
###############################################################################
## Notificación: El presente programa es expandido en un Notebook Jupyter #
## que permite al usuario interactuar con el código de una #
## manera fácil para lograr una mejor comprensión. Los pasos #
## a seguir para ejecutar correctamente el notebook, teniendo#
## acceso a los ficheros asociados, están explicados en el #
## archivo README.md del repositorio que contiene dichos #
## ficheros y el notebook: #
## https://github.com/jetxeberria/computer_vision.git #
###############################################################################
###############################################################################
# At starting point the user is suppossed to have the following directory
# structure:
# .
# ├── brain_inspect.py
# ├── computer_vision_notebook_02.ipynb
# ├── computer_vision_notebook_01.ipynb
# ├── datos
# │ ├── I3TCSF.hdr
# │ ├── I3TCSF.img
# │ ├── I3TGM.hdr
# │ ├── I3TGM.img
# │ ├── I3T.hdr
# │ ├── I3T.img
# │ ├── I3TWM.hdr
# │ ├── I3TWM.img
# │ └── preprocess
# │ ├── CSF/
# │ ├── GM/
# │ ├── I3T/
# │ └── WM/
# ├── lib
# │ ├── blob_lib.py
# │ ├── image_manager.py
# │ ├── imobj_lib.py
# │ ├── locate_blobs.py
# │ ├── logger_lib.py
# │ ├── matter_lib.py
# │ ├── MRI_inspector.py
# │ ├── slice_lib.py
# │ └── __init__.py
# └── README.md
###############################################################################
##---------------------------------------------------------------------------##
###############################################################################
# ---------------------------------------------------------------------------
# Imports and run-wide setup: grayscale plotting default and run log.
# ---------------------------------------------------------------------------
from matplotlib import pyplot as plt # Used for image showing
import time # Executing time calculator
import numpy as np # Used for arrays management
import lib.logger_lib as logger # Log creation
import lib.MRI_inspector as mri # 3D objects management
import sys # Used to locate file in disk

# Directory containing this script (sys.path[0] is the script's own folder).
source_dir = sys.path[0]
# MRI data is single-channel, so show every image in grayscale by default.
plt.rcParams["image.cmap"] = "gray"

# Start log
log = logger.Logger(source_dir+'/evaluation/logs/log.txt')
# Fixed: time.clock() was deprecated since Python 3.3 and removed in 3.8;
# perf_counter() is the documented replacement for elapsed-time measurement.
start_time = time.perf_counter()
log_file_heading = 'File: '+__file__.split('/')[-1]+' '
log_class_heading = log_file_heading+'Class: None '
log_method_heading = log_class_heading+'Method: None ::: '
log.write_log('log', log_method_heading+'Start time: '+str(start_time)
              +' seconds.')
###############################################################################
##---------------------------------------------------------------------------##
###############################################################################
# Step 1: Obtain 2D slices from 3D images.
# A: Include data files path and names.
# B: Make matter objects with the file names and path.
# C: Read image into matter_objects.
# D: Preprocess image.
# E: Make slices objects respect to each matter.
# F: Save each slice matrix in a common image format. (PNG)
# G: Check the behaviour and management of PNG. (Only in notebook)
## Step 1.A: Include data files path and names.
# Step 1: build one matter object per modality file and slice each 3D volume.
inspector = mri.MRI_inspect(log)
inspector.set_files_path(source_dir+'/datos/')
# WM = white matter, GM = gray matter, CSF = cerebrospinal fluid,
# I3T = the full (unsegmented) 3 Tesla volume.
inspector.set_files_names(['I3TWM.hdr','I3TGM.hdr','I3TCSF.hdr','I3T.hdr'])
## Step 1.B: Make matter objects with the file names and path.
inspector.make_matter_objs()
for matter_obj in inspector.matter_obj_list:
    print ('Files processing initialization:')
    ## Step 1.C: Read image into matter_objects.
    print ('Reading file {}...'.format(matter_obj.file_MRI_name))
    matter_obj.read_file()
    ## Step 1.D: Preprocess image.
    print ('Preprocessing 3D image...')
    matter_obj.preprocess()
    print ('Binarizing 3D image...')
    # NOTE(review): 80 appears to be an intensity threshold on the
    # preprocessed volume — confirm against matter_lib before tuning.
    matter_obj.binarize(threshold=80)
    ## Step 1.E: Make slices objects respect to each matter.
    print ('Making slice objects of 3D image...')
    matter_obj.make_slice_objs()
    print ('Done.\n')
## Step 1.F: Save each slice matrix in a common image format. (PNG)
# Saving is opt-in via the '-save' command-line flag.
if '-save' in sys.argv:
    for matter_obj in inspector.matter_obj_list:
        print ('Saving slices of matter \'{}\'...'.format(matter_obj.name))
        for slice_obj in matter_obj.slices_obj_list:
            # NOTE(review): the directory tree in the header lists
            # 'datos/preprocess/', but this writes to 'datos/preprocessed/'
            # — confirm which name is correct.
            slice_obj.save_slice(source_dir+'/datos/preprocessed/'+matter_obj.name+'/',
                                 fext='.png')
        print ('Done.\n')
## Step 1.G: Check the behaviour and management of PNG (Only in notebook)
###############################################################################
##---------------------------------------------------------------------------##
###############################################################################
# Step 2: Locate blobs in 2D
# A: Segment slices into different blobs by differently labelling them.
# B: Make blob objects respect to each slice.
# C: Locate centroids and inner regions for future processing.
# D: Plot blobs labelled with centroids as label position.
# It is performed blob localization and objects creation for WM, GM and CSF:
# Step 2: segment each 2D slice into labelled blobs and locate their
# centroids / inner regions.
print ('Blobs processing:')
# NOTE(review): [:-2] processes only the first two matters (WM, GM) even
# though the comment above claims WM, GM and CSF — confirm intent.
for matter_obj in inspector.matter_obj_list[:-2]:
    print ('Finding blobs of slices of matter {}...'.format(matter_obj.name))
    for slice_obj in matter_obj.slices_obj_list:
        ## Step 2.A: Segment slices into different blobs.
        slice_obj.find_slice_labels()
        ## Step 2.B: Make blob objects respect to each slice
        slice_obj.make_blob_objs()
    print ('Blob objects of each slice are created.')
    print ('Done.\n')
## Step 2.C: Locate centroids and inner regions for future processing.
for matter_obj in inspector.matter_obj_list[:-2]:
    print ('Locating centroids and inner regions of blobs of slices of matter '
           '{}...'.format(matter_obj.name))
    for slice_obj in matter_obj.slices_obj_list:
        for blob_obj in slice_obj.blobs_obj_list:
            blob_obj.find_blob_centroid()
            blob_obj.find_inner_region()
    # NOTE(review): this message still carries a '{}...' placeholder from a
    # copy-paste; it prints the matter name after a full stop.
    print ('Centroids and inner regions located.'
           '{}...'.format(matter_obj.name))
    print ('Done.\n')
## Step 2.D: Plot blobs labelled with centroids as label position.
# Plotting is opt-in via the '-plot' command-line flag.
if '-plot' in sys.argv:
    # NOTE(review): here the slice range is [:-1] (three matters) while the
    # processing loops above used [:-2] — confirm the mismatch is wanted.
    for matter_obj in inspector.matter_obj_list[:-1]:
        print ('Plotting slices labelled of matter \'{}\''
               '...'.format(matter_obj.name))
        out_plot_dir = source_dir+'/datos/labelled/'+matter_obj.name+'/'
        for slice_obj in matter_obj.slices_obj_list:
            slice_obj.plot_slice_labels(out_plot_dir, f_ext='.png')
        print ('Slices labelled stored at \'{}\'.'.format(out_plot_dir))
        print ('Done.\n')
################################################################################
###---------------------------------------------------------------------------##
################################################################################
#
## Step 3: Identify objects in 2D
## A: Explicitly name matter objects to ease management
## B: Make Image Object slices. Each slice stores the slices of all matters.
## C: Find objects type 1 (GM) by checking their relation with White Matter.
## D: Make Image Objects type 1 class instances for each found matching blob.
## E: Find objects type 2 (GM+WM) by checking the relation between themselves.
## F: Make Image Objects type 2 class instances for each found matching blob.
## G: Find objects type 3 (GM+WM, global) by checking the relation between
## themselves and performing blob erosion
## H: Make Image Objects type 3 class instances for each found matching blob.
#
### Step 3.A: Explicitly name matter objects to ease management
#i3twm, i3tgm, i3tcsf, i3t = inspector.essay_matter_access(inspector.matter_obj_list)
#
### Step 3.B: Make Image Object slices.
# Step 3 (types 1 and 2): group per-matter slices into composite
# 'imobj_slice' objects and classify blobs into image-object types.
print ('Structure image objects information in a classes family')
print ('Making objects \'imobj_slice\' to store in a unique object the slices of '
       'different matters...')
inspector.make_imobj_slice_objects(inspector.matter_obj_list)
print ('Done.\n')
print ('Finding objects of type \'imobj_type1\' for each \'imobj_slice\'. Each '
       'object stores a blob that satisfices the type 1 object constraints...')
# count tallies how many composite slices contain at least one type-1 object.
count=0
for imobj_slice in inspector.imobj_slice_obj_list:
    ## Step 3.C: Find objects type 1 (GM)
    imobj_slice.find_imobjs_type1()
    ## Step 3.D: Make Image Objects type 1
    imobj_slice.make_imobj_objects_type1()
    if len(imobj_slice.type1_imobjs_list) > 0:
        count += 1
print ('{} imslices have objects of type \'imobj_type1\''.format(count))
print ('Finding objects of type \'imobj_type2\' for each \'imobj_slice\'. Each '
       'object stores a blob pair that satisfices the type 2 object constraints...')
count=0
for imobj_slice in inspector.imobj_slice_obj_list:
    ## Step 3.E: Find objects type 2 (GM+WM)
    imobj_slice.find_imobjs_type2()
    ## Step 3.F: Make Image Objects type 2
    imobj_slice.make_imobj_objects_type2()
    if len(imobj_slice.type2_imobjs_list) > 0:
        count += 1
print ('{} imslices have objects of type \'imobj_type2\''.format(count))
print ('Done.\n')
###############
##########
#####
#slice_proof = inspector.imobj_slice_obj_list[125]
#slice_proof.find_imobjs_type3()
#slice_proof.make_imobj_objects_type3()
#if len(slice_proof.type3_imobjs_list) > 0:
# for imobj3 in slice_proof.type3_imobjs_list:
# print ('in imslice {} there are a imobj of type {} at position {}'\
# .format(imobj3.imslice_number, imobj3.obj_type, imobj3.imobj_position))
# fig, ax0 = plt.subplots()
# ax0.imshow(slice_proof.type3_imobjs_list[0].imobj_lbl, cmap='nipy_spectral')
# ax0.set_title('imobj3-{}_slc{}.imobj_lbl'\
# .format(slice_proof.type3_imobjs_list[0].imobj_position,\
# slice_proof.type3_imobjs_list[0].imslice_number))
#if len(slice_proof.type3_imobjs_list) == 2:
# fig, ax1 = plt.subplots()
# ax1.imshow(slice_proof.type3_imobjs_list[1].imobj_lbl, cmap='nipy_spectral')
# ax1.set_title('imobj3-{}_slc{}.imobj_lbl'\
# .format(slice_proof.type3_imobjs_list[1].imobj_position,\
# slice_proof.type3_imobjs_list[1].imslice_number))
#plt.show()
#slice_proof.plot_imslice_labels(source_dir+'/datos/labelled_objs/imslice0.png', f_ext='.png')
#####
##########
###############
# Step 3 (type 3): GM+WM pairing with blob erosion, applied to every
# composite slice (the commented block above was the single-slice trial).
print ('Finding objects of type \'imobj_type3\' for each \'imslice\'. Each '
       'object stores a blob pair that satisfices the type 3 object constraints...')
# count tallies how many composite slices contain at least one type-3 object.
count=0
for imobj_slice in inspector.imobj_slice_obj_list:
    ## Step 3.G: Find objects type 3 (GM+WM, global)
    imobj_slice.find_imobjs_type3()
    ## Step 3.H: Make Image Objects type 3
    imobj_slice.make_imobj_objects_type3()
    if len(imobj_slice.type3_imobjs_list) > 0:
        count += 1
print ('{} imslices have objects of type \'imobj_type3\''.format(count))
print ('Done.\n')
################################################################################
###---------------------------------------------------------------------------##
################################################################################
#
#
###############
##########
#####
# Optional output: render every composite slice with its objects of interest
# labelled; enabled with the '-plot_objs' command-line flag.
if '-plot_objs' in sys.argv:
    print ('Plotting image object slices with blobs of interest labelled...')
    out_plot_dir = source_dir+'/datos/labelled_objs/'
    count=0
    for i, image_slice in enumerate(inspector.imobj_slice_obj_list):
        # Zero-pad the slice index to 4 digits (equivalent to str(i).zfill(4)).
        fill = 4 - len(list(str(i)))
        slice_name = 'imslice'+'0'*fill+str(i)
        # NOTE(review): the first test uses `> 0` while the other two rely on
        # truthiness of len() — same effect, but inconsistent style.
        if len(image_slice.type1_imobjs_list) > 0 or len(image_slice.type2_imobjs_list)\
                or len(image_slice.type3_imobjs_list):
            image_slice.plot_imslice_labels(out_plot_dir+slice_name, f_ext='.png')
            count+=1
        else:
            # No objects found: plot an empty canvas with the WM slice's shape
            # so the output sequence stays complete.
            slc_shape = image_slice.wm_slice_obj.slc_arr.shape
            image_slice.plot_imslice_labels(out_plot_dir+slice_name,
                                            f_ext='.png', shape=slc_shape)
    print ('Image Objects found in {} imslices, labelled and stored at \'{}\''
           .format(count, out_plot_dir))
    print ('Done.\n')
#####
##########
###############
|
class Solution:
    def putMarbles(self, weights: List[int], k: int) -> int:
        """Return max(score) - min(score) over all splits of weights into k bags.

        Splitting into k contiguous bags is equivalent to picking k-1 cut
        points; a cut between positions i and i+1 contributes
        weights[i] + weights[i+1] to the score, and the two endpoints'
        contribution is the same for every split, so it cancels in the
        difference.  Hence the answer is
            sum(k-1 largest pair sums) - sum(k-1 smallest pair sums).

        Replaces the original push-everything-then-pop-down-to-size heap
        loops with heapq.nlargest/nsmallest (same result, O(n log k)).
        """
        pair_sums = [weights[i] + weights[i + 1] for i in range(len(weights) - 1)]
        top = heapq.nlargest(k - 1, pair_sums)       # k-1 most expensive cuts
        bottom = heapq.nsmallest(k - 1, pair_sums)   # k-1 cheapest cuts
        return sum(top) - sum(bottom)
|
import re
from ..lib import keyword_utils
def recover_full_name(ori_sentence, func):
    """Recover the full identifier for a call token found in a sentence.

    *func* is a fragment like ``"foo("``; walk left from where it occurs in
    *ori_sentence* until a space (or the sentence start) and return the
    identifier without the trailing ``(``.  Returns "" when absent.
    """
    if func not in ori_sentence:
        return ""
    start = ori_sentence.index(func)
    # The last character of *func* is the '(' — exclude it from the name.
    end = start + len(func) - 1
    while start >= 0 and ori_sentence[start] != " ":
        start -= 1
    return ori_sentence[start + 1:end]
def recover_causal(pre_phase, post_phase):
    """Infer temporal order of a sentence from cue words around the action.

    Returns "reverse" when the post-phrase carries an after/later style cue
    or the pre-phrase carries a before/earlier style cue; "normal" otherwise.
    """
    before_cues = re.compile(r'(previous|before|earlier)', re.IGNORECASE)
    after_cues = re.compile(r'(after|subsequent|later|until|then)', re.IGNORECASE)
    reversed_order = (after_cues.search(post_phase) is not None
                      or before_cues.search(pre_phase) is not None)
    return "reverse" if reversed_order else "normal"
def recover_relations(sentense_dep, src_id):
    """Rebuild the phrase rooted at *src_id* from a dependency parse.

    Children whose relation is in the whitelist are recursively expanded and
    attached before or after the head token according to their position in
    the sentence (child id vs. head id).
    """
    LINKED_RELS = ('advmod', 'appos', 'cc', 'obj', 'obl', 'fixed', 'xcomp',
                   'flat', 'compound', 'case', 'conj', 'parataxis')
    head = sentense_dep[src_id]
    left = ""
    right = ""
    for child_id in head['child_id']:
        if sentense_dep[child_id]['deprel'] not in LINKED_RELS:
            continue
        piece = recover_relations(sentense_dep, child_id)
        if child_id > src_id:
            right = piece if right == "" else right + " " + piece
        else:
            left = piece if left == "" else left + " " + piece
    phrase = head['tok']
    if right != "":
        phrase = phrase + " " + right
    if left != "":
        phrase = left + " " + phrase
    return phrase
'''
Get the causal relationship based on the action word and logical words.
'''
def get_causal(feature_dict, cur_func, func_list, dep_info, word_id, ori_sentence=""):
    """Classify function mentions around the action verb at *word_id* as
    pre- or post-conditions of *cur_func* and append them to *feature_dict*.

    feature_dict: {'pre': [...], 'post': [...]} — mutated and returned.
    cur_func: dict with at least 'func_name'.
    func_list: container of known function names (membership test only).
    dep_info: dependency parse nodes ('tok', 'deprel', 'child_id').
    ori_sentence: raw sentence text used to recover full identifiers.
    """
    is_pre = keyword_utils.is_pre(cur_func['func_name'])
    is_post = keyword_utils.is_post(cur_func['func_name'])
    action_tok = dep_info[word_id]['tok']
    pre_phase = ""
    post_phase = ""
    # ignore to analyze the return related sentences and the actions about notice signs
    ignore_verbs = re.compile(r'(have|has|return|see|mention|note)')
    if ignore_verbs.search(action_tok) != None:
        return feature_dict
    # Acquire the phases: text before and after the action verb, built from
    # its argument-like dependents.
    for child_id in dep_info[word_id]['child_id']:
        if dep_info[child_id]['deprel'] not in ['nsubj:pass', 'nsubj', 'obj', 'obl', 'acl', 'advcl']:
            continue
        phase = recover_relations(dep_info, child_id)
        # Skip phrases that mention the current function itself.
        if cur_func['func_name'] in phase:
            continue
        else:
            # NOTE(review): phrases are concatenated without a separator;
            # confirm downstream regexes tolerate the joined text.
            if child_id > word_id:
                post_phase += phase
            else:
                pre_phase += phase
    # Confirm the causal order of the sentence.
    causal_order = "normal"
    func_pattern = re.compile(r'[0-9,a-z,A-Z,_]+\(', re.IGNORECASE)
    pre_action_pattern = re.compile(r'(allocate|open|create|initiate)', re.IGNORECASE)
    post_action_pattern = re.compile(r'(free|release|close|clear|clean)', re.IGNORECASE)
    # A verb ending in "ed" is treated as past tense, which flips the
    # default interpretation of allocate-like vs release-like actions.
    if "ed" == action_tok[-2:]:
        if pre_action_pattern.search(action_tok) != None:
            causal_order = "reverse"
        elif post_action_pattern.search(action_tok) != None:
            causal_order = "normal"
        else:
            causal_order = recover_causal(pre_phase, post_phase)
    else:
        if pre_action_pattern.search(action_tok) != None:
            causal_order = "normal"
        elif post_action_pattern.search(action_tok) != None:
            causal_order = "reverse"
        else:
            causal_order = recover_causal(pre_phase, post_phase)
    # Update the feature dict: in "normal" order functions found after the
    # verb become post-conditions; in "reverse" order the roles swap.
    if causal_order == "normal":
        for func in func_pattern.findall(post_phase):
            func_name = recover_full_name(ori_sentence, func)
            if not is_post and func_name != cur_func['func_name'] and func_name in func_list:
                feature_dict['post'].append(func_name)
        for func in func_pattern.findall(pre_phase):
            func_name = recover_full_name(ori_sentence, func)
            if not is_pre and func_name != cur_func['func_name'] and func_name in func_list:
                feature_dict['pre'].append(func_name)
    else:
        for func in func_pattern.findall(post_phase):
            func_name = recover_full_name(ori_sentence, func)
            if not is_pre and func_name != cur_func['func_name'] and func_name in func_list:
                feature_dict['pre'].append(func_name)
        for func in func_pattern.findall(pre_phase):
            func_name = recover_full_name(ori_sentence, func)
            if not is_post and func_name != cur_func['func_name'] and func_name in func_list:
                feature_dict['post'].append(func_name)
    return feature_dict
'''
Analyze the semantic inside the description about causality.
'''
def analyze_causal(dep, cur_func, func_list={}, display=False):
    """Extract pre/post causal relations for *cur_func* from parsed text.

    dep: object exposing preprocess_dep() and a parallel .sentences list.
    Returns {'pre': [...], 'post': [...]} with duplicates removed, or {}
    when nothing was found.

    NOTE: func_list={} is a mutable default, but it is only read
    (membership tests in get_causal), never mutated — safe as written.
    """
    # Get dependency information.
    analyzed_dep = dep.preprocess_dep()
    # Init the feature dict.
    feature_dict = {'pre': [], 'post': []}
    for i, sentence in enumerate(analyzed_dep):
        # Only handle sentences whose root word is also the action word.
        if sentence['root'] != sentence['action']:
            continue
        dep_info = sentence['dep_info']
        # Skip the complex sentences which have two or more verbs.
        actions_num = 0
        for root_id in sentence['root']:
            if dep_info[root_id]['deprel'] in ['root', 'conj']:
                actions_num += 1
        if actions_num > 1:
            continue
        for root_id in sentence['root']:
            feature_dict = get_causal(feature_dict, cur_func, func_list, dep_info, root_id, dep.sentences[i])
    # Deduplicate (order of the resulting lists is not significant).
    feature_dict['pre'] = list(set(feature_dict['pre']))
    feature_dict['post'] = list(set(feature_dict['post']))
    if display:
        print('causality:', feature_dict)
    if len(feature_dict['pre']) == 0 and len(feature_dict['post']) == 0:
        return {}
    return feature_dict
|
# 5x5 bingo board; presumably 1 = marked cell, 0 = unmarked.
bingo = [
    [0, 1, 1, 1, 1],
    [0, 1, 0, 1, 0],
    [0, 1, 1, 1, 0],
    [1, 1, 1, 1, 1],
    [1, 1, 0, 1, 0],
]
# hantei ("judgement"): count of matching cells in the current column.
hantei = 0
for m in range(5):
    for n in range(5):
        # NOTE(review): every cell of column m is compared against the
        # diagonal cell bingo[m][m]; m is printed when the whole column is
        # uniform.  If the intent was "column m fully marked", the test
        # should probably be `bingo[n][m] == 1` — as written, an all-zero
        # column would also be reported.  Confirm before changing.
        if bingo[m][m] == bingo[n][m]:
            hantei += 1
    if hantei == 5:
        print(str(m))
    hantei = 0
|
import numpy as np
from rpy2.robjects import numpy2ri
# Enable automatic numpy <-> R array conversion for all rpy2 calls below.
numpy2ri.activate()
from rpy2.robjects.packages import importr
stats = importr('stats')
# x : np.ndarray
# window : int
# -> (np.ndarray, np.ndarray, np.ndarray)
def stl_r(x, window):
    """Seasonal-trend decomposition of *x* via R's stl() through rpy2.

    *window* is used both as the series frequency and as stl's s.window.
    Returns three 1-D arrays; after transposing R's 'time.series' matrix the
    rows are presumably (seasonal, trend, remainder) in that order — confirm
    against the R stl() documentation.
    """
    ts = stats.ts(x, frequency=window)
    dec = np.array(stats.stl(ts, s_window=window).rx2('time.series')).T
    return dec[0], dec[1], dec[2]
|
# while-loop practice: sum of the even numbers in [0, 100], three ways.
# (Comments translated from Chinese; the accumulator was renamed from
# `sum` to `total` so it no longer shadows the `sum` builtin.)

# Method 1: step through the even numbers directly.
i = 0
total = 0
while i <= 100:
    total += i
    i += 2
print("sum = %d" % total)

# Method 2: walk every number and test parity with the modulo operator.
i = 0
total = 0
while i <= 100:
    if i % 2 == 0:
        total += i
    i += 1
print("sum = %d" % total)

# Method 3: skip odd numbers explicitly, accumulate the rest.
i = 0
total = 0
while i <= 100:
    if i % 2 == 1:
        i += 1
    else:
        total += i
        i += 1
print("sum = %d" % total)
|
# -*- coding: utf-8 -*-
#text_manip.py
import re
import urllib2
# import unicodedata
from bs4 import BeautifulSoup
# import nltk
# from xgoogle.search import GoogleSearch, SearchError
import os
import random
def HTML_attribute_content_replace(text, attr, current, replace_with):
    """Swap an HTML attribute's value prefix: attr="current -> attr="replace_with."""
    old_fragment = '%s="%s' % (attr, current)
    new_fragment = '%s="%s' % (attr, replace_with)
    return text.replace(old_fragment, new_fragment)
def remove_after(regex, text):
    """Truncate *text* at the position of a regex match; unchanged if no match.

    NOTE(review): the original comment says "only take the first", but the
    loop leaves *index* at the position of the LAST match's text — behavior
    preserved here; confirm which was intended.
    """
    found_string = re.findall(pattern=regex, string=text)
    index = -1
    # Fixed: the original guard was `if found_string is not []`, an identity
    # comparison that is always True.  (It was harmless only because
    # iterating an empty list is a no-op.)
    if found_string:
        for x in found_string:
            x = ensure_ASCII(x)
            index = text.find(x)
    if index != -1:
        return text[:index]
    else:
        return text
def remove_HTML_perfect(html, tag, class_list=None, tag_id=None):
    """Strip every *tag* element matching one of *class_list* (and/or *tag_id*)
    from *html*, returning the prettified remainder.

    Fixed: the original tested `class_list is not []` — an identity
    comparison that is always True — and used a mutable default argument.
    """
    soup = BeautifulSoup(html)
    if class_list:
        for class_ in class_list:
            [s.extract() for s in soup(tag, class_=class_)]
    if tag_id is not None:
        # NOTE(review): `id_` is not a BeautifulSoup filter keyword (the
        # documented spelling is `id`); confirm this branch ever matched.
        [s.extract() for s in soup(tag, id_=tag_id)]
    return soup.prettify()
def regex_and_replace_first(regex, text, replacement):
    """Replace the first regex match's text with *replacement*.

    Fixed: the original indexed `to_replace[0]` unconditionally and raised
    IndexError when the pattern did not match; now the text is returned
    unchanged in that case.

    NOTE: str.replace substitutes EVERY occurrence of the first match's
    text, not just the first — unchanged from the original behavior.
    """
    to_replace = re.findall(regex, text)
    if not to_replace:
        return text
    return text.replace(to_replace[0], replacement)
def regex_and_replace(regex, text, replacement):
    """Replace the text of every regex match in *text* with *replacement*."""
    for matched_text in re.findall(regex, text):
        text = text.replace(matched_text, replacement)
    return text
def regex_and_remove(regex, text):
    """Delete the text of every regex match from *text*."""
    for matched_text in re.findall(regex, text):
        text = text.replace(matched_text, "")
    return text
def ensure_UTF8(string_data, encoding='UTF-8'):
    """Return *string_data* as a unicode object (Python 2 only).

    NOTE(review): the *encoding* parameter is never used — the decode is
    hard-coded to 'utf-8'; confirm whether it should be honored.
    """
    if type(string_data) is unicode:
        return string_data
    else:
        return unicode(string_data,'utf-8')
def ensure_ASCII(string_data): #to write to file
    # Decode to unicode first, then force ASCII, replacing any character
    # that cannot be represented (Python 2 only).
    return ensure_UTF8(string_data).encode('ascii', 'replace')
# def convert_encoding(data, new_coding = 'UTF-8'):
# encoding = cchardet.detect(data)['encoding']
# if new_coding.upper() != encoding.upper():
# data = data.decode(encoding, data).encode(new_coding)
# return data
def rectify_folder_path(folderpath):
    """Guarantee *folderpath* ends with a path separator ('/' or '\\')."""
    if folderpath[-1] in ("\\", "/"):
        return folderpath
    return folderpath + "/"
def make_directory_if_not_exists(folderpath, printing=True):
    """Create *folderpath* (and parents) if absent.

    Returns True when the directory was created, False when it already
    existed.  Python 2 (print statement).
    """
    # returns true if we make the directory
    folderpath=rectify_folder_path(folderpath)
    if not os.path.exists(folderpath):
        os.makedirs(folderpath)
        return True
    else:
        if printing:
            print "\nThe folder %s already exists."%(folderpath)
        return False
def list_files_in_folder_with_extension(folderpath, extension):
    """Return the names (not paths) of files in *folderpath* ending in *extension*.

    Replaces the original manual negative-slice comparison
    (`file_name[(-1)*ext_len:] == extension`) with the idiomatic
    str.endswith().  The `extension and` guard preserves the original
    outcome for an empty extension (which matched nothing useful).
    """
    return [file_name
            for file_name in get_files_in_folder(folderpath)
            if extension and file_name.endswith(extension)]
def get_files_in_folder(folderpath):
    """Return just the file names (no sub-directories) inside *folderpath*."""
    # Inlined trailing-separator normalization (was rectify_folder_path).
    if folderpath[-1] not in ("\\", "/"):
        folderpath += "/"
    return [entry for entry in os.listdir(folderpath)
            if os.path.isfile(folderpath + "/" + entry)]
def make_google_search_query(necessary_topic_list=None, topic_list=None, site_list=None, daterange_from=None, daterange_to=None):
    """Build a Google query string from quoted topics, site: filters and an
    optional daterange: filter.  Returns None when no topics are supplied."""
    if necessary_topic_list is None and topic_list is None:
        return None
    quoted = []
    for topics in (necessary_topic_list, topic_list):
        if topics is not None:
            for topic in topics:
                quoted.append('"%s" ' % topic)
    query = "".join(quoted)
    if site_list is not None:
        query += " site:" + site_list[0]
        for extra_site in site_list[1:]:
            query += " | site:" + extra_site
    if (daterange_from is not None and daterange_to is not None
            and daterange_from <= daterange_to):
        query += " daterange:%s-%s" % (daterange_from, daterange_to)
    return query
# '"Infosys" site:financialexpress.com/article/ | site:business-standard.com/article | site:livemint.com/companies | site:timesofindia.indiatimes.com/business/india-business/ '
def make_file_path(folderpath, filename, extension):
    """Join folder + sanitized filename + extension, truncated so the whole
    path stays under Windows' path-length limit (255 used for safety)."""
    if folderpath[-1] != "\\" and folderpath[-1] != "/":
        folderpath += "/"
    safe_name = make_filename(filename)
    max_name_len = 255 - len(folderpath) - len(extension)
    return folderpath + safe_name[:max_name_len] + extension
def make_folder_path(parent_folder_path, folder_name, char_buffer=30):
    """Join *parent_folder_path* and a sanitized *folder_name*, reserving
    *char_buffer* characters so files inside still fit the OS path limit.
    Returns None (after printing) when the parent path is already too long.
    Python 2 (print statement).
    """
    #we make a buffer to allow files with at least this length
    parent_folder_path=rectify_folder_path(parent_folder_path)
    folder_name=make_filename(folder_name)
    folder_name=rectify_folder_path(folder_name)
    # NOTE(review): only the parent path's length is checked here; the
    # appended folder_name could still push the total past the limit
    # (mitigated by the final slice below) — confirm intent.
    if len(parent_folder_path)-char_buffer>=255: #max filepath length
        print "\nERROR: cannot make folder, exceeds max OS filepath length.\n"
        return None
    else:
        return (parent_folder_path+folder_name)[:259-char_buffer]
def make_filename(input_line):
    """Sanitize *input_line* for use as a (Windows-safe) file name.

    Applies the same ordered substitutions as before: ':' -> ' -', drop '?',
    '/' -> '-', '"' -> "'", '|' -> ';', and strip newlines.
    """
    # NOTE(review): the ("<", "<") and (">", ">") pairs are no-ops exactly as
    # in the original source; they look like HTML entities ('&lt;'/'&gt;')
    # lost to un-escaping — confirm against version history before "fixing".
    substitutions = (
        ("\:", " -"),
        ("\?", ""),
        ("/", "-"),
        ('"', "'"),
        ("\|", ";"),
        ("<", "<"),
        (">", ">"),
        ("\\n", ""),
    )
    line = input_line
    for pattern, replacement in substitutions:
        line = re.sub(pattern, replacement, line)
    return line
def to_julian_date(year, month, day):
    """Convert a calendar date to a (rounded) Julian day number.

    Python 2 only: relies on integer division in `Y/100` and `Y/400`,
    and uses the print statement.  Returns None on invalid input.
    """
    try:
        # NOTE(review): when e.g. year != int(year), Y is simply never
        # bound and the later use raises NameError (caught as "invalid"
        # only if it happens inside this try) — fragile; confirm intent.
        if(year==int(year)):
            Y=int(year)
        if(month==int(month)):
            M=int(month)
        if(day==int(day)):
            D=int(day)
    except Exception:
        print "Invalid date input."
        return None
    if M<1 or M>12:
        print "Invalid date input."
        return None
    # Per-month day-count validation (February capped at 29; no leap check).
    if D<1 or ((M==1 and D>31) or (M==2 and D>29) or (M==3 and D>31) or (M==4and D>30) or (M==5 and D>31) or (M==6 and D>31) or (M==7 and D>31) or (M==8 and D>31) or (M==9 and D>30) or (M==10 and D>31) or (M==11 and D>30) or (M==12 and D>31)):
        print "Invalid date input."
        return None
    # Standard Gregorian-to-Julian-day terms (C: century correction).
    C = 2 - (Y/100) + (Y/400)
    E = 365.25*(Y+4716)
    F = 30.6001*(M+1)
    julian_date= int(round(C+D+E+F-1524.5))
    return julian_date
def date_split(input_date):
    """Split a 'YYYY-MM-DD' string into an (Y, M, D) int tuple.

    Returns None (after printing) on malformed or out-of-range input.
    Python 2 (print statement).
    """
    #date must be in YY-MM-DD format
    date=input_date.split('-')
    try:
        Y=int(date[0])
        M=int(date[1])
        D=int(date[2])
    except Exception:
        print "Invalid date input."
        return None
    if M<1 or M>12:
        print "Invalid date input."
        return None
    # Per-month day-count validation (February capped at 29; no leap check).
    if D<1 or ((M==1 and D>31) or (M==2 and D>29) or (M==3 and D>31) or (M==4and D>30) or (M==5 and D>31) or (M==6 and D>31) or (M==7 and D>31) or (M==8 and D>31) or (M==9 and D>30) or (M==10 and D>31) or (M==11 and D>30) or (M==12 and D>31)):
        print "Invalid date input."
        return None
    return (Y,M,D)
def useless_function():
    """Demo of carriage-return progress output (Python 2 print statement).

    Spins through 0..9999 repeatedly, rewriting the same console line,
    until a 1-in-100000 random draw breaks the loop; returns the counter.
    """
    i=0
    while i<10000:
        # time.sleep(0.01)
        print "\r%d"%i, # \r is the carriage return character.
        # \r moves the cursor to the beginning of the line and then keeps outputting characters as normal.
        i+=1
        if random.randrange(100000) == 0:
            break
        if i==9999:
            i=0
    return i
def num_to_words(num):
    """Render a large number as '<value> <scale-word>' (e.g. '2.5 million').

    Fixed: the original's middle branch divided numbers >= 10**6 by 10**9
    yet labelled the result "million" (so 2_000_000 became '0.002 million'),
    and it had no billion tier at all.  Numbers below 1000 still return
    None, preserving the original contract.
    """
    if num >= pow(10, 12):
        return str(float(num) / pow(10, 12)) + " trillion"
    elif num >= pow(10, 9):
        return str(float(num) / pow(10, 9)) + " billion"
    elif num >= pow(10, 6):
        return str(float(num) / pow(10, 6)) + " million"
    elif num >= pow(10, 3):
        return str(float(num) / pow(10, 3)) + " thousand"
    return None
# def google_search_redirect(random_text): #throws Google off the scent
# print "\tRedirect...."
# try:
# search_query=article_supersplit(random_text)
# search_query=search_query[random.randrange(0, len(search_query))]
# search_query=search_query[random.randrange(0, len(search_query))]
# search_query=remove_empty_from_list(search_query)
# search_query=search_query[:random.randrange(3,4+len(search_query)%6)]
# search_query=' '.join(search_query)
# except Exception:
# search_query=random_text[:10]
# search_query=re.sub("\.","",search_query)
# search_query=re.sub(",","",search_query)
# search_query=re.sub("`","",search_query)
# google_search_results(search_query=search_query, number_of_results=5)
# def google_search_results(search_query, wait=40, number_of_results=10, encode=True):
# ''' DO NOT MESS WITH THIS IT IS PERFECT FOR NOW'''
# # gets AT LEAST number_of_results results
# # don't query too fast or Google will ban your IP
# # for this purpose, I have added the variable max_result_size
# print search_query
# try:
# max_result_size=10 #don't change it from this: the standard of 10 seems the least suspicious to google
# gs = GoogleSearch(search_query, random_agent=True) # does not actually search
# gs.results_per_page = max_result_size
# gs.page=0
# times_tried=0
# results=[]
# prev=0
# # print "getting results:"
# while len(results) < number_of_results:
# prev=len(results)
# times_tried+=1
# time.sleep(random.uniform(0.5*wait, 1.5*wait))
# results+=gs.get_results() # Actual search and extraction of results.
# print "\rtimes_tried: %s\tlen(results): %s\tpage_number: %s"%(times_tried, len(results), gs.page),
# print "\n"
# # We now have a list of SearchResult objects, called 'results'.
# # A SearchResult object has three attributes -- "title", "desc", and "url".
# # They are Unicode strings, so do a proper encoding before outputting them. (done below)
# if encode:
# for i in range (0, len(results)):
# results[i].title=results[i].title.encode("utf8", "ignore")
# results[i].desc=results[i].desc.encode("utf8", "ignore")
# results[i].url=results[i].url
# # random.shuffle(results)
# except SearchError, e:
# print "Search failed:\t%s" % e
# return results
def data_structure_similarity(a,b):
    """Print the percentage of the smaller collection's distinct elements
    that also appear in the larger one.  Returns None (print-only).
    Python 2 (print statement).
    """
    # Make *a* the longer collection so the percentage is computed against
    # the shorter one.
    if len(a) < len(b):
        t=a
        a=b
        b=t
    #they must be sets
    percent=len(list(set(a) & set(b)))
    percent=float(percent)/float(len(set(b)))
    percent=percent*100
    # NOTE(review): the message text reads oddly ("...also in A are: ") —
    # runtime string left untouched; confirm wording before editing.
    print "\n%s elements of B are also in A are: "%percent
def string_list_merge(str_list, merge_with=" "):
    """Concatenate *str_list* into one string separated by *merge_with*."""
    return merge_with.join(str_list)
def try_dict_index(dictionary, index):
    """Return dictionary[index], or None (after printing an error) when the
    lookup fails for any reason.  Python 2 (print statement)."""
    try:
        return dictionary[index]
    except Exception:
        print "ERROR on accessing index '%s': No such dictionary index"%(index)
        return None
def remove_empty_from_list(pylist):
    """In place, drop elements equal to None, '', [], {} or () from *pylist*.

    Returns the (mutated) list, or None when the input list is empty —
    matching the original contract.  Falsy values like 0 and False are
    deliberately kept.
    """
    if not pylist:
        return None
    # Slice assignment mutates the caller's list object, like the original
    # del-while-iterating loop did.
    pylist[:] = [item for item in pylist
                 if item not in (None, "", [], {}, ())]
    return pylist
def remove_empty_from_dict(pydict):
    """Return a copy of *pydict* without entries whose value equals
    None, '', [], {} or ().  Falsy values like 0 are kept."""
    return {key: value for key, value in pydict.items()
            if value not in (None, "", [], {}, ())}
def extract_website(url):
    """Return the bare domain of *url*, with scheme and 'www.' stripped."""
    segments = url.split('/')
    has_scheme = "http://" in url or "https://" in url
    domain = segments[2] if has_scheme else segments[0]
    if "www." in domain:
        domain = domain.split("www.")[1]
    return domain
def extract_links(article): #extracts links from HTML code
    """Map anchor text -> href for every <a> tag in the HTML *article*."""
    links = {}
    for anchor in BeautifulSoup(article).find_all('a'):
        # {'anchor text': 'hyperlink'} — first content node, whitespace-trimmed.
        links[anchor.contents[0].strip()] = anchor.get('href')
    return links
# def properly_encode(article, input_encoding="UTF-8"):
# article=article.decode(input_encoding)
# article = article.replace(u"\u2022", "*")
# # article = article.replace(u"\u2022", "*")
# # print article.encode('utf-8')
# article=unicodedata.normalize('NFKD', article).encode('ascii','ignore')
# # article = article.encode('unicode_escape')
# # article=article.encode('ascii')
# # print article
# return article
def shorten_whitespace(str):
    """Collapse every run of spaces/newlines (containing at least one
    newline) into exactly one blank line, after trimming the ends."""
    return re.sub("([ ]*(\\n)+[ ]*)+", "\\n\\n", str.strip())
def remove_HTML_tags(str): #removes html tags completely from a string, not their contents
    """Strip HTML tags from *str*, turning <br> into newlines first."""
    with_breaks = re.sub("<br>", "\n", str)
    return re.sub("<.*?>", "", with_breaks)
def remove_HTML(str, tag, attributes=""): #removed everything inside all occurences of that html tag
    """Delete every <tag ...>...</tag> block (opening tag, content, closing
    tag) from *str*.  *attributes* is interpolated into the opening-tag
    pattern, so it must itself be regex-safe.
    """
    # NOTE(review): the lazy `(\n*.*?)*?` content group will not cross
    # arbitrary newline layouts without re.S — confirm on multi-line tags.
    regex='''(<%s[ ]*%s.*?>)(\\n*.*?)*?(</%s>)'''%(tag,attributes,tag)
    # print regex
    return re.sub(regex,"",str)
def remove_HTML_except(str, tag): #removed everything inside all html tags, except a particular HTML tag
    # NOTE(review): despite the name and comment, the pattern removes the
    # blocks OF the given tag (same shape as remove_HTML, minus attributes);
    # it does not remove "everything except" that tag.  Confirm intent.
    return re.sub('''(<%s.*?>)(\\n.*?)*?(</%s>)'''%(tag,tag),"",str)
def remove_php_from_HTML(str): #removes php code completely from a string
    """Strip <?php ... ?> blocks from *str* (single-line blocks only; the
    pattern has no re.S so it will not span newlines).

    Fixed: the original pattern "<?php.*?>" treated '<?' as an OPTIONAL '<'
    (? is a quantifier), so it matched starting at the bare 'php' and cut
    off at the first '>', leaving stray '<?' fragments behind.
    """
    return re.sub(r"<\?php.*?\?>", "", str)
def get_charset(page_url): #gets the charset advertised in the page's HTTP headers
    """Open *page_url* (prepending a scheme when missing) and return the
    charset from the response's Content-Type header, or None when absent.
    Python 2 only (urllib2, headers.getparam).
    """
    response=None
    if 'http://' in page_url.lower():
        response =urllib2.urlopen(page_url)
    elif 'https://' in page_url.lower():
        response=urllib2.urlopen('https://'+page_url.split("https://")[-1])
    else:
        # No scheme supplied: default to plain http.
        response =urllib2.urlopen('http://'+page_url)
    charset = response.headers.getparam('charset')
    return charset
def get_html(page_url):
    #get_html: because typing a few lines of code is way too hard
    """Fetch *page_url* (prepending a scheme when missing) and return the
    raw response body.  Python 2 only (urllib2).
    """
    response=None
    if 'http://' in page_url.lower():
        response =urllib2.urlopen(page_url)
    elif 'https://' in page_url.lower():
        response=urllib2.urlopen('https://'+page_url.split("https://")[-1])
    else:
        # No scheme supplied: default to plain http.
        response =urllib2.urlopen('http://'+page_url)
    html = response.read()
    return html
class HTML_page_Obj:
    """Fetched HTML page: URL, response headers, charset and body.

    Python 2 only (urllib2, print statement, headers.getparam).
    """
    def __init__(self, page_url):
        # Retry the fetch up to three times before giving up; on total
        # failure the object is left with self.url = None and no .html.
        response=None
        self.url=None
        try:
            response=self.get_response_object(page_url)
        except Exception:
            try:
                # time.sleep(1)
                response=self.get_response_object(page_url)
            except Exception:
                try:
                    # time.sleep(5)
                    response=self.get_response_object(page_url)
                except Exception:
                    print "\nTried 3 times. Cannot access url: %s. \nHence, cannot make HTML_page_Obj"%page_url
        if response != None:
            self.url=page_url
            try:
                self.short_url=response.headers.getparam('Link')
            except Exception:
                self.short_url=""
            self.charset = response.headers.getparam('charset')
            # Keep header values without their ';'-separated parameters.
            self.headers={'charset':self.charset}
            for i in response.headers:
                self.headers[i]=response.headers[i].split(';')[0]
            self.html = response.read()
    def all_hyperlinks(self):
        """Build and return {'anchor text': 'href'} for every <a> tag."""
        article_soup=BeautifulSoup(self.html)
        self.link_dict={}
        for link_tag in article_soup.find_all('a'):
            self.link_dict[link_tag.contents[0].strip()]=link_tag.get('href')
        # this makes a dict of the form {'article_heading':'article_hyperlink'}
        return self.link_dict
    def make_soup(self):
        """Parse self.html; replace it with the prettified form and return the soup."""
        html_soup= BeautifulSoup(self.html)
        self.html=html_soup.prettify(encoding=self.charset)
        return html_soup
    def get_response_object(self, page_url):
        """Open *page_url*, prepending 'http://' when no scheme is present."""
        if 'http://' in page_url.lower():
            response =urllib2.urlopen(page_url)
        elif 'https://' in page_url.lower():
            response=urllib2.urlopen('https://'+page_url.split("https://")[-1])
        else:
            response =urllib2.urlopen('http://'+page_url)
        return response
def bytes_to_other(bytes):
    """Human-readable size string for *bytes* ('500 Bytes', '3 KB', ...).

    Fixed: the original used strict '>' on both sides of each band, so the
    exact values 1 KB, 1 MB and 1 GB fell through every branch and the
    function returned None.  Floor division is used so Python 2 and 3
    produce the same (integer) figures.  (The parameter name `bytes`
    shadows the builtin but is kept for interface compatibility.)
    """
    KB = pow(2, 10)
    MB = pow(2, 20)
    GB = pow(2, 30)
    if bytes < KB:
        return str(bytes) + " Bytes"
    if bytes < MB:
        return str(bytes // KB) + " KB"
    if bytes < GB:
        return str(bytes // MB) + " MB"
    return str(bytes // GB) + " GB"
def get_file(url, folderpath="./", block_sz=8192, confirm=False, printing=True):
    """Download *url* into *folderpath* in *block_sz* chunks, printing a
    carriage-return progress line; returns the output file path.

    When confirm=True the user is prompted (and exit(0) is called on 'n').
    If the Content-Length header is unavailable, falls back to a single
    unchunked read.  Python 2 only (urllib2, raw_input, print statement).
    """
    #does not work on html files
    if printing:
        print "Attempting to download from URL : %s"%url
    file_name = make_filename(url.split('/')[-1] )#get the last thing seperated by a '/'
    u = urllib2.urlopen(url)
    folderpath=rectify_folder_path(folderpath)
    fileout_path=folderpath+file_name
    # print "\n\nfileout_path: %s\n\n"%fileout_path
    try:
        meta = u.info()
        file_size = int(meta.getheaders("Content-Length")[0])
        if (confirm):
            print "File size is %s , do you want to continue?"%bytes_to_other(file_size)
            y_or_n= raw_input("\nEnter y or n\n\t>")
            if y_or_n.lower() != 'y':
                exit(0)
        if printing:
            print "Downloading: %s\nBytes: %s" % (file_name, file_size)
            print "Writing to: %s"%fileout_path
        f = open(fileout_path, 'wb')
        file_size_dl = 0
        while True:
            buffer = u.read(block_sz)
            if not buffer:
                break
            file_size_dl += len(buffer)
            f.write(buffer)
            # chr(8) is backspace: rewinds the console cursor so the next
            # status line overwrites this one.
            status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl * 100. / file_size)
            status = status + chr(8)*(len(status)+1)
            if printing:
                print status,
        f.close()
    except Exception:
        # No usable Content-Length (or the chunked loop failed): read the
        # whole body in one go.
        f = open(fileout_path, 'wb')
        f.write(u.read())
        f.close()
    if printing:
        print "Done downloading : %s"%url
    if printing:
        print ""
    return fileout_path
def properly_format(article):
    """Normalize whitespace in *article* character by character: drop ';'
    immediately before a newline, collapse repeated newlines/tabs/spaces,
    and strip spaces that follow a newline or tab.
    """
    i=0
    article=article.strip()
    # print article, "\n\n\n\n\n\n\n\n\n\n\n"
    length = len(article)
    output_article = ""#+"\t"
    while i<length:
        # NOTE(review): article[i+1] is read without a bound check — an
        # article whose last character is ';' raises IndexError.  Confirm
        # inputs always end with other text before relying on this.
        if article[i]==";" and article[i+1]=='\n':
            i+=1
            continue
        elif article[i]=='\n':
            # Collapse consecutive newlines into one.
            if output_article[len(output_article)-1]=='\n':
                i+=1
                continue
            else: output_article+='\n'
        elif article[i]=='\t': #this does not seem to be working
            # NOTE(review): when the previous output char is not a tab this
            # branch emits nothing (the tab is silently dropped) — matches
            # the author's own "not working" remark.
            if output_article[-1]=='\t':
                i+=1;
                continue
            # elif output_article[-1]=='\n':
            # output_article+='\t'
            # output_article=output_article
        elif article[i]==" ":
            # Drop spaces that directly follow a newline or a tab.
            if output_article[len(output_article)-1]=='\n' or output_article[len(output_article)-1]=='\t':
                i+=1
                continue
            else: output_article+=article[i]
        else: output_article+=article[i]
        i+=1
    # NOTE(review): both substitutions below are no-ops exactly as found in
    # the source; they look like lost HTML entities ('&amp;' -> '&',
    # '&gt;' -> '>') — confirm against version history before changing.
    output_article= re.sub("&","&",output_article)
    output_article= re.sub(">",">",output_article)
    return output_article
def article_supersplit(article):
    """
    This function splits a "properly_format"ed article,
    and returns the variable 'text'.
    'text' is structured as:
        a list of paragraphs,
        where each paragraph is a list of sentences,
        where each sentence is a list of words, punctuations as seperate words.
    """
    article=properly_format(article)
    text=article.split("\n") #get paragraphs
    text = remove_empty_from_list(text)
    for i in range(0,len(text)):
        text[i]=text[i].split(". ") #get sentences
        text[i]=remove_empty_from_list(text[i])
        for j in range(0,len(text[i])):
            try:
                # Preferred tokenizer: NLTK treats punctuation as tokens.
                text[i][j]=nltk.word_tokenize(text[i][j])
            except Exception:
                # Backup tokenizer: naive whitespace split; re-append the
                # sentence-final period that the ". " split removed (+= "."
                # extends the list with the single element '.').
                text[i][j]=text[i][j].split(" ") #get words
                text[i][j]+="."
                for k in range(0,len(text[i][j])):
                    # Strip punctuation that the naive split leaves attached.
                    text[i][j][k]=re.sub(",", "", text[i][j][k])
                    text[i][j][k]=re.sub(";", "", text[i][j][k])
                    text[i][j][k]=re.sub("\(", "", text[i][j][k])
                    text[i][j][k]=re.sub("\)", "", text[i][j][k])
                    text[i][j][k]=re.sub("\[", "", text[i][j][k])
                    text[i][j][k]=re.sub("\]", "", text[i][j][k])
                    text[i][j][k]=re.sub("\{", "", text[i][j][k])
                    text[i][j][k]=re.sub("\}", "", text[i][j][k])
                # NOTE(review): strips a trailing "." from the last
                # paragraph's second-to-last token; text[i][-1][-2] can raise
                # IndexError on very short sentences -- confirm intended.
                if text[i][-1][-2][-1] == ".":
                    print text[i][-1]
                    text[i][-1][-2]=re.sub(".*", text[i][-1][-2][:-1], text[i][-1][-2])
            finally:
                text[i][j]=remove_empty_from_list(text[i][j])
    return text
|
"""
关于八皇后问题的一些解法,包括回溯法之类的方法。
八皇后问题其实是一种十分经典的问题,简单来说在8×8的国际象棋棋盘里边,如何摆放8个皇后,用到递归或者动态规划法之类的,是值得研究的问题。
详细介绍可以看看这一篇文章,介绍的十分详细。
https://www.cnblogs.com/franknihao/p/9416145.html
"""
# 方法一,暴力迭代法。
def checkPos(positionArray):
    """Return True iff no two queens in *positionArray* attack each other.

    Each entry is a [row, column] pair. Queens conflict when they share a
    column or sit on a common diagonal; rows are distinct by construction.
    """
    count = len(positionArray)
    for a in range(count):
        row_a, col_a = positionArray[a]
        for b in range(count):
            if a == b:
                continue
            row_b, col_b = positionArray[b]
            if col_a == col_b:
                return False
            if abs(row_a - row_b) == abs(col_a - col_b):
                return False
    return True
def eightQueen1():
    """Pure brute force: enumerate all 8**8 column assignments and print a
    running count plus the placement for each conflict-free one.

    Astronomically slower than backtracking, but a useful baseline.
    """
    counter = 0
    for c0 in range(8):
        for c1 in range(8):
            for c2 in range(8):
                for c3 in range(8):
                    for c4 in range(8):
                        for c5 in range(8):
                            for c6 in range(8):
                                for c7 in range(8):
                                    candidate = [
                                        [row, col]
                                        for row, col in enumerate(
                                            (c0, c1, c2, c3, c4, c5, c6, c7))
                                    ]
                                    if checkPos(candidate):
                                        counter += 1
                                        print(counter)
                                        print(candidate)
# 方法二,在暴力迭代中去去除那些没有前途的状态,可以节约时间,其实就相当于回溯法。
def eightQueen2():
    """Backtracking variant of the brute-force search.

    Same enumeration order as eightQueen1(), but every partial placement is
    validated immediately: a conflicting queen is popped and the branch is
    abandoned (hand-rolled backtracking, one nested loop per row). Prints a
    running solution counter and each complete placement.
    """
    counter = 0
    positionArray = []
    for a in range(8):
        positionArray.append([0, a])
        # Prune immediately if the partial placement already conflicts.
        if not checkPos(positionArray):
            positionArray.pop()
            continue
        for b in range(8):
            positionArray.append([1, b])
            if not checkPos(positionArray):
                positionArray.pop()
                continue
            for c in range(8):
                positionArray.append([2, c])
                if not checkPos(positionArray):
                    positionArray.pop()
                    continue
                for d in range(8):
                    positionArray.append([3, d])
                    if not checkPos(positionArray):
                        positionArray.pop()
                        continue
                    for e in range(8):
                        positionArray.append([4, e])
                        if not checkPos(positionArray):
                            positionArray.pop()
                            continue
                        for f in range(8):
                            positionArray.append([5, f])
                            if not checkPos(positionArray):
                                positionArray.pop()
                                continue
                            for g in range(8):
                                positionArray.append([6, g])
                                if not checkPos(positionArray):
                                    positionArray.pop()
                                    continue
                                for h in range(8):
                                    positionArray.append([7, h])
                                    if not checkPos(positionArray):
                                        positionArray.pop()
                                        continue
                                    else:
                                        # Full consistent placement: report it.
                                        counter += 1
                                        print(counter)
                                        print(positionArray)
                                        positionArray.pop()
                                # Each level pops its own queen before trying
                                # the next column at the level above.
                                positionArray.pop()
                            positionArray.pop()
                        positionArray.pop()
                    positionArray.pop()
                positionArray.pop()
            positionArray.pop()
        positionArray.pop()
# 方法三,上边的代码可读性比较强,但是有很多重复的代码,虽然表达了回溯的想法,但是代码量太大,并且只能针对八皇后问题,相当于将此类问题写死了,所以下边我们整理一版代码,递归回溯方法。
def checkBoard(board, row, colume):
    """Return True iff a queen at (row, colume) conflicts with none of the
    queens already placed in rows 0..row-1 (board[i] holds row i's column).
    """
    for prior in range(row):
        same_column = (board[prior] == colume)
        same_diagonal = (abs(colume - board[prior]) == abs(row - prior))
        if same_column or same_diagonal:
            return False
    return True
def printBoard(board):
    """Render the board, one row per line: '■' marks the queen's column."""
    size = len(board)
    for queen_col in board:
        print('□ ' * queen_col + '■ ' + '□ ' * (size - 1 - queen_col))
def eightQueen(board, row):
    """Recursively place queens row by row, printing every full solution.

    :param board: list of 8 column indices, mutated in place (-1 = unfilled)
    :param row: the next row to fill
    :return: False (the search always exhausts every branch)
    """
    if row == 8:
        # Full placement found: report it. The loop below then finds no
        # legal column (all 8 are taken), so the recursion simply unwinds.
        print(board)
        printBoard(board)
    for candidate_col in range(8):
        if not checkBoard(board, row, candidate_col):
            continue
        board[row] = candidate_col
        eightQueen(board, row + 1)
    return False
if __name__ == '__main__':
    # Start from an "empty" board: -1 marks an unfilled row.
    state = [-1]*8
    print(state)
    # Enumerate and print every 8-queens solution via the recursive solver.
    eightQueen(state,0)
    # positionArray = [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]
    # res = checkPos(positionArray)
    # print(res)
|
"""PreFilter Policies Class."""
from fmcapi.api_objects.apiclasstemplate import APIClassTemplate
import logging
import warnings
class PreFilterPolicies(APIClassTemplate):
    """The PreFilterPolicies Object in the FMC."""

    VALID_JSON_DATA = ["id", "name", "type", "description", "defaultAction"]
    VALID_FOR_KWARGS = VALID_JSON_DATA + []
    URL_SUFFIX = "/policy/prefilterpolicies"
    VALID_CHARACTERS_FOR_NAME = """[.\w\d_\- ]"""
    REQUIRED_FOR_POST = ["name"]
    # Fix: the FMC API spells the second action "BLOCK_TUNNELS"; the previous
    # "BOCK_TUNNELS" typo made that action impossible to select.
    DEFAULT_ACTION_OPTIONS = ["ANALYZE_TUNNELS", "BLOCK_TUNNELS"]
    FIRST_SUPPORTED_FMC_VERSION = "6.5"

    def __init__(self, fmc, **kwargs):
        """
        Initialize PreFilterPolicies object.

        Set self.type to "PreFilterPolicy" and parse the kwargs.

        :param fmc (object): FMC object
        :param **kwargs: Any other values passed during instantiation.
        :return: None
        """
        super().__init__(fmc, **kwargs)
        logging.debug("In __init__() for PreFilterPolicies class.")
        self.parse_kwargs(**kwargs)
        self.type = "PreFilterPolicy"
        # Default to ANALYZE_TUNNELS; the property below wraps the raw value
        # in the dict shape the API expects.
        self._defaultAction = None
        self.defaultAction = "ANALYZE_TUNNELS"

    @property
    def defaultAction(self):
        """
        Getter for defaultAction.

        :return: {"type": "PrefilterPolicyDefaultAction", "action": self._defaultAction}
        """
        return {"type": "PrefilterPolicyDefaultAction", "action": self._defaultAction}

    @defaultAction.setter
    def defaultAction(self, action):
        """Setter for defaultAction; rejects values outside DEFAULT_ACTION_OPTIONS."""
        if action in self.DEFAULT_ACTION_OPTIONS:
            self._defaultAction = action
        else:
            logging.error(
                f"action, {action}, is not a valid option. Choose from {self.DEFAULT_ACTION_OPTIONS}."
            )

    def format_data(self):
        """
        Gather all the data in preparation for sending to API in JSON format.

        :return: (dict) json_data
        """
        json_data = super().format_data()
        # Fix: this log line previously referenced AccessPolicies (copy/paste).
        logging.debug("In format_data() for PreFilterPolicies class.")
        json_data["defaultAction"] = self.defaultAction
        return json_data

    def put(self):
        """PUT method for API for PreFilterPolicies not supported."""
        logging.info("PUT method for API for PreFilterPolicies not supported.")
        pass
class PreFilterPolicy(PreFilterPolicies):
    """Deprecated alias kept for backwards compatibility.

    Dispose of this Class after 20210101.  Use PreFilterPolicies() instead.
    """

    def __init__(self, fmc, **kwargs):
        """Emit a deprecation warning, then defer to PreFilterPolicies."""
        deprecation_notice = (
            "Deprecated: PreFilterPolicy() should be called via PreFilterPolicies()."
        )
        # Reset any warning filters so the notice is actually shown.
        warnings.resetwarnings()
        warnings.warn(deprecation_notice)
        super().__init__(fmc, **kwargs)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from . import WPRPCCommand
class Command(WPRPCCommand):
    """Management command that lists the blogs of a WordPress install."""

    help = 'Get list of blogs on a wordpress installation.'

    def handle(self, url=None, **optz):
        """Resolve the XML-RPC endpoint and print the user's blog list."""
        rpc_server, rpc_user, rpc_password = self.parse_rpc_endpoint(url, **optz)
        blog_data = rpc_server.wp.getUsersBlogs(rpc_user, rpc_password)
        self.print_data(blog_data, header='Blog data')
|
import random
import networkx as nx
import copy
from paras import TIME_THRE, SIMU_THRE
import time
from Timers import Timer
from paras import A1, A2, A3, A4
from paras import C
import numpy as np
from math import log
class mcts(object):
    """Flat Monte-Carlo search over annotation actions.

    Each candidate action (a 3-tuple triple to label, or a 2-tuple mention
    to assign an entity note) is repeatedly rolled out to completion with a
    simulationHelper; run() returns the action with the best UCB score.

    When if_linking is True, `state` is (sample, linking_graphs) and *args
    supplies (entity_mention, mention_entity, create_prob); otherwise
    `state` is the sample graph alone.
    """

    def __init__(self, state, actions, unused_rules, if_linking, thre_type, p1, *args) -> None:
        self.if_linking = if_linking
        # 'time' or 'simu': which budget if_continue() enforces.
        self.thre_type = thre_type
        if if_linking:
            self.sample = state[0]
            self.linking_graphs = state[1]
            self.entity_mention = args[0]
            self.mention_entity = args[1]
            # Probability that generate_note() mints a brand-new note.
            self.create_prob = args[2]
        else:
            self.sample = state
        # Deep-copied per rollout by simulationHelper; kept by reference here.
        self.actions = actions
        self.unused_rules = unused_rules
        # Deep copy needed now: select() consumes this list in place.
        self.unsimulated = copy.deepcopy(actions)
        self.UCBs = dict()
        # Fix: self.rewards was assigned twice in the original; once suffices.
        self.rewards = dict()
        # Probability of a positive label in generate_label().
        self.p1 = p1
        # Total number of simulations so far (the "n" in the UCB formula).
        self.N = 0

    def select(self) -> tuple:
        """Pick the next action to simulate: unexplored actions first (at
        random), then the action with the highest UCB score."""
        if len(self.unsimulated) != 0:
            action = random.choice(self.unsimulated)
            self.unsimulated.remove(action)
        else:
            action = max(self.UCBs, key=self.UCBs.get)
        return action

    def generate_label(self):
        """Sample a boolean triple label: True with probability p1."""
        rand = random.random()
        if rand < self.p1:
            return True
        else:
            return False

    def generate_note(self, mention):
        """Sample a note (entity identifier) for *mention*.

        With probability create_prob a fresh note (timestamp string) is
        minted; otherwise an existing note from the mention's linking graph
        is reused uniformly at random. With no existing notes, a fresh one
        is always created.
        """
        entity = self.mention_entity[mention]
        linking_graph = self.linking_graphs[entity]
        notes = list(nx.get_node_attributes(linking_graph, 'note').values())
        notes = [n for n in notes if n != None]
        if len(notes) == 0:
            return str(time.time())
        rand = random.random()
        if rand < self.create_prob:
            # Create a new entity: a timestamp works as a unique note.
            return str(time.time())
        else:
            # Merge into an existing entity: reuse one of its notes.
            note = random.choice(notes)
            return note

    def if_continue(self, t) -> bool:
        """Return True while the search budget (wall time or simulation
        count, depending on thre_type) is not exhausted."""
        if self.thre_type == 'time':
            return t < TIME_THRE
        elif self.thre_type == 'simu':
            return self.N < SIMU_THRE
        else:
            print('Invalid threshold type! (not time or simu)')
            return False

    def simulate(self, action) -> None:
        """Play out one random completion of the annotation task starting
        with *action*, and record -total_cost as that action's reward."""
        timer = Timer(A1, A2, A3, A4)
        if self.if_linking:
            state = (self.sample, self.linking_graphs)
            helper = simulationHelper(self.if_linking, state, self.actions, self.unused_rules, self.entity_mention, self.mention_entity)
        else:
            state = self.sample
            helper = simulationHelper(self.if_linking, state, self.actions, self.unused_rules)
        # Take the given first step: 2-tuples are mentions, 3-tuples triples.
        if len(action) == 2:
            note = self.generate_note(action)
            helper.update_entity(action, note)
        elif len(action) == 3:
            label = self.generate_label()
            helper.update_triple(action, label)
        else:
            print('Invalid action!')
        if action in self.unsimulated:
            self.unsimulated.remove(action)
        while not helper.check_finish():
            action_selected = helper.select_one()
            # ('triple', triple, (origin, doc_id, sentence)) or
            # ('mention', mention, (doc_id, sentence, entity_linking_graph))
            t = action_selected[0]
            information = action_selected[2]
            if t == 'triple':
                triple = action_selected[1]
                label = self.generate_label()
                helper.update_triple(triple, label)
                doc_id = information[1]
                timer.update(t, doc_id)
            else:
                mention = action_selected[1]
                note = self.generate_note(mention)
                helper.update_entity(mention, note)
                doc_id = information[0]
                # NOTE(review): if_new_note() runs *after* the note was
                # written into the graph, so it seemingly always reports the
                # note as already present -- confirm it should not run
                # before update_entity().
                if_new = helper.if_new_note(mention, note)
                timer.update(t, doc_id, if_new)
            helper.infer()
        # Less annotation time => larger reward.
        reward = (-1) * timer.total_cost()
        if action not in self.rewards:
            self.rewards[action] = []
        self.rewards[action].append(reward)
        self.N += 1

    def update(self, action) -> None:
        """Recompute the UCB score of *action*:
        UCB = mean(reward) + C * sqrt(2 ln N / n(action))."""
        UCB = np.mean(self.rewards[action]) + C * (2 * log(self.N) / len(self.rewards[action])) ** 0.5
        self.UCBs[action] = UCB

    def run(self) -> tuple:
        """Simulate until the budget runs out, then return the action with
        the highest UCB score."""
        start = time.time()
        t = time.time() - start
        while self.if_continue(t):
            action = self.select()
            self.simulate(action)
            self.update(action)
            t = time.time() - start
        return max(self.UCBs, key=self.UCBs.get)
class simulationHelper(object):
    """Mutable copy of the annotation state used inside one MCTS rollout.

    Deep-copies the sample graph (and the linking graphs when linking is
    enabled) so a simulation can label triples, assign entity notes and run
    rule inference without touching the real state. With if_linking, *args
    supplies (entity_mention, mention_entity).
    """

    def __init__(self, if_linking, state, actions, unused_rules, *args) -> None:
        self.if_linking = if_linking
        if self.if_linking:
            self.sample = copy.deepcopy(state[0])
            self.linking_graphs = copy.deepcopy(state[1])
        else:
            self.sample = copy.deepcopy(state)
        self.actions = copy.deepcopy(actions)
        self.unused_rules = copy.deepcopy(unused_rules)
        if self.if_linking:
            self.entity_mention = args[0]
            self.mention_entity = args[1]
        # NOTE(review): update_graph() reads self.tris, which is never set
        # in this class -- presumably attached by the caller; confirm.

    def select_one(self) -> tuple:
        """Uniformly pick the next pending action (purely random rollout).

        :return: ('triple', triple, (origin, doc_id, sentence)) or
                 ('mention', mention, (doc_id, sentence, linking_graph))
        """
        action = random.choice(self.actions)
        if len(action) == 3:
            # Triples carry their provenance on the sample graph.
            doc_id = self.sample.nodes[action]['doc_id']
            sent = self.sample.nodes[action]['sentence']
            return ('triple', action, (self.sample.nodes[action]['origin'], doc_id, sent))
        if len(action) == 2:
            # Mentions carry provenance on their entity's linking graph.
            entity = self.mention_entity[action]
            linking_graph = self.linking_graphs[entity]
            doc_id = linking_graph.nodes[action]['doc_id']
            sent = linking_graph.nodes[action]['sentence']
            return ('mention', action, (doc_id, sent, linking_graph))
        raise Exception('Invalid action (not mention or triple)')

    def update_triple(self, triple, label) -> None:
        """Record *label* for *triple* and drop it from the pending actions."""
        nx.set_node_attributes(self.sample, {triple: {'label': label}})
        self.actions.remove(triple)

    def update_entity(self, mention, note) -> None:
        """Assign *note* to *mention*, then locally propagate: derive edge
        labels against already-noted neighbours (mirrored onto the sample)
        and, where possible, notes for un-noted neighbours."""
        entity = self.mention_entity[mention]
        graph = self.linking_graphs[entity]
        nx.set_node_attributes(graph, {mention: {'note': note}})
        self.actions.remove(mention)
        neighbors = list(graph.neighbors(mention))
        for n in neighbors:
            # An edge label is implied whenever both endpoints carry notes.
            if graph.nodes[n]['note'] != None:
                if graph.edges[(n, mention)]['label'] == None:
                    label = (graph.nodes[n]['note'] == note)
                    attr = {'label': label}
                    nx.set_edge_attributes(graph, {(n, mention): attr})
                    # Mirror onto the sample; linking nodes there store the
                    # two mentions ordered as (min, max).
                    linking_node = (min(n, mention), max(n, mention))
                    nx.set_node_attributes(self.sample, {linking_node: attr})
            # A True-labelled edge would let the note flow to an un-noted
            # neighbour. NOTE(review): this compares the edge *attribute
            # dict* to True and is therefore always False; it very likely
            # should read graph.edges[(n, mention)]['label'] == True. Left
            # unchanged to preserve existing behaviour -- confirm and fix
            # with a test.
            if graph.edges[(n, mention)] == True:
                if graph.nodes[n]['note'] == None:
                    attr = {'note': note}
                    nx.set_node_attributes(graph, {n: attr})
                    self.actions.remove(n)

    def update_linking(self, linking, label) -> None:
        """Label a linking pair on both the sample and its linking graph,
        and propagate a note across the pair when the label is True."""
        mention1, mention2 = linking
        entity = self.mention_entity[mention1]
        linking_graph = self.linking_graphs[entity]
        # Keep the sample node and the linking-graph edge in sync.
        self.sample.nodes[linking]['label'] = label
        linking_graph.edges[linking]['label'] = label
        if label == True:
            note1 = linking_graph.nodes[mention1]['note']
            note2 = linking_graph.nodes[mention2]['note']
            if note1 != None and note2 == None:
                self.update_entity(mention2, note1)
            elif note2 != None and note1 == None:
                self.update_entity(mention1, note2)

    def update_graph(self, graph):
        """Propagate notes and edge labels inside one linking graph until a
        fixed point, pruning fully decided mentions from self.actions."""
        def update_one_round(graph):
            # One propagation sweep; returns True if anything changed.
            if_update = False
            nodes = list(graph.nodes)
            node_notes = nx.get_node_attributes(graph, 'note')
            # Notes would flow across True-labelled edges to un-noted
            # neighbours. NOTE(review): `graph.edges[(node, n)] == True`
            # compares the edge attribute dict to True and is always False;
            # it very likely should be
            # graph.edges[(node, n)]['label'] == True. Left as-is to
            # preserve existing behaviour -- confirm and fix with a test.
            for node in nodes:
                note = node_notes[node]
                if note != None:
                    neighbors = list(graph.neighbors(node))
                    for n in neighbors:
                        if graph.edges[(node, n)] == True and node_notes[n] == None:
                            if_update = True
                            graph.nodes[n]['note'] = note
            # Prune decided mentions from the pending list: noted ones, or
            # those whose incident edges are all labelled.
            for node in nodes:
                note = node_notes[node]
                if note != None and node in self.actions:
                    self.actions.remove(node)
                    continue
                if node in self.actions:
                    neighbors = nx.neighbors(graph, node)
                    labels = [graph.edges[(node, n)]['label'] for n in neighbors]
                    if not None in labels:
                        self.actions.remove(node)
            # An edge between two noted mentions gets label (note1 == note2).
            for edge in graph.edges:
                label = graph.edges[edge]['label']
                if not label == None:
                    continue
                node1, node2 = edge
                note1 = node_notes[node1]
                note2 = node_notes[node2]
                if note1 == None or note2 == None:
                    continue
                else:
                    if_update = True
                    graph.edges[edge]['label'] = bool(note1 == note2)
            # Triangle inference: in a triangle with one True edge, the two
            # remaining edges must share a label.
            for edge in graph.edges:
                now_edge = (min(edge), max(edge))
                label = graph.edges[edge]['label']
                if not label == None:
                    continue
                m = edge[0]
                entity = self.mention_entity[m]
                entity_tris = self.tris[entity]
                if now_edge not in entity_tris:
                    continue
                tri_list = entity_tris[now_edge]
                for tri in tri_list:
                    ls = []
                    for e in tri:
                        if e == now_edge:
                            continue
                        else:
                            l = graph.edges[e]['label']
                            if l == None:
                                continue
                            else:
                                ls.append(l)
                    if len(ls) == 2:
                        if True in ls:
                            if_update = True
                            ls.remove(True)
                            graph.edges[now_edge]['label'] = ls[0]
            return if_update

        while update_one_round(graph):
            pass

    def synch_graph2sample(self, graph):
        """Copy every decided edge label of *graph* onto the matching
        (min, max) linking node of the sample, when that node exists."""
        label_dict = dict(nx.get_edge_attributes(graph, 'label'))
        for edge in label_dict.keys():
            label = label_dict[edge]
            if label != None:
                pair = (min(edge), max(edge))
                # The linking graph can hold edges with no sample counterpart.
                if pair in self.sample.nodes:
                    self.sample.nodes[pair]['label'] = label

    def synch_sample2graphs(self, linking_nodes_dict):
        """Write sample-side linking decisions back onto their linking
        graphs, then re-propagate each touched graph and sync it back."""
        graphs = []
        for linking_node in linking_nodes_dict.keys():
            label = linking_nodes_dict[linking_node]
            m1, m2 = linking_node
            e = self.mention_entity[m1]
            graph = self.linking_graphs[e]
            graph.edges[linking_node]['label'] = label
            graphs.append(graph)
        # Fix: deduplicate the *list* of touched graphs. The original called
        # list(set(graph)) -- iterating a single graph's nodes -- which then
        # crashed update_graph(); set(graphs) is clearly what was intended.
        graphs = list(set(graphs))
        for graph in graphs:
            self.update_graph(graph)
            self.synch_graph2sample(graph)

    def infer(self) -> None:
        """Run rule inference to a fixed point, syncing linking decisions
        between the sample and the linking graphs when linking is on."""
        if self.if_linking:
            # infer_linking returns False when nothing new was inferred,
            # otherwise a (possibly empty) dict of new linking labels.
            result = self.infer_linking()
            while result != False:
                if len(result) != 0:
                    self.synch_sample2graphs(result)
                result = self.infer_linking()
        else:
            while self.infer_simple():
                pass

    def infer_simple(self) -> bool:
        """One inference sweep over a triples-only sample.

        Fires every unused rule whose conditions are all labelled with the
        required values and whose conclusion is still open.

        :return: True if anything new was inferred.
        """
        possible_rules = []
        triples = list(nx.get_node_attributes(self.sample, 'triple').keys())
        labeled_triples = list(set(triples).difference(set(self.actions)))
        for t in labeled_triples:
            rs = list(self.sample.successors(t))
            possible_rules += list(set(rs).intersection(set(self.unused_rules)))
        if len(possible_rules) == 0:
            return False
        else:
            if_infer = False
            for r in possible_rules:
                # A rule may be queued several times; recheck it is unused.
                if r not in self.unused_rules:
                    continue
                conds = list(self.sample.predecessors(r))
                if_labeled = [t not in self.actions for t in conds]
                # Any unlabelled condition blocks the rule.
                if False in if_labeled:
                    continue
                else:
                    self.sample.nodes[r]['state'] = 'used'
                    self.unused_rules.remove(r)
                    can_infer = True
                    # The rule's (single) conclusion and its required label.
                    conc = list(self.sample.successors(r))[0]
                    conc_label = self.sample.edges[(r, conc)]['conclusion']
                    conc_node_label = self.sample.nodes[conc]['label']
                    # Every condition label must match, and the conclusion
                    # must not be labelled already.
                    for cond in conds:
                        cond_label = self.sample.nodes[cond]['label']
                        rule_label = self.sample.edges[(cond, r)]['condition']
                        if cond_label != rule_label or conc_node_label != None:
                            can_infer = False
                            break
                    if can_infer:
                        if_infer = True
                        self.update_triple(conc, conc_label)
                        # A new label may enable further rules downstream.
                        rs = list(self.sample.successors(conc))
                        possible_rules += list(set(rs).intersection(set(self.unused_rules)))
                    else:
                        continue
            return if_infer

    def infer_linking(self):
        """One inference sweep over a sample with triple and linking nodes.

        :return: False when nothing new was inferred, otherwise a dict
                 {linking_node: label} of newly decided linking nodes
                 (possibly empty if only triples were concluded).
        """
        possible_rules = []
        result = dict()
        label_dict = nx.get_node_attributes(self.sample, 'label')
        labeled_nodes = [n for n in label_dict.keys() if label_dict[n] != None]
        for t in labeled_nodes:
            rs = list(self.sample.successors(t))
            possible_rules += list(set(rs).intersection(set(self.unused_rules)))
        if len(possible_rules) == 0:
            return False
        else:
            if_infer = False
            for r in possible_rules:
                # A rule may be queued several times; recheck it is unused.
                if r not in self.unused_rules:
                    continue
                conds = list(self.sample.predecessors(r))
                if_labeled = [t not in self.actions for t in conds]
                # Any unlabelled condition blocks the rule.
                if False in if_labeled:
                    continue
                else:
                    self.sample.nodes[r]['state'] = 'used'
                    self.unused_rules.remove(r)
                    can_infer = True
                    # The rule's (single) conclusion and its required label.
                    conc = list(self.sample.successors(r))[0]
                    conc_label = self.sample.edges[(r, conc)]['conclusion']
                    conc_node_label = self.sample.nodes[conc]['label']
                    for cond in conds:
                        cond_label = self.sample.nodes[cond]['label']
                        rule_label = self.sample.edges[(cond, r)]['condition']
                        if cond_label != rule_label or conc_node_label != None:
                            can_infer = False
                            break
                    if can_infer:
                        if_infer = True
                        if len(conc) == 3:
                            # Conclusion is a triple node.
                            self.update_triple(conc, conc_label)
                        elif len(conc) == 2:
                            # Conclusion is a linking node: record it; the
                            # caller pushes it onto the linking graphs.
                            result[conc] = conc_label
                            self.sample.nodes[conc]['label'] = conc_label
                        else:
                            print('invalid conclusion!')
                        rs = list(self.sample.successors(conc))
                        possible_rules += list(set(rs).intersection(set(self.unused_rules)))
                    else:
                        continue
            if if_infer == False:
                return False
            else:
                return result

    def check_finish(self) -> bool:
        """Return True when every sample node (and, with linking, every
        linking-graph edge) carries a label."""
        labels = list(nx.get_node_attributes(self.sample, 'label').values())
        if not self.if_linking:
            return not (None in labels)
        else:
            if_finish = True
            for entity in self.linking_graphs.keys():
                graph = self.linking_graphs[entity]
                ls = list(nx.get_edge_attributes(graph, 'label').values())
                if None in ls:
                    if_finish = False
                    break
            return (not (None in labels)) and if_finish

    def output(self) -> tuple:
        """Return the simulated end state: (sample,) or
        (sample, linking_graphs) when linking is enabled."""
        if self.if_linking:
            return (self.sample, self.linking_graphs)
        else:
            return (self.sample,)

    def if_new_note(self, mention, note):
        """True iff *note* does not yet appear anywhere in the linking
        graph of *mention*'s entity."""
        entity = self.mention_entity[mention]
        linking_graph = self.linking_graphs[entity]
        notes = set(nx.get_node_attributes(linking_graph, 'note').values())
        return note not in notes
from lib.Database import Database
async def main(args):
    """CLI entry point: dispatch the first argument to a database action."""
    if not args:
        print("What you want to do? [reset]")
        return
    if args[0] == "reset":
        await database_reset()
async def database_reset():
    """Create a Database handle and wipe it via its async reset()."""
    await Database().reset()
from Notes.forms import SearchBarForm
def SearchBarContext(request):
    """Context processor: expose a fresh SearchBarForm on every page under
    the 'searchbar' key.

    :param request: the current HttpRequest (unused)
    :return: context dict with the search bar form
    """
    context = dict()
    context['searchbar'] = SearchBarForm()
    return context
def SetCurrentCourses(request):
    """Context processor: expose the user's current-term courses on every
    page under the 'current_course' key.

    Looks up the user's term flagged current=True and returns its courses;
    anonymous users (no .terms) or users without a current term yield None.

    :param request: the current HttpRequest
    :return: {'current_course': queryset-or-None}
    """
    viewer = request.user
    try:
        term = viewer.terms.all().filter(user=viewer, current=True)[0]
        courses = term.course.all()
    except (AttributeError, IndexError):
        # No authenticated user / no current term marked.
        courses = None
    return {'current_course': courses}
from django.db import models
# Create your models here.
class Lecturer(models.Model):
    """A supervisor who offers projects."""

    name = models.CharField(max_length=128)
    # How many students/groups this lecturer can take on in total.
    capacity = models.PositiveSmallIntegerField(default=0)

    def __str__(self):
        return self.name
class Project(models.Model):
    """A project offered by a lecturer, with a limited number of places."""

    name = models.CharField(max_length=128)
    description = models.TextField()
    # How many students/groups can be assigned to this project.
    capacity = models.PositiveSmallIntegerField(default=0)
    # Deleting a lecturer removes their projects as well.
    supervisor = models.ForeignKey(Lecturer, on_delete=models.CASCADE)

    def __str__(self):
        return self.name
class Student(models.Model):
    """A student who ranks projects and may be assigned one."""

    name = models.CharField(max_length=128)
    # Free-form ranked list of preferred projects.
    preflist = models.CharField(max_length=255,
                                blank=True,
                                verbose_name='Project(s) Preflist')
    # Fix: on_delete is mandatory since Django 2.0 (the other FKs in this
    # file already pass it). SET_NULL matches null=True, so deleting a
    # project simply un-assigns the student.
    assignment = models.ForeignKey(Project,
                                   on_delete=models.SET_NULL,
                                   null=True,
                                   blank=True,
                                   default=0,  # NOTE(review): 0 is an odd default for a nullable FK -- confirm
                                   verbose_name='Assigned Project')

    def __str__(self):
        return self.name
class Group(models.Model):
    """A group of students applying for projects together."""

    name = models.CharField(max_length=128)
    # The member who speaks for the group; deleting them deletes the group.
    representative = models.ForeignKey(Student,
                                       related_name='representative',
                                       on_delete=models.CASCADE)
    members = models.ManyToManyField(Student, related_name='members')
    # Free-form ranked list of preferred projects.
    preflist = models.CharField(max_length=255,
                                blank=True,
                                verbose_name='Project(s) Preflist')
    # Fix: on_delete is mandatory since Django 2.0. SET_NULL matches
    # null=True, so deleting a project simply un-assigns the group.
    assignment = models.ForeignKey(Project,
                                   on_delete=models.SET_NULL,
                                   null=True,
                                   blank=True,
                                   default=0,  # NOTE(review): 0 is an odd default for a nullable FK -- confirm
                                   verbose_name='Assigned Project')

    def __str__(self):
        return self.name
# class GroupPreflist(models.Model):
# group = models.ForeignKey(Group)
# project = models.ForeignKey(Project)
class LecturerPreflist(models.Model):
    """A lecturer's ranked preference list over groups for one project."""

    # Fix: on_delete is mandatory since Django 2.0; CASCADE matches the
    # style of Project.supervisor above (these FKs are not nullable).
    lecturer = models.ForeignKey(Lecturer, on_delete=models.CASCADE)
    project = models.ForeignKey(Project, on_delete=models.CASCADE)
    preflist = models.CharField(max_length=255,
                                verbose_name='Group(s) Preflist')
    # NOTE(review): default=0 has no effect on a ManyToManyField -- confirm.
    matched = models.ManyToManyField(Group,
                                     blank=True,
                                     default=0,
                                     related_name='matched',
                                     verbose_name='Assigned Group')

    def __str__(self):
        return '%s, %s' % (self.lecturer, self.project)
|
import os
import io
import sys
import errno
import signal
import socket
import logging
PACKAGE_PARENT = '..'
SCRIPT_DIR = os.path.dirname(
os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
sys.path.append(os.path.normpath(
os.path.join(SCRIPT_DIR, PACKAGE_PARENT, PACKAGE_PARENT)))
from src.config import get_config
from src.utils import get_logger, calc_sha3_512_checksum
from src.ztransfer.packets import (ZTConnReqPacket, ZTDataPacket,
ZTAcknowledgementPacket, ZTFinishPacket,
ZTResendPacket, deserialize_packet,
ZT_RAW_DATA_BYTES_SIZE)
from src.ztransfer.errors import (ZTVerificationError, ERR_VERSION_MISMATCH,
ERR_ZTDATA_CHECKSUM, ERR_MAGIC_MISMATCH,
ERR_PTYPE_DNE)
config = get_config()

# Protocol sequence numbers: the connection request is always packet 0,
# data packets are numbered from 1 upward.
CREQ_SEQ = 0
DATA_SEQ_FIRST = 1

# Tunables, each overridable via the "udp.client" section of the config
# (second argument of .get() is the built-in fallback).
CREQ_TIMER_DURATION = config["udp"]["client"].get("creq_timer_duration", 1)
# NOTE(review): "..._duraton" looks like a typo for "..._duration", but the
# key must stay as-is or existing config files would silently stop applying.
RAPID_RECV_TIMER_DURATION = config["udp"]["client"].get("rapid_recv_timer_duraton", 1)
MAX_RESENDS_BEFORE_TIMEOUT = config["udp"]["client"].get("max_resends_before_timeout", 20)
# Congestion-window parameters: multiplicative growth during rapid start,
# additive growth / multiplicative shrink afterwards (AIMD-style).
WINDOW_SIZE_START = config["udp"]["client"].get("window_size_start", 100)
WINDOW_SIZE_INC_RAP = config["udp"]["client"].get("window_increase_factor_rapid_start", 2)
WINDOW_SIZE_INC_REG = config["udp"]["client"].get("window_increase_factor_regular", 10)
WINDOW_SIZE_DEC_REG = config["udp"]["client"].get("window_decrease_factor_regular", 2)
WS_RAPID_START_MAX = config["udp"]["client"].get("ws_rapid_start_max", 1000)
class CREQTimeout(Exception):
    """Raised by the SIGALRM handler when waiting for the CREQ ACK times out."""
class RRecvTimeout(Exception):
    """Raised by the SIGALRM handler when the rapid-receive phase times out."""
class ZTransferUDPClient(object):
    """Client side of the ZTransfer UDP file-transfer protocol.

    Drives a small state machine: bind a local port and send the connection
    request (INIT), wait for its ACK (WAIT_CREQ_ACK), then alternate between
    blasting a window of data packets (RAPID_SEND) and collecting their ACKs
    (RAPID_RECV) until everything is acknowledged (FIN).
    """

    STATE_INIT = 0           # not yet bound / CREQ not sent
    STATE_WAIT_CREQ_ACK = 1  # CREQ sent, waiting for its ACK
    STATE_RAPID_SEND = 2     # sending the current window of data packets
    STATE_RAPID_RECV = 3     # collecting ACKs for the current window
    STATE_FIN = 4            # all data ACKed, finishing up
    def __init__(self, server_host: str, server_port: int, port_pool: list, file_name: str, file_stream: io.BytesIO, logger_verbose: bool = False):
        """Set up (but do not start) a transfer of *file_stream* to the server.

        :param server_host: server IP/hostname
        :param server_port: server UDP port
        :param port_pool: local ports to try binding, in order
        :param file_name: name announced to the server in the CREQ packet
        :param file_stream: in-memory file content to transmit
        :param logger_verbose: enable debug logging when True
        """
        self.server_addr = (server_host, server_port)
        self.port_pool = port_pool
        # Local port actually bound; chosen from port_pool during STATE_INIT.
        self.port_occupied = None
        self.file_name = file_name
        self.file_stream = file_stream
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        # Bookkeeping of data-packet sequence numbers.
        self.acked_packet_seqs = set()   # ACKed so far (whole transfer)
        self.all_data_seqs = set()       # every seq that must be delivered
        self.to_send_seqs = set()        # still owed in the current pass
        self.session_sent_seqs = set()   # sent within the current window/RTT
        self.failed_packet_count = 0
        self._server_disconnect_ctr = 0
        self._acks_got_up_to_timeout = 0
        # Congestion-control state: previous window's drop rate (percent)
        # and the current send-window size.
        self.old_drop_factor = 100
        self.window_size = WINDOW_SIZE_START
        self.__in_rapid_start = True
        self.__in_first_rtt = True
        self._updated_window_size_in_rtt = False
        # Zero-copy view over the file buffer; created in initiate_transfer().
        self.buffer_memview = None
        self.logger = get_logger("ZTransferUDPClient", logger_verbose)
        self.logger.debug(f"Constructed ZTransferUDPClient({server_host}, {server_port}, ...)")
        self.logger.debug(f"WINDOW_SIZE: {self.window_size}")
        self.logger.debug(f"CREQ_TIMER_DURATION: {CREQ_TIMER_DURATION}")
        self.logger.debug(f"RAPID_RECV_TIMER_DURATION: {RAPID_RECV_TIMER_DURATION}")
def _creq_timer_handler(self, *args, **kwargs):
raise CREQTimeout()
def _rrecv_timer_handler(self, *args, **kwargs):
raise RRecvTimeout()
def initiate_transfer(self):
self.buffer_memview = self.file_stream.getbuffer()
file_size_bytes = self.buffer_memview.nbytes
num_data_packets = file_size_bytes // ZT_RAW_DATA_BYTES_SIZE
if file_size_bytes % ZT_RAW_DATA_BYTES_SIZE > 0:
num_data_packets += 1
self.last_data_seq = DATA_SEQ_FIRST + num_data_packets - 1
self.all_data_seqs = set(range(DATA_SEQ_FIRST, DATA_SEQ_FIRST + num_data_packets))
file_checksum = calc_sha3_512_checksum(self.file_stream.getvalue())
state = self.STATE_INIT
self.logger.debug(f"File size: {file_size_bytes} bytes")
self.logger.debug(f"File checksum (SHA3-512): {file_checksum[:10]}...")
self.logger.debug(f"Total {num_data_packets} data packets will be sent")
while True:
try:
if state == self.STATE_INIT:
self.logger.debug(f"State: INIT")
for port in self.port_pool:
try:
self.socket.bind(("0.0.0.0", port))
except socket.error as e:
if e.errno == errno.EADDRINUSE:
continue
else:
self.port_occupied = port
break
if self.port_occupied is None:
self.logger.critical(f"Could not bind to any ports from: {self.port_pool}")
self.clear()
return
self.logger.debug(f"Bound to port: {self.port_occupied}")
self._creq_packet = ZTConnReqPacket(CREQ_SEQ, file_size_bytes,
self.last_data_seq, file_checksum, self.file_name)
try:
self.socket.sendto(self._creq_packet.serialize(), self.server_addr)
except Exception as e:
self.logger.critical(f"Error occured while self.socket.sendto(): {e}")
self.clear()
return
signal.signal(signal.SIGALRM, self._creq_timer_handler)
signal.alarm(CREQ_TIMER_DURATION)
state = self.STATE_WAIT_CREQ_ACK
self.logger.debug(f"Sent CREQ packet and updated state to WAIT_CREQ_ACK")
elif state == self.STATE_WAIT_CREQ_ACK:
self.logger.debug(f"State: WAIT_CREQ_ACK")
recv_data, server_addr = self.socket.recvfrom(1000)
self.logger.debug(f"Received {len(recv_data)} bytes from the server")
if server_addr != self.server_addr:
# Discard packet
self.logger.debug(f"Another server ({server_addr}) sent the packet, discarding.")
continue
if len(recv_data) != 1000:
self.logger.debug(f"Packet probably corrupt (data size != 1000), resending CREQ packet")
# Stop CREQ timer
signal.alarm(0)
try:
self.socket.sendto(self._creq_packet.serialize(), self.server_addr)
except Exception as e:
self.logger.critical(f"Error occured while self.socket.sendto(): {e}")
self.clear()
return
signal.signal(signal.SIGALRM, self._creq_timer_handler)
signal.alarm(CREQ_TIMER_DURATION)
self.logger.debug(f"Sent CREQ packet again and restarted timer")
continue
try:
self.logger.debug(f"Deserializing received packet data...")
packet = deserialize_packet(recv_data)
except ZTVerificationError as e:
# Stop CREQ timer
signal.alarm(0)
try:
self.socket.sendto(self._creq_packet.serialize(), self.server_addr)
except Exception as e:
self.logger.critical(f"Error occured while self.socket.sendto(): {e}")
self.clear()
return
signal.signal(signal.SIGALRM, self._creq_timer_handler)
signal.alarm(CREQ_TIMER_DURATION)
self.logger.debug(f"Sent CREQ packet again and restarted timer")
else:
self.logger.debug(f"Packet OK: {packet.__class__.__name__} ({packet.sequence_number})")
if not isinstance(packet, ZTAcknowledgementPacket):
self.logger.warning(f"Was waiting for ACK, got '{packet.ptype}'")
# Stop CREQ timer
signal.alarm(0)
try:
self.socket.sendto(self._creq_packet.serialize(), self.server_addr)
except Exception as e:
self.logger.critical(f"Error occured while self.socket.sendto(): {e}")
self.clear()
return
signal.signal(signal.SIGALRM, self._creq_timer_handler)
signal.alarm(CREQ_TIMER_DURATION)
self.logger.debug(f"Sent CREQ packet again and restarted timer")
else:
if packet.seq_to_ack != CREQ_SEQ:
# Stop CREQ timer
signal.alarm(0)
try:
self.socket.sendto(self._creq_packet.serialize(), self.server_addr)
except Exception as e:
self.logger.critical(f"Error occured while self.socket.sendto(): {e}")
self.clear()
return
signal.signal(signal.SIGALRM, self._creq_timer_handler)
signal.alarm(CREQ_TIMER_DURATION)
self.logger.debug(f"Sent CREQ packet again and restarted timer")
else:
# Stop CREQ timer
signal.alarm(0)
state = self.STATE_RAPID_SEND
self.logger.debug(f"ACK OK, updated state to RAPID_SEND")
elif state == self.STATE_RAPID_SEND:
self.logger.debug(f"State: RAPID_SEND")
# Ensure timer is stopped
signal.alarm(0)
self.failed_packet_count += len(self.session_sent_seqs - self.acked_packet_seqs)
self.to_send_seqs = self.all_data_seqs - self.acked_packet_seqs
if len(self.to_send_seqs) == 0:
state = self.STATE_FIN
self.logger.debug(f"Got all ACKs, updated state to FIN")
continue
if (not self._updated_window_size_in_rtt) and (not self.__in_first_rtt):
if len(self.session_sent_seqs) > 0:
drop_factor = len(self.session_sent_seqs - self.session_acked_seqs) / len(self.session_sent_seqs)
drop_factor *= 100
if (drop_factor < self.old_drop_factor) or (drop_factor == 0):
self.logger.debug(f"Drop factor ({drop_factor}) is less than old one ({self.old_drop_factor})")
if self.__in_rapid_start:
self.window_size *= WINDOW_SIZE_INC_RAP
else:
self.window_size += WINDOW_SIZE_INC_REG
else:
self.logger.debug(f"Drop factor ({drop_factor}) is greater than old one ({self.old_drop_factor})")
self.window_size = max(WINDOW_SIZE_START, self.window_size / WINDOW_SIZE_DEC_REG)
self.__in_rapid_start = False
self.old_drop_factor = drop_factor
self.session_sent_seqs = set()
self.session_acked_seqs = set()
self.logger.debug(f"RTT window size: {self.window_size}")
_window_ctr = 0
for packet_seq in self.to_send_seqs:
if _window_ctr >= self.window_size:
break
self.file_stream.seek((packet_seq - 1) * ZT_RAW_DATA_BYTES_SIZE)
if packet_seq == self.last_data_seq:
_data_bytes = self.file_stream.read()
else:
_data_bytes = self.file_stream.read(ZT_RAW_DATA_BYTES_SIZE)
data_packet = ZTDataPacket(packet_seq, _data_bytes)
self.logger.debug(f"==> Data pkt #{packet_seq} ({(packet_seq - 1) * ZT_RAW_DATA_BYTES_SIZE}:{(packet_seq - 1) * ZT_RAW_DATA_BYTES_SIZE + len(_data_bytes)})")
try:
self.socket.sendto(data_packet.serialize(), self.server_addr)
except Exception as e:
self.logger.critical(f"Error occured while self.socket.sendto(): {e}")
self.clear()
return
self.session_sent_seqs.add(packet_seq)
_window_ctr += 1
signal.signal(signal.SIGALRM, self._rrecv_timer_handler)
signal.alarm(RAPID_RECV_TIMER_DURATION)
if self.__in_first_rtt:
self.__in_first_rtt = False
self._acks_got_up_to_timeout = 0
self.logger.debug(f"Sent {_window_ctr} data packets and started RAPID_RECV timer")
state = self.STATE_RAPID_RECV
elif state == self.STATE_RAPID_RECV:
self.logger.debug(f"State: RAPID_RECV")
self._updated_window_size_in_rtt = False
if self.acked_packet_seqs == self.all_data_seqs:
signal.alarm(0)
state = self.STATE_FIN
self.logger.debug(f"Got all ACKs, updated state to FIN")
continue
if self.session_sent_seqs.issubset(self.acked_packet_seqs):
self.logger.debug(f"All window packets ACKed, updated state to RAPID_SEND")
self.to_send_seqs = self.all_data_seqs - self.acked_packet_seqs
state = self.STATE_RAPID_SEND
continue
recv_data, server_addr = self.socket.recvfrom(1000)
self.logger.debug(f"Received {len(recv_data)} bytes from the server")
if server_addr != self.server_addr:
# Discard packet
self.logger.debug(f"Another server ({server_addr}) sent the packet, discarding.")
continue
if len(recv_data) != 1000:
self.logger.debug(f"Packet probably corrupt (data size != 1000), dropping packet")
continue
try:
self.logger.debug(f"Deserializing received packet data...")
packet = deserialize_packet(recv_data)
except ZTVerificationError as e:
if e.err_code == ERR_MAGIC_MISMATCH:
self.logger.warning(f"Wrong magic number '{e.extras['magic']}' (seq: {e.extras['seq']}, ptype: {e.extras['ptype']}, ts: {e.extras['ts']})")
if e.err_code == ERR_VERSION_MISMATCH:
self.logger.warning(f"Mismatched version number '{e.extras['version']}' (seq: {e.extras['seq']}, ptype: {e.extras['ptype']}, ts: {e.extras['ts']})")
if e.err_code == ERR_PTYPE_DNE:
self.logger.warning(f"Not known packet type '{e.extras['ptype']}' (seq: {e.extras['seq']}, ts: {e.extras['ts']})")
if e.err_code == ERR_ZTDATA_CHECKSUM:
self.logger.warning(f"Corrupt packet. (seq: {e.extras['seq']}, ptype: {e.extras['ptype']}, ts: {e.extras['ts']})")
continue
self.logger.debug(f"Packet OK: {packet.__class__.__name__} ({packet.sequence_number})")
if isinstance(packet, ZTAcknowledgementPacket):
if DATA_SEQ_FIRST <= packet.seq_to_ack <= (2**32 - 2):
self.acked_packet_seqs.add(packet.seq_to_ack)
self.session_acked_seqs.add(packet.seq_to_ack)
self.logger.debug(f"ACK received for data packet #{packet.seq_to_ack}")
self._acks_got_up_to_timeout += 1
else:
self.logger.debug(f"ACK packet has seq_to_ack out of ranges: {packet.seq_to_ack}, dropped.")
elif isinstance(packet, ZTResendPacket):
if DATA_SEQ_FIRST <= packet.seq_to_rsnd <= (2**32 - 2):
self.acked_packet_seqs.discard(packet.seq_to_rsnd)
self.session_acked_seqs.discard(packet.seq_to_rsnd)
self.logger.debug(f"RSND received for data packet #{packet.seq_to_rsnd}")
self._acks_got_up_to_timeout += 1
else:
self.logger.debug(f"RSND packet has seq_to_rsnd out of ranges: {packet.seq_to_rsnd}, dropped.")
elif isinstance(packet, ZTFinishPacket):
self.logger.debug(f"Received FIN packet, premature finish, updated state to FIN.")
state = self.STATE_FIN
continue
else:
self.logger.warning(f"Was waiting for ACK or RSND, got '{packet.ptype}', discarded.")
pass
elif state == self.STATE_FIN:
self.logger.debug(f"State: FIN")
self.clear()
break
except CREQTimeout:
self.logger.debug(f"Timeout: Hit to CREQTimeout")
try:
self.socket.sendto(self._creq_packet.serialize(), self.server_addr)
except Exception as e:
self.logger.critical(f"Error occured while self.socket.sendto(): {e}")
self.clear()
return
signal.signal(signal.SIGALRM, self._creq_timer_handler)
signal.alarm(CREQ_TIMER_DURATION)
self.logger.debug(f"Sent CREQ packet again and restarted timer")
except RRecvTimeout:
self.logger.debug(f"Timeout: Hit to RRecvTimeout")
if self._acks_got_up_to_timeout == 0:
self._server_disconnect_ctr += 1
else:
self._server_disconnect_ctr = 0
self.logger.debug(f"server_disconnect_ctr: {self._server_disconnect_ctr}")
if self._server_disconnect_ctr >= MAX_RESENDS_BEFORE_TIMEOUT:
# Server is probably down.
self.logger.debug(f"Resent the same window for {self._server_disconnect_ctr} times. The server is probably down. Updated state to FIN.")
state = self.STATE_FIN
else:
# This must not be but for protection anyway
if len(self.session_sent_seqs) > 0:
drop_factor = len(self.session_sent_seqs - self.session_acked_seqs) / len(self.session_sent_seqs)
drop_factor *= 100
if (drop_factor < self.old_drop_factor) or (drop_factor == 0):
self.logger.debug(f"Drop factor ({drop_factor}) is less than old one ({self.old_drop_factor})")
if self.__in_rapid_start:
self.window_size *= WINDOW_SIZE_INC_RAP
else:
self.window_size += WINDOW_SIZE_INC_REG
else:
self.logger.debug(f"Drop factor ({drop_factor}) is greater than old one ({self.old_drop_factor})")
self.window_size = max(WINDOW_SIZE_START, self.window_size / WINDOW_SIZE_DEC_REG)
self.__in_rapid_start = False
self.old_drop_factor = drop_factor
self._updated_window_size_in_rtt = True
self.to_send_seqs = self.all_data_seqs - self.acked_packet_seqs
state = self.STATE_RAPID_SEND
except Exception as e:
self.logger.critical(f"UNEXPECTED: {e}")
self.clear()
return
    def clear(self):
        """Release the client's network resources by closing its UDP socket."""
        self.socket.close()
|
from django.test import TestCase
import pytest
# Create your tests here.
from datetime import datetime, timedelta
from django.contrib.auth.models import User, Group
from snippets.models import Snippet, CoursePage, CourseList
from django.utils import timezone
class SnippetModelTest(TestCase):
    """Unit tests for the Snippet model's field metadata."""

    @classmethod
    def setUpTestData(cls):
        """Create one user and one snippet, shared read-only by all tests."""
        # NOTE(review): User.objects.create stores the raw password; switch to
        # create_user if any test ever needs to authenticate this user.
        cls.user = User.objects.create(username='test', password='123')
        cls.snippet = Snippet.objects.create(
            title='ABCDF',
            code='abcd',
            linenos=False,
            language='python',
            style='friendly',
            perm_list='1,2',
            owner=cls.user,
            highlighted='oprst',
        )

    def test_title(self):
        """The 'title' field keeps its default verbose_name, 'title'."""
        # Look the row up by the pk of the object we created instead of the
        # hard-coded id=1, which breaks when primary-key sequences are not
        # reset between test classes (e.g. on PostgreSQL).
        snippet = Snippet.objects.get(pk=self.snippet.pk)
        field_label = snippet._meta.get_field('title').verbose_name
        self.assertEqual(field_label, 'title')
class CoursePageModelTest(TestCase):
    """Unit tests for the CoursePage model's string representation."""

    @classmethod
    def setUpTestData(cls):
        """Create a user, snippet, course and course page for all tests."""
        cls.user = User.objects.create(username='test', password='123')
        cls.snippet = Snippet.objects.create(
            title='ABCDF',
            code='abcd',
            linenos=False,
            language='python',
            style='friendly',
            perm_list='1,2',
            owner=cls.user,
            highlighted='oprst',
        )
        # NOTE(review): datetime(2017, 3, 5) is naive; with USE_TZ=True Django
        # warns and assumes the default timezone — consider aware datetimes.
        cls.course = CourseList.objects.create(
            created=timezone.now(),
            title='abcd',
            descrpt='afajhaja',
            owner=cls.user,
            date_begin=datetime(2017, 3, 5),
        )
        cls.page = CoursePage.objects.create(
            course=cls.course,
            snippet=cls.snippet,
            order=1,
            dtm=datetime(2017, 3, 5),
        )

    def test_object_name(self):
        """str(CoursePage) follows the '<order> <title> ID = <id> <dtm> ' format."""
        # Fetch by the created object's pk rather than the fragile id=1.
        coursepage = CoursePage.objects.get(pk=self.page.pk)
        expected_object_name = '%d %s ID = %d %s ' % (
            coursepage.order,
            coursepage.snippet.title,
            coursepage.snippet.id,
            coursepage.dtm,
        )
        self.assertEqual(expected_object_name, str(coursepage))
@pytest.fixture
def input_data():
    """Supply the (recipient list, message) pair used by the mail tests."""
    recipients = ['abc@abx.ab']
    message = 'def'
    print('Solver data generated!')
    return recipients, message
@pytest.fixture
def input_data_with_result(input_data):
    """Pair the raw mail-test input with its expected parsed result."""
    expected = ('abc', 'def')
    return input_data, expected
def test_send_mail_note(input_data):
    """send_mail_note must accept exactly the two arguments the fixture yields."""
    from snippets.management.commands.run_check import send_mail_note
    recipients, message = input_data
    try:
        send_mail_note(recipients, message)
    except TypeError:
        pytest.fail('Function must take 2 args')
|
from _typeshed import Incomplete
# Type stubs (signatures only): directed random-graph generators. The
# `Incomplete` placeholders mark parameters whose precise types have not been
# annotated yet. Presumably networkx.generators.directed — confirm upstream.
def gn_graph(
    n,
    kernel: Incomplete | None = None,
    create_using: Incomplete | None = None,
    seed: Incomplete | None = None,
): ...
def gnr_graph(
    n, p, create_using: Incomplete | None = None, seed: Incomplete | None = None
): ...
def gnc_graph(
    n, create_using: Incomplete | None = None, seed: Incomplete | None = None
): ...
def scale_free_graph(
    n,
    alpha: float = 0.41,
    beta: float = 0.54,
    gamma: float = 0.05,
    delta_in: float = 0.2,
    delta_out: int = 0,
    create_using: Incomplete | None = None,
    seed: Incomplete | None = None,
    initial_graph: Incomplete | None = None,
): ...
def random_k_out_graph(
    n, k, alpha, self_loops: bool = True, seed: Incomplete | None = None
): ...
|
class CornishPlot:
    # Placeholder class — no attributes or behavior defined yet.
    pass
#-------------------------------------------------------------------------------
# Name: ANSYS Wrapper Generator GUI
# Owner: Mechanical Solutions Inc.
#
# Author: Kyle Lavoie, Mechanical Solutions Inc.
#
# Created: 5/14/2013
# Copyright: (c) Mechanical Solutions Inc.
#-------------------------------------------------------------------------------
import logging
import os
import subprocess
import sys
import time
import string
import ansysinfo
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import ConfigParser
from PyQt4 import QtCore, QtGui
from ui_ANSYS_Wrapper_Generator_3 import *
import openmdao.gui.filemanager
# Indentation steps used when emitting generated wrapper source.
# NOTE(review): assumed to be a four-space step; the source's whitespace is
# mangled — confirm against the original file if emitted indentation matters.
indent1 = ' ' * 4
indent2 = indent1 * 2
indent3 = indent1 * 3
indent4 = indent1 * 4
indent5 = indent1 * 5
indent6 = indent1 * 6
triplequote = '"""'  # literal triple quote for emitted docstrings
CR = "'\\n'"         # the four characters '\n' (a quoted newline escape) for emitted code
class MainDlg(QDialog, Ui_Dialog):
    """Main dialog of the ANSYS wrapper-generator GUI.

    Collects the model location and names from the form and drives a
    WrapperGenerator when the user clicks Generate.
    """

    def __init__(self, parent=None, logger_name = None):
        super(MainDlg, self).__init__(parent)
        self.__index = 0
        self.setupUi(self)
        # set initial values shown in the form
        self.ansysFileDir.setText('C:/')
        self.ansysFileName.setText('file.db')
        self.genWrapName.setText('file')
        self.ansysVer.setText('v14.5')
        self.logger_name = logger_name

    @pyqtSignature("")
    def on_generateWrap_clicked(self):
        """Create the TestGen/TestOut work directories and run the generator."""
        # Get input values from the form.
        self.ansysDir = str(self.ansysFileDir.text())
        self.ansysName = str(self.ansysFileName.text())
        self.wrapName = str(self.genWrapName.text())
        self.version = str(self.ansysVer.text())
        testdir = self.ansysDir + '/'
        # Build TestGen/TestOut directory if it doesn't already exist.
        genfolder = "TestGen"
        outfolder = "TestOut"
        beginpath = self.ansysDir
        firstpath = os.path.join(beginpath, genfolder)
        fullpath = os.path.join(firstpath, outfolder)
        if not os.path.exists(fullpath):
            os.makedirs(fullpath)
        # Normalize e.g. 'v14.5' into the 'ANSYS145' version tag.
        versionNum = ''.join(filter(lambda x: x.isdigit(), self.version))
        version = "ANSYS" + versionNum
        name = self.wrapName
        genfilename = testdir + 'TestGen/' + self.wrapName + '.py'
        dbfile = testdir + self.ansysName
        self.wg = WrapperGenerator(name, genfilename, dbfile, ANSYS_VER = version, logger_name = self.logger_name)
        self.wg.generate()

    @pyqtSignature("")
    def on_dirBrowse_clicked(self):
        """Let the user pick the ANSYS model directory."""
        path = QFileDialog.getExistingDirectory(self,
            "Make PyQt - Set Path", self.ansysFileDir.text())
        if path:
            self.ansysFileDir.setText(QDir.toNativeSeparators(path))

    @pyqtSignature("")
    def on_nameBrowse_clicked(self):
        """Let the user pick the ANSYS .db file; only its basename is kept."""
        # FIX: removed two unused locals that shadowed the 'string' module and
        # the 'filter' builtin.
        qstr = QFileDialog.getOpenFileName(self, QString("Open File"), self.ansysFileDir.text(), QString("ANSYS (*.db)"))
        fileName = os.path.basename(str(qstr))
        self.ansysFileName.setText(fileName)
class WrapperGenerator:
    """Generates an OpenMDAO Wrapper for ANSYS Structural, based on info either
    in a generated component information file, or in the ANSYS db.
    To use: initialize, then call generate().
    *Parameters*
    name: string
        Name used to identify the Wrapped Component.
    genfilename: string
        Full path to filename of the generate Wrapped Component.
    dbfile: string (optional)
        Full path to name of ANSYS Structural .db file. Default ''. One of dbfile and componentsfile MUST be set.
    componentsfile: string (optional)
        Full path to name of previously created components file for the ANSYS Structural Model. Default ''.
        One of dbfile and componentsfile MUST be set.
    ANSYS_VER: string (optional)
        ANSYS Version String. Default ANSYS145.
    model_file_ext: string (optional)
        File extension for the model file. Default py.
    logger_name: string(optional)
        Name of an existing logging::logger to use, if any. Default None. If None, a logger will be created with an internal name.
    initial_values_dictionary: dictionary of string to float (optional)
        Values in the original Structural model of the items in ansysinfo globalinputtypes
    """
    # NOTE(review): these are *class* attributes, so the mutable ones
    # (components/prep7/solution/post) are shared by all instances; fine for
    # single-instance use, but they belong in __init__.
    ok = True
    components = {}  # empty dictionary of dictionaries of node numbers
    prep7 = []
    solution = []
    post = []

    # NOTE(review): initial_values_dictionary is a mutable default argument;
    # it is only read here, but a None sentinel would be safer.
    def __init__(self, name, genfilename, dbfile = '', componentsfile = '', ANSYS_VER = 'ANSYS145', model_file_ext = 'py',
                 logger_name = None, initial_values_dictionary = {'omega_Z':0.0, 'temp_ref':0.0, 'temp_unif':0.0}):
        if logger_name == None:
            self.logger = logging.getLogger("MSI")
        else:
            self.logger = logging.getLogger(logger_name)
        # At least one source of component information is required.
        if dbfile == '' and componentsfile == '':
            s = 'AnsysWrapperGenerator for ' + name + ': at least one of dbfile or componentsfile is required'
            print 'ERROR: ' + s
            self.logger.error(s)
            self.ok = False
            return
        self.genfilename = genfilename
        self.dbfile = dbfile
        self.componentsfile = componentsfile
        self.name = name
        # The generated OpenMDAO class is named '<name>Wrapper'.
        self.classname = name + 'Wrapper'
        self.ANSYS_VER = ANSYS_VER
        self.model_file_ext = model_file_ext
        self.initial_values_dictionary = initial_values_dictionary
        # Filled in by _gendecls() while emitting declarations.
        self.input_names = set([])
        self.output_names = set([])
    def get_model_file_name(self):
        """Return the components-file name: '<componentsfilebase>.<model_file_ext>'.

        NOTE(review): self.componentsfilebase is only assigned inside
        _gen_componentsfile(); calling this earlier raises AttributeError.
        """
        return self.componentsfilebase + '.' + self.model_file_ext
def _gen_componentsfile(self, path):
cfile = ''
try:
ansysdir = os.environ[self.ANSYS_VER + '_DIR']
except KeyError as ke:
s = 'Cannot find ' + self.ANSYS_VER + '_DIR in environment\n\t' + sys.exc_info()[0] + '\n\t' + str(ke)
print 'ERROR: ' + s
self.logger.error(s)
self.ok = False
return cfile
try:
ansysdir2 = os.environ['ANSYS_SYSDIR']
except KeyError as ke:
s = 'Cannot find ANSYS_SYSDIR in environment\n\t' + sys.exc_info()[0] + '\n\t' + str(ke)
print 'ERROR: ' + s
self.logger.error(s)
self.ok = False
return cfile
ansys_exe = os.path.join(ansysdir, 'bin', ansysdir2, self.ANSYS_VER)
dbname, dbext = os.path.splitext(self.dbfile)
if len(dbext): dbext = dbext[1:]
tempstr = 'tmp_' + time.strftime('%Y_%m_%d__%H_%M_%S')
inputfile = tempstr + '_gen_comps.dat'
self.componentsfilebase = 'c_' + tempstr
try:
f = open(inputfile, 'w')
outfile = tempstr + '_ListComps.out'
#import pdb; pdb.set_trace()
f.write('/batch\n')
f.write('/TITLE, List Components of ' + self.name + '\n')
f.write('/prep7\n')
f.write('NNAME = \'NODE\'\n')
f.write('resume,' + dbname + ',' + dbext + '\n')
f.write('CSYS, 1\n')
f.write('/STATUS,UNITS\n')
f.write('*get,units,ACTIVE,0,UNITS\n')
f.write('*get,nComps,COMP,,ncomp\n')
f.write('/STATUS,GLOBAL\n')
f.write('*cfopen,' + self.componentsfilebase + ',' + self.model_file_ext + ',, \n')
f.write('*vwrite\n')
f.write('class FeaModelInPythonFormat:\n')
f.write('*vwrite\n')
f.write(' def __init__(self):\n')
f.write('*vwrite\n')
f.write(' self.nodeLabels = ["number", "x", "y", "z",]\n')
f.write('*vwrite, units\n')
f.write(' self.units = %I\n')
f.write('*vwrite\n')
f.write(' self.coordinateSystem = "Cartesian"\n')
f.write('*vwrite\n')
f.write(' self.smoothingCoordinates = "XYZ"\n')
f.write('*vwrite\n')
f.write(' self.nodeMap = {\n')
f.write('i = 1\n')
f.write('! one DO loop to fill up the node map\n')
f.write('*do,i,1,nComps,1\n')
f.write(' *get,compName,comp,i,name ! get the name of the nth component\n')
f.write(' *get,nType,comp,compName,type ! get the type #\n')
f.write(' ! 1=Nodes, 2=Elements, 6=Keypoints, 7=Lines, 8=Areas, 9=Volumes\n')
f.write(' ! first, print out all of the nodes\n')
f.write(' ! node components\n')
f.write(' *if,nType,eq,1,then\n')
f.write(' allsel\n')
f.write(' cmsel,,compName\n')
f.write(' ! keypoint components\n')
f.write(' *elseif,nType,eq,6,then\n')
f.write(' allsel\n')
f.write(' cmsel,,compName\n')
f.write(' nslk,S\n')
f.write(' *elseif,nType,eq,8,then\n')
f.write(' allsel\n')
f.write(' cmsel,,compName\n')
f.write(' nsla,s,1\n')
f.write(' *endif\n')
f.write(' ! start writing the nodes\n')
f.write(' *get,nCount,node,,count ! Get total number of selected nodes\n')
f.write(' *vwrite, compName\n')
f.write(' "%s" :\n')
f.write(' *vwrite\n')
f.write(' [\n')
f.write(' *dim, nArray, array, nCount,4 ! Create NCOUNT array\n')
f.write(' *vget, nArray(1,1), node, 1, nlist ! Fill NARRAY with node numbers\n')
f.write(' *vget, nArray(1,2), node, 2, loc, X\n')
f.write(' *vget, nArray(1,3), node, 3, loc, Y\n')
f.write(' *vget, nArray(1,4), node, 4, loc, Z\n')
f.write(' *vwrite, nArray(1,1), nArray(1,2), nArray(1,3), nArray(1,4) ! Write NARRAY to file\n')
f.write(' [%6D, %10.4F, %10.4F, %10.4F,],\n')
f.write(' *vwrite\n')
f.write(' ],\n')
f.write(' *del,nArray,,nopr\n')
f.write('*enddo ! finished with the node map\n')
f.write('*vwrite\n')
f.write(' }\n')
f.write('\n')
f.write('\n')
f.write('\n')
f.write('! another DO loop for the facets\n')
f.write('*vwrite\n')
f.write(' self.facetMap = {\n')
f.write('*do,i,1,nComps,1\n')
f.write(' *get,compName,comp,i,name ! get the name of the nth component\n')
f.write(' *get,nType,comp,compName,type ! get the type #\n')
f.write(' ! 1=Nodes, 2=Elements, 6=Keypoints, 7=Lines, 8=Areas, 9=Volumes\n')
f.write('\n')
f.write(' *if,nType,eq,8,then\n')
f.write('\n')
f.write(' allsel\n')
f.write(' cmsel,,compName\n')
f.write(' nsla,s,1\n')
f.write('\n')
f.write(' nsla,S,0 ! select all nodes internal to the area(s)\n')
f.write(' esln,S,0 ! select all elements connected to the nodes\n')
f.write(' nsla,S,1 ! select all nodes internal to the area and\n')
f.write('\n')
f.write(' *get,eCount,ELEM,,count ! number of selected elements\n')
f.write(' current_element_number=0\n')
f.write('*vwrite, compName\n')
f.write(' "%s" :\n')
f.write('*vwrite\n')
f.write(' [\n')
f.write('\n')
f.write(' *do,AR13,1,eCount ! loop on all selected nodes\n')
f.write(' current_element_number=ELNEXT(current_element_number) ! element number in the list\n')
f.write(' face_number=NMFACE(current_element_number) ! face number\n')
f.write(' *if, face_number, gt, 0, then\n')
f.write(' *dim,fn,array,8\n')
f.write(' *do,j,1,8\n')
f.write(' fn(j) = ndface(current_element_number, face_number, j) !node numbers for this face\n')
f.write(' *enddo\n')
f.write(' *vwrite, current_element_number, face_number, fn(1), fn(2), fn(3), fn(4), fn(5), fn(6), fn(7), fn(8)\n')
f.write(' [%6d, %6d, %6d, %6d, %6d, %6d, %6d, %6d, %6d, %6d,],\n')
f.write('\n')
f.write(' *del,fn,,NOPR\n')
f.write(' *endif\n')
f.write(' *enddo\n')
f.write(' *vwrite\n')
f.write(' ],\n')
f.write(' *endif\n')
f.write('*enddo\n')
f.write('*vwrite !finished with the element faces\n')
f.write(' }\n')
f.write('*CFCLOSE\n')
f.close()
cmd = '"' + ansys_exe + '" -b -i ' + inputfile + \
' -o ' + outfile + ' -j ' + 'Job_' + tempstr
ret = subprocess.call(cmd)
if ret == 8: # success return
cfile = self.get_model_file_name()
#check for errors in ansys error file
errfile = 'Job_' + tempstr + '.err'
try:
f = open(errfile, 'r')
except IOError as ioe:
s = 'AnsysWrapperGenerator for ' + self.name + ': opening error file ' + errfile
s += '\n\tPLEASE CHECK GENERATED WRAPPER ' + cfile + '\n\t' + sys.exc_info()[0] + '\n\t' + str(ioe)
print 'ERROR: ' + s
self.logger.error(s)
return cfile
lines = f.readlines()
f.close()
for l in lines:
if l.count('ERROR'):
s = 'AnsysWrapperGenerator for ' + self.name + ': found "ERROR" in ' + errfile
s += '\n\tPLEASE CHECK GENERATED WRAPPER ' + cfile
print 'ERROR: ' + s
self.logger.error(s)
break
else:
s = 'AnsysWrapperGenerator for ' + self.name + ': ANSYS returned ' + str(ret) + ' for command ' + cmd
print 'ERROR: ' + s
self.logger.error(s)
return cfile
except IOError as ioe:
s = 'AnsysWrapperGenerator for ' + self.name + ': trying to create file ' + inputfile + ' in directory ' + path
s += '\n\t' + sys.exc_info()[0] + '\n\t' + str(ioe)
print 'ERROR: ' + s
self.logger.error(s)
self.ok = False
return cfile
def _parse_componentsfile(self, componentsfile):
try:
f = open(componentsfile, 'r')
except IOError as ioe:
s = 'AnsysWrapperGenerator for ' + self.name + ': opening componentsfile file ' + componentsfile
s += '\n\t' + sys.exc_info()[0] + '\n\t' + str(ioe)
print 'ERROR: ' + s
self.logger.error(s)
return False
exec(f)
f.close()
feaModel = FeaModelInPythonFormat()
self.components['nodes'] = {} # empty dictionary
for nodeName in feaModel.nodeMap:
self.components['nodes'][nodeName] = [] # empty list
for n in feaModel.nodeMap[nodeName]:
self.components['nodes'][nodeName].append( n[0] )
self.components['surfaces'] = {} # empty dictionary
for surfaceName in feaModel.facetMap:
self.components['surfaces'][surfaceName] = [] # empty list
for n in feaModel.facetMap[surfaceName]:
self.components['surfaces'][surfaceName].append( [n[0], n[1]] )
self.unitsinfo = ansysinfo.unitsinfodict['0']
if str(feaModel.units) in ansysinfo.unitsinfodict:
self.unitsinfo = ansysinfo.unitsinfodict[ str(feaModel.units) ]
else:
s = 'AnsysWrapperGenerator for ' + self.name + ': unknown units value ' + str(feaModel.units) + ' in components file ' + componentsfile
print 'WARNING: ' + s
self.logger.warning(s)
s = 'Units info: ' + self.unitsinfo.dump()
print s
self.logger.info(s)
return True
    def _parse_prep7File(self):
        """ Read the extra /PREP7 command file, if it exists. This will have extra
        commands that wind up in the PREP7 portion of the wrapped ANSYS input file.
        The commands come from model_db_name.prep7.txt from the same directory as the db file."""
        dbname, dbext = os.path.splitext(self.dbfile)
        # e.g. 'model.db' -> 'model.prep7.txt' alongside the db file
        apdlFile = dbname + '.prep7.txt'
        if os.path.exists( apdlFile ):
            f = open(apdlFile, 'r')
            self.prep7 = f.readlines()  # lines keep their trailing newlines
            f.close()
    def _parse_solutionFile(self):
        """ Read the extra /SOL command file, if it exists. This will have extra
        commands that wind up in the SOL portion of the wrapped ANSYS input file.
        The commands come from model_db_name.solution.txt from the same directory as the db file"""
        dbname, dbext = os.path.splitext(self.dbfile)
        # e.g. 'model.db' -> 'model.solution.txt' alongside the db file
        apdlFile = dbname + '.solution.txt'
        if os.path.exists( apdlFile ):
            f = open(apdlFile, 'r')
            self.solution = f.readlines()  # lines keep their trailing newlines
            f.close()
    def _parse_postFile(self):
        """ Read the extra /POST command file, if it exists. This will have extra
        commands that wind up in the POST portion of the wrapped ANSYS input file.
        The commands come from model_db_name.post.txt from the same directory as the db file"""
        dbname, dbext = os.path.splitext(self.dbfile)
        # e.g. 'model.db' -> 'model.post.txt' alongside the db file
        apdlFile = dbname + '.post.txt'
        if os.path.exists( apdlFile ):
            f = open(apdlFile, 'r')
            self.post = f.readlines()  # lines keep their trailing newlines
            f.close()
    def _writeline(self, line):
        """Write *line* to the generated wrapper file, appending a newline."""
        self.genfile.write(line + '\n')
    def _genheader(self, codepath):
        """Emit imports, sys.path setup and the class statement of the
        generated wrapper file. *codepath* is inserted into sys.path so the
        generated file can import ansyswrapper from the generator's directory."""
        self._writeline('import logging')
        self._writeline('import operator')
        self._writeline('import math')
        self._writeline('from openmdao.main.api import Component')
        self._writeline('from openmdao.lib.datatypes.api import Array, Float, List, Str')
        self._writeline('from openmdao.lib.components.api import ExternalCode')
        self._writeline('from openmdao.util.filewrap import FileParser')
        self._writeline('from openmdao.util.filewrap import InputFileGenerator')
        self._writeline('from openmdao.main.exceptions import RunInterrupted')
        self._writeline('from pyparsing import ParseBaseException')
        self._writeline('import os.path')
        self._writeline('from os import chdir, getcwd')
        self._writeline('import sys')
        # Backslashes are normalized to forward slashes for the emitted path.
        self._writeline('sys.path.insert(0, \'' + codepath.replace('\\', '/') +
                        '\')\n')
        self._writeline('from ansyswrapper.ansyswrapper import ANSYSWrapperBase')
        self._writeline('class ' + self.classname + '(ANSYSWrapperBase):')
        self._writeline(indent1 + triplequote +
                        'A Wrapper for ANSYS Classic Structural ' + self.name + '.' +
                        triplequote)
        self._writeline(indent1 +
                        '#Creates parameters and initializes components.')
        self._writeline(indent1 +
                        '#Base class handles input, execution, and output.')
    def _gendecl(self, k, i, name, units):
        """Emit one Float input declaration for input type *i* on component
        *name* of kind *k*, adding units when the model's units are known."""
        nm = ansysinfo._make_name(name, i)
        self.input_names.add(nm)
        # NOTE(review): the adjacent literals 'Float(0.0,' 'iotype...' join
        # without a space — harmless in the generated code, but intentional?
        s = indent1 + nm + ' = Float(0.0,' 'iotype = "in",\n' + indent2 + 'desc = " ' + \
            i + ' on ' + k + ' component ' + name + '"'
        if self.unitsinfo.ok and units in self.unitsinfo.info:
            s = s + ',\n' + indent2 + 'units = "' + self.unitsinfo.info[units] + '"'
        s = s + ')'
        self._writeline(s)
    def _gendecls(self):
        """Emit every input/output trait declaration of the generated wrapper:
        per-component inputs and outputs, then the global FEA_* inputs."""
        self._writeline(indent1 + '#Assumes 0.0 initial value for inputs')
        for k, v in self.components.iteritems():
            for name, nodes in v.iteritems():
                #TO_CHECK - is there any way to get units?
                # NOTE(review): the inner loops rebind 'v', shadowing the
                # outer dict value — works, but rename would be clearer.
                if k == 'surfaces':
                    for i, v in ansysinfo.surfaceinputtypes.iteritems():
                        self._gendecl(k, i, name, v[1])
                elif k == 'keypoints':
                    for i, v in ansysinfo.keypointinputtypes.iteritems():
                        self._gendecl(k, i, name, v[1])
                elif k == 'nodes':
                    for i, v in ansysinfo.nodeinputtypes.iteritems():
                        self._gendecl(k, i, name, v[1])
                else:
                    # NOTE(review): message uses the component 'name', not
                    # self.name as the other messages in this class do.
                    s = 'AnsysWrapperGenerator for ' + name + ': unknown component type ' + k + ' - IGNORED'
                    print 'WARNING: ' + s
                    self.logger.warning(s)
                # Outputs: an Array per output type plus derived scalars.
                for otype, ounits in ansysinfo.outputtypes.iteritems():
                    n = ansysinfo._make_name(name, otype)
                    self.output_names.add(n)
                    if self.unitsinfo.ok and ounits in self.unitsinfo.info:
                        units_str = ',\n' + indent2 + 'units = "' + self.unitsinfo.info[ounits] + '"'
                    else:
                        units_str = ''
                    self._writeline(indent1 + n +
                        ' = Array(iotype = "out", dtype = "float",\n' + indent2 + 'desc = "' + otype +
                        ' on nodes of ' + name + '"' + units_str + ')')
                    for ctype in ansysinfo.calctypes:
                        cname = n + '_' + ctype
                        self.output_names.add(cname)
                        self._writeline(indent1 + cname + ' = Float(0.0, iotype = "out",\n' +
                            indent2 + 'desc = "' + ctype + ' of ' + otype + ' on nodes of ' + name + '"' + units_str + ')')
        # Global inputs (omega_Z, temp_ref, ...) with recorded initial values.
        for i, v in ansysinfo.globalinputtypes.iteritems():
            iunits = v[1]
            if self.unitsinfo.ok and iunits in self.unitsinfo.info:
                units_str = 'units = "' + self.unitsinfo.info[iunits] + '"'
            else:
                units_str = ''
            global_name = 'FEA_' + i
            self.input_names.add(global_name)
            initial_name = 'initial_' + global_name
            if i in self.initial_values_dictionary:
                initial_value = self.initial_values_dictionary[i]
            else:
                initial_value = 0.0
                # NOTE(review): 'name' here is the last component-loop value.
                s = 'AnsysWrapperGenerator for ' + name + ': ' + i + ' not in initial_values_dictionary; using 0.0 as initial value'
                print 'WARNING: ' + s
                self.logger.warning(s)
            self._writeline(indent1 + global_name + ' = Float(' + str(initial_value) + ', iotype = "in", ' + units_str + ')')
            self._writeline(indent1 + initial_name + ' = ' + str(initial_value))
        #an output for the full name of the python results file
        self._writeline(indent1 + 'Results_File = Str(iotype = "out", desc = "Results file in Python Format")')
    def _geninit(self):
        """Emit __init__ (and a trailing execute() stub) of the generated
        wrapper, seeding self.components with this model's node lists."""
        self._writeline(indent1 +
            'def __init__(self, name, runner, dbfile, elasticity = [100, 100, 100], poisson = [0.33, 0.33, 0.33], logger_name = None):')
        self._writeline(indent2 + triplequote + 'Constructor for the ' +
            self.classname + ' ANSYS OpenMDAO component.' + triplequote)
        self._writeline(indent2 + 'super(' + self.classname +
            ', self).__init__(name = name, runner = runner, dbfile = dbfile, elasticity = elasticity, poisson = poisson, logger_name = logger_name)')
        self._writeline(indent2 + 'self.Results_File = os.path.join(runner.workingdir, self.my_name + ".py")')
        self._writeline(indent2 + 'self.components["global"] = {}')
        self._writeline(indent2 + 'self.components["global"]["FEA"] = []')
        # Reproduce this generator's component map inside the wrapper.
        for k, v in self.components.iteritems():
            self._writeline(indent2 +
                'self.components["' + k + '"] = {} #empty dictionary')
            for name, nodes in v.iteritems():
                self._writeline(indent2 +
                    'self.components["' + k + '"]["' + name + '"] = ' + str(nodes))
        self._writeline(indent2 + 'self.logger.debug("Init: " + self.dump())')
        # execute() simply defers to the base class.
        self._writeline(indent1 + 'def execute(self):')
        self._writeline(indent2 + 'super(' + self.classname + ', self).execute()')
    def _genexecute(self):
        # Intentionally empty: the execute() override is already emitted at
        # the end of _geninit(); this hook remains for future customization.
        pass
    def _genoptions(self):
        """Emit optional prep7()/solution()/post() overrides into the wrapper,
        one method per customization file that was found on disk."""
        if( self.prep7):
            self._writeline('')
            self._writeline(indent1 + 'def prep7(self):' )
            self._writeline(indent2 + triplequote + 'Entry point for derived wrappers to add customization to the /PREP7 section')
            self._writeline(indent2 + 'Derived from the file: model_db_name.prep7.txt from same directory as the db file' + triplequote)
            self._writeline(indent2 + 'options = []')
            for line in self.prep7:
                # each saved APDL line becomes a quoted list element
                self._writeline(indent2 + 'options.append( ' + '"' + line.strip() + '"' + ' )' )
            self._writeline(indent2 + 'return options')
        if( self.solution):
            self._writeline('')
            self._writeline(indent1 + 'def solution(self):' )
            self._writeline(indent2 + triplequote + 'entry point for derived wrappers to add customization to the /SOL section')
            self._writeline(indent2 + 'Derived from the file: model_db_name.solution.txt from same directory as the db file' + triplequote)
            self._writeline(indent2 + 'options = []')
            for line in self.solution:
                self._writeline(indent2 + 'options.append( ' + '"' + line.strip() + '"' + ' )' )
            self._writeline(indent2 + 'return options')
        if( self.post):
            self._writeline('')
            self._writeline(indent1 + 'def post(self):' )
            self._writeline(indent2 + triplequote + 'entry point for derived wrappers to add customization to the /POST section')
            self._writeline(indent2 + 'Derived from the file: model_db_name.post.txt from same directory as the db file' + triplequote)
            self._writeline(indent2 + 'options = []')
            for line in self.post:
                self._writeline(indent2 + 'options.append( ' + '"' + line.strip() + '"' + ' )' )
            self._writeline(indent2 + 'return options')
def generate(self):
"""Generate the wrapper."""
if not self.ok:
s = 'AnsysWrapperGenerator for ' + self.name + ': see previous errors'
print 'ERROR: ' + s
self.logger.error(s)
return
currdir = os.getcwd()
try:
path,gen = os.path.split(self.genfilename)
os.chdir(path)
if not os.path.exists(self.componentsfile): #need to generate the components file
self.componentsfile = self._gen_componentsfile(path)
if not os.path.exists(self.componentsfile): #problem generating it
self.ok = False
s = 'AnsysWrapperGenerator for ' + self.name + ': trying to create componentsfile ' + self.componentsfile
print 'ERROR: ' + s
self.logger.error(s)
return
s = 'AnsysWrapperGenerator for ' + self.name + ': components file: ' + self.componentsfile
print s
self.logger.info(s)
try:
self.genfile = open(self.genfilename, 'w')
#import pdb; pdb.set_trace()
self.ok = self._parse_componentsfile(self.componentsfile)
if not self.ok:
s = 'AnsysWrapperGenerator for ' + self.name + ': no components found in ' + self.componentsfile
print 'ERROR: ' + s
self.logger.error(s)
return
#parse the prep7, sol, and post files
self._parse_prep7File()
self._parse_solutionFile()
self._parse_postFile()
self._genheader(currdir)
self._gendecls()
self._geninit()
self._genexecute()
self._genoptions()
self.genfile.close()
except IOError as ioe:
s = 'AnsysWrapperGenerator for ' + self.name + ': cannot open ' + self.genfilename
s += '\n\t' + sys.exc_info()[0] + '\n\t' + str(ioe)
print 'ERROR: ' + s
self.logger.error(s)
self.ok = False
return
finally:
os.chdir(currdir)
if __name__ == "__main__": # pragma: no cover
    import sys
    # Use this code block to run directly
    # name = 'BeamWUnitsTest'
    # dir = 'C:\\Projects_KRL\\OPenMDAO\\Cases\\MSI_Pump_Case\\Case_4\ANSYS'
    # genfilename = os.path.join(dir, name + '.py')
    # dbfile = os.path.join(dir, 'impeller_coarse.db')
    # wg = WrapperGenerator(name, genfilename, dbfile, ANSYS_VER = 'ANSYS145')
    # wg.generate()
    # Use this code block to run through the GUI
    # Standard PyQt4 bootstrap: create the app, show the dialog, enter the loop.
    app = QApplication(sys.argv)
    form = MainDlg(logger_name = 'MSI')
    form.show()
    app.exec_()
|
import turtle

# Current grid position of the player.
myxloc = 0
myyloc = 0

scr = turtle.Screen()
locations = [[0, 0], [0, 1], [0, 2], [0, 3]]


def moveleft():
    """Move one unit left and report the new position."""
    global myxloc  # FIX: without this, myxloc was an unbound local -> crash
    myxloc = myxloc - 1
    print(myxloc, myyloc)


def moveright():
    """Move one unit right and report the new position."""
    global myxloc  # FIX: same unbound-local bug as moveleft
    myxloc = myxloc + 1
    print(myxloc, myyloc)


scr.listen()
# FIX: handlers are registered once ('d' was wrongly bound to moveleft), and
# the busy while-True re-registration loop is replaced by turtle's event loop.
scr.onkey(moveleft, 'a')
scr.onkey(moveright, 'd')
turtle.mainloop()
|
import os
from shutil import copyfile, rmtree
from abc import ABC, abstractmethod
from ronto import verbose, dryrun, run_cmd
from ronto.model import get_model, get_value, get_value_with_default
def get_init_build_dir():
    """Return the configured build directory (poky's default: 'build')."""
    return get_value_with_default(["build", "build_dir"], "build")
def get_init_script():
    """Return the configured init script (poky's default oe-init-build-env)."""
    return get_value_with_default(
        ["build", "init_script"], "sources/poky/oe-init-build-env"
    )
class SiteConfigHandler:
    """Creates the build tree's conf/site.conf from a project-level file."""

    def __init__(self):
        # Source file path, configurable under build/site/file in the model.
        self.sitefile = get_value_with_default(["build", "site", "file"], "site.conf")
        verbose(f"Use site configuration file: {self.sitefile}")
        self.build_dir = get_init_build_dir()

    def handle(self):
        """Create <build_dir>/conf/site.conf when it is missing.

        If the destination already exists, or the project provides no
        source file, nothing happens.
        """
        conf_dir = os.path.join(os.getcwd(), self.build_dir, "conf")
        target = os.path.join(conf_dir, "site.conf")
        if os.path.isfile(target):
            # site.conf is already in place -- leave it untouched.
            return
        source = os.path.join(os.getcwd(), self.sitefile)
        if not os.path.isfile(source):
            return
        verbose(f"Create site.conf from: {source}")
        if dryrun():
            print(f"copy {source} to {target}")
        else:
            os.makedirs(conf_dir, exist_ok=True)
            copyfile(source, target)
def init_to_source_in() -> str:
    """
    Deliver the commandline that must be sourced as init.
    returns is e.g. "TEMPLATECONF=template/dir source script build_dir"
    """
    script = get_init_script()
    # NOTE(review): called without an explicit default -- presumably
    # returns None when build/template_dir is not configured; confirm.
    template_dir = get_value_with_default(["build", "template_dir"])
    build_dir = get_init_build_dir()
    source_line = ""
    # Idiom fix: compare against None with `is not None`, not `!=`.
    if template_dir is not None:
        source_line += "TEMPLATECONF="
        source_line += os.path.join(os.getcwd(), template_dir) + " "
    source_line += "source " + script + " " + build_dir + "\n"
    return source_line
def run_init():
    """Source the OE init script once to create/refresh the build tree
    (build dir, conf dir, local.conf, bblayers.conf)."""
    command = init_to_source_in()
    # Trim the trailing newline for the log message only.
    verbose(f"Run init: {command[:-1]}")
    run_cmd(["bash", "-c", command])
def clean_init(rebuild_conf=True, clean_conf_dir=False, clean_build_dir=False):
    """Remove generated build configuration and, optionally, whole directories.

    rebuild_conf: delete local.conf and bblayers.conf so init recreates them.
    clean_conf_dir: delete the entire conf directory.
    clean_build_dir: delete the entire build directory.
    """
    build_dir = get_init_build_dir()
    conf_dir = os.path.join(build_dir, "conf")
    if rebuild_conf:
        verbose(f"Remove local.conf and bblayers.conf in {conf_dir}")
        for generated in ("local.conf", "bblayers.conf"):
            try:
                os.remove(os.path.join(os.getcwd(), conf_dir, generated))
            except FileNotFoundError:
                pass  # already gone -- nothing to do
    if clean_conf_dir:
        verbose(f"Remove configuration directory (cleanup) {conf_dir}")
        rmtree(os.path.join(os.getcwd(), conf_dir), ignore_errors=True)
    if clean_build_dir:
        verbose(f"Remove build directory (cleanup) {build_dir}")
        rmtree(os.path.join(os.getcwd(), build_dir), ignore_errors=True)
|
from modules import utility
import shopping.shopping_cart
# import shopping.more_shopping.shopping_cart
from shopping.more_shopping.shopping_cart import buy
import random
import sys
# Sample data used to demonstrate random.shuffle below.
my_list = [1, 2, 3, 4, 5]

# Show the imported module objects and call the two buy() variants.
print(utility)
print(shopping.shopping_cart.buy('apple'))
print(buy('banana'))

# prints a random float in [0.0, 1.0)
print(random.random())
# prints a random integer within the given inclusive bounds
print(random.randint(2, 10))
print(random.randrange(0, 20, 1))
print(random.choice((1, 2, 3, 4, 5)))
random.shuffle(my_list)
print(my_list, '\n')

print(sys, '\n')
|
# -*- encoding:utf-8 -*-
# __author__=='Gan'
# Given a binary tree, find the length of the longest path where each node in the path has the same value.
# This path may or may not pass through the root.
# Note: The length of path between two nodes is represented by the number of edges between them.
# Example 1:
# Input:
# 5
# / \
# 4 5
# / \ \
# 1 1 5
# Output:
# 2
# Example 2:
# Input:
# 1
# / \
# 4 5
# / \ \
# 4 4 5
# Output:
# 2
# Note: The given binary tree has not more than 10000 nodes. The height of the tree is not more than 1000.
# Definition for a binary tree node.
class TreeNode(object):
    """A binary-tree node: a value plus optional left/right children."""

    def __init__(self, x):
        """Store the node's value; both children start out absent."""
        self.val = x
        self.left = None
        self.right = None
# 68 / 68 test cases passed.
# Status: Accepted
# Runtime: 879 ms
# Your runtime beats 15.67 % of python submissions.
class Solution(object):
    def longestUnivaluePath(self, root):
        """
        :type root: TreeNode
        :rtype: int
        """
        self.ans = 0

        def chain(node):
            # Longest same-value chain extending downward from `node`
            # (in edges); also folds the best "bent" path into self.ans.
            if not node:
                return 0
            down_left = chain(node.left)
            down_right = chain(node.right)
            down_left = down_left + 1 if node.left and node.left.val == node.val else 0
            down_right = down_right + 1 if node.right and node.right.val == node.val else 0
            self.ans = max(self.ans, down_left + down_right)
            return max(down_left, down_right)

        chain(root)
        return self.ans
# 68 / 68 test cases passed.
# Status: Accepted
# Runtime: 679 ms
# Your runtime beats 80.47 % of python submissions.
class Solution(object):
    def longestUnivaluePath(self, root):
        """
        :type root: TreeNode
        :rtype: int
        """
        # Bug fix: the previous version recorded a combined (left + right)
        # path only at nodes whose value DIFFERED from their parent's, so a
        # path bending at a node that matches its parent was missed (e.g. a
        # deep all-equal subtree hanging below a matching parent).  Record
        # the combined length at every node instead.
        length_list = []

        def getlongest(node, parent_val):
            # Return the longest same-value chain through `node` measured in
            # edges *including* the edge to the parent; 0 when `node` is
            # absent or its value does not match `parent_val`.
            if node is None:
                return 0
            left = getlongest(node.left, node.val)
            right = getlongest(node.right, node.val)
            # Best path that bends at this node.
            length_list.append(left + right)
            if node.val == parent_val:
                return 1 + max(left, right)
            return 0

        getlongest(root, None)
        return max(length_list) if length_list else 0
if __name__ == '__main__':
    # Smoke test: three equal-valued nodes; the longest univalue path is
    # left -> root -> right, i.e. 2 edges.
    root = TreeNode(2)
    root.left = TreeNode(2)
    root.right = TreeNode(2)
    print(Solution().longestUnivaluePath(root))
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
##
# Example : open/read/close a file with gfal 2.0
#
import sys
import os
import gfal2
# NOTE(review): this example uses Python 2 `print` statements and the
# third-party gfal2 bindings; it will not run under Python 3 as-is.
max_size=10000000 # maximum size to read
## main func
if __name__ == '__main__':
    # comment for usage
    if(len(sys.argv) < 2):
        print "\nUsage\t %s [gfal_url] \n"%(sys.argv[0])
        print " Example: %s lfn:/grid/dteam/myfile "%(sys.argv[0])
        print " %s srm://myserver.com/myhome/myfile \n"%(sys.argv[0])
        os._exit(1)
    # open the file
    f = gfal2.file(sys.argv[1], 'r')
    # read the max_size first bytes.
    content = f.read(max_size)
    print content
    # no close needed, done automatically with the destruction of the file handle
|
# Demonstrate dynamic typing: the same kind of value inspection works for
# an int and for a str; output is <class 'int'> then <class 'str'>.
for sample in (4, "edo okati"):
    print(type(sample))
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter
from scipy.stats import norm
# 0 | . | 0.5 1 0
# 1 | . . | 0.25 0.5 1 2
# 2 |. . . .| 0.125 0.25 3 4 5 6
# 3 7
def show(fig):
    """Save `fig` under plots4/ using its label as the file name.

    The `if False` toggle is kept from the original: flip it to display
    figures interactively instead of writing PNG files.
    """
    if False:
        fig.show()
    else:
        fig.suptitle(fig.get_label())
        slug = fig.get_label().lower().replace(' ', '_')
        fig.savefig("plots4/{}.png".format(slug))
def hier_ix(ix, shift_n1p1=True):
    """Map a linear index onto the hierarchical (level-order) grid.

    Index 0 is the midpoint of level 0; indices 1-2 form level 1, 3-6
    level 2, and so on (see the diagram at the top of the file).  The
    node position is returned in (0, 1), or rescaled to (-1, 1) when
    `shift_n1p1` is true.
    """
    lvl = int(np.log2(ix + 1))
    pos_in_level = ix - (2 ** lvl - 1)
    unit = 0.5 ** (lvl + 1) + pos_in_level * 0.5 ** lvl
    return unit * 2 - 1 if shift_n1p1 else unit
# Quadrature nodes: both endpoints plus 1498 hierarchically refined interior
# points, scaled from (-1, 1) to +/- 3 standard deviations.
hier_x_centered = np.array([-1,1] + [hier_ix(ix,shift_n1p1=True) for ix in range(1500-2)])
s = 1000.0/6  # standard deviation of the assumed Gaussian input
hier_x_centered *= 3*s
# Gaussian pdf evaluated at each quadrature node.
hier_p = 1.0/np.sqrt(2*np.pi*s**2) * np.exp(-(hier_x_centered)**2 / (2 * s**2))
# Column indices into the .dat files (columns 0 and 1 hold id and time).
ix_u0 = 2
ix_v0 = 3
ix_u1 = 4
ix_v1 = 5
ix_u2 = 6
ix_v2 = 7
ix_vel0 = 8
ix_vel1 = 9
ix_vel2 = 10
# Load all Monte Carlo runs; every file must share the same id/time columns.
all_dat = None
N = 2000
for i in range(N):
    if i % 100 == 0: print(i)
    dat = np.loadtxt('mc50/{}.dat'.format(i))
    if all_dat is None:
        all_dat = [dat]
    else:
        assert((dat[:,0] == all_dat[0][:,0]).all())
        assert((dat[:,1] == all_dat[0][:,1]).all())
        all_dat.append(dat)
all_dat = np.array(all_dat)
print("{} measurements x {} timesteps x {} channels".format(*all_dat.shape))
# Ensemble statistics over the measurement axis.
mean = np.mean(all_dat, axis=0)
sigma = np.std(all_dat, axis=0)
last_time_step = all_dat[:,-1,:] # [measurement, ix]
# mean and sigma for increasing N
running_mean = np.cumsum(last_time_step,axis=0) / np.arange(1,1+last_time_step.shape[0])[:,np.newaxis] # [measurements, ix]
running_sigma = np.zeros_like(running_mean) # [measurements, ix]
# NOTE(review): for n < 2 this divides by 0 or -1, so the first running
# sigma entries are 0/NaN -- confirm this is intentional.
for n in range(all_dat.shape[0]):
    diffs = last_time_step[:n,:] - running_mean[n,:]
    running_sigma[n,:] = np.sqrt(np.sum(diffs*diffs, axis=0) / (n - 1))
# NOTE: the assignment below is immediately shadowed by the loop variable.
ix_show = ix_vel0
# One time-series figure per data channel: mean +/- sigma, final-time
# histogram with Gaussian fit, and variance over time.
for ix_show in range(2,11):
    print("last mean: {:>.5e}, last sigma: {:>.5e}".format(mean[-1,ix_show], sigma[-1,ix_show]))
    fig = plt.figure("Monte Carlo over Time for {}".format(ix_show), figsize=(20,12))
    fig.clear()
    ax = fig.add_axes([0.1,0.1,0.65,0.65])
    ax.plot(mean[:,1], mean[:,ix_show], 'r', label=r"mean $\mu(t)$, $\mu(t)\pm\sigma(t)$")
    ax.fill_between(mean[:,1], mean[:,ix_show] + sigma[:,ix_show], mean[:,ix_show] - sigma[:,ix_show], color='r', alpha=.25)
    ax.legend(loc='best')
    ax.set_xlabel("t")
    # Marginal histogram of the final time step, rotated to share the y axis.
    axh = fig.add_axes([0.75+0.02,0.1,0.2,0.65])
    axh.yaxis.set_major_formatter(NullFormatter())
    axh.xaxis.set_major_formatter(NullFormatter())
    # NOTE(review): `normed=` and a float `bins=` only work on old
    # matplotlib; newer versions need `density=True` and an int bin count.
    axh.hist(all_dat[:,-1,ix_show], color='g', bins=np.sqrt(N), alpha=0.5, normed=True, orientation="horizontal", label="histogram for t={:1.1f}s".format(all_dat[0,-1,1]))
    axv = fig.add_axes([0.1,0.75+0.02,0.65,0.17])
    axv.xaxis.set_major_formatter(NullFormatter())
    axv.plot(mean[:,1], sigma[:,ix_show]**2, color='r', label=r"variance $\sigma^2(t)$")
    axv.legend(loc='best')
    # Share y limits between the time series and the histogram panel.
    ylimits = (min(ax.get_ylim()[0], axh.get_ylim()[0]), max(ax.get_ylim()[1], axh.get_ylim()[1]))
    ax.set_ylim(ylimits)
    axh.set_ylim(ylimits)
    x = np.linspace(ylimits[0], ylimits[1], 1000)
    axh.plot(norm.pdf(x, mean[-1,ix_show], sigma[-1,ix_show]), x, 'k', linewidth=2, label=r"fit $\mu={:>.1E}, \sigma={:>.1E}$".format(mean[-1,ix_show], sigma[-1,ix_show]))
    axh.legend(loc='best')
    ax.set_xlim(0,all_dat[0,-1,1]) # tight to last time stamp
    axv.set_xlim(0,all_dat[0,-1,1]) # tight to last time stamp
    show(fig)
# Convergence of MC
# NOTE(review): `basex=`/`basey=` were replaced by `base=` in newer
# matplotlib -- these calls assume an older version.
fig = plt.figure("Monte Carlo Convergence - Mean")
fig.clear()
ax = fig.add_subplot(111)
ax.grid()
ax.loglog(np.abs(running_mean[:,2:] - running_mean[-1,2:]), basex=2, basey=2)
# Reference slope: the theoretical 1/sqrt(N) Monte Carlo rate.
ax.loglog(1/np.sqrt(np.arange(running_mean.shape[0])) / 2**6, basex=2, basey=2)
ax.set_xlabel("N")
show(fig)
fig = plt.figure("Monte Carlo Running Mean")
fig.clear()
ax = fig.add_subplot(111)
ax.grid()
ax.plot(running_mean[:,2:])
ax.set_xlabel("N")
show(fig)
fig = plt.figure("Monte Carlo Convergence - Std")
fig.clear()
ax = fig.add_subplot(111)
ax.grid()
ax.loglog(np.abs(running_sigma[:,2:] - running_sigma[-1,2:]), basex=2, basey=2)
ax.loglog(1/np.sqrt(np.arange(running_mean.shape[0])) / 2**6, basex=2, basey=2)
ax.set_xlabel("N")
show(fig)
fig = plt.figure("Monte Carlo Running Std")
fig.clear()
ax = fig.add_subplot(111)
ax.grid()
ax.plot(running_sigma[:,2:])
ax.set_xlabel("N")
show(fig)
# Convergence of Trapezoidal Rule
# Load the deterministic runs sampled at the hierarchical quadrature nodes.
all_tr_dat = None
N = 1500
for i in range(N):
    if i % 100 == 0: print(i)
    dat = np.loadtxt('tr50/{}.dat'.format(i))
    if all_tr_dat is None:
        all_tr_dat = [dat]
    else:
        assert((dat[:,0] == all_tr_dat[0][:,0]).all())
        assert((dat[:,1] == all_tr_dat[0][:,1]).all())
        all_tr_dat.append(dat)
all_tr_dat = np.array(all_tr_dat) # [measurement, time, ix]
trapezoidal = []
trapezoidal_sigma = []
# Integrand weighted by the Gaussian pdf at each quadrature node.
X_p = hier_p[:,np.newaxis,np.newaxis] * all_tr_dat
#X_p[[0,1]] /= 2
# +/- 3 sigma * (a+b) / 2
# Level 0: plain trapezoid over the two interval endpoints.
trapezoidal.append(2*3*(1000/6) * (X_p[0,:,:] + X_p[1,:,:])/2)
trapezoidal_sigma.append(2*3*(1000/6) * np.sum((all_tr_dat[:2,:,:] - trapezoidal[-1]) ** 2 * hier_p[:2,np.newaxis,np.newaxis], axis=0) / 2)
# Hierarchical refinement: each level adds 2^(level-1) new midpoints and
# reuses half of the previous level's estimate.
start = 2
num = 1
level = 1
while True:
    trapezoidal.append(2*3*(1000/6) / 2**level * np.sum(X_p[start:start+num,:,:], axis=0) + trapezoidal[-1] / 2)
    trapezoidal_sigma.append(2*3*(1000/6) / 2**level * np.sum((all_tr_dat[start:start+num,:,:] - trapezoidal[-1]) ** 2 * hier_p[start:start+num,np.newaxis,np.newaxis], axis=0) + trapezoidal_sigma[-1] / 2)
    trapezoidal_sigma[-1] = trapezoidal_sigma[-1]
    level += 1
    start += num
    num *= 2
    if start+num > X_p.shape[0]: break
print("used first {} data points for trapezoidal rule".format(start))
# Richardson extrapolation, cancelling successive error terms:
# t(h) = I + ch² + ch⁴
# t(h/2) = I + ch²/4 + ch⁴/16
# s(h) = I + ch⁴(1/4 - 1)/(4-1)
# = I + ch⁴(-1/4)
# s(h) = I + c'h⁴
# s(h/2) = I + c'h⁴ / 16 # convergence for last time step:
trapezoidal = np.array(trapezoidal) # good h^2 convergence after the first two steps
simpson = (4*trapezoidal[1:,:] - 1*trapezoidal[:-1,:]) / (4-1) # good h^4 convergence after the first two steps, levels out at 3.7e-9
simpson2 = (16*simpson[1:,:] - 1*simpson[:-1,:]) / (16-1) # good h^6 convergence after the first two steps, quickly levels out at 3.7e-9
simpson3 = (64*simpson[1:,:] - 1*simpson[:-1,:]) / (64-1) # only h^6 convergence after the first two steps, quickly levels out at 3.7e-9
trapezoidal_sigma = np.sqrt(np.array(trapezoidal_sigma)) # variance = sigma^2
# Compare Monte Carlo (red) against the trapezoidal quadrature (blue),
# one figure per channel, using the most refined trapezoidal level.
for ix_show in range(2,11):
    fig = plt.figure("Monte Carlo vs Trapezoidal over Time for {}".format(ix_show), figsize=(20,12))
    fig.clear()
    ax = fig.add_axes([0.1,0.1,0.65,0.65])
    ax.plot(mean[:,1], mean[:,ix_show], 'r', label=r"mean mc $\mu(t)$, $\mu(t)\pm\sigma(t)$")
    ax.fill_between(mean[:,1], mean[:,ix_show] + sigma[:,ix_show], mean[:,ix_show] - sigma[:,ix_show], color='r', alpha=.25)
    ax.plot(mean[:,1], trapezoidal[-1,:,ix_show], 'b', label=r"mean tr $\mu(t)$, $\mu(t)\pm\sigma(t)$") #use most refined trapezoidal
    ax.fill_between(mean[:,1], trapezoidal[-1,:,ix_show] + trapezoidal_sigma[-1,:,ix_show], trapezoidal[-1,:,ix_show] - trapezoidal_sigma[-1,:,ix_show], color='b', alpha=.25)
    ax.legend(loc='best')
    ax.set_xlabel("t")
    axh = fig.add_axes([0.75+0.02,0.1,0.2,0.65])
    axh.yaxis.set_major_formatter(NullFormatter())
    axh.xaxis.set_major_formatter(NullFormatter())
    # NOTE(review): `normed=` / float `bins=` require an old matplotlib.
    axh.hist(all_dat[:,-1,ix_show], color='r', bins=np.sqrt(N), alpha=0.25, normed=True, orientation="horizontal", label="histogram for t={:1.1f}s".format(all_dat[0,-1,1]))
    axv = fig.add_axes([0.1,0.75+0.02,0.65,0.17])
    axv.xaxis.set_major_formatter(NullFormatter())
    axv.plot(mean[:,1], sigma[:,ix_show]**2, color='r', label=r"variance mc $\sigma^2(t)$")
    axv.plot(mean[:,1], trapezoidal_sigma[-1,:,ix_show]**2, color='b', label=r"variance tr $\sigma^2(t)$")
    axv.legend(loc='best')
    ylimits = (min(ax.get_ylim()[0], axh.get_ylim()[0]), max(ax.get_ylim()[1], axh.get_ylim()[1]))
    ax.set_ylim(ylimits)
    axh.set_ylim(ylimits)
    x = np.linspace(ylimits[0], ylimits[1], 1000)
    axh.plot(norm.pdf(x, mean[-1,ix_show], sigma[-1,ix_show]), x, 'r', linewidth=2, label=r"fit $\mu={:>.1E}, \sigma={:>.1E}$".format(mean[-1,ix_show], sigma[-1,ix_show]))
    axh.plot(norm.pdf(x, trapezoidal[-1,-1,ix_show], trapezoidal_sigma[-1,-1,ix_show]), x, 'b', linewidth=2, label=r"fit $\mu={:>.1E}, \sigma={:>.1E}$".format(trapezoidal[-1,-1,ix_show], trapezoidal_sigma[-1,-1,ix_show]))
    axh.legend(loc='best')
    ax.set_xlim(0,all_dat[0,-1,1]) # tight to last time stamp
    axv.set_xlim(0,all_dat[0,-1,1]) # tight to last time stamp
    show(fig)
# Error of each extrapolation order vs the most refined Simpson estimate.
fig = plt.figure("Trapezoidal")
fig.clear()
ax = fig.add_subplot(111)
ax.set_xlabel("levels")
ax.set_ylabel("error")
ax.set_xticks(np.arange(trapezoidal.shape[0]))
ax.grid()
ax.semilogy(np.arange(trapezoidal.shape[0]), np.abs(trapezoidal[:,-1,2:] - simpson3[-1,-1,2:]), 'r-', basey=2)
ax.semilogy(np.arange(simpson.shape[0]), np.abs(simpson[:,-1,2:] - simpson3[-1,-1,2:]), 'b-', basey=2)
ax.semilogy(np.arange(simpson2.shape[0]), np.abs(simpson2[:,-1,2:] - simpson3[-1,-1,2:]), 'g-', basey=2)
ax.semilogy(np.arange(simpson3.shape[0]), np.abs(simpson3[:,-1,2:] - simpson3[-1,-1,2:]), 'orange', basey=2)
ax.legend(loc='best')
show(fig)
# Level-to-level differences for one channel (ix_u1).
fig = plt.figure("Trapezoidal Error Difference")
fig.clear()
ax = fig.add_subplot(111)
ax.set_xlabel("levels")
ax.set_ylabel("error")
ax.set_xticks(np.arange(trapezoidal.shape[0]))
ax.grid()
ax.semilogy(np.arange(trapezoidal.shape[0]-1), np.abs(trapezoidal[1:,-1,ix_u1] - trapezoidal[:-1,-1,ix_u1]), 'r-', basey=2)
ax.semilogy(np.arange(simpson.shape[0]-1), np.abs(simpson[1:,-1,ix_u1] - simpson[:-1,-1,ix_u1]), 'b-', basey=2)
ax.semilogy(np.arange(simpson2.shape[0]-1), np.abs(simpson2[1:,-1,ix_u1] - simpson2[:-1,-1,ix_u1]), 'g-', basey=2)
ax.semilogy(np.arange(simpson3.shape[0]-1), np.abs(simpson3[1:,-1,ix_u1] - simpson3[:-1,-1,ix_u1]), 'orange', basey=2)
ax.legend(loc='best')
show(fig)
|
#!/usr/bin/python
from tkinter import Tk, Canvas, Frame, BOTH
import random
import numpy as np
from time import sleep
import matplotlib.pyplot as plt
import pickle
import sys, getopt
#Allowed moves in the board
moves = ['UP','LEFT','RIGHT','DOWN']
oposite_moves = {'UP': 'DOWN', 'DOWN': 'UP', 'LEFT': 'RIGHT','RIGHT': 'LEFT'}
#Number of iterations for training
iterations = 50000
#Drawing stuff
rectangles = []
rectangle_h = 50
rectangle_w = 50
x1 = None  # canvas item handles for the two strokes of the "X" marker
x2 = None
#World size
grid_size_x = 10
grid_size_y = 3
# World rewards (will be filled when world is created)
rewards = []
#Training stuff
number_of_moves = 0
score = 0
q_learning_scores = []
sarsa_scores = []
end_positions = []  # terminal cells (filled by calculate_end_positions)
log = []  # positions visited during play(), used for the replay feature
start_position = 0
end_flag = False
move_type = []
#HyperParams
#A factor of 0 will make the agent not learn anything, while a factor of 1 would make
#the agent consider only the most recent information.
alpha = 0.1
decay = 0.000001  # per-step decrement applied to eps during training
eps = .9  # exploration rate for epsilon-greedy action selection
#The discount factor determines the importance of future rewards. A factor of 0 makes the agent
#"opportunistic" by only considering current rewards, while a factor approaching 1 will make it
# strive for a long-term high reward.
gamma = 0.7
#Needed for world moves and actions
# Cell indices on the left/right border of each row (row-major layout).
fringe_left_values = []
fringe_right_values = []
for i in range(grid_size_y):
    fringe_left_values.append(grid_size_x * i)
    fringe_right_values.append((grid_size_x * i) + grid_size_x - 1)
def initialize_q_table():
    """Reset the global Q-table: one row per grid cell, one entry per action."""
    global q_table
    table_shape = (grid_size_x * grid_size_y, len(moves), 1)
    q_table = np.zeros(table_shape)
def save_object(obj, filename):
    """Pickle `obj` to `filename`, overwriting any existing file."""
    with open(filename, 'wb') as sink:
        pickle.dump(obj, sink, pickle.HIGHEST_PROTOCOL)
def load_q_table(filename):
    """Unpickle and return a previously saved Q-table."""
    with open(filename, 'rb') as source:
        return pickle.load(source)
def win_condition(x):
    """Return True when the final score equals 10 (the goal cell's reward)."""
    winning_score = 10
    return x == winning_score
def set_eps():
    """Reset the global exploration rate to its initial value of 0.9."""
    global eps
    eps = 0.9
def calculate_end_positions():
    """Fill the global end_positions with cells 1..grid_size_x-1;
    reaching one of them ends an episode."""
    global end_positions
    end_positions.extend(range(1, grid_size_x))
def draw_x(pos):
    """Draw the agent marker ("X") over grid cell `pos`, erasing the old one.

    The board is row-major: row = pos // grid_size_x, col = pos % grid_size_x,
    with one rectangle of margin on the top/left of the canvas.
    """
    global x1, x2
    # Remove the previous marker strokes, if any.
    if x1 is not None:
        canvas.delete(x1)
    if x2 is not None:
        canvas.delete(x2)
    # Simplification: divmod replaces the hard-coded three-band if-chain and
    # keeps working if grid_size_y ever changes.
    row, col = divmod(pos, grid_size_x)
    x = (col + 1) * rectangle_w
    y = (row + 1) * rectangle_h
    x1 = canvas.create_line(x, y, x + rectangle_w, y + rectangle_h, fill='deep sky blue', width=3)
    x2 = canvas.create_line(x + rectangle_w, y, x, y + rectangle_h, fill='deep sky blue', width=3)
def reset_canvas():
    """Remove both strokes of the "X" marker from the canvas."""
    for stroke in (x1, x2):
        canvas.delete(stroke)
def random_move():
    """Pick a uniformly random intended direction and apply movement noise."""
    return actual_move(random.choice(moves))
def actual_move(intended_direction):
    """Apply the world's movement noise to an intended direction.

    Returns the direction actually taken.  Fix: draw a single scalar choice
    (size=None) so callers get a plain string rather than a one-element
    numpy array, which only worked via numpy's implicit truthiness when
    compared with `==`.
    """
    move_list, probabilities = probabilities_for_a_move(intended_direction)
    return np.random.choice(move_list, p=probabilities)
def probabilities_for_a_move(intended_direction):
    """Return parallel lists (directions, probabilities) for the noise model.

    The intended direction occurs with probability 0.8, each perpendicular
    direction 0.1, and the opposite direction never.
    """
    chances = {'UP': 0.1, 'DOWN': 0.1, 'LEFT': 0.1, 'RIGHT': 0.1}
    chances[intended_direction] = 0.8
    chances[oposite_moves[intended_direction]] = 0
    return list(chances.keys()), list(chances.values())
def calculate_new_pos_and_reward(actual_pos, direction):
    """Move one cell in `direction` from `actual_pos`, clamped at the borders.

    Returns (new_position, reward_at_new_position).
    Consistency fix: the vertical stride was the magic number 10; it now
    uses grid_size_x (same value today) so it follows the configured width.
    """
    if (direction == 'UP'):
        if (actual_pos < grid_size_x):
            new_pos = actual_pos  # already on the top row
        else:
            new_pos = actual_pos - grid_size_x
    elif (direction == 'DOWN'):
        # Positions >= 2 * grid_size_x are on the last row.
        if (actual_pos > ((grid_size_x * 2) - 1)):
            new_pos = actual_pos
        else:
            new_pos = actual_pos + grid_size_x
    elif (direction == 'LEFT'):
        if actual_pos in fringe_left_values:
            new_pos = actual_pos  # on the left border
        else:
            new_pos = actual_pos - 1
    else:
        if actual_pos in fringe_right_values:
            new_pos = actual_pos  # on the right border
        else:
            new_pos = actual_pos + 1
    return new_pos, rewards[new_pos]
def play_mode_move(actual_pos):
    """Take one greedy (exploitation-only) step from `actual_pos`.

    Returns (new_position, reward, direction); sets the global end_flag
    when a terminal cell is reached.
    """
    global end_flag, number_of_moves
    greedy_action = moves[np.argmax(q_table[actual_pos])]
    direction = actual_move(greedy_action)
    new_pos, reward = calculate_new_pos_and_reward(actual_pos, direction)
    number_of_moves += 1
    if new_pos in end_positions:
        end_flag = True
    return new_pos, reward, direction
def move(actual_pos):
    """Take one epsilon-greedy training step from `actual_pos`.

    With probability eps a random direction is intended; otherwise the
    Q-table's best action is.  Movement noise applies in both cases.
    Returns (new_position, reward, direction) and flags episode end.
    """
    global end_flag, number_of_moves
    explore = np.random.uniform(0, 1) < eps
    if explore:
        direction = random_move()
    else:
        direction = actual_move(moves[np.argmax(q_table[actual_pos])])
    new_pos, reward = calculate_new_pos_and_reward(actual_pos, direction)
    number_of_moves += 1
    if new_pos in end_positions:
        end_flag = True
    return new_pos, reward, direction
def e_greedy(pos):
    """Epsilon-greedy value estimate for state `pos` (SARSA bootstrap):
    a random action's value with probability eps, the best value otherwise."""
    roll = np.random.uniform(0, 1)
    if roll < eps:
        return random.choice(q_table[pos])
    return np.max(q_table[pos])
def q_learning():
    """Train the global q_table with off-policy Q-learning for `iterations` episodes,
    drawing the agent on the Tk canvas and recording episode scores."""
    print ('using Q-Learning')
    initialize_q_table()
    for iter in range(iterations):
        # NOTE: `global` inside the loop is legal; it applies to the whole
        # function scope.
        global score, end_flag, number_of_moves,decay,eps, gamma, q_table
        print("iteration #:", iter)
        #print("eps: ", eps)
        score = 0
        end_flag = False
        number_of_moves = 0
        actual_position = start_position
        while (end_flag == False):
            draw_x(actual_position)
            root.update()
            #print("number_of_moves",number_of_moves)
            new_position, reward, action = move(actual_position)
            #update q_table
            # Bellman update: blend old estimate with reward + discounted
            # max over the next state's action values (off-policy).
            q_table[actual_position,moves.index(action)] = (1 - alpha) * q_table[actual_position,moves.index(action)] + (alpha * (reward + (gamma * np.max(q_table[new_position]))))
            score = score + reward
            actual_position = new_position
            eps -= decay  # anneal exploration over time
            #sleep(0.01)
        q_learning_scores.append(score)
        draw_x(actual_position)
        root.update()
        #sleep(3)
def sarsa():
    """Train the global q_table with SARSA for `iterations` episodes.

    Unlike q_learning(), the bootstrap value comes from e_greedy() -- an
    epsilon-greedy sample rather than the max -- making this on-policy.
    """
    global score, end_flag, number_of_moves, decay, eps, gamma, q_table
    initialize_q_table()
    set_eps()  # restore eps in case q_learning() already decayed it
    print ('using SARSA')
    for iter in range(iterations):
        print("iteration #:", iter)
        score = 0
        end_flag = False
        number_of_moves = 0
        actual_position = start_position
        while (end_flag == False):
            draw_x(actual_position)
            root.update()
            #print("iteration #:", iter)
            #print("actual_position: ", actual_position)
            #print("score:", score)
            #print("number_of_moves",number_of_moves)
            #print("eps: ", eps)
            new_position, reward, action = move(actual_position)
            #update q_table
            q_next = e_greedy(new_position)
            q_table[actual_position,moves.index(action)] = (1 - alpha) * q_table[actual_position,moves.index(action)] + (alpha * (reward + (gamma * q_next)))
            score = score + reward
            actual_position = new_position
            eps -= decay  # anneal exploration over time
            #sleep(0.01)
        draw_x(actual_position)
        root.update()
        sarsa_scores.append(score)
def play():
    """Run one greedy episode with the current q_table, animating each step.

    Visited positions are appended to the global `log` for the replay
    feature.  A winning episode (score == 10) offers a replay; otherwise
    the user is offered a new game.
    """
    global q_table, number_of_moves, end_flag, log
    score = 0
    end_flag = False
    number_of_moves = 0
    actual_position = start_position
    log.append(actual_position)
    while (end_flag == False):
        draw_x(actual_position)
        root.update()
        new_position, reward, direction = play_mode_move(actual_position)
        actual_position = new_position
        log.append(actual_position)
        score = score + reward
        sleep(0.5)  # slow the animation down to a watchable pace
    draw_x(actual_position)
    root.update()
    print("# of Moves: ", number_of_moves)
    print("score: ", score)
    if score == 10:
        ask_for_replay()
    else:
        ask_for_new_play()
def ask_for_replay():
    """Offer to replay the recorded winning episode; recurse until declined."""
    global log
    respose = input("Do you want to see a replay? [Y/n]")
    if respose not in ('', 'y', 'Y'):
        print ("Exiting...")
        return
    # Animate every recorded position at half-second intervals.
    for actual_position in log:
        draw_x(actual_position)
        root.update()
        sleep(0.5)
    ask_for_replay()
def ask_for_new_play():
    """Offer another episode with the loaded Q-table; default answer is yes."""
    respose = input("Do you want to play again? [Y/n]")
    if respose in ('', 'y', 'Y'):
        play()
    else:
        print ("Exiting...")
def build_env():
    """Create the Tk window and canvas, draw the grid, and fill `rewards`.

    Top row: start cell (reward 0), goal cell at the far right (+10,
    green), penalty cells in between (-10, red); all other rows are 0.
    """
    global root, canvas
    calculate_end_positions()
    # Create the application's main window.
    root = Tk()
    # Set the window dimensions; it will be placed near the center of the
    # screen.  If this line is omitted, the window adapts to the widgets
    # placed inside it.
    root.geometry("800x250+300+300")
    root.title("UNTREF - IA - TP 4")
    canvas = Canvas(root)
    # Walk the grid row-major; reward order must match cell index order.
    for y in range(1, grid_size_y + 1):
        for x in range(1, grid_size_x + 1):
            init_x = x * rectangle_w
            init_y = y * rectangle_h
            #print("pos inicio x:", init_x)
            #print("pos inicio y:",init_y)
            #print("pos fin x:",init_x + rectangle_w)
            #print("pos fin y:",init_y + rectangle_h)
            color = 'gray95'
            if y == 1:
                if x == 1:
                    rewards.append(0)
                elif x == grid_size_x:
                    rewards.append(10)
                    color = 'pale green'
                else:
                    rewards.append(-10)
                    color = 'firebrick1'
            else:
                rewards.append(0)
            rectangle = canvas.create_rectangle(init_x,init_y,init_x + rectangle_w,init_y + rectangle_h, fill=color)
            rectangles.append(rectangle)
    #for x in rewards:
        #print(x)
    canvas.pack(fill=BOTH, expand=1)
def main(options):
    """Dispatch on the command-line option.

    Options: 'q' (train Q-learning), 'sarsa' (train SARSA), 'both' (train
    and compare), 'play_q'/'play_s' (play with a saved table); anything
    else prints usage.
    """
    global q_table
    if len(options) == 2:
        selected_option = options[1]
    else:
        selected_option = ''
    print("Selected Option: ", selected_option)
    #np settings
    np.set_printoptions(precision=3)
    np.set_printoptions(suppress=True)
    build_env()
    if (selected_option == 'q'):
        q_learning()
        print(q_table)
        save_object(q_table, 'q_table_q_learning.pkl')
        plt.plot(q_learning_scores,'ro')
        plt.ylabel('q_learning scores')
        plt.xlabel('round')
        plt.show()
    elif (selected_option == 'sarsa'):
        sarsa()
        print(q_table)
        save_object(q_table, 'q_table_sarsa.pkl')
        plt.plot(sarsa_scores,'ro')
        plt.ylabel('sarsa scores')
        plt.xlabel('round')
        plt.show()
    elif (selected_option == 'both'):
        #Runs the 2 learning algorithms
        q_learning()
        print(q_table)
        save_object(q_table, 'q_table_q_learning.pkl')
        sarsa()
        print(q_table)
        save_object(q_table, 'q_table_sarsa.pkl')
        # Plot both score curves stacked for comparison.
        plt.subplot(2, 1, 1)
        plt.plot(q_learning_scores,'ro')
        plt.ylabel('q_learning scores')
        plt.xlabel('round')
        plt.subplot(2, 1, 2)
        plt.plot(sarsa_scores,'ro')
        plt.ylabel('sarsa scores')
        plt.xlabel('round')
        plt.show()
        # A "win" is an episode that finished with the goal reward (10).
        q_learning_win_sum = sum(1 for i in q_learning_scores if win_condition(i))
        sarsa_win_sum = sum(1 for i in sarsa_scores if win_condition(i))
        print("q-learning win rate : ", (q_learning_win_sum * 100)/iterations)
        print("sarsa win rate : ", (sarsa_win_sum * 100)/iterations)
    elif ( selected_option == 'play_q'):
        q_table = load_q_table('q_table_q_learning.pkl')
        play()
        sleep(5)
    elif ( selected_option == 'play_s'):
        q_table = load_q_table('q_table_sarsa.pkl')
        play()
        sleep(5)
    else:
        print("ussage: reinforcement_learning.py <option>")
        print("options are:")
        print("q : Learns using Q-Learning")
        print("sarsa: Learns using SARSA")
        print("both: Runs both Learning algorithms")
        print("play_q: Plays using Q-Learning q_table")
        print("play_s: Plays using SARSA q_table")
# Script entry point: forward the raw argv to the option dispatcher.
if __name__ == "__main__":
    main(sys.argv)
#!/usr/bin/env python
# encoding: utf-8
# @Time : 2019/5/7 15:29
# @Author : lxx
# @File : yuyiCorrector.py
# @Software: PyCharm
import kenlm
import jieba
# Build (reference, candidate) sentence pairs from the training file.
# NOTE(review): columns 0-1 of each tab-separated line are skipped --
# presumably ids/metadata; confirm against the data format.
all_train=[]
for line in open("data/data.train",encoding="utf-8"):
    line=line.strip()
    line=line.split("\t")
    sentens=line[2:]
    if len(sentens) >=2:
        # Pair the first sentence with each of the alternatives.
        for i in range(1,len(sentens)):
            tmp = []
            tmp.append(sentens[0])
            tmp.append(sentens[i])
            all_train.append(tmp)
    else:
        # No alternative available: pair the sentence with itself.
        all_train.append([sentens[0],sentens[0]])
print(len(all_train))
# Character-level KenLM language model trained on the People's Daily corpus
# (per the file name); used to score sentence perplexity.
lm = kenlm.Model("kenlm-model/people_chars_lm.klm")
err_count=0
# Count pairs where the alternative scores worse (higher perplexity after
# jieba word segmentation) than the reference sentence.
for pair_sen in all_train:
    sen_seg0=jieba.cut(pair_sen[0])
    sen_seg1=jieba.cut(pair_sen[1])
    ppl_score0=lm.perplexity(' '.join(sen_seg0))
    ppl_score1=lm.perplexity(' '.join(sen_seg1))
    if ppl_score1 > ppl_score0:
        err_count+=1
print(err_count)
print(len(all_train)-err_count)
print("222222222222222222222222222222222222222222")
print("yuyi")
print("github")
|
import botostubs
import boto3
import json
# Recognize celebrities in a local photo via AWS Rekognition and print
# each match's name, id, bounding-box position, and info URLs.
boto_session = boto3.Session(profile_name='personal')

photo = 'assets/group_selfie.jpg'
rekognition: botostubs.Rekognition = boto_session.client('rekognition',region_name='ap-south-1')

with open(photo, 'rb') as image:
    response = rekognition.recognize_celebrities(
        Image={'Bytes': image.read()})

print('Detected faces for ' + photo)
for celebrity in response['CelebrityFaces']:
    print('Name: ' + celebrity['Name'])
    print('Id: ' + celebrity['Id'])
    print('Position:')
    # Bug fix: this line previously printed BoundingBox['Height'] while
    # labelling it "Left".
    print(' Left: ' +
          '{:.2f}'.format(celebrity['Face']['BoundingBox']['Left']))
    print(' Top: ' +
          '{:.2f}'.format(celebrity['Face']['BoundingBox']['Top']))
    print('Info')
    for url in celebrity['Urls']:
        print(' ' + url)
    # Bug fix: a bare `print` is a no-op expression in Python 3; call it
    # to emit the intended blank line between celebrities.
    print()
|
import json
def read_json_file(file_to_read) -> object:
    """Read a JSON file and return the parsed data.

    :param file_to_read: path of the JSON file to load
    :return: the deserialized JSON content (dict, list, ...)
    :raises json.JSONDecodeError: if the file is not valid JSON
    """
    # `with` guarantees the handle is closed even if parsing raises,
    # unlike the previous open/close pair.
    with open(file_to_read, "r") as json_file:
        return json.load(json_file)
|
import requests
import json
# All Cryptonator requests present the same desktop-browser user agent.
_HEADERS = {"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36"}


def _ticker_price(pair):
    """Fetch the current price for a Cryptonator ticker pair (e.g. 'btc-usd').

    Returns the price as a float.  Factored out of the previously
    copy-pasted per-coin functions; the public interface is unchanged.
    """
    get_ = requests.get("https://api.cryptonator.com/api/ticker/" + pair,
                        headers=_HEADERS)
    jsonData = json.loads(get_.text)
    return float(jsonData["ticker"]["price"])


def monero():
    """USD price of Monero."""
    return _ticker_price("xmr-usd")


def xmr():
    """Alias for monero()."""
    return monero()


def bitcoin():
    """USD price of Bitcoin."""
    return _ticker_price("btc-usd")


def btc():
    """Alias for bitcoin()."""
    return bitcoin()


def ethereum():
    """USD price of Ethereum."""
    return _ticker_price("eth-usd")


def eth():
    """Alias for ethereum()."""
    return ethereum()


def litecoin():
    """USD price of Litecoin."""
    return _ticker_price("ltc-usd")


def ltc():
    """Alias for litecoin()."""
    return litecoin()


def dogecoin():
    """USD price of Dogecoin."""
    return _ticker_price("doge-usd")


def doge():
    """Alias for dogecoin()."""
    return dogecoin()


def dash():
    """USD price of Dash."""
    return _ticker_price("dash-usd")


def ripple():
    """USD price of Ripple."""
    return _ticker_price("xrp-usd")


def xrp():
    """Alias for ripple()."""
    return ripple()


def tether():
    """USD price of Tether."""
    return _ticker_price("usdt-usd")


def usdt():
    """Alias for tether()."""
    return tether()


def bittorrent():
    """USD price of BitTorrent."""
    return _ticker_price("btt-usd")


def btt():
    """Alias for bittorrent()."""
    return bittorrent()


def trueusd():
    """USD price of TrueUSD."""
    return _ticker_price("tusd-usd")


def tusd():
    """Alias for trueusd()."""
    return trueusd()
import os
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset)
from Utils.data_class import InputExample, InputFeatures
def read_examples(filename):
    """Parse a TSV file (guid\ttext_a\ttext_b\tlabel) into InputExamples.

    When the first column is not purely numeric, the zero-based line index
    is used as the guid instead.
    """
    with open(filename, 'r') as f:
        rows = [raw.strip().split('\t') for raw in f.readlines()]
    examples = []
    for row_index, row in enumerate(rows):
        example_id = int(row[0]) if row[0].isdigit() else row_index
        examples.append(InputExample(example_id, row[1], row[2], row[3]))
    return examples
def load_data(args, tokenizer, data_type):
    """Load a TSV split and convert it into a TensorDataset.

    `data_type` must be 'train', 'dev', or 'test'; anything else raises
    a RuntimeError.  The matching batch size is read from `args`.
    """
    if data_type == "train":
        batch_size, tsv_name = args.train_batch_size, 'train.tsv'
    elif data_type == "dev":
        batch_size, tsv_name = args.dev_batch_size, 'dev.tsv'
    elif data_type == "test":
        batch_size, tsv_name = args.test_batch_size, 'test.tsv'
    else:
        raise RuntimeError("should be train or dev or test")
    filename = os.path.join(args.data_dir, tsv_name)
    examples = read_examples(filename)
    features = convert_examples_to_features(examples, args.max_length, tokenizer)
    return convert_features_to_tensors(features, batch_size)
def convert_examples_to_features(examples, max_length, tokenizer):
    """Tokenize example pairs and pad/truncate them into InputFeatures.

    Args:
        examples: list of InputExample with text_a, text_b and a label
            in {'0', '1'}.
        max_length: fixed sequence length every feature is padded (or
            truncated) to.
        tokenizer: HuggingFace-style tokenizer exposing ``encode_plus``.

    Returns:
        list of InputFeatures whose id/mask/type lists all have length
        ``max_length``.
    """
    label_list = ['0', '1']
    label_map = {label: i for i, label in enumerate(label_list)}
    features = []
    for (ex_index, example) in enumerate(examples):
        if ex_index % 10000 == 0:
            print("Writing example {} of {}".format(ex_index, len(examples)))
        inputs = tokenizer.encode_plus(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            # BUGFIX: without truncation=True, pairs longer than max_length
            # are NOT cut down, padding_length below goes negative (so no
            # padding is applied) and the length asserts crash the run.
            truncation=True,
        )
        input_ids, token_type_ids = inputs["input_ids"], inputs["token_type_ids"]
        attention_mask = [1] * len(input_ids)
        # Right-pad everything to max_length. Pad id 0 is assumed to be the
        # tokenizer's pad token — TODO confirm for non-BERT tokenizers.
        padding_length = max_length - len(input_ids)
        input_ids = input_ids + ([0] * padding_length)
        attention_mask = attention_mask + ([0] * padding_length)
        token_type_ids = token_type_ids + ([0] * padding_length)
        assert len(input_ids) == max_length, "Error with input length {} vs {}".format(
            len(input_ids), max_length)
        assert len(attention_mask) == max_length, "Error with input length {} vs {}".format(
            len(attention_mask), max_length
        )
        assert len(token_type_ids) == max_length, "Error with input length {} vs {}".format(
            len(token_type_ids), max_length
        )
        label_id = label_map[example.label]
        idx = int(example.guid)
        features.append(
            InputFeatures(
                idx=idx, input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, label=label_id
            )
        )
    return features
def convert_features_to_tensors(features, batch_size):
    """Stack per-example feature lists into one TensorDataset.

    Columns, in order: idx, input_ids, attention_mask, token_type_ids, label.

    NOTE(review): ``batch_size`` is accepted but never used here — batching
    happens later in the dataloader helpers. Kept for interface stability.
    """
    def as_long_tensor(column):
        # Every field is integral, so a single long-dtype constructor suffices.
        return torch.tensor(column, dtype=torch.long)

    return TensorDataset(
        as_long_tensor([f.idx for f in features]),
        as_long_tensor([f.input_ids for f in features]),
        as_long_tensor([f.attention_mask for f in features]),
        as_long_tensor([f.token_type_ids for f in features]),
        as_long_tensor([f.label for f in features]),
    )
def random_dataloader(dataset, batch_size):
    """Wrap *dataset* in a DataLoader that reshuffles on every epoch."""
    return DataLoader(dataset,
                      sampler=RandomSampler(dataset),
                      batch_size=batch_size)
def sequential_dataloader(dataset, batch_size):
    """Wrap *dataset* in a DataLoader that yields items in original order."""
    return DataLoader(dataset,
                      sampler=SequentialSampler(dataset),
                      batch_size=batch_size)
|
from fastapi_scaffolding.core import messages
def test_auth_using_prediction_api_no_apikey_header(test_client) -> None:
    """A predict request with no API-key header is rejected with HTTP 400."""
    response = test_client.post('/api/model/predict')
    assert response.status_code == 400
    assert response.json() == {"detail": messages.NO_API_KEY}
def test_auth_using_prediction_api_wrong_apikey_header(test_client) -> None:
    """A predict request with an invalid token header is rejected with HTTP 401."""
    response = test_client.post(
        '/api/model/predict',
        json={"image": "test"},
        headers={"token": "WRONG_TOKEN"}
    )
    assert response.status_code == 401
    assert response.json() == {"detail": messages.AUTH_REQ}
# Grade classifier: read an integer score (0-100) and print pass/fail
# messages; scores of 80 and above get an extra congratulation.
score = int(input("0点から100点までの得点を入力してください:"))
if 0 <= score < 60:
    print("不合格です")
elif 60 <= score <= 100:
    print("合格です")
    if score >= 80:
        print("素晴らしい成績ですね")
else:
    # Anything negative or above 100 is out of range.
    print("範囲外の得点です")
"""
実験結果1(2)
入力 score : メッセージ
-1,101 : 範囲外の得点です
0,1,59 : 不合格です
60,61,79 : 合格です
80,81,100 : 合格です 素晴らしい成績ですね
"""
# |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.