# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'exclude_with_action',
'type': 'none',
'msvs_cygwin_shell': 0,
'actions': [{
'action_name': 'copy_action',
'inputs': [
'copy-file.py',
'bad.idl',
],
'outputs': [
'<(INTERMEDIATE_DIR)/bad.idl',
],
'action': [
'python', '<@(_inputs)', '<@(_outputs)',
],
}],
},
{
'target_name': 'exclude_with_rule',
'type': 'none',
'msvs_cygwin_shell': 0,
'sources': [
'bad.idl',
],
'rules': [{
'rule_name': 'copy_rule',
'extension': 'idl',
'inputs': [
'copy-file.py',
],
'outputs': [
'<(INTERMEDIATE_DIR)/<(RULE_INPUT_ROOT).idl',
],
'action': [
'python', '<@(_inputs)', '<(RULE_INPUT_PATH)', '<@(_outputs)',
],
}],
},
{
'target_name': 'program',
'type': 'executable',
'sources': [
'program.cc',
],
'dependencies': [
'exclude_with_action',
'exclude_with_rule',
],
},
],
}
|
# Written by Michael Disyak - Feb 2017
# This class represents a general value function (GVF). It receives updates from
# the relevant incoming data streams and learns using the TD(lambda) and
# GTD(lambda) algorithms. It is designed to be used in conjunction with the
# Plotter class and a control file that implements a behaviour policy.
import numpy as np
import time
import math
class GenValFunc:
prediction = 0.0
cumulant = 0.0
delta = 0.0
predictions = []
postPrediction = 0.0
postReturn = 0.0
timeDiff = 0
gammaNext = 0
gammaCurrent = 0
lamb = 0
row = 0
action = 0
numberOfLearningSteps = 0
averageError = 0.0
offPol = False
targetPol = None
rowFunc = None
rupee = 0.0
ude = 0.0
alpha = 0.1
betaNotRupee = (1 - lamb) * (alpha/30)
def __init__(self, numTilings, numTilesTotal, alpha = 0.1, gamma = 0.9, lamb = 0.9, offPol = False, targetPolicy = None, beta = 0.01, rowFunc = None):
self.offPol = offPol
self.gammaCurrent = gamma
self.lamb = lamb
self.numTilings = numTilings
self.numTilesTotal = numTilesTotal
self.currentState = np.zeros(self.numTilesTotal)
self.nextState = np.zeros(self.numTilesTotal)
self.weightVect = np.zeros(self.numTilesTotal)
self.hWeightVect = np.zeros(self.numTilesTotal)
self.alpha = alpha
self.beta = beta
        self.postTimeSteps = int(round(1.0/(1.0-self.gammaCurrent)))*5  # horizon used by verifier()
        self.predictions = []  # per-instance list; the class-level default would be shared
self.recordedCumulant = np.array([])
self.recordedPrediction = np.array([])
self.recordedGammas = np.array([])
self.recordedError = np.array([])
self.recordedUDE = np.array([])
self.recordedRupee = np.array([])
self.eligTrace = np.zeros(self.numTilesTotal)
self.targetPol = targetPolicy
self.rowFunc = rowFunc
#Vars for RUPEE
self.alphaRupee = self.alpha*5
self.hRupee = np.zeros(self.numTilesTotal)
self.deltaRupee = np.zeros(self.numTilesTotal)
self.taoRupee = 0.0
self.betaNotRupee = (1 - self.lamb) * (self.alpha/30)
self.betaRupee = 0.0
#Vars for UDE
self.deltaUDE = 0.0
self.taoUDE = 0.0
self.betaNotUDE = self.alpha * 10
self.betaUDE = 0.0
self.varUDE = 0.0
self.nUDE = 0
self.deltaMean = 0.0
self.oldDeltaMean = 0.0
self.deltaM2 = 0.0
self.epsilonUDE = 0.0001
#Updates all values that can change after an action occurs
def update(self, nextState, currentState, cumulant, lamb, gamma, action):
self.nextState = nextState
self.currentState = currentState
self.cumulant = cumulant
self.lamb = lamb
self.gammaNext = gamma
self.action = action
if self.offPol:
self.calcRow()
#Initiates the learning step for off and on-policy gvfs
def learn(self):
        if self.offPol: # off-policy GVFs use GTD(lambda); on-policy GVFs use TD(lambda)
self.learnGTD()
else:
self.learnOnPol()
        if self.pavlovianControl is not None:
self.pavlovianControl()
self.calcRupee()
self.calcUDE()
#Performs the learning step for on-policy GVFs using TD(lambda)
def learnOnPol(self): #args, stoppingEvent):
startTime = time.time()
#TD ERROR BELOW
self.currentStateValue = np.dot(self.weightVect, self.currentState)
self.nextStateValue = np.dot(self.weightVect, self.nextState)
self.delta = self.cumulant + ((self.gammaNext * self.nextStateValue) - self.currentStateValue)
self.eligTrace = (self.lamb * self.gammaCurrent * self.eligTrace) + self.currentState
self.weightVect = self.weightVect + (self.alpha * self.delta * self.eligTrace)
self.prediction = self.currentStateValue
self.predictions.append(self.prediction)
self.numberOfLearningSteps += 1
self.gammaCurrent = self.gammaNext
self.timeDiff = round(time.time()-startTime,6)
#Performs the learning step for off-policy GVFs using GTD(lambda)
def learnGTD(self):
startTime = time.time()
alphaGTD = self.alpha #* (1-self.lamb)
#TD ERROR BELOW
self.currentStateValue = np.dot(self.weightVect, self.currentState)
self.nextStateValue = np.dot(self.weightVect, self.nextState)
self.delta = self.cumulant + ((self.gammaNext * self.nextStateValue) - self.currentStateValue)
#End TD Error
self.eligTrace = self.row * (self.currentState + (self.lamb * self.gammaCurrent * self.eligTrace))
self.weightVect += alphaGTD * ((self.delta * self.eligTrace) - ((self.gammaNext * (1-self.lamb)) * np.dot(self.eligTrace, self.hWeightVect) * self.nextState))
self.hWeightVect += self.beta * ((self.delta * self.eligTrace) - (np.dot(self.hWeightVect, self.currentState) * self.currentState))
self.prediction = self.currentStateValue
self.predictions.append(self.prediction)
self.numberOfLearningSteps += 1
self.gammaCurrent = self.gammaNext
self.timeDiff = round(time.time()-startTime,6)
    #TODO: should be overwritten dynamically upon creation
#Determines if the action taken matches the target policy of the off-policy gvf for the purpose of importance sampling
def calcRow(self):
if self.action == self.targetPol:
self.row = 1
else:
self.row = 0
#self.rowFunction(self, self.targetPol, self.action)
#Calculates an approximation of the true return post-hoc
def verifier(self):
self.recordedPrediction = np.append(self.recordedPrediction, [self.prediction])
self.predictions.append(self.prediction)
self.recordedCumulant = np.append(self.recordedCumulant, [self.cumulant])
self.recordedGammas = np.append(self.recordedGammas, [self.gammaCurrent])
if np.size(self.recordedCumulant) == self.postTimeSteps + 1:
currentPostPrediction = self.recordedPrediction[0]
returnTotal = 0
gammaTotal = 1
self.recordedGammas[0] = 1
for i in range(0,np.size(self.recordedCumulant)-1): #0 to length of your recorded cumulant
currentCumulant = self.recordedCumulant[i]
gammaTotal = gammaTotal * self.recordedGammas[i]
returnTotal = returnTotal + (gammaTotal * currentCumulant)
self.postReturn = returnTotal
self.postPrediction = currentPostPrediction
self.recordedError = np.append(self.recordedError, returnTotal - currentPostPrediction)
if np.size(self.recordedError) == self.postTimeSteps+1:
self.recordedError = np.delete(self.recordedError, 0)
self.averageError = np.sum(self.recordedError)/self.postTimeSteps
self.recordedCumulant = np.delete(self.recordedCumulant, 0)
self.recordedPrediction = np.delete(self.recordedPrediction, 0)
self.recordedGammas = np.delete(self.recordedGammas, 0)
    # Calculates RUPEE for the GVF
def calcRupee(self):
self.hRupee = self.hRupee + (self.alphaRupee*((self.delta * self.eligTrace) - (np.dot(np.transpose(self.hRupee),self.currentState) * self.currentState)))
self.taoRupee = ((1 - self.betaNotRupee) * self.taoRupee) + self.betaNotRupee
self.betaRupee = self.betaNotRupee/self.taoRupee
self.deltaRupee = ((1-self.betaRupee)*self.deltaRupee) + (self.betaRupee * self.delta * self.eligTrace)
self.rupee = math.sqrt(abs(np.dot(np.transpose(self.hRupee), self.deltaRupee)))
self.recordedRupee = np.append(self.recordedRupee, self.rupee)
#Calculates Unexpected Demon Error for the GVF
def calcUDE(self):
self.taoUDE = ((1.0 - self.betaNotUDE) * self.taoUDE) + self.betaNotUDE
self.betaUDE = self.betaNotUDE / self.taoUDE
self.deltaUDE = ((1.0 - self.betaUDE) * self.deltaUDE) + (self.betaUDE * self.delta)
self.calcVariance()
self.ude = abs(round(self.deltaUDE,4)/(math.sqrt(round(self.varUDE,4)) + self.epsilonUDE))
#This method was taken from the Online Algorithm section of "Algorithms for calculating variance" on Wikipedia, Feb 22, 2017
#https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online_algorithm
#Function calculates variance in an online and incremental way
def calcVariance(self):
self.nUDE += 1
self.oldDeltaMean = self.deltaMean
self.deltaMean = (1.0 - self.betaUDE) * self.deltaMean + self.betaUDE * self.delta
self.varUDE = ((self.nUDE - 1) * self.varUDE + (self.delta - self.oldDeltaMean) * (self.delta - self.deltaMean))/self.nUDE
def pavlovianControl(self):
test = True
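# A minimal, hypothetical on-policy usage sketch: the one-hot vectors below are
# stand-ins for the tile-coded features this class expects, and the Plotter
# class / behaviour-policy control file mentioned in the header are assumed to
# live elsewhere.
if __name__ == '__main__':
    numTilesTotal = 32
    gvf = GenValFunc(numTilings=8, numTilesTotal=numTilesTotal, alpha=0.1/8,
                     gamma=0.9, lamb=0.9)
    state = np.zeros(numTilesTotal)
    state[0] = 1.0
    for step in range(100):
        nextState = np.zeros(numTilesTotal)
        nextState[(step + 1) % numTilesTotal] = 1.0
        # a constant cumulant of 1.0 makes the GVF predict discounted time-to-horizon
        gvf.update(nextState, state, cumulant=1.0, lamb=0.9, gamma=0.9, action=0)
        gvf.learn()
        state = nextState
    print('prediction after 100 steps:', gvf.prediction)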
|
"""
Simulate N(t) = ceiling(lambda * t), with lambda normally distributed.
In "Learning to time: a perspective", Appendix, they simulate two 'walks'
with lambda = {0.8,1.2} sampled from N(1,0.2)
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
def n_t(t, mean, sd):
"""
lambda admit just positive values
"""
lmbd = np.random.normal(mean, sd, 1)
vec_time = np.arange(t)
nt_iter = np.zeros(t)
while lmbd < 0:
lmbd = np.random.normal(mean, sd, 1)
for j in vec_time:
nt_iter[j] = np.ceil(lmbd*j)
return nt_iter
T = 40
time = np.arange(T) + 1
m = 1
std = 0.2
trials = 200
trial_rf = np.zeros(trials)
gs = gridspec.GridSpec(1, 4, wspace=0.06)
ax1 = plt.subplot(gs[0, 0:3])
ax2 = plt.subplot(gs[0, 3])
for jj in np.arange(trials):
Nt = n_t(T, m, std)
trial_rf[jj] = Nt[-1]
ax1.step(time, Nt, c='grey', lw=1, alpha=0.3)
ax1.scatter(time[-1], Nt[-1], c='black', s=10, alpha=0.5)
ax1.text(10, np.max(trial_rf) - 5,
r'$\lambda \sim \mathcal{{N}}(\mu = 1, \sigma = {{{}}})$'.format(std) + "\n" +
r'$T = {{{}}}$'.format(T) + ", {} trials".format(trials),
{'color': 'k', 'fontsize': 10, 'ha': 'center', 'va': 'center',
'bbox': dict(boxstyle="round", fc="w", ec="k", pad=0.2)})
ax1.set_xlabel('$t$ (time in trial)')
ax1.set_ylabel(r'$N(t) = \lceil \lambda t \rceil$')
ax2.hist(trial_rf, orientation="horizontal", histtype='step', linewidth=0.8,
facecolor='grey', edgecolor='k', fill=True, alpha=0.5)
ax2.yaxis.tick_right()
ax2.set_ylim(0, np.max(trial_rf))
ax2.yaxis.set_label_position("right")
ax2.axhline(y=np.mean(trial_rf), color='black', linestyle=":")
ax2.set_ylabel(r'$N(t = T)$')
# plt.savefig('nt_let.pdf', dpi = 120)
plt.show()
|
import tensorflow as tf
from models import blocks
from utils.logger import deep_log
class Net:
@deep_log
def __init__(self, dropout, num_output, num_conv, num_fc, cnn_config=None):
print 'Initializing a net ...'
self.dropout = dropout
self.num_output = num_output
self.num_conv = num_conv
self.num_fc = num_fc
self.cnn_config = cnn_config
self._load_config()
def _load_config(self):
if self.cnn_config is not None:
self.conv_kernel_config = self.cnn_config['conv_kernel_config']
self.pool_size = self.cnn_config['pool_size']
self.dropout_config = self.cnn_config['dropout_config']
self.fc_relu_config = self.cnn_config['fc_relu_config']
else:
self.conv_kernel_config = {
'size': 3,
'depth_init': 32
}
self.pool_size = 2
self.dropout_config = {
'keep_prob_init': 0.9,
'decay': 0.1
}
self.fc_relu_config = {
'size': 1000
}
            print 'No customized CNN config was provided, so keeping the defaults!'
def _construct_cnn_layers(self, input, training):
conv_layers = [input]
for i in range(0, self.num_conv):
conv_input = conv_layers[-1]
dropout_param = self.dropout_config['keep_prob_init'] - float(i) * self.dropout_config['decay']
kernel_size = self.conv_kernel_config['size']
depth = self.conv_kernel_config['depth_init']*(1+i)
with tf.variable_scope('conv%s' % (i + 1)):
print '\n[%s th] CNN layer input shape is : %s' % (i + 1, conv_input.get_shape())
conv = blocks.conv_relu(conv_input, kernel_size=kernel_size, depth=depth)
pool = blocks.pool(conv, size=self.pool_size)
pool = tf.cond(training, lambda: tf.nn.dropout(pool, keep_prob=dropout_param if self.dropout else 1.0), lambda: pool)
conv_layers.append(pool)
return conv_layers
def _construct_fc_layers(self, flattened, training):
fc_layers = [flattened]
for j in range(0, self.num_fc):
fc_input = fc_layers[-1]
dropout_param = self.dropout_config['keep_prob_init'] - (float(self.num_conv) + float(j)) * self.dropout_config['decay']
with tf.variable_scope('fc%s' % (self.num_conv + j + 1)):
print '\n[%s th] FC layer input shape is : %s ' % (self.num_conv + j + 1, fc_input.get_shape())
fc = blocks.fc_relu(fc_input, size=self.fc_relu_config['size'])
if self.num_fc - j > 1:
fc = tf.cond(training, lambda: tf.nn.dropout(fc, keep_prob=dropout_param if self.dropout else 1.0), lambda: fc)
fc_layers.append(fc)
return fc_layers
@staticmethod
def _flattern_cnn_layers(layers):
last_layer = layers[-1]
shape = last_layer.get_shape().as_list()
flattened = tf.reshape(last_layer, [-1, shape[1] * shape[2] * shape[3]])
return flattened
def _construct_output_layer(self, fc_layers):
with tf.variable_scope('out'):
prediction = blocks.fc(fc_layers[-1], size=self.num_output)
return prediction
def define_net(self, input, training):
print 'Defining a net ...'
conv_layers = self._construct_cnn_layers(input=input, training=training)
flattened = self._flattern_cnn_layers(layers=conv_layers)
fc_layers = self._construct_fc_layers(flattened=flattened, training=training)
predictions = self._construct_output_layer(fc_layers=fc_layers)
return predictions
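# A hypothetical TF1-style wiring sketch (commented out): it assumes
# models.blocks provides the conv_relu/pool/fc_relu/fc helpers used above,
# and the input shape here is illustrative only.
# net = Net(dropout=True, num_output=10, num_conv=3, num_fc=2)
# images = tf.placeholder(tf.float32, [None, 64, 64, 3])
# is_training = tf.placeholder(tf.bool)
# logits = net.define_net(input=images, training=is_training)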
|
import unittest
from poker.card import Card
from poker.validators import StraightFlushValidator
class StraightFlushValidatorTest(unittest.TestCase):
def setUp(self):
self.seven_of_spades = Card(rank = "7", suit = "Spades")
self.eight_of_spades = Card(rank = "8", suit = "Spades")
self.nine_of_spades = Card(rank = "9", suit = "Spades")
self.ten_of_spades = Card(rank = "10", suit = "Spades")
self.jack_of_spades = Card(rank = "Jack", suit = "Spades")
self.queen_of_spades = Card(rank = "Queen", suit = "Spades")
self.king_of_clubs = Card(rank = "King", suit = "Clubs")
self.cards = [
self.seven_of_spades,
self.eight_of_spades,
self.nine_of_spades,
self.ten_of_spades,
self.jack_of_spades,
self.queen_of_spades,
self.king_of_clubs
]
def test_validates_that_cards_have_a_straight_flush(self):
validator = StraightFlushValidator(cards = self.cards)
self.assertEqual(
validator.is_valid(),
True
)
def test_returns_a_straight_flush_from_card_collection(self):
validator = StraightFlushValidator(cards = self.cards)
self.assertEqual(
validator.valid_cards(),
[
self.eight_of_spades,
self.nine_of_spades,
self.ten_of_spades,
self.jack_of_spades,
self.queen_of_spades,
]
)
|
from poker.validators import RankAndSuitValidator
class ThreeOfAKindValidator(RankAndSuitValidator):
def __init__(self, cards):
self.cards = cards
self.name = "Three of a Kind"
    def is_valid(self):
        # exactly one rank must appear three times in the hand
        return self._rank_count(3) == 1
    def valid_cards(self):
        # move the triple to the end of a copy of the hand, then keep the last
        # five cards: the three of a kind plus the two remaining kickers
        # (the highest ones, assuming the hand arrives sorted by rank)
        copy = self.cards[:]
        three = [card for card in self.cards \
                 if self._card_rank_counts[card.rank] == 3]
        for card in three:
            copy.remove(card)
        valid_cards = copy + three
        return valid_cards[-5:]
|
import socket
UDP_IP = "192.168.1.210"
UDP_PORT = 5005
MESSAGE = b'123456'
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(MESSAGE, (UDP_IP, UDP_PORT))
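# A minimal matching receiver sketch (commented out), assuming it runs on the
# host that owns UDP_IP; the 1024-byte buffer size is an arbitrary choice.
# sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# sock.bind(("", UDP_PORT))
# data, addr = sock.recvfrom(1024)
# print(data, addr)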
|
var1 = input("Enter the string or the number (integer) you want to check: ")
var1=var1.split(' ')
var1=''.join(var1)
if var1[::-1]==var1:
print("it is a palindrome")
else:
print("it's not a palindrome")
|
from django.contrib import admin
from django.urls import include, path
from django_js_reverse.views import urls_js
from pages.views import HomeView
urlpatterns = [
path('', HomeView.as_view(), name='home'),
path('admin/', admin.site.urls),
path('notes/', include('notes.urls')),
path('notebooks/', include('notebooks.urls')),
path('accounts/', include('users.urls')),
path('accounts/', include('django.contrib.auth.urls')),
path('accounts/', include('django_registration.backends.activation.urls')),
path('jsreverse/', urls_js, name='js_reverse'),
path('martor/', include('martor.urls')),
path('tags/', include('tags.urls')),
path('search/', include('search.urls'))
]
|
# -*- coding: utf-8 -*-
# @Time : 2019-12-20
# @Author : mizxc
# @Email : xiangxianjiao@163.com
import os
from flask import current_app, request, flash, render_template, redirect, url_for
from flask_login import login_required, current_user
from . import bpAdmin
from project.common.dataPreprocess import strLength
from project.model.navigation import *
from project.common.filePreprocess import allowedImage, creatFileName, allowedFileSize, removeFile
@bpAdmin.route("/navigationColumn")
@login_required
def navigationColumn():
ns = Navigation.objects.order_by('+number')
return render_template('admin/navigationColumn.html',ns=ns)
@bpAdmin.route("/navigationColumnAdd", methods=['POST'])
@login_required
def navigationColumnAdd():
column = request.form['column']
introduction = request.form['introduction']
if not strLength(column,1,60):
        flash(u'Please enter a column name within 60 characters!')
return redirect(url_for('admin.navigationColumn'))
if introduction and not strLength(introduction,1,1000):
        flash(u'Please enter a column introduction within 1000 characters!')
return redirect(url_for('admin.navigationColumn'))
n = Navigation()
n.column = column
n.number = Navigation.objects.count()+1
if introduction:n.introduction=introduction
n.save()
    flash(u'Column added successfully!')
return redirect(url_for('admin.navigationColumn'))
@bpAdmin.route("/navigationColumnEdit/<id>", methods=['GET','POST'])
@login_required
def navigationColumnEdit(id):
n = Navigation.objects(id=id).first()
if request.method == 'GET':
return render_template('admin/navigationColumnEdit.html',n=n)
if request.method == 'POST':
column = request.form['column']
introduction = request.form['introduction']
if not strLength(column,1,60):
            flash(u'Please enter a column name within 60 characters!')
return redirect(url_for('admin.navigationColumn'))
if introduction and not strLength(introduction,1,1000):
            flash(u'Please enter a column introduction within 1000 characters!')
return redirect(url_for('admin.navigationColumn'))
n.column = column
if introduction:n.introduction=introduction
else:n.introduction=None
n.save()
        flash(u'Column updated successfully!')
return redirect(url_for('admin.navigationColumn'))
@bpAdmin.route("/navigationColumnNumberChange/<number>/<direction>", methods=['GET'])
@login_required
def navigationColumnNumberChange(number, direction):
current = Navigation.objects(number=int(number)).first()
currentNumber = int(number)
if direction == 'up':
next = Navigation.objects(number=int(number)-1).first()
if direction == 'down':
next = Navigation.objects(number=int(number)+1).first()
nextNumber = next.number
current.number = nextNumber
current.save()
next.number = currentNumber
next.save()
return redirect(url_for('admin.navigationColumn'))
@bpAdmin.route("/navigationColumnDelete/<id>", methods=['GET'])
@login_required
def navigationColumnDelete(id):
n = Navigation.objects(id=id).first()
if len(n.webs)>0:
        flash(u'This column still contains websites; delete the websites before deleting the column!')
return redirect(url_for('admin.navigationColumn'))
n.delete()
    # after deletion, renumber the remaining columns
ns = Navigation.objects.order_by('+number')
for index, n in enumerate(ns):
n.number = index+1
n.save()
    flash(u'Column deleted successfully!')
return redirect(url_for('admin.navigationColumn'))
@bpAdmin.route("/navigationColumnManage/<id>")
@login_required
def navigationColumnManage(id):
n = Navigation.objects(id=id).first()
return render_template('admin/navigationColumnManage.html',n=n)
@bpAdmin.route("/navigationWebAdd/<id>", methods=['POST'])
@login_required
def navigationWebAdd(id):
n = Navigation.objects(id=id).first()
webName = request.form['webName']
url = request.form['url']
icon = request.files.get('img')
introduction = request.form['introduction']
if not strLength(webName,1,60):
        flash(u'Please enter a website name within 60 characters!')
return redirect(url_for('admin.navigationColumnManage', id=id))
if not strLength(url,1,1000):
        flash(u'Please enter a website URL within 1000 characters!')
return redirect(url_for('admin.navigationColumnManage', id=id))
if not url.startswith('http'):
        flash(u'The URL must start with http:// or https://')
return redirect(url_for('admin.navigationColumnManage', id=id))
if introduction and not strLength(introduction,1,1000):
        flash(u'Please enter an introduction within 1000 characters!')
return redirect(url_for('admin.navigationColumnManage', id=id))
    # validate the image upload only after the other fields pass
iconPath = None
if icon and allowedImage(icon.filename):
if allowedFileSize(len(icon.read()), 1):
icon.seek(0)
fileName = creatFileName(current_user.id, icon.filename)
icon.save(os.path.join(current_app.config['UPLOAD_WEBICON_PATH'], fileName))
iconPath = current_app.config['UPLOAD_PATH_WEBICON_FOR_DB'] + '/' + fileName
else:
flash(u"请上传小于1M的图片!")
return redirect(url_for('admin.navigationColumnManage', id=id))
w = Web()
w.webName = webName
w.url = url
if introduction:w.introduction=introduction
if iconPath:w.icon=iconPath
n.webs.append(w)
n.save()
    flash(u'Website added successfully!')
return redirect(url_for('admin.navigationColumnManage',id=id))
@bpAdmin.route("/navigationWebEdit/<id>/<number>", methods=['GET','POST'])
@login_required
def navigationWebEdit(id,number):
number = int(number)
n = Navigation.objects(id=id).first()
w = n.webs[number]
if request.method == 'GET':
return render_template('admin/navigationWebEdit.html',n=n,w=w,number=number)
if request.method == 'POST':
webName = request.form['webName']
url = request.form['url']
icon = request.files.get('img')
introduction = request.form['introduction']
if not strLength(webName,1,60):
            flash(u'Please enter a website name within 60 characters!')
return redirect(url_for('admin.navigationColumnManage', id=id))
if not strLength(url,1,1000):
            flash(u'Please enter a website URL within 1000 characters!')
return redirect(url_for('admin.navigationColumnManage', id=id))
if not url.startswith('http'):
            flash(u'The URL must start with http:// or https://')
return redirect(url_for('admin.navigationColumnManage', id=id))
if introduction and not strLength(introduction,1,1000):
            flash(u'Please enter an introduction within 1000 characters!')
return redirect(url_for('admin.navigationColumnManage', id=id))
        # validate the image upload only after the other fields pass
iconPath = None
if icon and allowedImage(icon.filename):
if allowedFileSize(len(icon.read()), 1):
icon.seek(0)
fileName = creatFileName(current_user.id, icon.filename)
icon.save(os.path.join(current_app.config['UPLOAD_WEBICON_PATH'], fileName))
iconPath = current_app.config['UPLOAD_PATH_WEBICON_FOR_DB'] + '/' + fileName
if w.icon:
                    # remove the previous image
removeFile(os.path.join(current_app.config['STATIC_PATH'], w.icon))
else:
flash(u"请上传小于1M的图片!")
return redirect(url_for('admin.navigationColumnManage', id=id))
w.webName = webName
w.url = url
if introduction:w.introduction=introduction
else:w.introduction=None
if iconPath:w.icon=iconPath
n.webs[number]=w
n.save()
        flash(u'Website updated successfully!')
return redirect(url_for('admin.navigationColumnManage',id=id))
@bpAdmin.route("/navigationWebNumberChange/<id>/<number>/<direction>", methods=['GET'])
@login_required
def navigationWebNumberChange(id,number, direction):
number = int(number)
n = Navigation.objects(id=id).first()
current = n.webs[number]
if direction == 'up':
next = n.webs[number-1]
n.webs[number] = next
n.webs[number - 1] = current
if direction == 'down':
next = n.webs[number+1]
n.webs[number] = next
n.webs[number + 1] = current
n.save()
return redirect(url_for('admin.navigationColumnManage', id=id))
@bpAdmin.route("/navigationWebDelete/<id>/<number>", methods=['GET'])
@login_required
def navigationWebDelete(id,number):
number = int(number)
n = Navigation.objects(id=id).first()
w = n.webs[number]
    # remove the icon, but first check whether one exists
if w.icon:
removeFile(os.path.join(current_app.config['STATIC_PATH'], w.icon))
n.webs.remove(w)
n.save()
    flash(u'Website deleted successfully!')
return redirect(url_for('admin.navigationColumnManage', id=id))
|
A, B = input().split()
A = float(A)
B = float(B)
avg = (A + B) / 2
if avg >= 7:
print("Aprovado")
elif avg >= 4:
print("Recuperacao")
else:
print("Reprovado")
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import pygame
import OpenGL.GL as gl
import numpy as np
#local imports
from common import DEFAULT_FLASH_RATE, correct_gamma
from screen import Screen
from checkerboard import CheckerBoard
class TripleCheckerBoardSinFlasher(Screen):
def setup(self,
nrows,
nrows_center = None,
check_width = None,
check_width_center = None,
screen_background_color = 'neutral-gray',
show_fixation_dot = False,
flash_rate_left = DEFAULT_FLASH_RATE,
flash_rate_right = DEFAULT_FLASH_RATE,
flash_rate_center = DEFAULT_FLASH_RATE,
#rate_compensation = None,
inv_gamma_func = None,
vsync_patch = 'bottom-right',
):
Screen.setup(self,
background_color = screen_background_color,
vsync_patch = vsync_patch,
)
        # check if we are rendering the center board
        if flash_rate_center is None:
            self.render_center = False
        else:
            self.render_center = True
        # set checkerboard-related attributes (resolve check_width before it is
        # used as the default for the center board)
        if check_width is None:
            check_width = 2.0/nrows #fill whole screen
        # unless otherwise specified, the center checkerboard matches the others
        if nrows_center is None:
            nrows_center = nrows
        if check_width_center is None:
            check_width_center = check_width
        self.board_width = check_width*nrows
if self.render_center:
self.board_width_center = check_width_center * nrows_center
self.nrows = nrows
self.CB_left = CheckerBoard(nrows, check_width, show_fixation_dot = show_fixation_dot)
self.CB_right = CheckerBoard(nrows, check_width, show_fixation_dot = show_fixation_dot) #reversed pattern
if self.render_center:
self.CB_center = CheckerBoard(nrows_center, check_width_center, show_fixation_dot = False)#show_fixation_dot)
# set time-related attributes
self.overall_start_time = None
self.flash_rate_left = flash_rate_left
self.flash_rate_right = flash_rate_right
if self.render_center:
self.flash_rate_center = flash_rate_center
#self.rate_compensation = rate_compensation
self.inv_gamma_func = inv_gamma_func #for removing gamma correction
# get useful coordinate values for checkerboard rendering locations
self.xC, self.yC = (-0.5*self.board_width,-0.5*self.board_width)
self.xL, self.yL = (self.xC - 0.7*self.screen_right, self.yC)
self.xR, self.yR = (self.xC + 0.7*self.screen_right, self.yC)
# quantities for checking things
self.r1_list = []
self.t_list = []
def start_time(self,t):
# get start time and set current CB objects (and their change times)
Screen.start_time(self,t)
self._color_func_left = self._get_color_func(start_time = t,
flash_rate = self.flash_rate_left,
shape = "sin",
inv_gamma_func = self.inv_gamma_func,
)
self._color_func_right = self._get_color_func(start_time = t,
flash_rate = self.flash_rate_right,
shape = "sin",
inv_gamma_func = self.inv_gamma_func,
)
self._color_func_center = self._get_color_func(start_time = t,
flash_rate = self.flash_rate_center,
shape = "square",
inv_gamma_func = self.inv_gamma_func,
)
def render(self):
# do general OpenGL stuff as well as FixationCross and Vsync Patch if needed
Screen.render(self)
# translate to position of left board and render
gl.glLoadIdentity()
gl.glTranslatef(self.xL, self.yL, 0.0)
self.CB_left.render()
# translate to position of right board and render
gl.glLoadIdentity()
gl.glTranslatef(self.xR, self.yR, 0.0)
self.CB_right.render()
# render center board
if self.render_center:
gl.glLoadIdentity()
gl.glTranslatef(-self.board_width_center / 2.0, -self.board_width_center / 2.0, 0.0)
self.CB_center.render()
def update(self, t, dt):
self.ready_to_render = True # render on every Screen.pygame_display_loop loop
# update check colors on left checkerboard
c1, c2 = self._color_func_left(t)
self.CB_left.color1 = c1
self.CB_left.color2 = c2
# get some values for checking what was displayed
self.r1_list.append(c1[0])
self.t_list.append(t-self.t0)
# update check colors on right checkerboard
c1, c2 = self._color_func_right(t)
self.CB_right.color1 = c1
self.CB_right.color2 = c2
# update check colors on center checkerboard
if self.render_center:
c1, c2 = self._color_func_center(t)
self.CB_center.color1 = c1
self.CB_center.color2 = c2
def _get_color_func(self,
start_time,
flash_rate,
shape="sin",
inv_gamma_func = None,
):
color_func = None
# get color functions
if shape == "sin":
# Contrasts will go from 0 and 1 at flash_rate Hz,
# that is the half-cycle of full contrast change
# to which the SSVEP is sensitive.
# The intensities are inverse gamma corrected.
def color_func(t):
te = t - start_time # compute elapsed time
cos_term = np.cos(flash_rate * np.pi * te) / 2.0
c1 = (-cos_term + 0.5)
c2 = ( cos_term + 0.5)
                if inv_gamma_func is not None:
c1 = float(inv_gamma_func(c1))
c2 = float(inv_gamma_func(c2))
return ((c1,c1,c1), (c2,c2,c2))
elif shape == "square":
def color_func(t):
te = t - start_time # compute elapsed time
c = -1.0 * np.cos(flash_rate * np.pi * te) / 2.0
if c > 0.0:
return ((1.0,1.0,1.0), (0.0,0.0,0.0))
else:
return ((0.0,0.0,0.0), (1.0,1.0,1.0))
else:
raise ValueError("shape = '%s' is not valid, try 'sin' or 'square'" % shape)
return color_func
def run(self, **kwargs):
        # the loop rate is set very high so the display loop effectively runs as fast as Python can iterate
Screen.run(self, display_loop_rate = 10000, **kwargs)
################################################################################
# TEST CODE
################################################################################
if __name__ == "__main__":
flash_rate_left = 15
flash_rate_right = 23
flash_rate_center = 19
nrows = 16
nrows_center = 1
duration = 10
show_plot = True
inv_gamma = 0.43
TCBF = TripleCheckerBoardSinFlasher.with_pygame_display(#VBI_sync_osx = False,
#display_mode = (512,512),
#debug = True,
)
from common import load_gamma_calibration
inv_gamma_func = load_gamma_calibration(monitor_name = "benq-gamer1", interp_kind = "cubic")
#inv_gamma_func = None
TCBF.setup(flash_rate_left = flash_rate_left,
flash_rate_right = flash_rate_right,
flash_rate_center = flash_rate_center,
check_width = 0.5 / nrows,
check_width_center = 0.5 / nrows_center,
screen_background_color = 'neutral-gray',
nrows = nrows,
nrows_center = nrows_center,
show_fixation_dot = True,
inv_gamma_func = inv_gamma_func,
)
#-------------------------------------------------------------------------------
# RECORDING CODE
# frame_rate = 140
# recording_name = "TCBFsin140FPS_512x512"
# TCBF.pygame_recording_loop(duration = 10.0,
# frame_rate = frame_rate,
# recording_name = recording_name,
# show = True,
# )
# import subprocess
# input_format = "%s/%s_%%05d.png" % (recording_name, recording_name)
# output_name = "%s.mp4" % recording_name
# subprocess.call(["ffmpeg",
# "-framerate",str(frame_rate),
# "-i", input_format,
# "-c:v", "libx264",
# "-preset", "fast", #compression rate
# #"-pix_fmt", "yuv420p",
# "-qp","0", #lossless
# #"-r", str(frame_rate),
# output_name])
#-------------------------------------------------------------------------------
# TEST CODE
TCBF.run(duration = duration)
pygame.quit()
if show_plot:
t_diffs = np.diff(np.array(TCBF.t_list))
mean_sample_freq = 1.0/t_diffs.mean()
print('Mean sample interval: ', t_diffs.mean())
print('Mean sample frequency:', mean_sample_freq)
print('Sample interval STD: ', t_diffs.std())
import matplotlib.pyplot as plt
plt.subplot(2,1,1)
plt.scatter(TCBF.t_list, TCBF.r1_list, color = 'red', label = 'Displayed')
time_vals = np.linspace(0, duration, duration * 720)
#trig_vals = [(-1.0 * np.cos(TCBF.flash_rate_left * 2.0 * np.pi * t) / 2.0 + 0.5) for t in time_vals]
if inv_gamma_func is None:
inv_gamma_func = lambda x:x
trig_vals = [inv_gamma_func(-1.0 * np.cos(TCBF.flash_rate_left * np.pi * t) / 2.0 + 0.5) for t in time_vals]
plt.plot(time_vals, trig_vals, color = 'blue', label = 'Ideal')
plt.legend()#loc = 'best')
plt.subplot(2,1,2)
fft_data = abs(np.fft.rfft(TCBF.r1_list))
fft_freqs = np.fft.rfftfreq(len(TCBF.r1_list), 1.0/mean_sample_freq)
plt.plot(fft_freqs, fft_data)
plt.scatter(fft_freqs, fft_data)
plt.show()
|
# -*- coding: utf-8 -*-
#########################################
# IPTV List Updater #
# by Nobody28 & satinfo #
#########################################
from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Components.ActionMap import NumberActionMap
from Components.Sources.StaticText import StaticText
from Components.Label import Label
from Components.ScrollLabel import ScrollLabel
import Components.config
from Components.config import config
from Tools.Directories import resolveFilename, SCOPE_PLUGINS
from Components.Language import language
from os import path, walk
from os import environ as os_environ
from enigma import eEnv
from locale import _
from skin import *
import os
class Credits(Screen):
def __init__(self, session, args = None):
self.session = session
path = "/usr/lib/enigma2/python/Plugins/Extensions/IPTV-List-Updater/skins/original/Credits.xml"
print path
with open(path, "r") as f:
self.skin = f.read()
f.close()
Screen.__init__(self, session)
self["title"] = Label(_("Special thanks to following Users:"))
self["text"] = ScrollLabel()
self["actions"] = NumberActionMap(["WizardActions", "OkCancelActions", "ColorActions"],
{
"back": self.close,
"red": self.close,
"up": self["text"].pageUp,
"down": self["text"].pageDown,
}, -1)
self["key_red"] = Label(_("Close"))
change = self.Ausgabe()
self["text"].setText(change)
def Ausgabe(self):
self.file = open("/usr/lib/enigma2/python/Plugins/Extensions/IPTV-List-Updater/credits/credits.txt","r")
self.text = self.file.read()
self.file.close()
return self.text
|
#### Class 04
#### Using Selenium: An Example
from selenium import webdriver
from bs4 import BeautifulSoup as bs
from selenium.webdriver.common.keys import Keys
import time
def start_chrome(webpage):
driver = webdriver.Chrome()
driver.get(webpage)
return driver
def define_search(driver):
    ## the search element's id changes randomly; its class name was the only
    ## other identifying info, but two elements share that class and we want the 2nd
search_elem = driver.find_elements_by_class_name('searchmenu_open')
search_elem[1].click()
time.sleep(2)
keyword_elem = driver.find_element_by_name("query")
time.sleep(5)
keyword_elem.send_keys("Shirley Clark")
time.sleep(2)
button_elem = driver.find_element_by_xpath("//input[@value='Search']")
button_elem.click()
return driver
def get_headlines(driver):
html_source = driver.page_source
soup = bs(html_source, "lxml")
gma_tags = soup.find_all("span", {"class" : "headline a"})
gma_headlines = [tag.get_text() for tag in gma_tags]
with open("headlines.txt", "w") as f:
f.writelines("\n".join(gma_headlines))
driver.quit()
def main(webpage):
driver = start_chrome(webpage)
time.sleep(2)
driver = define_search(driver)
time.sleep(2)
get_headlines(driver)
main("http://www.spencerdailyreporter.com/")
|
def driver(input):
    helper(input, 0, [])
# Prints every subset of `input`: at each index the recursion first excludes
# the element, then includes it, and undoes the choice while backtracking.
def helper(input, idx, soFar):
    if (idx == len(input)):
        print(soFar)
    else:
        # branch 1: leave input[idx] out
        helper(input, idx+1, soFar)
        # branch 2: take input[idx], recurse, then backtrack
        soFar.append(input[idx])
        helper(input, idx+1, soFar)
        soFar.pop(-1)
driver([1, 3, 5, 7])
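# For [1, 3, 5, 7] this prints all 2**4 = 16 subsets, starting with [] and [7]
# and ending with the full set [1, 3, 5, 7].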
|
from redbot.core import commands, checks
from redbot.core.utils.chat_formatting import text_to_file
import random
from .words import words, words2, flags
from .country import country
import re
import subprocess
import discord
class Utilities(commands.Cog):
def __init__(self, bot):
self.bot = bot
# self.bot.remove_command("info")
@commands.command()
async def whoami(self, ctx):
word = random.choice(words)
word2 = random.choice(words2)
await ctx.send(f"{word} {word2}")
@commands.command()
async def flag(self, ctx, *, flag):
orig = ctx.guild.get_member(ctx.author.id).nick
if orig is None:
orig = ctx.guild.get_member(ctx.author.id).name
if len(flag) < 3:
if flag == "uk":
comp = flags.get("gb")
else:
comp = flags.get(flag.lower())
else:
comp2 = country.get(flag.lower())
comp = flags.get(comp2)
if not comp:
await ctx.send("No such flag buddy.")
return
def deEmojify(text):
regrex_pattern = re.compile(
pattern="["
u"\U0001F1E0-\U0001F1FF" # flags (iOS)
"]+", flags=re.UNICODE)
return regrex_pattern.sub(r'', text)
# newnick = deEmojify(orig)
try:
await ctx.guild.get_member(ctx.author.id).edit(nick=f"{comp} {orig.strip()}")
except (discord.errors.Forbidden, discord.errors.HTTPException):
await ctx.send("Missing permissions or nickname too large (32 chars max)")
return
await ctx.send(f"Added {comp} to {orig.strip()}'s nickname. To remove it use delflag command.")
@commands.command()
async def delflag(self, ctx):
orig = ctx.guild.get_member(ctx.author.id).nick
if orig is None:
orig = ctx.guild.get_member(ctx.author.id).name
def deEmojify(text):
regrex_pattern = re.compile(
pattern="["
# u"\U0001F600-\U0001F64F" # emoticons
# u"\U0001F300-\U0001F5FF" # symbols & pictographs
# u"\U0001F680-\U0001F6FF" # transport & map symbols
u"\U0001F1E0-\U0001F1FF" # flags (iOS)
"]+", flags=re.UNICODE)
return regrex_pattern.sub(r'', text)
try:
await ctx.guild.get_member(ctx.author.id).edit(nick=f"{deEmojify(orig)}")
except (discord.errors.Forbidden, discord.errors.HTTPException):
await ctx.send("Missing permissions.")
return
await ctx.send("Done")
@commands.command()
@checks.is_owner()
async def console(self, ctx, *, cmd):
# The recommended way in Python 3.5 and above is to use subprocess.
output = subprocess.run(cmd, stdout=subprocess.PIPE, text=True, shell=True, stderr=subprocess.STDOUT)
#subprocess = subprocess.Popen(shell = True, stdout = subprocess.PIPE)
#output = subprocess.stdout.read()
response = output.stdout
if len(response) > 2000:
try:
file = text_to_file(response, "console.txt")
await ctx.send(file=file)
except discord.errors.HTTPException:
await ctx.send("File too large.")
return
else:
await ctx.send("```\n" + response + "\n```")
@commands.command(aliases=["emoji"])
async def emote(self, ctx, emoji: discord.PartialEmoji):
await ctx.send(emoji.url)
@commands.command()
async def avatar(self, ctx, member: discord.Member = None):
if member is None:
member = ctx.author
await ctx.send(member.avatar_url)
@commands.command()
@checks.is_owner()
async def say(self, ctx, *, stuff):
async for log in ctx.channel.history(limit=1):
if log.author == ctx.author:
await log.delete()
await ctx.send(stuff)
# @commands.command()
# async def info(self, ctx):
# desc = (
# "Multipurpose bot hosted by mucski, created by Twentysix\n"
# "For support you can contact my owner with the contact command\n"
# "Or join my support discord server:"
# )
# e = discord.Embed(title=f"{self.bot.user.display_name}'s info", color=await self.bot.get_embed_color(ctx), description=desc)
# await ctx.send(embed=e)
# await ctx.send("https://discord.gg/Juwfhp8nnc")
#
# def cog_unload(self):
# self.bot.add_command("info")
|
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def sumNumbers(self, root):
"""
:type root: TreeNode
:rtype: int
"""
        all_path = []
        self.root_to_leaf(root, 0, all_path.append)
        return sum(all_path)
    def root_to_leaf(self, node, curr_sum, all_path, path=None):
        """
        Helper that traverses the tree, building each root-to-leaf number by
        digit concatenation and reporting it through the all_path callback.
        """
        if path is None:
            path = []
        if node is None:
            return
        path.append(node.val)
        if node.left is None and node.right is None:
            # at a leaf: concatenate the digits along the path into one number
            path_val = int("".join(str(num) for num in path))
            all_path(path_val)
        self.root_to_leaf(node.left, curr_sum, all_path, path)
        self.root_to_leaf(node.right, curr_sum, all_path, path)
        path.pop()
if __name__ == "__main__":
root = TreeNode(4)
root.left = TreeNode(9)
root.right = TreeNode(0)
root.left.left = TreeNode(5)
root.left.right = TreeNode(1)
tree = Solution()
result = tree.sumNumbers(root)
print(result)
|
from django import forms
class loginform(forms.Form):
username=forms.CharField(label="Username",max_length=50,required=True)
password = forms.CharField(label="Password",widget=forms.PasswordInput,required=True)
|
import pandas as pd
import numpy as np
from osmread import parse_file, Node
import matplotlib.pyplot as plt
from tqdm import tqdm
import csv
import os
housing_df = pd.read_csv('./data/out/datall.csv')
def decode_node_to_csv():
    # yield OSM nodes that carry a full street address (used to build the geo-location CSV)
for entry in parse_file('./data/denmark-latest.osm'):
if (isinstance(entry, Node) and
'addr:street' in entry.tags and
'addr:postcode' in entry.tags and
'addr:housenumber' in entry.tags):
yield entry
def add_geolocations():
    progress_bar = tqdm()
    for idx, decoded_node in enumerate(decode_node_to_csv()):
        try:
            full_address = decoded_node.tags['addr:street'] + " " + decoded_node.tags['addr:housenumber'] + " " + decoded_node.tags['addr:postcode'] + " " + decoded_node.tags['addr:city']
            addr_with_geo = (full_address, decoded_node.lon, decoded_node.lat)
            with open('decoded_nodes.csv', 'a', encoding='utf-8') as f:
                output_writer = csv.writer(f)
                output_writer.writerow(addr_with_geo)
            progress_bar.update()
        except (KeyError, ValueError):
            pass
# Convert all sales dates in the dataset into proper datetime objects
def sales_dates_to_datetime():
    # pd.to_datetime parses the date strings; housing_df is the CSV loaded above
    df = housing_df.copy()
    df['sale_date_str'] = pd.to_datetime(df['sale_date_str'])
    df.to_csv('datetime.csv')
def scatter_plot_from_dataframe(dataframe):
plot = dataframe.plot(kind='scatter', x='lon', y='lat')
plot.get_figure().savefig('scatterplot1.png')
def generate_scatter_plot(datetime_dataframe):
scatter_plot_from_dataframe(datetime_dataframe)
def run():
    # add_geolocations()
# Write DataFrame to csv
# to_csv(path)
datetime_dataframe = pd.read_csv('decoded_nodes.csv')
generate_scatter_plot(datetime_dataframe)
run()
|
A = [0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0]
# N = 0
# 0, 1, 0, 1, ...
# 0, 0, 0, ..., 1 - only one jump
# 1, 1, 1, 1 - with best score == 1
# check empty
# 0, 0, 0, 0, ...
def solution(A):
A.append(1) # add value for last point - opposite riverbank
fib_numbers = get_fib_numbers_up_to(26) # there are 26 fib numbers less than 100 000
need_jumps_list = [-1] * len(A)
# mark those leaves that can be reached with one jump
for fib_num in fib_numbers:
if fib_num > len(A):
break
if A[fib_num - 1] == 1:
need_jumps_list[fib_num - 1] = 1
# go through each leaf
for i in range(len(A)):
# skip non-leaves and those that can be reached with one jump
if A[i] == 0 or need_jumps_list[i] == 1:
continue
can_be_reached = False
best_needed_jumps = 100001
        # go through each possible jump
for jump in fib_numbers:
# get index of previous leaf for given jump
prev_leaf_i = i - jump
# check if previous leaf for such jump can exist
if prev_leaf_i < 0:
break
# check if this leaf can be reached somehow
if A[prev_leaf_i] == 0 or need_jumps_list[prev_leaf_i] < 0:
continue # skip this jump if it cannot be performed
# if jump will produce better score than current, then remember it
if best_needed_jumps > need_jumps_list[prev_leaf_i] + 1:
best_needed_jumps = need_jumps_list[prev_leaf_i] + 1
can_be_reached = True
if can_be_reached:
need_jumps_list[i] = best_needed_jumps
return need_jumps_list[len(A) - 1]
def get_fib_numbers_up_to(n):
fib_nums = [0] * n
fib_nums[0] = 0
fib_nums[1] = 1
for i in range(2, n):
fib_nums[i] = fib_nums[i - 1] + fib_nums[i - 2]
fib_nums.pop(0) # we can remove 0, since it will not produce any jumps
fib_nums.pop(0) # we can remove one 1, since it is duplicated
return fib_nums
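# Worked check for the A defined at the top: starting from position -1, jumps
# of 5, 2 and 5 (all Fibonacci numbers) land on leaves A[4] and A[6] and then
# the far bank at index 11, i.e. 3 jumps, matching the commented assert below.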
# assert (solution(A) == 3)
assert (solution([0, 0, 0, 0]) == 1)
# assert (solution([0, 0, 0]) == -1)
assert (solution([]) == 1)
|
from django.test import TestCase
from django.test.utils import override_settings
import mock
from jenkinsapi import jenkins
from jenkins.tasks import build_job, push_job_to_jenkins, import_build
from .factories import (
JobFactory, JenkinsServerFactory, JobTypeFactory)
class BuildJobTaskTest(TestCase):
def setUp(self):
self.server = JenkinsServerFactory.create()
@override_settings(CELERY_ALWAYS_EAGER=True)
def test_build_job(self):
"""
The build_job task should find the associated server, and request that
the job be built.
"""
job = JobFactory.create(server=self.server)
with mock.patch(
"jenkins.models.Jenkins",
spec=jenkins.Jenkins) as mock_jenkins:
build_job(job.pk)
mock_jenkins.assert_called_with(
self.server.url, username=u"root", password=u"testing")
mock_jenkins.return_value.build_job.assert_called_with(
job.name, params={})
@override_settings(CELERY_ALWAYS_EAGER=True)
def test_build_job_with_build_id(self):
"""
If we provide a build_id, this should be sent as parameter.
"""
job = JobFactory.create(server=self.server)
with mock.patch(
"jenkins.models.Jenkins",
spec=jenkins.Jenkins) as mock_jenkins:
build_job(job.pk, "20140312.1")
mock_jenkins.assert_called_with(
self.server.url, username=u"root", password=u"testing")
mock_jenkins.return_value.build_job.assert_called_with(
job.name, params={"BUILD_ID": "20140312.1"})
@override_settings(CELERY_ALWAYS_EAGER=True)
def test_build_job_with_params(self):
"""
If we provide parameters, then they should be passed with the job build
request.
"""
job = JobFactory.create(server=self.server)
with mock.patch("jenkins.models.Jenkins", spec=jenkins.Jenkins) as mock_jenkins:
build_job(job.pk, params={"MYTEST": "500"})
mock_jenkins.assert_called_with(
self.server.url, username=u"root", password=u"testing")
mock_jenkins.return_value.build_job.assert_called_with(
job.name, params={"MYTEST": "500"})
@override_settings(CELERY_ALWAYS_EAGER=True)
def test_build_job_with_params_and_build_id(self):
"""
If we provide parameters and a build_id, we should get both in the
parameters.
"""
job = JobFactory.create(server=self.server)
with mock.patch("jenkins.models.Jenkins", spec=jenkins.Jenkins) as mock_jenkins:
build_job(job.pk, "20140312.1", params={"MYTEST": "500"})
mock_jenkins.assert_called_with(
self.server.url, username=u"root", password=u"testing")
mock_jenkins.return_value.build_job.assert_called_with(
job.name, params={"MYTEST": "500", "BUILD_ID": "20140312.1"})
class ImportBuildTaskTest(TestCase):
@override_settings(CELERY_ALWAYS_EAGER=True)
def test_import_build(self):
"""
import_build should pull the details for the build and create artifacts
for them.
"""
job = JobFactory.create()
with mock.patch("jenkins.tasks.import_build_for_job") as task_mock:
import_build.delay(job.pk, 5)
task_mock.assert_called_once_with(job.pk, 5)
job_xml = """
<?xml version='1.0' encoding='UTF-8'?>
<project>{{ notifications_url }}</project>
"""
class CreateJobTaskTest(TestCase):
@override_settings(
CELERY_ALWAYS_EAGER=True, NOTIFICATION_HOST="http://example.com")
def test_push_job_to_jenkins(self):
"""
The push_job_to_jenkins task should find the associated server, and
create the job with the right name and content.
"""
jobtype = JobTypeFactory.create(config_xml=job_xml)
job = JobFactory.create(jobtype=jobtype, name="testing")
with mock.patch(
"jenkins.models.Jenkins",
spec=jenkins.Jenkins) as mock_jenkins:
mock_jenkins.return_value.has_job.return_value = False
push_job_to_jenkins(job.pk)
mock_jenkins.assert_called_with(
job.server.url, username=u"root", password=u"testing")
mock_jenkins.return_value.has_job.assert_called_with("testing")
mock_jenkins.return_value.create_job.assert_called_with(
"testing",
job_xml.replace(
"{{ notifications_url }}",
"http://example.com/jenkins/notifications/").strip())
@override_settings(
CELERY_ALWAYS_EAGER=True, NOTIFICATION_HOST="http://example.com")
def test_push_job_to_jenkins_with_already_existing_job(self):
"""
If the jobname specified already exists in Jenkins, then we can assume
we're updating the Job's config.xml.
"""
jobtype = JobTypeFactory.create(config_xml=job_xml)
job = JobFactory.create(jobtype=jobtype, name="testing")
mock_apijob = mock.Mock()
with mock.patch(
"jenkins.models.Jenkins",
spec=jenkins.Jenkins) as mock_jenkins:
mock_jenkins.return_value.has_job.return_value = True
mock_jenkins.return_value.get_job.return_value = mock_apijob
push_job_to_jenkins(job.pk)
mock_jenkins.assert_called_with(
job.server.url, username=u"root", password=u"testing")
mock_jenkins.return_value.has_job.assert_called_with("testing")
mock_apijob.update_config.assert_called_with(
job_xml.replace(
"{{ notifications_url }}",
"http://example.com/jenkins/notifications/").strip())
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-04-10 10:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('myblog', '0002_blogspost_summary'),
]
operations = [
migrations.AlterField(
model_name='blogspost',
name='summary',
field=models.CharField(default='default', max_length=50),
),
]
|
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
dataset1=pd.read_csv('tamil_movie_reviews_train.csv')
|
#! /usr/bin/env python
# -*- coding:utf-8 -*-
from lxml import etree
class OpenStreetMap :
def __init__ (self) :
self.bounds = None
self.nodes = []
self.ways = []
self.relations = []
def fromXML (self, xml) :
root = etree.fromstring(xml)
self.bounds = OSMUtils.domToBounds(root)
self.nodes = OSMUtils.domToNodes(root)
self.ways = OSMUtils.domToWays(root)
self.relations = OSMUtils.domToRelations(root)
class OSMUtils :
@staticmethod
def domToBounds (root) :
bounds_dom = root.xpath('/osm/bounds')[0]
a = bounds_dom.attrib
return OSMBounds(
OSMLocation(float(a['minlat']), float(a['minlon'])),
OSMLocation(float(a['maxlat']), float(a['maxlon']))
)
@staticmethod
def domToNodes (root) :
nodes = []
for node_dom in root.xpath('/osm/node') :
node = OSMNode()
node.fromXMLNode(node_dom)
nodes.append(node)
return nodes
@staticmethod
def domToWays (root) :
ways = []
for way_dom in root.xpath('/osm/way') :
way = OSMWay()
way.fromXMLNode(way_dom)
ways.append(way)
return ways
@staticmethod
def domToRelations (dom) :
relations = []
for relation_dom in dom.xpath('./relation') :
relation = OSMRelation()
relation.fromXMLNode(relation_dom)
relations.append(relation)
return relations
@staticmethod
def domToMembers (dom) :
members = []
for member_dom in dom.xpath('./member') :
member = OSMMember()
member.fromXMLNode(member_dom)
members.append(member)
return members
@staticmethod
def domToTags (dom) :
tags = {}
for tag_dom in dom.xpath('./tag') :
key = tag_dom.attrib.get('k', '')
value = tag_dom.attrib.get('v', '')
if key != '' and value != '' :
tags[key] = value
return tags
@staticmethod
def domToNds (dom) :
return [nd.attrib['ref'] for nd in dom.xpath('./nd') if nd.attrib.has_key('ref')]
class OSMBounds :
def __init__ (self, min_location, max_location) :
if min_location.lat > max_location.lat or min_location.lon > max_location.lon :
self.min_location = None
self.max_location = None
else :
self.min_location = min_location
self.max_location = max_location
def __str__ (self) :
return 'OSMBounds(min={0}, max={1})'.format(self.min_location, self.max_location)
class OSMLocation :
def __init__ (self, lat, lon) :
self.lat = lat
self.lon = lon
def __str__ (self) :
        return 'OSMLocation(lat={0}, lon={1})'.format(self.lat, self.lon)
class OSMNode :
def __init__ (self) :
self.id = 0
self.visible = False
self.version = 0
self.changeset = 0
self.timestamp = '1970-01-01T00:00:00Z' # datetime.min
self.user = ''
self.uid = 0
self.lat = 0
self.lon = 0
self.tags = {}
def fromXMLNode (self, node) :
a = node.attrib
self.id = long(a.get('id', '0'))
self.visible = bool(a.get('visible', 'False'))
self.version = float(a.get('version', '0'))
self.changeset = long(a.get('changeset', '0'))
self.timestamp = a.get('timestamp', '1970-01-01T00:00:00Z')
self.user = a.get('user', '')
self.uid = long(a.get('uid', '0'))
self.lat = float(a.get('lat', '0'))
self.lon = float(a.get('lon', '0'))
self.tags = OSMUtils.domToTags(node)
def __str__ (self) :
return 'OSMNode(lat={0}, lon={1}, visible={2})'.format(self.lat, self.lon, self.visible)
class OSMWay :
def __init__ (self) :
self.id = 0
self.visible = False
self.version = 0
self.changeset = 0
self.timestamp = '1970-01-01T00:00:00Z' # datetime.min
self.user = ''
self.uid = 0
self.tags = {}
        self.nds = []
def fromXMLNode (self, node) :
a = node.attrib
self.id = long(a.get('id', '0'))
self.visible = bool(a.get('visible', 'False'))
self.version = float(a.get('version', '0'))
self.changeset = long(a.get('changeset', '0'))
self.timestamp = a.get('timestamp', '1970-01-01T00:00:00Z')
self.user = a.get('user', '')
self.uid = long(a.get('uid', '0'))
self.tags = OSMUtils.domToTags(node)
self.nds = OSMUtils.domToNds(node)
# TODO : implements bounding box
def __str__ (self) :
return 'OSMWay(id={0}, len(nds)={1})'.format(self.id, len(self.nds))
class OSMRelation :
def __init__ (self) :
self.id = 0
self.visible = False
self.version = 0
self.changeset = 0
self.timestamp = '1970-01-01T00:00:00Z' # datetime.min
self.user = ''
self.uid = 0
self.members = []
self.tags = {}
def fromXMLNode (self, node) :
a = node.attrib
self.id = long(a.get('id', '0'))
self.visible = bool(a.get('visible', 'False'))
self.version = float(a.get('version', '0'))
self.changeset = long(a.get('changeset', '0'))
self.timestamp = a.get('timestamp', '1970-01-01T00:00:00Z')
self.user = a.get('user', '')
self.uid = long(a.get('uid', '0'))
self.members = OSMUtils.domToMembers(node)
self.tags = OSMUtils.domToTags(node)
def __str__ (self) :
return 'OSMRelation(id={0}, len(members)={1})'.format(self.id, len(self.members))
class OSMMember :
def __init__ (self) :
self.type = ''
self.ref = -1
self.role = ''
def fromXMLNode (self, node) :
a = node.attrib
self.type = a.get('type', '')
self.ref = long(a.get('ref', '-1'))
self.role = a.get('role', '')
def __str__ (self) :
return 'OSMMember(type={0}, ref={1}, role={2})'.format(self.type, self.ref, self.role)
if __name__ == '__main__' :
# read xml
with open('../data/map.osm.xml') as f :
xml = f.read()
osm = OpenStreetMap()
osm.fromXML(xml)
|
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pipeline to load compute forwarding rules into Inventory.
This pipeline depends on the LoadProjectsPipeline.
"""
from google.cloud.security.common.data_access import project_dao as proj_dao
from google.cloud.security.common.util import log_util
from google.cloud.security.common.util import parser
from google.cloud.security.inventory.pipelines import base_pipeline
# TODO: The next editor must remove this disable and correct issues.
# pylint: disable=missing-type-doc,missing-return-type-doc
# pylint: disable=missing-yield-type-doc
LOGGER = log_util.get_logger(__name__)
class LoadForwardingRulesPipeline(base_pipeline.BasePipeline):
"""Load compute forwarding rules for all projects."""
RESOURCE_NAME = 'forwarding_rules'
def _transform(self, resource_from_api):
"""Create an iterator of forwarding rules to load into database.
TODO: truncate the region and target.
Args:
resource_from_api: A dict of forwarding rules, keyed by
project id, from GCP API.
Yields:
Iterator of forwarding rule properties in a dict.
"""
for (project_id, forwarding_rules) in resource_from_api.iteritems():
for rule in forwarding_rules:
yield {'project_id': project_id,
'id': rule.get('id'),
'creation_timestamp': parser.format_timestamp(
rule.get('creationTimestamp'),
self.MYSQL_DATETIME_FORMAT),
'name': rule.get('name'),
'description': rule.get('description'),
'region': rule.get('region'),
'ip_address': rule.get('IPAddress'),
'ip_protocol': rule.get('IPProtocol'),
'port_range': rule.get('portRange'),
'ports': parser.json_stringify(rule.get('ports', [])),
'target': rule.get('target'),
'load_balancing_scheme': rule.get('loadBalancingScheme'),
'subnetwork': rule.get('subnetwork'),
'network': rule.get('network'),
'backend_service': rule.get('backendService'),
'raw_forwarding_rule': parser.json_stringify(rule)}
def _retrieve(self):
"""Retrieve forwarding rules from GCP.
Get all the projects in the current snapshot and retrieve the
compute forwarding rules for each.
Returns:
A dict mapping projects with their forwarding rules (list):
{project_id: [forwarding_rules]}
"""
projects = (proj_dao
.ProjectDao(self.global_configs)
.get_projects(self.cycle_timestamp))
forwarding_rules = {}
for project in projects:
project_fwd_rules = self.safe_api_call('get_forwarding_rules',
project.id)
if project_fwd_rules:
forwarding_rules[project.id] = project_fwd_rules
return forwarding_rules
def run(self):
"""Run the pipeline."""
forwarding_rules = self._retrieve()
loadable_rules = self._transform(forwarding_rules)
self._load(self.RESOURCE_NAME, loadable_rules)
self._get_loaded_count()
|
def typeList(list1, list2):
    checkCount = 0
    if len(list1) == len(list2):
        # compare the lists element by element
        for i in range(0, len(list1)):
            if list1[i] != list2[i]:
                print "The lists are not the same."
                checkCount += 1
        if checkCount == 0:
            print "The lists are the same."
    else:
        print "The lists are not the same."
typeList(["hi", 2, 3, 4], ["bye", 2, 3, 4])
|
'''
print("==== This is an interesting guessing game ====")
temp = input("Try to guess the number I am thinking of: ")
guess = int(temp)
if guess == 8:
    print("Haha, you are amazing!")
    print("But guessing right earns you nothing; there is no prize.")
else:
    print("Wrong! I was thinking of 8 -- go try it.")
print("The game is over now.")
import random
secret = random.randint(1,10)
print("==== This is the second guessing game ====")
temp = input("Try to guess the number I am thinking of: ")
guess = int(temp)
while guess != secret:
    temp = input("Wrong, please enter a new guess: ")
    guess = int(temp)
    if guess == secret:
        print("Haha, you are amazing!")
        print("But it earns you nothing.")
    else:
        if guess > secret:
            print("Too big, too big!")
        else:
            print("A little too small!")
print("The game is over now.")
input("Enter another number to play again:\n")
'''
import random
random_number = random.randint(1, 10)
# print(random_number)
print("===== This is the third guessing game =====")
temp = input("Can you guess the number I am thinking of?\n(Hint: it is between 1 and 10!)\n")
guess_number = int(temp)
guess_count = 0
while guess_count < 2:
    if guess_number < random_number:
        print("Wrong, your guess is too small!")
        guess_number = int(input("Please enter another number:\n"))
    elif guess_number > random_number:
        print("Wrong, your guess is too big!")
        guess_number = int(input("Please enter another number:\n"))
    else:
        print("Wow, well done, you guessed it!\nHaha, but there is no prize.")
        break
    guess_count += 1
else:
    print("You are out of tries! Please start over.")
print("Game over!")
|
# URI widget
import re
import zeam.form.ztk.compat
from zeam.form.base.markers import Marker, NO_VALUE
from zeam.form.base.widgets import FieldWidget
from zeam.form.ztk.fields import Field, registerSchemaField
from grokcore import component as grok
from zope.i18nmessageid import MessageFactory
from zope.interface import Interface
from zope.schema import interfaces as schema_interfaces
_ = MessageFactory("zeam.form.base")
isURI = re.compile(
# scheme
r"[a-zA-z0-9+.-]+:"
# non space (should be pickier)
r"\S*$").match
class URIField(Field):
"""A text line field.
"""
target = '_self'
def __init__(self, title,
minLength=0,
maxLength=None,
**options):
super(URIField, self).__init__(title, **options)
self.minLength = minLength
self.maxLength = maxLength
def isEmpty(self, value):
return value is NO_VALUE or not len(value)
def validate(self, value, form):
error = super(URIField, self).validate(value, form)
if error is not None:
return error
if not isinstance(value, Marker) and len(value):
assert isinstance(value, zeam.form.ztk.compat.string_types)
if not isURI(value):
return _(u"The URI is malformed.")
if self.minLength and len(value) < self.minLength:
return _(u"The URI is too short.")
if self.maxLength and len(value) > self.maxLength:
return _(u"The URI is too long.")
return None
# BBB
URISchemaField = URIField
class URIWidget(FieldWidget):
grok.adapts(URIField, Interface, Interface)
defaultHtmlClass = ['field', 'field-uri']
defaultHtmlAttributes = set(['readonly', 'required', 'autocomplete',
'maxlength', 'pattern', 'placeholder',
'size', 'style'])
class URIDisplayWidget(FieldWidget):
grok.adapts(URIField, Interface, Interface)
grok.name('display')
@property
def target(self):
return self.component.target
def URISchemaFactory(schema):
field = URIField(
schema.title or None,
identifier=schema.__name__,
description=schema.description,
required=schema.required,
readonly=schema.readonly,
minLength=schema.min_length,
maxLength=schema.max_length,
interface=schema.interface,
constrainValue=schema.constraint,
defaultFactory=schema.defaultFactory,
defaultValue=schema.__dict__['default'] or NO_VALUE)
return field
def register():
registerSchemaField(URISchemaFactory, schema_interfaces.IURI)
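# Illustrative behaviour of URIField.validate (assuming `form` is a zeam form
# instance; return values follow the checks above):
#   URIField(u"Link").validate(u"http://example.com/x", form)  -> None
#   URIField(u"Link").validate(u"not a uri", form)  -> u"The URI is malformed."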
|
import scrapy.cmdline
scrapy.cmdline.execute(['scrapy','crawl','mybaike'])
|
#!/usr/bin/env python
# _*_ coding: utf-8 _*_
# @Time : 2021/4/8 19:07
# @Author :'liuyu'
# @Version:V 0.1
# @File :
# @desc :
from transformers import XLNetTokenizer, TFXLNetModel
import os
import tensorflow as tf
from tensorflow.python.keras.backend import set_session
from queue import Queue
from threading import Thread
import json
class Xlnet:
    def __init__(self, index=None):
if index is not None:
os.environ['CUDA_VISIBLE_DEVICES'] = str(index)
config = tf.compat.v1.ConfigProto(log_device_placement=True)
config.gpu_options.per_process_gpu_memory_fraction = 1
config.gpu_options.allow_growth = True
config.log_device_placement = False
        config.gpu_options.allocator_type = 'BFC'  # manage memory in blocks; allocate and free space per block
# config.allow_soft_placement=True
set_session(tf.compat.v1.Session(config=config))
self.model = TFXLNetModel.from_pretrained('xlnet-large-cased')
self.tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased', do_lower_case=True)
self.input_queue = Queue(maxsize=1)
self.output_queue = Queue(maxsize=1)
self.predict_thread = Thread(target=self.predict_from_queue, daemon=True)
self.predict_thread.start()
def encode(self, sentences):
self.input_queue.put(sentences)
output = self.output_queue.get()
return output
def predict_from_queue(self):
while True:
sentences = self.input_queue.get()
encoded_input = self.tokenizer(sentences, return_tensors='tf', padding=True)
outputs = self.model(encoded_input)
last_hidden_states = outputs.last_hidden_state
pooled_sentence = [tf.reduce_mean(vector, 0).numpy().tolist() for vector in last_hidden_states]
self.output_queue.put(pooled_sentence)
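# Minimal usage sketch (assumes the 'xlnet-large-cased' weights are cached
# locally or can be downloaded):
#   xlnet = Xlnet(index=0)
#   vectors = xlnet.encode(["This is a sentence."])
#   # -> one mean-pooled hidden-state vector per input sentence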
|
import sys
import socket
import threading
import time
import bcrypt
import json
from PyQt5 import QtCore, QtGui, uic, QtWidgets
from base64 import b64decode, b64encode
from Crypto.Cipher import AES
from Crypto.Util.Padding import pad, unpad
running = True
testing_client = True
""" #==============================================================================
Client Information
""" #==============================================================================
class ClientInfo:
def __init__(self):
self.server_socket = None
self.connected = False
self.running = True
self.current_main_thread = None
self.current_receive_thread = None
self.encryption_key = b"ATERRIBLEKEYYYYY"
clientInfo = ClientInfo()
clientInfoLock = threading.Lock()
""" #==============================================================================
Window Creation
""" #==============================================================================
# Get UI file and load as window.
qtCreatorFile = "window_design.ui"
Ui_MainWindow, QtBaseClass = uic.loadUiType(qtCreatorFile)
# PyQT application.
class QtWindow(QtWidgets.QMainWindow, Ui_MainWindow):
def __init__(self):
QtWidgets.QMainWindow.__init__(self)
Ui_MainWindow.__init__(self)
self.setupUi(self)
# Send startup message
self.textDisplay.append("Window initialised.")
# Hide the login panel
self.hide_login_panel()
# Hide the map
self.hide_map()
self.map_hidden = True
# buttons Onclick
self.InputButton.clicked.connect(lambda: self.text_input())
self.LoginButton.clicked.connect(lambda: self.attempt_login())
self.NewAccountButton.clicked.connect(lambda: self.attempt_account_creation())
# When enter is pressed in input box.
self.UserInputBox.returnPressed.connect(lambda: self.text_input())
    def text_input(self):
        self.new_input = self.UserInputBox.text()
        print("User input submitted")
        self.text_to_display(self.new_input)
        self.UserInputBox.setText("")
        # Send to the server
        send_data(self.new_input)
def text_to_display(self, text):
self.textDisplay.append(text)
self.textDisplay.moveCursor(QtGui.QTextCursor.End)
''' #==============================================================================
Logging In
''' #==============================================================================
def attempt_account_creation(self):
# Username and password must be at least 5 characters long
if len(self.UsernameInput.text()) > 4 and \
len(self.PasswordInput.text()) > 4:
username = self.UsernameInput.text()
password = self.PasswordInput.text()
salt = bcrypt.gensalt(12)
password = password.encode('utf-8')
password = bcrypt.hashpw(password, salt)
password = password.decode()
salt = salt.decode()
send_data("CreateAccount#" + username + "#" + password + "#" + salt)
self.UsernameInput.clear()
self.PasswordInput.clear()
else:
self.text_to_display("ERROR! - Credentials must be longer than 4 characters")
def attempt_login(self):
if len(self.UsernameInput.text()) > 4 and \
len(self.PasswordInput.text()) > 4:
username = self.UsernameInput.text()
password = self.PasswordInput.text()
# Send Username and password across for checking
send_data("VerifySalt#" + username + "#" + password)
else:
self.text_to_display("ERROR! - Credentials must be longer than 4 characters")
def send_salted_password(self, received_salt):
username = self.UsernameInput.text()
password = self.PasswordInput.text().encode('utf-8')
salt = received_salt.encode('utf-8')
password = bcrypt.hashpw(password, salt)
password = password.decode()
# Send Username and password across for checking
send_data("CheckLogin#" + username + "#" + password)
def display_login_panel(self):
self.loginPanel.show()
def hide_login_panel(self):
self.loginPanel.hide()
def hide_map(self):
self.mapPanel.hide()
def open_close_map(self):
if not self.map_hidden:
self.mapPanel.hide()
self.map_hidden = True
elif self.map_hidden:
self.mapPanel.show()
self.map_hidden = False
''' #==============================================================================
Cryptography
''' #==============================================================================
def encrypt_info(data):
print("Encrypting DATA")
data2 = data.encode()
key = clientInfo.encryption_key
cipher = AES.new(key, AES.MODE_CBC)
ct_bytes = cipher.encrypt(pad(data2, AES.block_size))
iv = b64encode(cipher.iv).decode('utf-8')
ct = b64encode(ct_bytes).decode('utf-8')
result = json.dumps({'iv':iv, 'ciphertext':ct})
return result
def decrypt_info(data, key):
b64 = json.loads(data)
iv = b64decode(b64['iv'])
ct = b64decode(b64['ciphertext'])
cipher = AES.new(key, AES.MODE_CBC, iv)
result = unpad(cipher.decrypt(ct), AES.block_size)
return result.decode('utf-8')
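# Illustrative round trip (encrypt_info reads the key from the module-level
# clientInfo; decrypt_info takes it explicitly):
#   payload = encrypt_info("hello")
#   assert decrypt_info(payload, clientInfo.encryption_key) == "hello"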
def send_data(new_input):
if clientInfo.connected:
new_input = encrypt_info(new_input)
clientInfo.server_socket.send(new_input.encode())
else:
window.text_to_display("ERROR! - Client is not connected to a server")
''' #==============================================================================
Receive Thread
''' #==============================================================================
def receive_thread(clientInfo):
print("receive_thread running")
while clientInfo.connected is True:
try:
data_recv = clientInfo.server_socket.recv(4)
payload_size = int.from_bytes(data_recv, byteorder='big')
payload_data = clientInfo.server_socket.recv(payload_size)
payload_data = decrypt_info(payload_data, clientInfo.encryption_key)
data = json.loads(payload_data)
print("Time received:" + data['Time'] + "\nMessage:[" + data['Message'] + "]")
# Decrypts the data and checks the result
parse_incoming_data(data['Message'])
except socket.error:
print("Server lost")
window.text_to_display("Server lost")
clientInfo.connected = False
clientInfo.server_socket = None
''' #==============================================================================
Function to determine what to do with the data received from the server
''' #==============================================================================
def parse_incoming_data(data):
# Split the string into a list with max 2 items
# Index 0 should either be DISPLAY or SYSTEM
split_list = data.split(":", 1)
if split_list[0] == "DISPLAY":
# Display the rest of the received message on screen
window.text_to_display(split_list[1])
elif split_list[0] == "SYSTEM":
# Use to update background information
# Split the string into a list with max 2 items
# The first index of system list determines what is to be done with the second
system_list = split_list[1].split("#",1)
if system_list[0] == "SALT":
window.send_salted_password(system_list[1])
if system_list[0] == "LOGIN_SUCCESS":
window.hide_login_panel()
if system_list[0] == "OPEN_MAP":
window.open_close_map()
if system_list[0] == "UPDATE_ROOM":
window.locationBox.setText("Location: " + system_list[1])
if system_list[0] == "UPDATE_HERO_NAME":
window.heroNameBox.setText("Hero: " + system_list[1])
else:
print(split_list)
''' #==============================================================================
Main Thread
''' #==============================================================================
def main_thread(clientInfo):
print("main_thread running")
clientInfo.connected = False
# Server Connection
while (clientInfo.connected is False) and (clientInfo.running is True):
try:
if clientInfo.server_socket is None:
clientInfo.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if clientInfo.server_socket is not None:
if testing_client:
clientInfo.server_socket.connect(("127.0.0.1", 8222)) # Change when setting up server online
else:
clientInfo.server_socket.connect(("46.101.56.200", 9234))
clientInfo.connected = True
clientInfo.current_receive_thread = threading.Thread(target=receive_thread, args=(clientInfo,))
clientInfo.current_receive_thread.start()
print("Connected to Server.")
window.display_login_panel()
while clientInfo.connected is True:
time.sleep(1.0)
except socket.error:
print("No connection to Server.")
time.sleep(1.0)
clientInfoLock.acquire()
clientInfoLock.release()
''' #==============================================================================
Main
''' #==============================================================================
if __name__ == "__main__":
if running:
# Create qtApp
app = QtWidgets.QApplication(sys.argv)
# Create window
window = QtWindow()
window.show()
# main()
clientInfo.current_main_thread = threading.Thread(target=main_thread, args=(clientInfo,))
clientInfo.current_main_thread.start()
# Event loop
sys.exit(app.exec_())
|
import math
import sys
import time
import torch
import torchvision.models.detection.mask_rcnn
import utils
from coco_eval import CocoEvaluator
from coco_utils import get_coco_api_from_dataset
def train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq, scaler=None):
model.train()
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter("lr", utils.SmoothedValue(window_size=1, fmt="{value:.6f}"))
header = f"Epoch: [{epoch}]"
lr_scheduler = None
if epoch == 0:
warmup_factor = 1.0 / 1000
warmup_iters = min(1000, len(data_loader) - 1)
lr_scheduler = torch.optim.lr_scheduler.LinearLR(
optimizer, start_factor=warmup_factor, total_iters=warmup_iters
)
for images, targets in metric_logger.log_every(data_loader, print_freq, header):
images = list(image.to(device) for image in images)
targets = [{k: v.to(device) if isinstance(v, torch.Tensor) else v for k, v in t.items()} for t in targets]
with torch.cuda.amp.autocast(enabled=scaler is not None):
loss_dict = model(images, targets)
losses = sum(loss for loss in loss_dict.values())
# reduce losses over all GPUs for logging purposes
loss_dict_reduced = utils.reduce_dict(loss_dict)
losses_reduced = sum(loss for loss in loss_dict_reduced.values())
loss_value = losses_reduced.item()
if not math.isfinite(loss_value):
print(f"Loss is {loss_value}, stopping training")
print(loss_dict_reduced)
sys.exit(1)
optimizer.zero_grad()
if scaler is not None:
scaler.scale(losses).backward()
scaler.step(optimizer)
scaler.update()
else:
losses.backward()
optimizer.step()
if lr_scheduler is not None:
lr_scheduler.step()
metric_logger.update(loss=losses_reduced, **loss_dict_reduced)
metric_logger.update(lr=optimizer.param_groups[0]["lr"])
return metric_logger
def _get_iou_types(model):
model_without_ddp = model
if isinstance(model, torch.nn.parallel.DistributedDataParallel):
model_without_ddp = model.module
iou_types = ["bbox"]
if isinstance(model_without_ddp, torchvision.models.detection.MaskRCNN):
iou_types.append("segm")
if isinstance(model_without_ddp, torchvision.models.detection.KeypointRCNN):
iou_types.append("keypoints")
return iou_types
@torch.inference_mode()
def evaluate(model, data_loader, device):
n_threads = torch.get_num_threads()
# FIXME remove this and make paste_masks_in_image run on the GPU
torch.set_num_threads(1)
cpu_device = torch.device("cpu")
model.eval()
metric_logger = utils.MetricLogger(delimiter=" ")
header = "Test:"
coco = get_coco_api_from_dataset(data_loader.dataset)
iou_types = _get_iou_types(model)
coco_evaluator = CocoEvaluator(coco, iou_types)
for images, targets in metric_logger.log_every(data_loader, 100, header):
images = list(img.to(device) for img in images)
if torch.cuda.is_available():
torch.cuda.synchronize()
model_time = time.time()
outputs = model(images)
outputs = [{k: v.to(cpu_device) for k, v in t.items()} for t in outputs]
model_time = time.time() - model_time
res = {target["image_id"]: output for target, output in zip(targets, outputs)}
evaluator_time = time.time()
coco_evaluator.update(res)
evaluator_time = time.time() - evaluator_time
metric_logger.update(model_time=model_time, evaluator_time=evaluator_time)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
coco_evaluator.synchronize_between_processes()
# accumulate predictions from all images
coco_evaluator.accumulate()
coco_evaluator.summarize()
torch.set_num_threads(n_threads)
return coco_evaluator
|
# ---------------------------------------------fibonacci function--------------------------------------------------
"""
fibonacci for sum serise with first value 0 for n=0 and 1 for n =1
implimenting function using : recursion.
"""
def fibonacci(n):
if n==0:
return 0
elif n==1:
return 1
return fibonacci(n-1)+fibonacci(n-2)
#------------------------------------------------------lucas function -------------------------------------------------
"""
lucas for sum serise with first value 2 for n=0 and 1 for n =1
implimenting function using : recursion.
"""
def lucas(n):
if n==0:
return 2
elif n==1 :
return 1
return lucas(n-1)+lucas(n-2)
# ----------------------------------------------------sum_series function -----------------------------------------------------
"""
function cheack if the input fibonacci or lucas or the sum of both.......
n index use to make a serise
n1 for determine the base case for index 0
n2 for determine the base case for index 1
"""
def sum_series(n,n1=0,n2=1):
if n==0:
return n1
elif n==1:
return n2
return sum_series(n-1,n1,n2)+sum_series(n-2,n1,n2)
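# A quick sanity check (illustrative): the default bases reproduce fibonacci,
# and bases (2, 1) reproduce lucas.
if __name__ == "__main__":
    assert sum_series(7) == fibonacci(7) == 13
    assert sum_series(7, 2, 1) == lucas(7) == 29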
|
with open('graph_txt_files/txt_functions/invariants.txt') as f:
    property_names = [line.rstrip('\n') for line in f]
|
def newick_to_nexus(input_newick_path, output_nexus_path):
    with open(input_newick_path) as reader:
        newick = reader.readline().rstrip("\n")
    with open(output_nexus_path, "w") as writer:
        writer.write("#NEXUS\n")
        writer.write("BEGIN TREES;\n")
        writer.write("    TREE tree1 = " + newick + "\n")
        writer.write("END TREES;\n")
|
from django.conf.urls import url
from API import views
urlpatterns = [
url(r'^dealers/todos$', views.dealer_list),
url(r'^dealers/ciudad/(?P<ciudad>[\w\-]+)$', views.dealer_city),
url(r'^dealers/region/(?P<pk>[0-9]+)$', views.dealer_region),
url(r'^dealers/catalogo$', views.add_libro_catalogo),
url(r'^dealers/(?P<pk>[0-9]+)/catalogo$', views.dealer_catalogo),
url(r'^dealers/catalogo/todos$', views.all_dealer_catalogos),
url(r'^dealer/catalogo$', views.catalogo_avanzado.as_view()),
url(r'^dealers$', views.dealer_avanzado.as_view()),
url(r'^libros/todos$', views.libros_list),
url(r'^libros/genero/(?P<pk>[0-9]+)$', views.libros_genero),
url(r'^libros/crear$', views.create_libro),
url(r'^libros/ultimos/(?P<cantidad>[0-9]+)$', views.ultimos_libros),
url(r'^libros$', views.libros_avanzado.as_view()),
url(r'^libros/top/(?P<cantidad>[0-9]+)$', views.top_libros),
url(r'^regiones/todas$', views.region_list),
url(r'^ciudades/region/(?P<pk>[0-9]+)$', views.ciudades_region),
url(r'^direcciones/$', views.direccion_list),
    url(r'^generar_pago/subject=(?P<data_subject>[a-zA-Z0-9 ]+)&amount=(?P<data_amount>[\w\-]+)&payer_email=(?P<data_payer_email>[^@]+@[^@]+\.[^@]+)&pedido_id=(?P<pedido_id>[a-zA-Z0-9 ]+)&notify_url=(?P<data_notify_url>http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)&return_url=(?P<data_return_url>http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)&cancel_url=(?P<data_cancel_url>http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)$', views.generar_pago),
url(r'^pedidos$', views.pedido_avanzado.as_view()),
url(r'^pedidos/libros$', views.pedido_Libro_avanzado.as_view()),
url(r'^lector$', views.lector_avanzado.as_view()),
url(r'^generos/todos$', views.genero_list),
url(r'^generos/ventas/(?P<cantidad>[0-9]+)$', views.generos_vendidos),
url(r'^autores/todos$', views.autor_list),
url(r'^autores/crear$', views.create_autor),
url(r'^pedidos/todos$', views.all_pedidos),
url(r'^pedidos/crear$', views.create_pedido),
url(r'^pedidos/agregar/libro', views.add_libro_pedido),
url(r'^pedidos/libros/(?P<pk>[0-9]+)$', views.libros_pedido),
url(r'^usuarios/todos$', views.all_users),
url(r'^editoriales/todas$', views.all_editoriales),
url(r'^lectores/crear$', views.create_lector),
url(r'^lectores/pedidos', views.add_lector_pedido),
]
|
from .base import *
# from .local import *
# from .production import *
# try:
# from .local import *
# except:
# pass
# Design the video
# for teacher
# 1)creating a new profile
# Tips:
# load images that thanks tutor for good work done
# load images with your certificates
# 2)Search for openings
# 3)message the student
#   receive offers/counter offers/apply/confirm
# Tips:
# Discuss the dates of availability of the two parties
# discuss the prices of the hourly rates and the payment scheme
# tutor to be reminded that he should bring his certificates for his first session
# 4)Check out extra features, advertising, business analytics and content management
# for parents/student
# 1)create a new profile
# 2)To create a 2 new job openings
# 3)search for the teachers - browse the reviews for each teacher
# 4)Message teachers
#   receive offers/counter offers/apply/confirm
# Tips:
# Discuss the dates of availability of the two parties
# discuss the prices of the hourly rates and the payment scheme
# tutor to be reminded that he should bring his certificates for his first session
#mission statement
#we are here to help and empower people, specifically entrepreneurs. We think they are the future, and the creativity
#harnessed by this group of people is potentially unlimited.
|
class CQueue:
def __init__(self):
self.input_stack = []
self.output_stack = []
def append_tail(self, value: int) -> None:
self.input_stack.append(value)
def delete_head(self) -> int:
if not self.output_stack:
while self.input_stack:
self.output_stack.append(self.input_stack.pop())
return self.output_stack.pop() if self.output_stack else -1
q1 = CQueue()
q1.append_tail(3)
assert q1.delete_head() == 3
assert q1.delete_head() == -1
q2 = CQueue()
assert q2.delete_head() == -1
q2.append_tail(5)
q2.append_tail(2)
assert q2.delete_head() == 5
assert q2.delete_head() == 2
|
#!/usr/bin/env python3
import os
import numpy as np
import pandas as pd
# SETTINGS
cd = os.path.join(os.path.dirname(__file__))
pd.set_option('display.width', 10000)
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
# INITIAL DATA FRAMES
gas_df = pd.read_csv(os.path.join(cd, 'EIA_Retail_Gasoline_Prices.csv'), parse_dates=[0])
print(gas_df.head(10))
dates_df = pd.DataFrame({'Date': pd.date_range(start='2001-01-01', end='2018-09-30')})
print(dates_df.head(10))
## CONVERT WEEK DATES STRINGS TO DATES
for wk in ['Week1_EndDate', 'Week2_EndDate', 'Week3_EndDate', 'Week4_EndDate', 'Week5_EndDate']:
gas_df[wk] = gas_df[wk].where(gas_df[wk].str.strip() == '',
gas_df[wk].str.strip() + '/' + \
gas_df['Month_Year'].dt.year.astype('str')
)
gas_df[wk] = gas_df[wk].where(gas_df[wk].str.strip() != '',
np.nan)
gas_df[wk] = pd.to_datetime(gas_df[wk], format='%m/%d/%Y')
print(gas_df.head(10))
# CLEAN UP VARYING WEEK 5 COLUMN
gas_df['Week5_Value'] = gas_df['Week5_Value'].where(gas_df['Week5_Value'].str.strip() != '',
np.nan).astype('float')
gas_df = gas_df.set_index('Month_Year')
# BUILD LIST OF WEEK DFS
df_list = [(gas_df.filter(like=str(i))
.rename({'Week'+str(i)+'_EndDate': 'Date',
'Week'+str(i)+'_Value': 'Gas_Price'},
axis='columns')
.query('Gas_Price > 0')
.reset_index(drop=True)
) for i in range(1,6)]
for df in df_list:
print(df.head())
print()
# APPEND ALL WEEK DFS
final_df = pd.concat(df_list, ignore_index=True).sort_values('Date')
# EXPAND DATA FRAME FOR DAILY RECORDS
final_df = pd.merge(dates_df, final_df, on='Date', how='left')
print(final_df.head(20))
# FORWARD FILL WEEKLY PRICES
final_df['Gas_Price'] = final_df['Gas_Price'].ffill()
print(final_df.head(20))
# EXPORT TO CSV
final_df.to_csv(os.path.join(cd, 'US_Gas_Prices.csv'), index=False)
|
from django import forms
class Mpesaform(forms.Form):
phone = forms.CharField(widget=forms.NumberInput(attrs={
'class': 'form-control', 'placeholder':'254725696052'
}))
|
'''
:Date: Jul 1, 2011
:authors: Gary Belvin
'''
from binascii import a2b_hex
from charm.schemes.pkenc.pkenc_rsa import RSA_Enc, RSA_Sig
from charm.toolbox.conversion import Conversion
from charm.toolbox.securerandom import WeakRandom
import unittest
from random import Random
debug = False
class Test(unittest.TestCase):
def testRSAEnc(self):
rsa = RSA_Enc()
(pk, sk) = rsa.keygen(1024)
#m = integer(34567890981234556498) % pk['N']
m = b'This is a test'
c = rsa.encrypt(pk, m)
orig_m = rsa.decrypt(pk, sk, c)
assert m == orig_m, 'o: =>%s\nm: =>%s' % (orig_m, m)
def testRSAVector(self):
# ==================================
# Example 1: A 1024-bit RSA Key Pair
# ==================================
# ------------------------------
# Components of the RSA Key Pair
# ------------------------------
# RSA modulus n:
n = a2b_hex('\
bb f8 2f 09 06 82 ce 9c 23 38 ac 2b 9d a8 71 f7 \
36 8d 07 ee d4 10 43 a4 40 d6 b6 f0 74 54 f5 1f \
b8 df ba af 03 5c 02 ab 61 ea 48 ce eb 6f cd 48 \
76 ed 52 0d 60 e1 ec 46 19 71 9d 8a 5b 8b 80 7f \
af b8 e0 a3 df c7 37 72 3e e6 b4 b7 d9 3a 25 84 \
ee 6a 64 9d 06 09 53 74 88 34 b2 45 45 98 39 4e \
e0 aa b1 2d 7b 61 a5 1f 52 7a 9a 41 f6 c1 68 7f \
e2 53 72 98 ca 2a 8f 59 46 f8 e5 fd 09 1d bd cb '.replace(' ',''))
n = Conversion.OS2IP(n, True)
# RSA public exponent e:
e = a2b_hex('11')
e = Conversion.OS2IP(e, True)
# Prime p:
p = a2b_hex('\
ee cf ae 81 b1 b9 b3 c9 08 81 0b 10 a1 b5 60 01 \
99 eb 9f 44 ae f4 fd a4 93 b8 1a 9e 3d 84 f6 32 \
12 4e f0 23 6e 5d 1e 3b 7e 28 fa e7 aa 04 0a 2d \
5b 25 21 76 45 9d 1f 39 75 41 ba 2a 58 fb 65 99 '.replace(' ',''))
p = Conversion.OS2IP(p, True)
# Prime q:
q = a2b_hex('\
c9 7f b1 f0 27 f4 53 f6 34 12 33 ea aa d1 d9 35 \
3f 6c 42 d0 88 66 b1 d0 5a 0f 20 35 02 8b 9d 86 \
98 40 b4 16 66 b4 2e 92 ea 0d a3 b4 32 04 b5 cf \
ce 33 52 52 4d 04 16 a5 a4 41 e7 00 af 46 15 03'.replace(' ',''))
q = Conversion.OS2IP(q, True)
phi_N = (p - 1) * (q - 1)
e = e % phi_N
d = e ** -1
# ----------------------------------
# Step-by-step RSAES-OAEP Encryption
# ----------------------------------
# Message to be encrypted:
M = a2b_hex('\
d4 36 e9 95 69 fd 32 a7 c8 a0 5b bc 90 d3 2c 49'.replace(' ',''))
lhash = a2b_hex('\
da 39 a3 ee 5e 6b 4b 0d 32 55 bf ef 95 60 18 90 \
af d8 07 09'.replace(' ', ''))
# DB:
db = a2b_hex('\
da 39 a3 ee 5e 6b 4b 0d 32 55 bf ef 95 60 18 90 \
af d8 07 09 00 00 00 00 00 00 00 00 00 00 00 00 \
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 \
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 \
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 \
00 00 00 00 00 00 00 00 00 00 01 d4 36 e9 95 69 \
fd 32 a7 c8 a0 5b bc 90 d3 2c 49'.replace(' ', ''))
# Seed:
seed = a2b_hex('\
aa fd 12 f6 59 ca e6 34 89 b4 79 e5 07 6d de c2 \
f0 6c b5 8f '.replace(' ',''))
# dbMask:
dbmask = a2b_hex('\
06 e1 de b2 36 9a a5 a5 c7 07 d8 2c 8e 4e 93 24 \
8a c7 83 de e0 b2 c0 46 26 f5 af f9 3e dc fb 25 \
c9 c2 b3 ff 8a e1 0e 83 9a 2d db 4c dc fe 4f f4 \
77 28 b4 a1 b7 c1 36 2b aa d2 9a b4 8d 28 69 d5 \
02 41 21 43 58 11 59 1b e3 92 f9 82 fb 3e 87 d0 \
95 ae b4 04 48 db 97 2f 3a c1 4e af f4 9c 8c 3b \
7c fc 95 1a 51 ec d1 dd e6 12 64'.replace(' ',''))
# maskedDB:
maskeddb = a2b_hex('\
dc d8 7d 5c 68 f1 ee a8 f5 52 67 c3 1b 2e 8b b4 \
25 1f 84 d7 e0 b2 c0 46 26 f5 af f9 3e dc fb 25 \
c9 c2 b3 ff 8a e1 0e 83 9a 2d db 4c dc fe 4f f4 \
77 28 b4 a1 b7 c1 36 2b aa d2 9a b4 8d 28 69 d5 \
02 41 21 43 58 11 59 1b e3 92 f9 82 fb 3e 87 d0 \
95 ae b4 04 48 db 97 2f 3a c1 4f 7b c2 75 19 52 \
81 ce 32 d2 f1 b7 6d 4d 35 3e 2d '.replace(' ',''))
# seedMask:
seedmask = a2b_hex('\
41 87 0b 5a b0 29 e6 57 d9 57 50 b5 4c 28 3c 08 \
72 5d be a9 '.replace(' ',''))
# maskedSeed:
maskedseed = a2b_hex('\
eb 7a 19 ac e9 e3 00 63 50 e3 29 50 4b 45 e2 ca \
82 31 0b 26 '.replace(' ',''))
# EM = 00 || maskedSeed || maskedDB:
em = a2b_hex('\
00 eb 7a 19 ac e9 e3 00 63 50 e3 29 50 4b 45 e2 \
ca 82 31 0b 26 dc d8 7d 5c 68 f1 ee a8 f5 52 67 \
c3 1b 2e 8b b4 25 1f 84 d7 e0 b2 c0 46 26 f5 af \
f9 3e dc fb 25 c9 c2 b3 ff 8a e1 0e 83 9a 2d db \
4c dc fe 4f f4 77 28 b4 a1 b7 c1 36 2b aa d2 9a \
b4 8d 28 69 d5 02 41 21 43 58 11 59 1b e3 92 f9 \
82 fb 3e 87 d0 95 ae b4 04 48 db 97 2f 3a c1 4f \
7b c2 75 19 52 81 ce 32 d2 f1 b7 6d 4d 35 3e 2d '.replace(' ',''))
# Encryption:
enc = a2b_hex('\
12 53 e0 4d c0 a5 39 7b b4 4a 7a b8 7e 9b f2 a0 \
39 a3 3d 1e 99 6f c8 2a 94 cc d3 00 74 c9 5d f7 \
63 72 20 17 06 9e 52 68 da 5d 1c 0b 4f 87 2c f6 \
53 c1 1d f8 23 14 a6 79 68 df ea e2 8d ef 04 bb \
6d 84 b1 c3 1d 65 4a 19 70 e5 78 3b d6 eb 96 a0 \
24 c2 ca 2f 4a 90 fe 9f 2e f5 c9 c1 40 e5 bb 48 \
da 95 36 ad 87 00 c8 4f c9 13 0a de a7 4e 55 8d \
51 a7 4d df 85 d8 b5 0d e9 68 38 d6 06 3e 09 55 '.replace(' ',''))
rsa = RSA_Enc()
pk = { 'N':n, 'e':e }
sk = { 'phi_N':phi_N, 'd':d , 'N': n}
c = rsa.encrypt(pk, M, seed)
C = Conversion.IP2OS(c)
if debug:
print("RSA OEAP step by step")
print("Label L = empty string")
print("lHash = ", lhash)
print("DB = ", db)
print("seed = ", seed)
print("dbMask = ", dbmask)
print("maskedDB = ", maskeddb)
print("seedMask = ", seedmask)
print("maskedSeed = ", maskedseed)
print("EM = ", em)
assert C == enc
def testRSASig(self):
length = Random().randrange(1, 1024)
length = 128
M = WeakRandom().myrandom(length, True)
rsa = RSA_Sig()
(pk, sk) = rsa.keygen(1024)
S = rsa.sign(sk, M)
assert rsa.verify(pk, M, S)
def testPSSVector(self):
# ==================================
# Example 1: A 1024-bit RSA Key Pair
# ==================================
# ------------------------------
# Components of the RSA Key Pair
# ------------------------------
# RSA modulus n:
n = a2b_hex('\
a2 ba 40 ee 07 e3 b2 bd 2f 02 ce 22 7f 36 a1 95 \
02 44 86 e4 9c 19 cb 41 bb bd fb ba 98 b2 2b 0e \
57 7c 2e ea ff a2 0d 88 3a 76 e6 5e 39 4c 69 d4 \
b3 c0 5a 1e 8f ad da 27 ed b2 a4 2b c0 00 fe 88 \
8b 9b 32 c2 2d 15 ad d0 cd 76 b3 e7 93 6e 19 95 \
5b 22 0d d1 7d 4e a9 04 b1 ec 10 2b 2e 4d e7 75 \
12 22 aa 99 15 10 24 c7 cb 41 cc 5e a2 1d 00 ee \
b4 1f 7c 80 08 34 d2 c6 e0 6b ce 3b ce 7e a9 a5 '.replace(' ',''))
n = Conversion.OS2IP(n, True)
# RSA public exponent e:
e = a2b_hex('01 00 01'.replace(' ',''))
e = Conversion.OS2IP(e, True)
# Prime p:
p = a2b_hex('\
d1 7f 65 5b f2 7c 8b 16 d3 54 62 c9 05 cc 04 a2 \
6f 37 e2 a6 7f a9 c0 ce 0d ce d4 72 39 4a 0d f7 \
43 fe 7f 92 9e 37 8e fd b3 68 ed df f4 53 cf 00 \
7a f6 d9 48 e0 ad e7 57 37 1f 8a 71 1e 27 8f 6b '.replace(' ',''))
p = Conversion.OS2IP(p, True)
# Prime q:
q = a2b_hex('\
c6 d9 2b 6f ee 74 14 d1 35 8c e1 54 6f b6 29 87 \
53 0b 90 bd 15 e0 f1 49 63 a5 e2 63 5a db 69 34 \
7e c0 c0 1b 2a b1 76 3f d8 ac 1a 59 2f b2 27 57 \
46 3a 98 24 25 bb 97 a3 a4 37 c5 bf 86 d0 3f 2f'.replace(' ',''))
q = Conversion.OS2IP(q, True)
phi_N = (p - 1) * (q - 1)
e = e % phi_N
d = e ** -1
# ---------------------------------
# Step-by-step RSASSA-PSS Signature
# ---------------------------------
# Message to be signed:
m = a2b_hex('\
85 9e ef 2f d7 8a ca 00 30 8b dc 47 11 93 bf 55 \
bf 9d 78 db 8f 8a 67 2b 48 46 34 f3 c9 c2 6e 64 \
78 ae 10 26 0f e0 dd 8c 08 2e 53 a5 29 3a f2 17 \
3c d5 0c 6d 5d 35 4f eb f7 8b 26 02 1c 25 c0 27 \
12 e7 8c d4 69 4c 9f 46 97 77 e4 51 e7 f8 e9 e0 \
4c d3 73 9c 6b bf ed ae 48 7f b5 56 44 e9 ca 74 \
ff 77 a5 3c b7 29 80 2f 6e d4 a5 ff a8 ba 15 98 \
90 fc '.replace(' ',''))
# mHash:
mHash = a2b_hex('\
37 b6 6a e0 44 58 43 35 3d 47 ec b0 b4 fd 14 c1 \
10 e6 2d 6a'.replace(' ',''))
# salt:
salt = a2b_hex('\
e3 b5 d5 d0 02 c1 bc e5 0c 2b 65 ef 88 a1 88 d8 \
3b ce 7e 61'.replace(' ',''))
# M':
mPrime = a2b_hex('\
00 00 00 00 00 00 00 00 37 b6 6a e0 44 58 43 35 \
3d 47 ec b0 b4 fd 14 c1 10 e6 2d 6a e3 b5 d5 d0 \
02 c1 bc e5 0c 2b 65 ef 88 a1 88 d8 3b ce 7e 61'.replace(' ',''))
# H:
H = a2b_hex('\
df 1a 89 6f 9d 8b c8 16 d9 7c d7 a2 c4 3b ad 54 \
6f be 8c fe'.replace(' ',''))
# DB:
DB = a2b_hex('\
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 \
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 \
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 \
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 \
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 \
00 00 00 00 00 00 01 e3 b5 d5 d0 02 c1 bc e5 0c \
2b 65 ef 88 a1 88 d8 3b ce 7e 61'.replace(' ',''))
# dbMask:
dbMask = a2b_hex('\
66 e4 67 2e 83 6a d1 21 ba 24 4b ed 65 76 b8 67 \
d9 a4 47 c2 8a 6e 66 a5 b8 7d ee 7f bc 7e 65 af \
50 57 f8 6f ae 89 84 d9 ba 7f 96 9a d6 fe 02 a4 \
d7 5f 74 45 fe fd d8 5b 6d 3a 47 7c 28 d2 4b a1 \
e3 75 6f 79 2d d1 dc e8 ca 94 44 0e cb 52 79 ec \
d3 18 3a 31 1f c8 97 39 a9 66 43 13 6e 8b 0f 46 \
5e 87 a4 53 5c d4 c5 9b 10 02 8d'.replace(' ',''))
# maskedDB:
maskedDB = a2b_hex('\
66 e4 67 2e 83 6a d1 21 ba 24 4b ed 65 76 b8 67 \
d9 a4 47 c2 8a 6e 66 a5 b8 7d ee 7f bc 7e 65 af \
50 57 f8 6f ae 89 84 d9 ba 7f 96 9a d6 fe 02 a4 \
d7 5f 74 45 fe fd d8 5b 6d 3a 47 7c 28 d2 4b a1 \
e3 75 6f 79 2d d1 dc e8 ca 94 44 0e cb 52 79 ec \
d3 18 3a 31 1f c8 96 da 1c b3 93 11 af 37 ea 4a \
75 e2 4b db fd 5c 1d a0 de 7c ec'.replace(' ',''))
# Encoded message EM:
EM = a2b_hex('\
66 e4 67 2e 83 6a d1 21 ba 24 4b ed 65 76 b8 67 \
d9 a4 47 c2 8a 6e 66 a5 b8 7d ee 7f bc 7e 65 af \
50 57 f8 6f ae 89 84 d9 ba 7f 96 9a d6 fe 02 a4 \
d7 5f 74 45 fe fd d8 5b 6d 3a 47 7c 28 d2 4b a1 \
e3 75 6f 79 2d d1 dc e8 ca 94 44 0e cb 52 79 ec \
d3 18 3a 31 1f c8 96 da 1c b3 93 11 af 37 ea 4a \
75 e2 4b db fd 5c 1d a0 de 7c ec df 1a 89 6f 9d \
8b c8 16 d9 7c d7 a2 c4 3b ad 54 6f be 8c fe bc'.replace(' ',''))
# Signature S, the RSA decryption of EM:
S = a2b_hex('\
8d aa 62 7d 3d e7 59 5d 63 05 6c 7e c6 59 e5 44 \
06 f1 06 10 12 8b aa e8 21 c8 b2 a0 f3 93 6d 54 \
dc 3b dc e4 66 89 f6 b7 95 1b b1 8e 84 05 42 76 \
97 18 d5 71 5d 21 0d 85 ef bb 59 61 92 03 2c 42 \
be 4c 29 97 2c 85 62 75 eb 6d 5a 45 f0 5f 51 87 \
6f c6 74 3d ed dd 28 ca ec 9b b3 0e a9 9e 02 c3 \
48 82 69 60 4f e4 97 f7 4c cd 7c 7f ca 16 71 89 \
71 23 cb d3 0d ef 5d 54 a2 b5 53 6a d9 0a 74 7e'.replace(' ',''))
if debug:
print("PSS Test Step by Step")
print("mHash = Hash(M)", mHash)
print("salt = random ", salt)
print("M' = Padding || mHash || salt", mPrime)
print("H = Hash(M')", H)
print("DB = Padding || salt", DB)
print("dbMask = MGF(H, length(DB))", dbMask)
print("maskedDB = DB xor dbMask", maskedDB)
print("EM = maskedDB || H || 0xbc", EM)
print("S = RSA decryption of EM", S)
rsa = RSA_Sig()
sk = { 'phi_N':phi_N, 'd':d , 'N': n}
sig = rsa.sign(sk, m, salt)
assert S == sig
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
#!/usr/bin/env python
__author__ = "Master Computer Vision. Team 02"
__license__ = "M6 Video Analysis"
# Import libraries
import os
import cv2
import numpy as np
def remove_dots(img, se_size):
"""
Description: remove dots
If se_size not specified, it is assumed to be in the center
Input: img, se_size
Output: img
"""
# Create kernel as ellipse
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (se_size, se_size))
# Apply opening, obtained by the erosion of an image followed by a dilation
img = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
return img
def remove_vertical_lines(img, se_size):
    """
    Description: remove thin vertical lines considered as noise
    Input: img, se_size (structuring element height)
    Output: img
    """
# Create kernel as rectangular box, with only one column
# to adjust structuring element to vertical lines considered as noise
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, se_size))
# Apply closing, obtained by the dilation of an image followed by an erosion
img = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)
return img
def remove_horizontal_lines(img, se_size):
"""
Description: remove horizontal lines
If se_size not specified, it is assumed to be in the center
Input: img, se_size
Output: img
"""
# Create kernel as rectangular box, with only one row
# to adjust structuring element to horizontal lines considered as noise
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (se_size, 1))
# Apply closing, obtained by the dilation of an image followed by an erosion
img = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)
return img
def dilation(img, se_size):
kernel = np.ones((se_size,se_size),np.uint8)
img = cv2.dilate(img.astype(np.float32), kernel, iterations=1)
return img
def erosion(img, se_size):
kernel = np.ones((se_size,se_size),np.uint8)
img = cv2.erode(img.astype(np.float32), kernel, iterations=1)
return img
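# Minimal usage sketch (synthetic binary image; the se_size values are
# illustrative):
#   noisy = np.zeros((100, 100), np.uint8)
#   noisy[50, 50] = 255               # a single isolated dot
#   clean = remove_dots(noisy, 3)     # the opening removes it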
|
#!/usr/bin/env python
import sys
import rospy
import std_msgs.msg
import geometry_msgs.msg
from core_api.srv import *
global_namespace = ''
def setpoint_local_position(lx, ly, lz, yaw=0.0, tolerance= 1.0, async = False, relative= False, yaw_valid= False, body_frame= False):
global global_namespace
rospy.wait_for_service('/'+ global_namespace +'/navigation/position_set')
try:
handle = rospy.ServiceProxy('/'+ global_namespace +'/navigation/position_set', PositionSet)
# building message structure
header_msg = std_msgs.msg.Header(1,rospy.Time(0.0,0.0),'a')
twist = geometry_msgs.msg.Twist(geometry_msgs.msg.Vector3(lx,ly,lz),geometry_msgs.msg.Vector3(0.0,0.0,yaw))
twiststamped_msg= geometry_msgs.msg.TwistStamped(header_msg, twist)
req_msg = PositionSetRequest(twiststamped_msg, tolerance, async, relative, yaw_valid, body_frame)
resp = handle(req_msg)
return resp.success
except rospy.ServiceException, e:
rospy.logerr("pos set service call failed %s", e)
return None
def make_square(side_length):
global global_namespace
#first get the global namespace to call the subsequent services
#wait for service to come up
rospy.wait_for_service('/get_global_namespace')
try:
res = rospy.ServiceProxy('/get_global_namespace', ParamGetGlobalNamespace)
op = res()
global_namespace = str(op.param_info.param_value)
except rospy.ServiceException, e:
rospy.logerr("global namespace service not available", e)
#cannot continue without global namespace
return None
    # Next take off to an altitude of 3.0 meters
rospy.wait_for_service('/'+ global_namespace +'/navigation/take_off')
try:
handle = rospy.ServiceProxy('/'+ global_namespace +'/navigation/take_off', TakeOff)
resp = handle(takeoff_alt=3.0)
except rospy.ServiceException, e:
rospy.logerr("takeoff service call failed %s", e)
# cannot continue without taking off
return None
print "Took off successfully"
# Then call the position set service for each edge of a square shaped trajectory
if setpoint_local_position(side_length,0,-3.0):
print "Successfully reached 1st waypoint"
else:
rospy.logerr("Failed to set position")
return None
if setpoint_local_position(side_length,side_length,-3.0):
print "Successfully reached 2nd waypoint"
else:
rospy.logerr("Failed to set position")
return None
if setpoint_local_position(0,side_length,-3.0):
print "Successfully reached 3rd waypoint"
else:
rospy.logerr("Failed to set position")
return None
if setpoint_local_position(0,0,-3.0):
print "Successfully reached 4th waypoint"
else:
rospy.logerr("Failed to set position")
return None
# Finally land the drone
rospy.wait_for_service('/'+ global_namespace +'/navigation/land')
try:
handle = rospy.ServiceProxy('/'+ global_namespace +'/navigation/land', Land)
resp = handle(False)
except rospy.ServiceException, e:
rospy.logerr("land service call failed %s", e)
return None
print "Landed Successfully. Exiting script."
if __name__ == "__main__":
    if len(sys.argv) == 2:
        make_square(float(sys.argv[1]))
    else:
        print "This node needs the side_length of the square (float) as an argument"
        sys.exit(1)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from requests import get
import textwrap
from bs4 import BeautifulSoup
from dateutil.parser import parse
from dateutil import relativedelta
from dateutil.tz import *
import datetime
from clize import clize, run  # used to pass command-line arguments to the script
import pickle
import notify2
import os
import sys
# Function inspired by http://www.developpez.net/forums/d448416/autres-langages/python-zope/general-python/mode-console-couleur-shell/
def couleur(name):
    """ Colors for display in the terminal """
colors = {
"default" : "\033[0m",
# style
"bold" : "\033[1m",
"underline" : "\033[4m",
"blink" : "\033[5m",
"reverse" : "\033[7m",
"concealed" : "\033[8m",
        # text colors
"black" : "\033[30m",
"red" : "\033[31m",
"green" : "\033[32m",
"yellow" : "\033[33m",
"blue" : "\033[34m",
"magenta" : "\033[35m",
"cyan" : "\033[36m",
"white" : "\033[37m",
        # background colors
"on_black" : "\033[40m",
"on_red" : "\033[41m",
"on_green" : "\033[42m",
"on_yellow" : "\033[43m",
"on_blue" : "\033[44m",
"on_magenta" : "\033[45m",
"on_cyan" : "\033[46m",
"on_white" : "\033[47m"
}
return colors[name]
def conky_color(name):
    """ Colors for display in conky """
colors = {
"red" : "$color5",
"yellow" : "$color3",
"default": "$color2"
}
return colors[name]
# Decorator that parses the command-line options
@clize(alias={'nbr': ('n',), 'conky': ('c',), 'length': ('l',), 'display': ('d',)})
def main(url, nbr=5, conky=False, length=100, display=False) :
os.chdir(os.path.dirname(sys.argv[0]))
    try:
        notifs = get(url).text
    except Exception:
        print("Not connected")
        return
soup = BeautifulSoup(notifs, "xml")
now = datetime.datetime.now(tzutc())
storage = list()
    # processing for each item
for item in soup.findAll('item')[0:nbr]:
        item.title.string = item.title.string.replace('\n\n', ' ')
        # Add an offset to the hour from the XML feed, which is not correct;
        # this is done for the date of every notification
        #TODO: add an option for the hour offset
        date = parse(item.pubDate.string) + relativedelta.relativedelta(hours=+3)
hour = date.hour
minute = date.minute
if hour < 10:
hour = "0" + str(hour)
else:
hour = str(hour)
if minute < 10:
minute = "0" + str(minute)
else:
minute = str(minute)
        pub = format_chaine(item.title.string, length)
        # If we want to display something
        if display:
            # If the script is run by a conky
            if conky:
                image = os.path.dirname(sys.argv[0]) + "/images/FaceBook_48x48.png"
                # different colors depending on the publication date
                # red if the notification is less than 2 hours old
                if now - relativedelta.relativedelta(minutes =+ 60) < date:
                    print(conky_color('red') + "- " + hour + ":" + minute + " " + pub + conky_color('default'))
                # yellow if less than 5 hours old
                elif now - relativedelta.relativedelta(minutes =+ 220) < date:
                    print(conky_color('yellow') + "- " + hour + ":" + minute + " " + pub + conky_color('default'))
                else:
                    # print the title
                    print("- " + pub)
            # If the script is run from a shell
            else:
                image = os.path.abspath(os.path.curdir + "/images/FaceBook_48x48.png")
                # Different colors depending on the publication date
                # Red if the notification is less than 2 hours old. Beware,
                # there is a one-hour offset due to facebook's clock
                if now - relativedelta.relativedelta(minutes =+ 60) < date:
                    print(couleur('red') + "- " + hour + ":" + minute + " " + pub + couleur('default'))
                # Yellow if less than 5 hours old
                elif now - relativedelta.relativedelta(minutes =+ 220) < date:
                    print(couleur('yellow') + "- " + hour + ":" + minute + " " + pub + couleur('default'))
                else:
                    # print the title
                    print("- " + pub)
else:
image = os.path.abspath(os.path.curdir + "/images/FaceBook_48x48.png")
storage.append(pub)
    #Load the temporary file
if os.path.isfile('temp') :
with open('temp', 'rb') as my_file:
depickler = pickle.Unpickler(my_file)
#decoded = json.load(my_file)
decoded = depickler.load()
else:
decoded = ""
if not notify2.init("Facebook notifs"):
print("Failed to initialize notifications")
return
    #Notify the pubs that are not yet in the temporary file
for each_pub in storage:
if each_pub not in decoded:
n = notify2.Notification("Facebook", each_pub, image)
if not n.show():
print("Failed to send notification")
return
    #Store all the pubs in the temporary file,
    #for comparison on the next run
with open('temp', 'wb') as my_file:
pickler = pickle.Pickler(my_file)
pickler.dump(storage)
def format_chaine(publi, char):
    output = ""
    text = textwrap.fill(publi, char, replace_whitespace=True)
    text = text.split('\n')
    for indice, ligne in enumerate(text):
        if indice == 0:
            ligne += "\n"
        else:
            # continuation lines are indented
            ligne = "   " + ligne + "\n"
        output = output + ligne
    return output
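# Illustrative: format_chaine(u"a long notification title that wraps", 20)
# returns the title wrapped at 20 columns, with continuation lines indented.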
if __name__ == "__main__":
run(main)
|
import ssh
server = ssh.Connection(host='10.100.52.148', username='mayank', private_key='mayank')
result = server.execute('ls')
print(result)
|
cdef = []
cdef.append(
"""
typedef struct _MonoDomain MonoDomain;
typedef struct _MonoAssembly MonoAssembly;
typedef struct _MonoImage MonoImage;
typedef struct _MonoMethodDesc MonoMethodDesc;
typedef struct _MonoMethod MonoMethod;
typedef struct _MonoObject MonoObject;
MonoDomain* mono_jit_init(const char *root_domain_name);
void mono_jit_cleanup(MonoDomain *domain);
MonoAssembly* mono_domain_assembly_open(MonoDomain *domain, const char *name);
MonoImage* mono_assembly_get_image(MonoAssembly *assembly);
void mono_config_parse(const char* path);
MonoMethodDesc* mono_method_desc_new(const char* name, bool include_namespace);
MonoMethod* mono_method_desc_search_in_image(MonoMethodDesc *method_desc, MonoImage *image);
void mono_method_desc_free(MonoMethodDesc *method_desc);
MonoObject* mono_runtime_invoke(MonoMethod *method, void *obj, void **params, MonoObject **exc);
void* mono_object_unbox(MonoObject *object);
"""
)
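# These declarations are meant to be fed to an FFI layer; a minimal sketch
# using cffi (an assumption -- the shared-library name below is hypothetical):
#   import cffi
#   ffi = cffi.FFI()
#   ffi.cdef("".join(cdef))
#   mono = ffi.dlopen("libmono-2.0.so")
#   domain = mono.mono_jit_init(b"root")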
|
# -*- coding: utf-8 -*-
#
# Last modification: 4 July. 2019
# Author: Rayanne Souza
import numpy as np
import matplotlib.pyplot as plt
import itertools
from keras.models import load_model
from sklearn.metrics import classification_report
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
from sklearn.metrics import confusion_matrix
# Plot based on https://www.kaggle.com/danbrice/keras-plot-history-full-report-and-grid-search
def plot_confusion_matrix(cm, classes,
normalize=False,
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
title='Normalized confusion matrix'
else:
title='Confusion matrix'
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
print('save_confusion_matrix')
plt.gcf().subplots_adjust(bottom=0.15)
plt.savefig('matrix.jpg')
plt.show()
# Loading model
model = load_model('my_model.h5')
# Loading test set
x_test = np.load('test/x_test.npy')
y_test = np.load('test/y_test.npy')
# Making prediction
y_pred = model.predict(x_test)
pred_bool = np.argmax(y_pred, axis=1)    # convert one-hot predictions to class indices
y_test_bool = np.argmax(y_test, axis=1)  # convert one-hot labels to class indices
cmatrix = confusion_matrix(y_test_bool, pred_bool) # compute the confusion matrix
# # Evaluating network performance
plot_labels = ['akiec', 'bcc', 'bkl', 'df', 'nv', 'vasc','mel']
plot_confusion_matrix(cmatrix, plot_labels) # plot the confusion matrix
print(classification_report(y_test_bool, pred_bool))
print("Test_accuracy = %f ; Test_loss = %f" % (test_acc, test_loss))
print("Val_accuracy = %f ; Val_loss = %f" % (val_acc, val_loss))
|
# -*- coding: utf-8 -*-
import scrapy
from tools.items import DianpingItem
from datetime import datetime
# from scrapy_tools.storage.rabbitmq import RabbitMQSignal
"""
'append',
'count',
'css',
'extend',
'extract',
'extract_first',
'extract_unquoted',
'index',
'insert',
'pop',
're',
're_first',
'remove',
'reverse',
'select',
'sort',
'x',
'xpath'
"""
class DianpingSpider(scrapy.Spider):
name = "dianping"
allowed_domains = ["dianping.com"]
start_urls = ['http://www.dianping.com/search/category/2/10/r2578']
custom_settings = {
'EXTENSIONS': {
'scrapy_tools.storage.rabbitmq.RabbitMQSignal': 200
}, 'DEFAULT_REQUEST_HEADERS': {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'zh-CN,zh;q=0.8,en;q=0.6',
'Cache-Control': 'max-age=0',
'Connection': 'keep-alive',
'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8',
'Host': 'www.dianping.com',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.90 Safari/537.36',
}
}
def parse(self, response):
item = DianpingItem()
item['title'] = 'dian ping'
item['date'] = datetime.today()
yield item
|
"""update
Revision ID: 48deb381adcc
Revises: 18076d8ef708
Create Date: 2015-12-05 21:15:57.393419
"""
# revision identifiers, used by Alembic.
revision = '48deb381adcc'
down_revision = '18076d8ef708'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('posts', sa.Column('body_html', sa.Text(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('posts', 'body_html')
### end Alembic commands ###
|
#!/usr/bin/env python
"""
pyjld.builder.tools
@author: Jean-Lou Dupont
"""
__author__ = "Jean-Lou Dupont"
__email = "python (at) jldupont.com"
__fileid = "$Id: tools.py 24 2009-04-02 01:54:53Z jeanlou.dupont $"
__all__ = ['findPackage','extractNsPackage', 'findPackages', 'getShortAndLongDescription',
'copyEggs', 'makeEggReleaseDir', 'pprintFiles', 'keepBaseNames',
'makeModuleName',
]
import os
import textwrap
from string import Template
from pyjld.os import safe_walkup, safe_mkdir, versa_copy
def findPackages(root):
"""
Retrieves the packages from the root path
Verifies the :param root: directory for the presence of
valid packages i.e. directory name of the form ::
namespace.package
"""
dir_list = os.listdir(root)
packages=[]
for path in dir_list:
try:
ns, package = path.split('.')
#filter out .svn etc.
if ns:
packages.append((path, ns, package))
except:
continue
return packages
def findPackage( root=os.getcwd() ):
"""
Finds the package's root
The package directory must be of the form ``ns.package``.
The :param root: represents the starting directory in the
child hierarchy of the said package.
The default :param root: parameter is the current directory.
"""
for path in safe_walkup(root):
if path is None:
return None, None, None
name = os.path.basename( path )
ns, package = extractNsPackage(name)
if ns is not None:
return (path, ns, package)
return None, None, None
def makeModuleName(ns, package):
"""
"""
return "%s.%s" % (ns, package)
def extractNsPackage(name, filterEmptyNs=True):
"""
Extracts the tuple (ns,package) from the specified name
The parameter :param name: must correspond to a directory name.
"""
ns, package = None, None
try: ns, package = name.split('.')
except: pass
if not ns:
ns, package = None, None
return ns, package
def getShortAndLongDescription(module):
"""
Returns the short and long description documentation strings
from a loaded module
"""
try:
short_description, long_description = (
textwrap.dedent(d).strip()
for d in module.__doc__.split('\n\n', 1) )
except:
raise RuntimeError('Error extracting short & long description from module [%s]' % str(module))
return short_description, long_description
def makeEggReleaseDir(version, root):
"""
Make an `egg` release directory in the `root` directory
The directory path will be according to the following ::
$root/$version/eggs
"""
path = [root, version, 'eggs']
existed, path = safe_mkdir(path)
return existed, path
def copyEggs(egg_list, source_pkg, release_dir):
    r"""
    Copies a list of eggs
    :param egg_list: eggs list
    :param source_pkg: the source directory
    :param release_dir: the target directory
    The egg list is of the form ::
        [ (egg_type, egg_version, egg_name), ... ]
    The function returns the list of files copied in the form ::
        [ (src_file, dest_file), ... ]
    """
    files = []
    for egg_type, egg_version, egg_name in egg_list:
        src_file, dest_file = versa_copy([source_pkg, 'trunk', egg_name], release_dir)
        files.append((src_file, dest_file))
    return files
def pprintFiles( files, base_msg ):
"""
Pretty prints a list of file names using :param base_msg:
message as string template
The template parameters declared are:
* `src`
* `dest`
"""
tpl = Template(base_msg)
for file in files:
src_file, dest_file = file
print tpl.safe_substitute(src=src_file, dest=dest_file)
def keepBaseNames(files):
"""
Maps the :param files: list consisting of ::
[ (src_file, dest_file) ]
to their basenames.
"""
newList = []
for file in files:
src_file, dest_file = file
base_src = os.path.basename(src_file)
base_dest = os.path.basename(dest_file)
newList.append((base_src, base_dest))
return newList
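# Illustrative: with a root containing the directories 'pyjld.os' and '.svn',
# findPackages(root) returns [('pyjld.os', 'pyjld', 'os')]; '.svn' is skipped
# because its namespace part is empty.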
|
import sys, curses, argparse
from curses import wrapper
filepath, width, height, row, column, max_height = None, None, None, None, None, None
max_window_widths = None
lines = list()
def get_args():
parser = argparse.ArgumentParser(description='View a window of a file\'s text')
parser.add_argument('-F', help='filepath for the file to be navigated', required=True)
parser.add_argument('-W', help='maximum width of content window', required=False)
parser.add_argument('-H', help='maximum height of content window', required=False)
parser.add_argument('-R', help='window row start (0 indexing)', required=False)
parser.add_argument('-C', help='window column start (0 indexing)', required=False)
args = parser.parse_args()
global filepath, width, height, row, column, max_height
filepath = args.F
    some_none = (args.W is None) or (args.H is None) or (args.R is None) or (args.C is None)
    all_none = (args.W is None) and (args.H is None) and (args.R is None) and (args.C is None)
    if not some_none:
        width = int(args.W)
        height = int(args.H)
        row = int(args.R)
        column = int(args.C)
    elif not all_none:  # some, but not all, of the options were given
        print("must specify R, C, W, and H (row, column, width, height) if you pass one of the options")
        sys.exit(1)
    else:  # all none: use default values
row = 0
column = 0
width = 10
height = 5
# project-specific processing of file
def process_file():
global filepath, height, width, row, column, max_window_widths, lines, max_height
f = open(filepath, "r")
line_widths = list()
# lines = list() # globally defined
# dump first line for this format
f.readline()
_line = f.readline()
while _line:
if not _line[0] == '/':
lines.append(_line[0:-1]) # cut off newline character
line_widths.append(len(_line) - 1)
_line = f.readline()
f.close()
max_height = len(lines)
height = min(height, max_height)
# create a list of the maximum width among a set of lines coming from the filepath
max_window_widths = [0] * max(1, len(lines) - height + 1)
# generate list of best widths within window given a row index
for w_index in range(0, len(max_window_widths)):
local_max = 0
for i in range(w_index, w_index + height):
local_max = max(local_max, line_widths[i])
max_window_widths[w_index] = local_max
# if window too big, shrink width to match width of (longest) line
# m = 0
# for x in max_window_widths:
# m = max(m, x)
m = max(max_window_widths)
if width > m:
width = m
# if window is out of bounds, shift it by the out of bounds amount
if row + height >= max_height:
row = max(0, (max_height - height) - 1)
if column + width >= max_window_widths[row]:
column = max(0, max_window_widths[row] - width)
def draw_window(stdscr, pad):
global width, height, row, column
coord_string = "({},{}) ".format(row, column)
# pad.addstr(0, 0, coord_string)
stdscr.addstr(0, 0, coord_string, curses.color_pair(1))
x_offset = len(coord_string)
y_offset = 1
# draw vertical bar beneath coordinate pair
for y in range(y_offset, y_offset + height + 1):
if (y + row - 1) % 5 == 0:
stdscr.addstr(y, x_offset, '├', curses.color_pair(1))
else:
stdscr.addstr(y, x_offset, '│', curses.color_pair(1))
# draw horizontal bar beneath coordinate pair
for x in range(x_offset, x_offset + width + 1):
if (x + column - 1) % 5 == 0:
stdscr.addstr(y_offset, x, '┬', curses.color_pair(1))
else:
stdscr.addstr(y_offset, x, '─', curses.color_pair(1))
# clear old lines that may be below expanded coordinate pair ((99,99) --> (100, 99))
for x in range(0, x_offset):
for y in range(y_offset, y_offset + height + 1):
stdscr.addch(y, x, ' ')
# write out the dimensions being displayed as a reminder in the format [w: _, h: _]
window_str = " [w: {}, h: {}] ".format(width, height)
# for x in range(x_offset + width, x_offset + width + len(window_str)):
stdscr.addstr(y_offset + height + 1, x_offset + width - 1, window_str, curses.color_pair(1))
# clear lines that are now beyond our old window
# write contents of file in window
for r_index in range(row, min(row + height, len(lines))):
l = lines[r_index]
for c_index in range(column, min(column + width, len(l))):
stdscr.addch(1 + y_offset + r_index - row, 1 + x_offset + c_index - column, l[c_index])
def refresh_screen(stdscr):
    # curses.wrapper() has already initialized the screen; reuse it instead of re-initializing
    win = stdscr
# TODO: change THIS PADDING???
pad = curses.newpad(300, 300)
stdscr.clear()
curses.noecho()
    pad.refresh(0, 0, 0, 0, height, width)  # smaxrow/smaxcol take rows then columns
# grab globals
global row, column, max_height
c = ""
while 1:
draw_window(stdscr, pad)
c = win.getch()
if c == curses.KEY_UP:
if row == 0:
continue
row -= 1
elif c == curses.KEY_DOWN:
if row + height == max_height:
continue
row += 1
elif c == curses.KEY_RIGHT:
# no limit on column?
column += 1
elif c == curses.KEY_LEFT:
if column == 0:
continue
column -= 1
elif c == 113: # 'q'
quit()
# stdscr.addstr("UP")
# pad.addch(3,0, chr(c))
# stdscr.addstr(chr(c))
def main(stdscr):
curses.init_pair(1, curses.COLOR_BLUE, curses.COLOR_BLACK)
get_args() # get arguments / default values (filepath, row, column, width, height)
process_file()
refresh_screen(stdscr)
# print(filepath, row, column, width, height)
if __name__ == '__main__':
    wrapper(main)
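# A hypothetical invocation (assuming this file is saved as viewer.py and the -F
# option defined above takes the path of the file to browse):
#
#   python viewer.py -F some_data.txt -R 0 -C 0 -W 40 -H 10
#
# Arrow keys pan the content window; 'q' quits.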
|
def rotten_tomato_score(actor_name,cur):
    import requests  # pymysql/json/BeautifulSoup are not needed here: the cursor is passed in and requests parses the JSON
userAgent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36'
headers = {
'User-Agent':userAgent
}
    # adjust actor_name for the search URL: join the name parts with hyphens
    name = '-'.join(actor_name.split(' '))
urls = ['https://www.rottentomatoes.com/napi/search/all?type=movie&searchQuery=',
'https://www.rottentomatoes.com/napi/search/all?after=MQ%3D%3D&type=movie&searchQuery=',
'https://www.rottentomatoes.com/napi/search/all?after=Mg%3D%3D&type=movie&searchQuery=',
'https://www.rottentomatoes.com/napi/search/all?after=Mw%3D%3D&type=movie&searchQuery=',
'https://www.rottentomatoes.com/napi/search/all?after=NA%3D%3D&type=movie&searchQuery=']
num = 0
for url in urls:
res = requests.get(url+name, headers=headers)
movies = res.json()
        for movie in movies['movie']['items']:
            if movie['tomatometerScore']:  # skip results without a Tomatometer score
                num += 1
                sql = '''UPDATE movie
                         SET tomato_rating = %s
                         WHERE movie_name = %s;
                      '''
                tomatoRating = float(movie['tomatometerScore']['score']) / 10
                cur.execute(sql, (tomatoRating, movie['name']))
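# A minimal usage sketch (hypothetical connection parameters; assumes a `movie`
# table with `movie_name` and `tomato_rating` columns, as the UPDATE above implies):
#
# import pymysql
# conn = pymysql.connect(host='localhost', user='root', password='...', db='movies')
# cur = conn.cursor()
# rotten_tomato_score('Tom Hanks', cur)
# conn.commit()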
|
__copyright__ = """ Copyright (c) 2021 HangYan. """
__license__ = 'MIT license'
__version__ = '1.0'
__author__ = 'topaz1668@gmail.com'
import os
import argparse
parser = argparse.ArgumentParser(description='')
parser.add_argument('--push_list', dest='push_list', nargs='*', default=[])  # type=list would split a single string into characters
args = parser.parse_args()
# MESSAGE CONFIG
REGION = "cn-123"
ACCESS_KEY_ID = "$$$$$$$$$$$$$$$$"
ACCESS_KEY_SECRET = "$$$$$$$$$$$$$$$$"
# API CONFIG
SECRET = b'$$$$$$$$$$$$$$$$'
SECRET_KEY = '$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$'
TOKEN_KEY = b'$$$$$$$$'
DES_KEY = b'$$$$$$$$'
VERIFYCODE_TIMEOUT = 60 * 5
TOKEN_TIMEOUT = 3600 * 1000 * 24
# DATABASE CONFIG
USERNAME = "root"
PASSWORD = "123456"
HOST = "127.0.0.1"
PORT = "3306"
DATABASE = "ManageFileDB"
REDIS_HOST = "127.0.0.1"
REDIS_PORT = "3306"
REDIS_PASSWD = "123456"
ERROR_CODE = b'0x00'
SUCCESS_CODE = b'0x01'
RECORD_PAGE_LIMIT = 10
PAGE_LIMIT = 5
REGISTER_USER_PAGE_LIMIT = 20
CHUNK_SIZE = 1024 * 1024 * 10  # 10 MB
FILE_STORE_TIME = 60 * 60 * 24
INTRANET_PORT = 8010
EXTERNAL_PORT = 8011
PUBLIC_PORT = 8012
access_logfile = "logfiles/nginx_access.log"
SSH_PORT = 22
INTRANET_HOST = "192.168.0.12"
INTRANET_SSH_USERNAME = "topaz"  # SSH username
INTRANET_SSH_PWD = "123"  # SSH password
EXTERNAL_HOST = "192.168.0.11"
EXTERNAL_SSH_USERNAME = "topaz"  # SSH username
EXTERNAL_SSH_PWD = "123"  # SSH password
|
import requests
q = requests.get('https://www.thenationalnews.com/image/policy:1.906316:1567597962/sp05-Man-City-Trophy-Tour.jpg?f=16x9&w=940&$p$f$w=0ee2b1c')
print(q.content)
with open('chelsea.jpeg', 'wb') as n:
    n.write(q.content)  # the with-block closes the file automatically
|
# Prefix sums over plates ('*') plus, for each index, the nearest candle ('|') on either side
# Three preprocessing passes before answering the queries
from typing import List

class Solution:
    def platesBetweenCandles(self, s: str, queries: List[List[int]]) -> List[int]:
n = len(s)
preSum = [0] * (n+1)
for i in range(1, n+1):
preSum[i] = preSum[i-1] + (1 if s[i-1] == '*' else 0)
left, right = [0] * n, [0] * n
r, l = -1, -1
for i in range(n-1, -1, -1):
if s[i] == '|':
r = i
left[i] = r
for i in range(n):
if s[i] == '|':
l = i
right[i] = l
res = []
for l, r in queries:
i1, i2 = left[l], right[r]
if (i1 == -1 or i1 >= r) or (i2 == -1 or i2 <= l):
res.append(0)
continue
res.append(preSum[i2] - preSum[i1])
return res
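# Quick sanity check, assuming the usual problem setup:
# Solution().platesBetweenCandles("**|**|***|", [[2, 5], [5, 9]])  # -> [2, 3]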
|
# coding: utf-8
# In[1]:
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import Dataset, DataLoader
import glob
import os
import numpy as np
import pandas as pd
import sys
import cv2
# In[2]:
class FaceImage(Dataset):
def __init__(self,picture_file,path):
self.data_path = path
self.picture_file = picture_file
def __getitem__(self,index):
file_path = os.path.join(self.data_path,self.picture_file[index])
img = cv2.imread(file_path,cv2.IMREAD_GRAYSCALE)
img = np.expand_dims(img,0)
img =torch.tensor(img).float()
return img
    def __len__(self):
        return len(self.picture_file)  # the dataset indexes picture_file, so its length is the dataset size
# In[3]:
class ImageNet(nn.Module):
def __init__(self):
super(ImageNet,self).__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(1, 48, kernel_size=3,stride =(1,1)),
nn.Dropout2d(0.3),
nn.MaxPool2d(2),
nn.ReLU()
)
self.conv2 = nn.Sequential(
nn.Conv2d(48, 128, kernel_size=3,stride =(1,1)),
nn.Dropout2d(0.2),
nn.MaxPool2d(2),
nn.ReLU()
)
self.conv3 = nn.Sequential(
nn.Conv2d(128, 256, kernel_size=3,stride =(1,1)),
nn.Dropout2d(0.5),
nn.MaxPool2d(2),
nn.ReLU()
)
self.fcn1 = nn.Linear(in_features =4096 ,out_features =512 ,bias = True)
self.fcn2 = nn.Linear(in_features = 512,out_features =256 ,bias = True)
self.fcn3 = nn.Linear(in_features =256 ,out_features =7 ,bias = True)
def forward(self , x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = x.view(x.size(0),-1)
#print("size:",x.shape)
x = F.relu(self.fcn1(x))
x = F.relu(self.fcn2(x))
out = self.fcn3(x)
return out
# In[4]:
picture_file = sorted(os.listdir(sys.argv[1]))
test_dataset = FaceImage(picture_file,sys.argv[1])
test_loader = torch.utils.data.DataLoader(test_dataset,batch_size =256)
# In[8]:
model = torch.load("Imagenet_best.pkl",map_location = "cpu")
model = model.cpu()
# In[9]:
model.eval()
ans = []
with torch.no_grad():  # inference only; no gradient tracking needed
    for idx, test in enumerate(test_loader):
        ans.append(model(test))
# In[1]:
ans_final =[]
for a in ans:
for pre in a:
pre = pre.tolist()
ans_final.append(pre.index(max(pre)))
# In[11]:
with open(sys.argv[2],"w") as f:
print("id,label", file = f)
for id,label in enumerate(ans_final):
print("{},{}".format(id,label) ,file = f)
|
"""
Program: textFieldDemo.py
Page: 263
Author: Chris
Simple python GUI window illustrating the input and output fields
"""
from breezypythongui import EasyFrame
class TextFieldDemo(EasyFrame):
"""Converts an input string to uppercase and displays the result"""
def __init__(self):
"""Sets up the window and the label"""
EasyFrame.__init__(self, title = "Text Field Demo")
# Label and field for input
self.addLabel(text = "Input", row = 0, column = 0)
self.inputField = self.addTextField(text = "", row = 0, column = 1)
# Label and field for the output
self.addLabel(text = "Output", row = 1, column = 0)
self.outputField = self.addTextField(text = "", row = 1, column = 1, state = "readonly")
# the command button
self.addButton(text = "Convert", row = 2, column = 0, columnspan = 2, command = self.convert)
# Event handling method for the button
def convert(self):
""" Inputs the string, converts it to uppercase and outputs the result"""
text = self.inputField.getText()
result = text.upper()
self.outputField.setText(result)
def main():
"""initiates and pops up the window"""
TextFieldDemo().mainloop()
# Global call to the main() function
main()
|
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution(object):
def removeNthFromEnd(self, head, n):
"""
:type head: ListNode
:type n: int
:rtype: ListNode
"""
temp = ListNode(0)
temp.next = head
solve1, solve2 = temp, temp
        # advance solve2 first; once it is n+1 nodes ahead, start advancing
        # solve1 as well, so solve1 stops just before the node to remove
        while solve2:
            if n <= -1:
                solve1 = solve1.next
            solve2 = solve2.next
            n -= 1
        solve1.next = solve1.next.next  # unlink the n-th node from the end
return temp.next
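# Small sanity check: build 1->2->3->4->5 and remove the 2nd node from the end.
# head = ListNode(1, ListNode(2, ListNode(3, ListNode(4, ListNode(5)))))
# head = Solution().removeNthFromEnd(head, 2)
# vals = []
# while head:
#     vals.append(head.val)
#     head = head.next
# print(vals)  # [1, 2, 3, 5]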
|
# Generated by Django 2.2.1 on 2019-08-26 15:11
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('snippets', '0017_delete_account'),
]
operations = [
migrations.CreateModel(
name='Account',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('avatar', models.FileField(upload_to='media/')),
('birthday', models.DateTimeField(default='1970-01-01')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
# RGB color space: the three RGB values (0-255 each) form a three-dimensional space (a cube)
# color space notes (a cone-shaped space)
# HSV (cylindrical space): H is 0-360 in theory, but OpenCV stores it as 0-180; values can be normalized and converted to HSV
import cv2 as cv
import numpy as np
def extract_object():
capture = cv.VideoCapture('D:/DOC/python Opencv learning/test.mp4')
while True:
        ret, frame = capture.read()  # ret is the read status; it turns False once the last frame has been read
        if ret == False:  # reading failed / end of the video
            print("read video fail")
            break
# convert to hsv
hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
        # extract only the blue regions
        lower_hsv = np.array([100, 43, 46])  # lower HSV bound for blue
        upper_hsv = np.array([124, 255, 255])  # upper HSV bound for blue
        mask = cv.inRange(hsv,
                          lowerb=lower_hsv,
                          upperb=upper_hsv)  # src, lower bound, upper bound
        dst = cv.bitwise_and(frame, frame, mask=mask)  # keep the masked pixels via bitwise AND
cv.imshow('Video', frame)
cv.imshow('Mask', dst)
print("show video captures")
        c = cv.waitKey(40)
        if c == 27:  # 27 -> ESC
            break
def color_space_demo(image):
    gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)  # BGR to grayscale
    cv.imshow("gray", gray)
    hsv = cv.cvtColor(image, cv.COLOR_BGR2HSV)  # BGR to HSV
    cv.imshow("hsv", hsv)
    yuv = cv.cvtColor(image, cv.COLOR_BGR2YUV)
    cv.imshow("yuv", yuv)
# a self-written helper that shrinks an image by a given factor
def compression(image):
    n = 10  # compression factor
    [height, width, layer] = image.shape
    image1 = cv.resize(image, (int(height / (n/2.25)), int(width / n)))  # shrink
    cv.imshow("output image", image1)
    return image1
# read an image and display it
src1 = cv.imread(r'D:\DOC\python Opencv learning\test.jpg')  # read the image
# src = compression(src1)
# srcVideo = 'D:/DOC/python Opencv learning/test.mp4'
cv.namedWindow('input image', cv.WINDOW_AUTOSIZE)
cv.imshow("input image", src1)  # display the image via OpenCV's GUI
color_space_demo(src1)
extract_object()
b, g, r = cv.split(src1)
cv.imshow("blue", b)
cv.imshow("green", g)
cv.imshow("red", r)
src1[:, :, 2] = 0  # zero out the third (red) channel
src1 = cv.merge([b, g, r])
cv.imshow("change", src1)
cv.waitKey(0)  # wait for a key press before exiting
cv.destroyAllWindows()  # close all windows
|
from flask import redirect, url_for, request, render_template, flash
from app import app
import sqlite3 as sql
@app.route('/')
@app.route('/index')
def index():
    if request.args:
        logged_in = request.args.get('logged_in') == 'True'  # query-string values arrive as strings
        user = request.args.get('user')
        if logged_in:
            return render_template('index.html', user=user, logged_in=True)
    return render_template('index.html')
@app.route('/login')
def login():
return render_template('login_v1.html')
@app.route('/register')
def register():
return render_template('register.html')
@app.route('/success', methods = ['POST','GET'])
def success():
if request.method == 'POST':
user = request.form['userID']
passwd = request.form['password']
typeUser = request.form['userType']
with sql.connect("users.db") as con:
cur = con.cursor()
cur.execute("SELECT name,password,type FROM users WHERE name=? AND password=? AND type=?",(user,passwd,typeUser) )
res = cur.fetchall()
print (res)
if len(res) == 0:
#flash('Login failed!')
return redirect(url_for('index'))
else:
return redirect(url_for('index', user=user, logged_in=True))
else:
flash('Login failed!')
return "LOGIN FAILED!!"
@app.route('/registered', methods=['POST', 'GET'])
def registration():
if request.method == 'POST':
user = request.form['userID']
passwd = request.form['password']
typeUser = request.form['userType']
with sql.connect("users.db") as con:
cur = con.cursor()
cur.execute("INSERT INTO users (name,password,type)VALUES(?, ?, ?)", (user, passwd, typeUser))
            con.commit()
        return "Successful registration for user:%s [%s] with password:%s" % (user, typeUser, passwd)
else:
return redirect(url_for('login'))
|
n, x = map(int, input().split())
ice_cream = x
distress_child = 0
for _ in range(n):
sign, amount = input().split()
if sign == '+':
ice_cream += int(amount)
elif sign == '-' and ice_cream >= int(amount):
ice_cream -= int(amount)
    elif sign == '-' and ice_cream < int(amount):
distress_child += 1
print(ice_cream, distress_child)
|
# 2**15 = 32768 and the sum of its digits is 3 + 2 + 7 + 6 + 8 = 26.
# What is the sum of the digits of the number 2**1000?
# Comments Section:
# - Straightforward algorithm: convert to a string and add up the digits
def problem16():
    value = str(2**1000)
    total = 0
    for digit in value:
        total += int(digit)
    return total
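# Sanity check against the worked example above (the digits of 2**15 sum to 26):
# assert sum(int(d) for d in str(2**15)) == 26
# print(problem16())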
|
import csv, datetime
from faker import Faker
fake = Faker(['es_ES'])  # Spanish locale
total= 13
today = datetime.date.today()
output="database-"+ str(today) + ".csv"
rows = []  # rows collected for the CSV written after the loop
count = 0
for _ in range(total):
count +=1
name = fake.name()
address = fake.address()
color = fake.color()
phone = fake.phone_number()
nif = fake.doi()
ssn = fake.ssn()
cp = "cp: " + fake.postcode()
count_bank = "count_bank: " + fake.iban()
    data = [
        count,
        name,
        nif,
        address,
        cp,
        color,
        phone,
        ssn,
        count_bank
    ]
    rows.append(data)
    print(data)
# write the collected rows out to the CSV file named above
with open(output, "w", newline="") as csvfile:
    writer = csv.writer(csvfile)
    writer.writerows(rows)
|
import json
def greet_user():
"""Great the user by name."""
username = get_username()
if username:
        prompt = input("Are you " + username + " (y/n)? ")
        if prompt == 'y':
            print("Welcome back, " + username + "!")
        else:  # 'n' or any other reply: register as a new user
            username = newuser()
            print("We'll remember you when you come back, " + username + '!')
def get_username():
"""Get username if available"""
filename = 'user.json'
try:
with open(filename) as fobj:
username = json.load(fobj)
except FileNotFoundError:
return None
else:
return username
def newuser():
"""Prompt for a new username"""
username = input('Please enter your name: ')
filename = 'user.json'
with open(filename, 'w') as fobj:
json.dump(username, fobj)
return username
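# Run the interactive prompt when this file is executed directly:
if __name__ == '__main__':
    greet_user()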
|
import os
import torch
import copy
from torch.utils.data import DataLoader
import utils.DataProcessing as DP
import utils.LSTMClassifier as LSTMC
import torch.optim as optim
import torch.nn as nn
from torch.autograd import Variable
use_plot = True  # flag: whether to plot and save the figure
use_save = True  # flag: whether to save (serialize) the results
# if use_plot:
# ....
# if use_save:
if use_save:  # if saving, import pickle and datetime
    import pickle
    # the package used for serialization
    from datetime import datetime
# Store path ..\LSTM-Classification-Pytorch\data\test_txt\1.txt
# ..\LSTM-Classification-Pytorch\data\train_txt
DATA_DIR = 'data' # ..\LSTM-Classification-Pytorch\ $ DATA_DIR \test_txt\1.txt
TRAIN_DIR = 'train_txt' # ..\LSTM-Classification-Pytorch\data\ $TRAIN_DIR \1.txt
TEST_DIR = 'test_txt' # Same like above
# Train file list document ..LSTM-Classification-Pytorch\data\train_txt.txt
# Test file list document ..LSTM-Classification-Pytorch\data\test_txt.txt
# Each list file stores the document file names, e.g. 1.txt|2.txt|3.txt...
TRAIN_FILE = 'train_txt.txt'
TEST_FILE = 'test_txt.txt'
# Training data label file list document ..LSTM-Classification-Pytorch\data\train_label.txt
# Stores the list of training labels, e.g. 1|0|2|3|5, mapped to the files in train_txt.txt
# Supervised learning
TRAIN_LABEL = 'train_label.txt'
TEST_LABEL = 'test_label.txt'
## parameter setting
epochs = 2  # number of training epochs
# kept small to save time; e.g. set epochs to 10, or
# Epochs = 50
batch_size = 5  # mini-batch size for batched gradient descent
use_gpu = torch.cuda.is_available()  # whether CUDA is available
# used later as:
# if use_gpu:
#     model = model.cuda()
learning_rate = 0.01  # initial learning rate
# model = LSTMC.LSTMClassifier(embedding_dim=embedding_dim,hidden_dim=hidden_dim,
# vocab_size=len(corpus.dictionary),label_size=nlabel, batch_size=batch_size, use_gpu=use_gpu)
def adjust_learning_rate(optimizer, epoch):
    lr = learning_rate * (0.1 ** (epoch // 10))  # new lr = base lr * 0.1^(epoch // 10): a step-decay schedule
    for param_group in optimizer.param_groups:  # the optimizer manages its parameters through param_groups;
        # each param_group holds one parameter set plus its learning rate, momentum, etc.,
        # so updating param_group['lr'] changes the learning rate of that group
        param_group['lr'] = lr
    return optimizer
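# Illustration of the step-decay schedule above (with learning_rate = 0.01):
#   epochs  0-9  -> lr = 0.01
#   epochs 10-19 -> lr = 0.001
#   epochs 20-29 -> lr = 0.0001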
if __name__=='__main__':
### parameter setting
embedding_dim = 100
hidden_dim = 50
sentence_len = 32
# Store path ..\LSTM-Classification-Pytorch\data\train_txt
train_file = os.path.join(DATA_DIR, TRAIN_FILE) # joint connect the path String 'train_txt.txt'
test_file = os.path.join(DATA_DIR, TEST_FILE) # 'data\\test_txt.txt'
    fp_train = open(train_file, "r")  # open the list file read-only
    # it indexes the training documents, one file name per line:
    # 1.txt
    # 2.txt
    # 3.txt
''' Character Meaning
'r' open for reading (default)
'w' open for writing, truncating the file first
'x' open for exclusive creation, failing if the file already exists
'a' open for writing, appending to the end of the file if it exists
'b' binary mode
't' text mode (default)
'+' open a disk file for updating (reading and writing)
'U' universal newlines mode (deprecated)'''
train_filenames = [os.path.join(TRAIN_DIR, line.strip()) for line in fp_train]
# Read all train_filenames in variable
# Result: class 'list'>: ['train_txt\\1.txt', 'train_txt\\2.txt', 'train_txt\\3.txt', 'train_txt\\4.txt', 'train_txt\\5.txt',
# 'train_txt\\6.txt', 'train_txt\\7.txt', 'train_txt\\8.txt', 'train_txt\\9.txt', 'train_txt\\10.txt',
    filenames = copy.deepcopy(train_filenames)  # deep copy: "filenames" is an independent copy of "train_filenames"
# 0001 = {str} 'train_txt\\2.txt'
# 0000 = {str} 'train_txt\\1.txt'
# 0002 = {str} 'train_txt\\3.txt'
# 0003 = {str} 'train_txt\\4.txt' ...etc
fp_train.close()
# close file
# Same as above
fp_test = open(test_file, 'r')
test_filenames = [os.path.join(TEST_DIR, line.strip()) for line in fp_test]
fp_test.close()
# Read test files
filenames.extend(test_filenames) # 7574 documents
# Now "filenames" have both test and train file name
# 0001 = {str} 'train_txt\\2.txt' plus 0971 = {str} 'test_txt\\1.txt'
# 0000 = {str} 'train_txt\\1.txt' 0972 = {str} 'test_txt\\2.txt'
# 0002 = {str}'train_txt\\3.txt' 0973 = {str} 'test_txt\\3.txt'
    corpus = DP.Corpus(DATA_DIR, filenames)  # builds the corpus (the structure of this final object is still unclear ??????)
    # return ids  # tokenized tensor
nlabel = 8 # Label from 0 to 7
### create model
model = LSTMC.LSTMClassifier(embedding_dim=embedding_dim,hidden_dim=hidden_dim,
vocab_size=len(corpus.dictionary),label_size=nlabel, batch_size=batch_size, use_gpu=use_gpu)
# len(corpus.dictionary) 23590
# len(corpus.dictionary) --- return len(self.idx2word)
# LSTMClassifier(
# (word_embeddings): Embedding(23590, 100)
# (lstm): LSTM(100, 50)
# (hidden2label): Linear(in_features=50, out_features=8, bias=True)
if use_gpu:
model = model.cuda()
### data processing
dtrain_set = DP.TxtDatasetProcessing(DATA_DIR, TRAIN_DIR, TRAIN_FILE, TRAIN_LABEL, sentence_len, corpus)
    # data type: <utils.DataProcessing.TxtDatasetProcessing object at 0x000001F8149C9518>
    # how should objects like this be annotated? ???????
    # creates the training tensor
    # reads the training documents; see https://zhuanlan.zhihu.com/p/35698470 for background
    train_loader = DataLoader(dtrain_set,  # input dataset
                              batch_size=batch_size,  # batch_size (int, optional): how many samples per batch to load (default: 1)
                              shuffle=True,  # set to ``True`` to have the data reshuffled at every epoch (default: False)
                              num_workers=4  # 0 loads data in the main process; values > 0 use that many worker processes, which can speed up loading
                              )
dtest_set = DP.TxtDatasetProcessing(DATA_DIR, TEST_DIR, TEST_FILE, TEST_LABEL, sentence_len, corpus)
# Create testing tensor
test_loader = DataLoader(dtest_set,
batch_size=batch_size,
shuffle=False,
num_workers=4
)
    # dataset: the dataset to load from (a Dataset object)
    # batch_size: how many samples each batch takes
    # shuffle: whether to shuffle the data
    # sampler: custom sampling strategy, detailed elsewhere
    # num_workers: number of worker processes used for loading; 0 means no multiprocessing
    # collate_fn: how to merge multiple samples into one batch; the default is usually fine
    # pin_memory: whether to keep the data in pinned memory, from which transfers to the GPU are faster
    # drop_last: if the dataset size is not a multiple of batch_size, True drops the final incomplete batch
    optimizer = optim.SGD(model.parameters(), lr=learning_rate)  # optimization: the SGD algorithm
    # class torch.optim.SGD(params, lr=, momentum=0, dampening=0, weight_decay=0, nesterov=False)
    # params (iterable) - iterable of parameters to optimize, or a dict defining parameter groups
    # lr (float) - learning rate
    # momentum (float, optional) - momentum factor (default: 0)
    # weight_decay (float, optional) - weight decay (L2 penalty) (default: 0)
    # dampening (float, optional) - dampening for momentum (default: 0)
    # nesterov (bool, optional) - enables Nesterov momentum (default: False)
    loss_function = nn.CrossEntropyLoss()  # cross-entropy loss; the exact formula is in the official docs
    train_loss_ = []  # lists recording loss/accuracy, plotted later against their indices to show the evolution
test_loss_ = []
train_acc_ = []
test_acc_ = []
for epoch in range(epochs):
        optimizer = adjust_learning_rate(optimizer, epoch)  # step-decay schedule, documented above
## training epoch
total_acc = 0.0
total_loss = 0.0
total = 0.0
        for iter, traindata in enumerate(train_loader):  # indexes train_loader; each step yields one shuffled mini-batch
            # enumerate numbers an iterable and yields the items together with their indices
            # each iteration releases one small batch of data for learning
            # ?????????? how the data is actually read here is unclear
            # the relevant code is in dataloader.py
            train_inputs, train_labels = traindata  # train_inputs: torch.Size([5, 32]); 5 from batch_size, 32 from sen_len
            # train_labels: torch.Size([5, 1]); 5 from batch_size, the true labels
            # print(train_labels.shape)  -> torch.Size([5, 1])
            train_labels = torch.squeeze(train_labels)  # torch.squeeze() removes all dimensions of size 1, e.g. a (1, 3) row becomes (3,)
            # squeeze(a) deletes every dimension of a whose size is 1
            # print('Epoch: ', epoch, '| Step: ', iter, '| train_inputs: ', train_inputs.numpy(), '| train_labels: ', train_labels.size(), '| train_inputs.size: ', train_inputs.size())
            # Epoch: 0 | Step: 1084 | train_inputs.size: torch.Size([5, 32]) | train_labels: [5 0 0 0 1] torch.Size([5])
if use_gpu:
train_inputs, train_labels = Variable(train_inputs.cuda()), train_labels.cuda()
else: train_inputs = Variable(train_inputs)
            model.zero_grad()  # clear the gradient buffers
            # https://discuss.pytorch.org/t/why-do-we-need-to-set-the-gradients-manually-to-zero-in-pytorch/4903/3
            # in PyTorch, backward() accumulates gradients instead of replacing them,
            # and the gradients of different batches must not be mixed together,
            # so zero_grad() has to be called once per batch (still not fully understood)
model.batch_size = len(train_labels) # batch_size = 5
model.hidden = model.init_hidden() # model.hidden: tuple type return (h0, c0) size 5 * 50 tensors,
# print(train_inputs.shape) torch.Size([5, 32])
# print(train_inputs.t().shape) torch.Size([32, 5])
output = model(train_inputs.t()) # Transpose train_inputs tensor and use it as input
# Output torch.Size([5, 8])
# print(Variable(train_labels).size())
            loss = loss_function(output, Variable(train_labels))  # compute the error: cross_entropy(predicted values, classes)
            # train_labels: torch.Size([5, 1])
            # Variable(train_labels): torch.Size([5])
            # the formula is in the official docs, so it is not repeated here
            loss.backward()  # torch.autograd.backward(variables, grad_variables=None, retain_graph=None, create_graph=False)
            # this is the default call, equivalent to out.backward(torch.Tensor([1.0]))
            # given the graph's leaf variables, it computes the gradients of the graph via the chain rule;
            # if any variable is non-scalar and has requires_grad=True, grad_variables must be supplied,
            # with a length matching variables and one gradient per variable (None is acceptable
            # for variables that need no gradient)
            # the function accumulates the gradients of the leaf variables, so you may need to
            # zero the leaf variables' gradients before calling it
            # parameters:
            #
            # variables (sequence of Variable) - the leaves being differentiated, i.e. the ys
            # grad_variables (sequence of (Tensor, Variable) or None) - the gradient w.r.t. each variable;
            #   only used when the variable is non-scalar and requires a gradient
            # retain_graph (bool, optional) - if False, the graph used to compute the grads is freed; note that in
            #   almost all cases setting this to True is unnecessary and can usually be worked around
            #   more efficiently; defaults to the value of create_graph
            # create_graph (bool, optional) - if True, the graph of the derivative is constructed, allowing
            #   higher-order derivatives to be computed; defaults to False
            # the three steps of an update:
            # 1. optimizer = optim.SGD(model.parameters(), lr=learning_rate)
            #    this line gives the optimizer references to all parameters; each parameter carries a
            #    gradient, and the optimizer can apply those gradients to update the parameters
            # 2. loss = loss_function(output, Variable(train_labels))
            #    comparing the prediction with the true class (entropy or another loss function) produces
            #    the initial gradient; loss.backward() then back-propagates it through all links and nodes
            #    of the network, which are connected and therefore allow chain-rule propagation
            # 3. optimizer.step() applies all gradients to update the parameter values; since step()
            #    updates every parameter, no gradients need to be specified
            optimizer.step()
            _, predicted = torch.max(output.data, 1)  # max over dim 1 returns the row-wise maxima and their indices
            # the leading underscore discards the max values, so predicted holds the argmax indices
            # input: output.data of torch.Size([5, 8])
            # output: predicted of torch.Size([5])
            # print(train_labels.size())
            total_acc += (predicted == train_labels).sum()  # number of correct predictions; a 0-dim tensor, read it with .item()
            # print(total_acc.item())
            total += len(train_labels)  # len(train_labels) = 5, so this grows by 5 each step
            total_loss += loss.item()  # .item() extracts the Python number from the tensor
            train_loss_.append(total_loss / total)  # appended once per batch
            train_acc_.append(total_acc / total)
## testing epoch
total_acc = 0.0
total_loss = 0.0
total = 0.0
for iter, testdata in enumerate(test_loader):
test_inputs, test_labels = testdata
test_labels = torch.squeeze(test_labels)
if use_gpu:
test_inputs, test_labels = Variable(test_inputs.cuda()), test_labels.cuda()
else: test_inputs = Variable(test_inputs)
model.batch_size = len(test_labels)
model.hidden = model.init_hidden()
output = model(test_inputs.t())
loss = loss_function(output, Variable(test_labels))
# calc testing acc
_, predicted = torch.max(output.data, 1)
total_acc += (predicted == test_labels).sum()
total += len(test_labels)
total_loss += loss.item()
test_loss_.append(total_loss / total)
test_acc_.append(total_acc / total)
print('[Epoch: %3d/%3d] Training Loss: %.3f, Testing Loss: %.3f, Training Acc: %.3f, Testing Acc: %.3f'
% (epoch, epochs, train_loss_[epoch], test_loss_[epoch], train_acc_[epoch], test_acc_[epoch]))
param = {}
param['lr'] = learning_rate
param['batch size'] = batch_size
param['embedding dim'] = embedding_dim
param['hidden dim'] = hidden_dim
param['sentence len'] = sentence_len
result = {}
result['train loss'] = train_loss_
result['test loss'] = test_loss_
result['train acc'] = train_acc_
result['test acc'] = test_acc_
result['param'] = param
if use_plot:
import PlotFigure as PF
PF.PlotFigure(result, use_save)
if use_save:
        filename = 'log/LSTM_classifier_' + datetime.now().strftime("%d-%h-%M-%S") + '.pkl'  # day-month-minute-second timestamp
result['filename'] = filename
fp = open(filename, 'wb')
pickle.dump(result, fp)
fp.close()
print('File %s is saved.' % filename)
#
# 1) Comment every line of code; mark anything unclear and discuss it at the meeting
# 2) The essence of commenting is stating each function's inputs and outputs; ideally use the debugger
#    to inspect the in-memory data, e.g. "the input is a 3x5 matrix representing xxx"
# 3) Don't sink large amounts of time into tutorials and videos; work from this code, and search for
#    specific problems as the code raises them
# 4) If you are genuinely stuck, ask promptly; difficulties are normal for a beginner, so don't sit on
#    them for ages out of embarrassment
# 5) Keep notes recording your own lessons learned
# 6) Register a GitHub account and put the code on GitHub
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2019-01-13 13:52
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('course', '0018_widget'),
]
operations = [
migrations.CreateModel(
name='Achievement',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('percentage', models.FloatField(default=1.0, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(1)])),
('is_canceled', models.BooleanField(default=False, verbose_name='Canceled')),
],
),
migrations.CreateModel(
name='Badge',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('icon_file_name', models.ImageField(blank=True, upload_to='')),
('icon_external_url', models.URLField(blank=True, max_length=2000)),
('name', models.CharField(max_length=100)),
('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='course.Course')),
],
),
migrations.CreateModel(
name='ClassBadge',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(blank=True, max_length=2000)),
('badge', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='course.Badge')),
('course_class', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='course.CourseClass')),
],
),
migrations.AddField(
model_name='achievement',
name='class_badge',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='course.ClassBadge'),
),
migrations.AddField(
model_name='achievement',
name='enrollment',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='course.Enrollment'),
),
migrations.AlterUniqueTogether(
name='classbadge',
unique_together=set([('badge', 'course_class')]),
),
migrations.AlterUniqueTogether(
name='badge',
unique_together=set([('course', 'name')]),
),
migrations.AlterUniqueTogether(
name='achievement',
unique_together=set([('enrollment', 'class_badge')]),
),
]
|
# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.engine.target import Field, Target
class PyenvInstallSentinelField(Field):
none_is_valid_value = True
alias = "_sentinel"
help = "<internal>"
default = False
class PyenvInstall(Target):
alias = "_pyenv_install"
help = "<internal target>"
core_fields = (PyenvInstallSentinelField,)
|
def adjust(coin, price):
    """Round price up to the nearest multiple of coin (exact multiples stay unchanged)."""
    q, r = divmod(price, coin)
    return price if not r else (q + 1) * coin
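# Examples of the rounding behaviour:
# adjust(5, 53)  # -> 55 (rounded up to the next multiple of 5)
# adjust(5, 55)  # -> 55 (already an exact multiple, unchanged)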
|
import spacy
nlp = spacy.load('de')  # shortcut name from spaCy 1.x/2.x; newer releases use a model name such as 'de_core_news_sm'
doc = nlp(u'Ich bin ein Berliner.')
print(' '.join('{word}/{tag}'.format(word=t.orth_, tag=t.pos_) for t in doc))
|
'''
Base class for public-key encryption
Notes: This class implements an interface for a standard public-key encryption scheme.
A public key encryption consists of four algorithms: (paramgen, keygen, encrypt, decrypt).
'''
from charm.toolbox.schemebase import *
class PKEnc(SchemeBase):
def __init__(self):
SchemeBase.__init__(self)
SchemeBase.setProperty(self, scheme="PKEnc")
self.baseSecDefs = Enum('OW_CPA','OW_CCA1','OW_CCA','IND_CPA','IND_CCA1','IND_CCA',
'NM_CPA','NM_CCA1','NM_CCA','KA_CPA','KA_CCA1','KA_CCA')
def paramgen(self, param1=None, param2=None):
return NotImplemented
def keygen(self, securityparam):
return NotImplemented
def encrypt(self, pk, M):
return NotImplemented
def decrypt(self, pk, sk, c):
return NotImplemented
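# A minimal sketch of how a concrete scheme would fill in this interface
# (hypothetical toy subclass for illustration only; the arithmetic is a
# placeholder and in no way a secure encryption scheme):
#
# class ToyPKEnc(PKEnc):
#     def keygen(self, securityparam):
#         sk = securityparam      # stand-in secret key
#         pk = sk + 1             # stand-in public key
#         return (pk, sk)
#     def encrypt(self, pk, M):
#         return M + pk
#     def decrypt(self, pk, sk, c):
#         return c - pk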
|
from django.contrib import admin
from blog.models import *
# Register your models here.
class ArticleAdmin(admin.ModelAdmin):
#fields = ('title','desc','content',)
list_display=('title','desc','click_count','user','category')
list_display_links=('title','desc',)
list_editable = ('click_count',)
fieldsets = (
(None,{
'fields':('title','desc','content','user','category')
}),
        ('高级设置',{  # fieldset label: 'Advanced settings'
'classes':('collapse',),
'fields':('click_count','is_recommend','tag')
})
)
class Media:
        # add JS files to the admin HTML pages; STATIC_URL/ is prepended to every path
js = (
'js/kindeditor-4.1.7/kindeditor-min.js',
'js/kindeditor-4.1.7/lang/zh_CN.js',
'js/kindeditor-4.1.7/config.js',
)
admin.site.register(User)
admin.site.register(Tag)
admin.site.register(Category)
admin.site.register(Article,ArticleAdmin)
#admin.site.register(Article)
admin.site.register(Comment)
admin.site.register(Links)
admin.site.register(Ad)
admin.site.register(Hellspawn)
|
# -*- coding: utf-8 -*-
# @Time : 2019/12/10 16:54
# @Author : Jeff Wang
# @Email : jeffwang987@163.com OR wangxiaofeng2020@ia.ac.cn
# @Software: PyCharm
import numpy as np
import cv2
img = cv2.imread('./picture/beach.png')
"""0. 彩色图像处理基础
0. 功能:符合人眼视觉,简化目标物区分,根据颜色目标识别。
1. 领域:全彩色,伪彩色。
2. 描述光源质量:辐射率(放出能量),光强(接受能量),亮度(主管描述彩色强度,不好度量)
3. 人眼敏感度:65%红,33%绿。2%蓝。
4. 混色处理法:加色法(光,RGB相加为白色),减色法(颜料,RGB相加为黑色)
5. 颜色特性:色调(Hue),饱和度(Saturation),亮度(Value)
"""
"""1. 彩色模型
0. RGB:像素深度是每个像素的比特数,RGB一般是3*8=24Bytes深度,共可以表述2**24=16777216种颜色。
1. CMY:青、深红、黄,用相减法。一般用于彩色打印机。
2. HSV:V与彩色信息无关(将亮度与彩色信息分开,便于图像处理),HS与人眼感受相关。
3. YCbCr:Y指亮度,Cb和Cr由(R-Y)和(B-Y)调整得到,用于JPEG等。
4. 彩色转灰度,有两种方式,一个是直接(R+G+B)/3,一种是加权平均Y = 0.299R+0.587G+0.114B.
"""
# 0. RGB->HSV and HSV->RGB
# the conversion formulas are in RGB2HSV.png and HSV2RGB.png in this directory
# img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# img_bgr = cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR)
# plt.subplot(121), plt.imshow(img_hsv)
# plt.subplot(122), plt.imshow(img_bgr)
# plt.show()
"""2. 彩色图像分割
HSV空间:H描述擦色,S分离感兴趣特征区域。V一般不用。
RGB空间:定义一个要分割的颜色(R0,G0,B0),如果颜色(Ri,Gi,Bi)与之欧氏距离很近,则认为近似。
"""
# 0. K均值在RGB空间的应用。
# 步骤:0. 任意选K个聚类中心,比如beach这个照片有蓝天海洋、树木、沙滩三个类别,那么K=3。
# 1. 按照最小距离分配像素点归属于哪一个聚类中心。
# 2. 计算各聚类中心新的向量值(该类型下所有像素的RGB均值)。
# 3. 计算代价函数J=所有像素到其聚类中心距离之和(这里距离指RGB or HSV距离)
# 4. 一直迭代到J收敛。
# 应用:以下是在RGB空间的应用(可以看出他把沙滩和海放在一个类别了,说明RGB分割效果不是很好,有时间再搞一个HSV分割)
# 0. 先随机生成K个中心
K = 3
centroid_r = np.random.randint(0, 255, [K, 1])
centroid_g = np.random.randint(0, 255, [K, 1])
centroid_b = np.random.randint(0, 255, [K, 1])
centroid_0 = np.concatenate((centroid_b, centroid_g, centroid_r), axis=1)[0]
centroid_1 = np.concatenate((centroid_b, centroid_g, centroid_r), axis=1)[1]
centroid_2 = np.concatenate((centroid_b, centroid_g, centroid_r), axis=1)[2]
# 1. Tile each center img.shape[0]*img.shape[1] times, i.e. flatten it, so the later matrix operations are faster
centroid_reshape_0 = np.tile(centroid_0, (img.shape[0] * img.shape[1], 1))
centroid_reshape_1 = np.tile(centroid_1, (img.shape[0] * img.shape[1], 1))
centroid_reshape_2 = np.tile(centroid_2, (img.shape[0] * img.shape[1], 1))
# 2. Flatten the original image too (rather than a 2-D layout) so it matches the centroid matrices in size and the matrix operations are faster
img_reshape = img.reshape(img.shape[0] * img.shape[1], 3)
# 3. dist stores the Euclidean distance between each pixel's three RGB values and each centroid's RGB
dist = np.zeros([img.shape[0] * img.shape[1], K], dtype=np.float32)
# 4. Pick an initial J (ensuring J_last = 99999 cannot be smaller than the first truly computed J) and start iterating
J_last = 99999
J = 9999
t = 10  # iteration budget: usually under 10 rounds, otherwise just stop
while J < J_last and t > 0:
print(t)
t = t-1
J_last = J
    # 0. compute the distance of every pixel to each of the three centroids
dist[:, 0] = np.linalg.norm(centroid_reshape_0 - img_reshape, axis=1)
dist[:, 1] = np.linalg.norm(centroid_reshape_1 - img_reshape, axis=1)
dist[:, 2] = np.linalg.norm(centroid_reshape_2 - img_reshape, axis=1)
    # 1. label each pixel with the centroid at minimum distance
label = np.argmin(dist, axis=1)
index0 = np.argwhere(label == 0)
index1 = np.argwhere(label == 1)
index2 = np.argwhere(label == 2)
    # 2. count how many pixels follow each centroid
length0 = len(index0)
length1 = len(index1)
length2 = len(index2)
    # 3. recompute each centroid from its assigned pixels
    centroid_0 = np.round(1/length0*np.sum(img_reshape[np.squeeze(index0, axis=1)], axis=0))
    centroid_1 = np.round(1/length1*np.sum(img_reshape[np.squeeze(index1, axis=1)], axis=0))
    centroid_2 = np.round(1/length2*np.sum(img_reshape[np.squeeze(index2, axis=1)], axis=0))
    # 4. tile the centroids again to keep the matrix operations convenient
centroid_reshape_0 = np.tile(centroid_0, (img.shape[0] * img.shape[1], 1))
centroid_reshape_1 = np.tile(centroid_1, (img.shape[0] * img.shape[1], 1))
centroid_reshape_2 = np.tile(centroid_2, (img.shape[0] * img.shape[1], 1))
    # compute the cost function: here, the sum of distances from all pixels to their centroids
J0 = np.sum(np.linalg.norm(img_reshape[np.squeeze(index0, axis=1)] - np.tile(centroid_0, (length0, 1)), axis=1))
J1 = np.sum(np.linalg.norm(img_reshape[np.squeeze(index1, axis=1)] - np.tile(centroid_1, (length1, 1)), axis=1))
J2 = np.sum(np.linalg.norm(img_reshape[np.squeeze(index2, axis=1)] - np.tile(centroid_2, (length2, 1)), axis=1))
J = (J0+J1+J2)/(img.shape[0]*img.shape[1])
# 5. build the masks in preparation for plotting
mask0 = np.zeros(img.shape, dtype=np.uint8)
mask1 = np.zeros(img.shape, dtype=np.uint8)
mask2 = np.zeros(img.shape, dtype=np.uint8)
for i in range(length0):
mask0[np.squeeze(index0)[i]//img.shape[1], np.squeeze(index0)[i]%img.shape[1]] = 1
for i in range(length1):
mask1[np.squeeze(index1)[i]//img.shape[1], np.squeeze(index1)[i]%img.shape[1]] = 1
for i in range(length2):
mask2[np.squeeze(index2)[i]//img.shape[1], np.squeeze(index2)[i]%img.shape[1]] = 1
out0 = mask0*img
out1 = mask1*img
out2 = mask2*img
cv2.imshow('0', img)
cv2.imshow('1', out0)
cv2.imshow('2', out1)
cv2.imshow('3', out2)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
import os, pickle
import numpy as np
import pandas as pd
from statsmodels.distributions import ECDF
import model_run
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn.metrics as skm
from statsmodels.graphics.gofplots import qqplot
import argparse
parser = argparse.ArgumentParser(description="Visualize some PCE evaluation metrics")
parser.add_argument("exp_name", type=str, help="name of PCM experiment to reference")
parser.add_argument("-r", "--reference", type=str, required=True,
help="Reference sample design to plot")
parser.add_argument("-i", "--interact", action="store_true",
help="Enable interactive mode (draw plots to screen)")
parser.add_argument("--params", action='store_true',
help="Include plots for ARG/MBN parameterizations")
parser.add_argument("--plots", nargs="+", type=str,
help="Control which plots to produce")
def compute_stats(obs, act):
mae = skm.mean_absolute_error(act, obs)
r2 = skm.r2_score(act, obs)
rmse = np.sqrt(np.sum((obs-act)**2.)/len(act))
nrmse = rmse/np.sqrt(np.sum((act**2.)/len(act)))
rel_err = 100.*(obs - act)/act
## Mask egregiously high values (1000% error) which screw up the spread
rel_err = rel_err[np.abs(rel_err) <= 1000.]
mre = np.mean(rel_err)
mre_std = np.std(rel_err)
stats = {
'mae': mae, 'r2': r2, 'rmse': rmse, 'nrmse': nrmse,
'mre': mre, 'mre_std': mre_std,
}
return stats
def plot_dists_base(param, parcel, var_name, param_name, lims, exp_name = '', ref_name='', savefig=False, **kwargs):
pct_levs = np.linspace(0., 100., 11)
fig, axs = plt.subplots(1, 2, figsize=(10, 4))
plt.subplots_adjust(wspace=0.25, bottom=0.15)
ax_cdf, ax_pdf = axs
## Compute empirical CDFs
param_cdf = ECDF(param.ravel())
param_percentiles = [np.percentile(param.ravel(), x) for x in pct_levs]
parcel_cdf = ECDF(parcel.ravel())
parcel_percentiles = [np.percentile(parcel.ravel(), x) for x in pct_levs]
ax_cdf.plot(parcel_percentiles, pct_levs/100., color='k', lw=5,
label="parcel model")
ax_cdf.plot(param_percentiles, pct_levs/100., "--o", ms=8, label=param_name)
ax_cdf.legend(loc='best')
ax_cdf.set_xlim(*lims)
ax_cdf.set_xlabel(var_name)
ax_cdf.set_ylim(0, 1)
ax_cdf.set_ylabel("Cumulative Probability")
## PDFs
ax_pdf = sns.distplot(parcel, hist=False,
color='k', label="parcel model", ax=ax_pdf,
kde_kws={'lw': 5})
ax_pdf = sns.distplot(param, hist=False, kde_kws={'linestyle': 'dashed'},
label=param_name, ax=ax_pdf)
ax_pdf.set_xlim(*lims)
ax_pdf.set_xlabel(var_name)
ax_pdf.set_ylabel("Probability Density")
ax_pdf.legend(loc="best")
if savefig:
var_fn = fn_out_fix(var_name)
plt.savefig(os.path.join(plot_dir, "%s_%s_%s_%s_cdfs.pdf" % (ref_name, exp_name, var_fn, param_name)))
return ax_cdf, ax_pdf
def plot_one_one_base(param, parcel, var_name, param_name, lims, coloring='k', loglog=False, ax=None, error_pct=0.5, exp_name='', ref_name='', savefig=False, **kwargs):
if not ax:
fig = plt.figure(figsize=(10, 6))
ax = fig.add_subplot(111)
## Mask for infinites
mask = np.isfinite(parcel)
parcel = parcel[mask]
param = param[mask]
if isinstance(coloring, (pd.Series, np.ndarray, list)):
coloring = coloring[mask]
ax.scatter(parcel, param, marker='.', s=30, c=coloring,
edgecolor='none', alpha=0.8, cmap=plt.get_cmap("OrRd"), **kwargs)
oo = np.linspace(lims[0], lims[1], 100)
ax.plot(oo, oo, color='grey', lw=3)
ax.plot(oo, oo*error_pct, color='k', lw=1, alpha=0.8)
ax.plot(oo, oo*(1.+error_pct), color='k', lw=1, alpha=0.8)
ax.set_xlim(lims)
ax.set_ylim(lims)
if loglog:
ax.loglog()
ax.set_xlabel("%s, parcel model" % var_name)
ax.set_ylabel("%s, %s" % (var_name, param_name))
stats = compute_stats(param, parcel)
ax.text(0.05, 0.775, stat_label.format(**stats),
transform=ax.transAxes, fontsize=12)
if savefig:
var_fn = fn_out_fix(var_name)
plt.savefig(os.path.join(plot_dir, "%s_%s_%s_%s_oneone.pdf" % (ref_name, exp_name, var_fn, param_name)))
return ax
###########################################################
## Plot aesthetics and configuration
sns.set(style="darkgrid", context="talk", palette="Dark2")
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
def fn_out_fix(s, chars="_${}"):
for char in chars:
s = s.replace(char, "")
return s
fn_fix = lambda s: s.replace("_", "\_")
def param_name(s):
if s in ["ARG", "MBN"]:
return s
else:
bits = s.split("_")
order = int(bits[-1])
return "PCE order %d" % order
stat_label = "RMSE: {rmse:1.2f} ({nrmse:1.2f})\n" + \
" MAE: {mae:1.2f}\n" + \
" R$^2$: {r2:1.2f}\n" + \
" MRE: {mre:2.2f}$\%$ ({mre_std:2.2f}$\%$)"
z_func = lambda z: 10.**z
plot_dir = "figs/"
N_lims = 1, 5
S_lims = -5, -1
###########################################################
if __name__ == "__main__":
args = parser.parse_args()
print "Creating plots for %s" % args.exp_name
print " Sample data:", args.reference
if args.interact:
print " Interactive mode"
plt.ion()
else:
plt.ioff()
if args.params:
print " Plotting ARG/MBN figures"
if not args.plots:
ALL_PLOTS = True
print " Plotting all plots"
else:
ALL_PLOTS = False
print " Plotting", ", ".join(args.plots)
print "\n"
## Over-write plotting functions with exp_name
def plot_one_one(*v, **kw):
return plot_one_one_base(*v, exp_name=args.exp_name, ref_name=args.reference,
savefig=(not args.interact), **kw)
def plot_dists(*v, **kw):
return plot_dists_base(*v, exp_name=args.exp_name, ref_name=args.reference,
savefig=(not args.interact),**kw)
## Unload the configured experiment
exp_dict = pickle.load(open("%s_exp.dict" % args.exp_name, 'r'))
results_dict = pickle.load(open("%s_results.dict" % args.exp_name, 'r'))
design_df = pd.read_csv("%s.csv" % args.reference, index_col=0)
results_df = pd.read_csv("%s_results.csv" % args.reference, index_col=0)
pce_keys = results_dict.keys()
if args.params:
param_keys = pce_keys + ["ARG", "MBN"]
else:
param_keys = pce_keys
all_plots = {}
###########################################################
## SET 1) one-one plots
if ALL_PLOTS or "oneone" in args.plots:
print "One-one plots..."
oo_kwargs = {
#'coloring': 10.**design_df['logV'],
}
# a) log10(Smax)
var_key = "Smax"
var_name = r"log10(S_max)"
lims = [-4, -1]
parcel = results_df['%s_parcel' % var_key]
print var_name
for key in param_keys:
print " ", key
ax = plot_one_one(results_df['%s_%s' % (var_key, key)], parcel,
var_name, param_name(key), lims, **oo_kwargs)
fig = ax.get_figure()
all_plots[var_name, key] = (fig, ax)
if args.interact: plt.show()
# b) Smax
var_key = "Smax"
var_name = r"S_max"
lims = [1e-4, 5e-1]
parcel = z_func(results_df['%s_parcel' % var_key])
print var_name
for key in param_keys:
print " ", key
ax = plot_one_one(z_func(results_df['%s_%s' % (var_key, key)]), parcel,
var_name, param_name(key), lims, loglog=True, **oo_kwargs)
fig = ax.get_figure()
all_plots[var_name, key] = (fig, ax)
if args.interact: plt.show()
# c) Neq
var_key = "Neq"
var_name = r"log10(N_eq)"
lims = [1, 4]
parcel = results_df['%s_parcel' % var_key]
print var_name
for key in param_keys:
print " ", key
ax = plot_one_one(results_df['%s_%s' % (var_key, key)], parcel,
var_name, param_name(key), lims, **oo_kwargs)
fig = ax.get_figure()
all_plots[var_name, key] = (fig, ax)
if args.interact: plt.show()
# e) Nderiv
var_key = "Nderiv"
var_name = r"log10(N_d)"
lims = [1, 4]
parcel = results_df['Neq_parcel']
print var_name
for key in pce_keys:
print " ", key
ax = plot_one_one(results_df['%s_%s' % (var_key, key)], parcel,
var_name, param_name(key), lims, **oo_kwargs)
fig = ax.get_figure()
all_plots[var_name, key] = (fig, ax)
if args.interact: plt.show()
# c) Nderiv
var_key = "Nderiv"
var_name = r"N$_{d}$"
lims = map(lambda x: 10.**x, N_lims)
parcel = 10.**(results_df['Neq_parcel'])
print var_name
for key in pce_keys:
print " ", key
ax = plot_one_one(10.**(results_df['%s_%s' % (var_key, key)]), parcel,
var_name, param_name(key), lims, loglog=True, **oo_kwargs)
fig = ax.get_figure()
all_plots[var_name, key] = (fig, ax)
if args.interact: plt.show()
###########################################################
    ## SET 2) CDF/PDF plots
if ALL_PLOTS or "pdf" in args.plots:
print "CDFs/PDFs..."
pdf_kwargs = { }
# a) log10(Smax)
var_key = "Smax"
var_name = r"log10(S_max)"
lims = [-5, 0]
parcel = results_df['%s_parcel' % var_key]
print var_name
for key in param_keys:
print " ", key
axs = plot_dists(results_df['%s_%s' % (var_key, key)], parcel,
var_name, param_name(key), lims, **pdf_kwargs)
fig = axs[0].get_figure()
all_plots[var_name, key] = (fig, axs)
if args.interact: plt.show()
# b) log10(Neq)
var_key = "Neq"
var_name = r"log10(N_eq)"
lims = [0, 4]
parcel = results_df['%s_parcel' % var_key]
print var_name
for key in param_keys:
print " ", key
axs = plot_dists(results_df['%s_%s' % (var_key, key)], parcel,
var_name, param_name(key), lims, **pdf_kwargs)
fig = axs[0].get_figure()
all_plots[var_name, key] = (fig, axs)
if args.interact: plt.show()
# c) log10(Nderiv)
var_key = "Nderiv"
var_name = r"log10(N_d)"
lims = [0, 4]
parcel = results_df['Neq_parcel']
print var_name
for key in pce_keys:
print " ", key
axs = plot_dists(results_df['%s_%s' % (var_key, key)], parcel,
var_name, param_name(key), lims, **pdf_kwargs)
fig = axs[0].get_figure()
all_plots[var_name, key] = (fig, axs)
if args.interact: plt.show()
## Clean up
#plt.close('all')
'''
###############################################################
## Loop over all the experiments
run_names = sorted(results_dict.keys())
for i, run_name in enumerate(run_names):
#if run_name != "expansion_order_4": continue
run_dict = results_dict[run_name]
n_terms.append(run_dict['pce'].nterms)
pce_lhs_results = run_dict['pce'](z_design)
np.save("%s_%s_pce_smax.npy" % (exp_name, run_name), pce_lhs_results)
pce_lhs_results_positive = np.ma.masked_less_equal(pce_lhs_results, 0)
pce_ecdf = ECDF(pce_lhs_results.ravel())
pce_percentiles = [np.percentile(pce_lhs_results.ravel(), x) for x in pct_levs]
n_pos = pce_lhs_results_positive.count()
n_tot = len(pce_lhs_results)
if use_log:
ln_pce = np.log(pce_lhs_results[pce_lhs_results > 0.])
ln_ana = np.log(lhs_results[pce_lhs_results > 0.])
rmse = np.sqrt(np.sum((ln_pce - ln_ana)**2.)/n_good)
RMSEs.append(rmse)
else:
rmse = np.sqrt(np.sum(pce_lhs_results - lhs_results)**2.)/n_tot
RMSEs.append(rmse)
mae = skm.mean_absolute_error(lhs_results, pce_lhs_results)
r2 = skm.r2_score(lhs_results, pce_lhs_results)
rel_err = 100.*(pce_lhs_results - lhs_results)/lhs_results
mre = np.mean(rel_err)
mre_std = np.std(rel_err)
print run_name
print """
response fn eval
----------------
(min, max): {min}, {max}
# of terms: {n_terms}
RMSE: {rmse}
MAE: {mae}
R^2: {r2}
mean rel err: {mre:2.2f}% ({mre_std:2.2f}%)
""".format(n_terms=run_dict['pce'].nterms, rmse=rmse, mae=mae, r2=r2,
min=pce_lhs_results.min(), max=pce_lhs_results.max(),
mre=mre, mre_std=mre_std)
#if not run_name == "sparse_grid_level_6": continue
if PLOT_1 or PLOT_2 or PLOT_3 or PLOT_5:
fig, [[ax_cdf, ax_pdf, ax_bins],
[ax_oo, ax_nact, ax_af],
[ax_oo_bin, ax_nact_bin, ax_af_bin]] = \
plt.subplots(3, 3, num=run_name, figsize=(15, 12))
###########################################################
## 1) CDF/PDF for true vs each PCE generated in the experiment
print "Figure 1 - CDF/PDF for each PCE from experiment"
if PLOT_1:
ax_cdf.plot(lhs_percentiles, pct_levs/100., color='k', lw=5,
label="analytical")
ax_cdf.plot(pce_percentiles, pct_levs/100., "-", ms=2, label=fn_fix(run_name))
ax_cdf.legend(loc='lower right')
ax_cdf.set_xlim(res_min, res_max)
if use_log:
ax_cdf.semilogx()
ax_cdf.set_xlabel('Response')
ax_cdf.set_ylim(0, 1)
ax_cdf.set_ylabel("Cumulative Probability")
if use_log:
mask = pce_lhs_results > 0
ax_pdf = sns.distplot(lhs_results[mask], hist=False,
color='k', label="analytical", ax=ax_pdf,
kde_kws={'lw': 5})
ax_pdf = sns.distplot(pce_lhs_results[mask], hist=False,
color='r', label=fn_fix(run_name), ax=ax_pdf)
else:
ax_pdf = sns.distplot(lhs_results, hist=False,
color='k', label="analytical", ax=ax_pdf,
kde_kws={'lw': 5})
ax_pdf = sns.distplot(pce_lhs_results, hist=False,
color='r', label=fn_fix(run_name), ax=ax_pdf)
ax_pdf.set_xlabel('Response')
ax_pdf.set_ylabel("Probability Density")
ax_pdf.legend(loc="upper left")
else: print "...skipping"
###########################################################
## 2) one-one plot for analytical vs each PCE
print "Figure 2 - One-to-one plots for each PCE from experiment"
if PLOT_2:
if use_log:
ss = np.logspace(np.log10(res_min), np.log10(res_max), 100)
mask = pce_lhs_results > 0
ax_oo.scatter(lhs_results[mask], pce_lhs_results[mask], marker='.', s=12,
color='k', alpha=0.5, label=run_name, edgecolor='none')
ax_oo.loglog()
ax_oo.plot(ss, ss, color='grey', lw=3)#, zorder=100)
else:
ss = np.linspace(res_min, res_max, 100)
ax_oo.scatter(lhs_results, pce_lhs_results, color='k', alpha=0.5,
marker='.', s=12, label=run_name,
edgecolor='none')
#bins = np.linspace(res_min, res_max, 21)
#ax_oo.hist2d(lhs_results, pce_lhs_results, bins=bins,
# norm=LogNorm())
ax_oo.plot(ss, ss, color='grey', lw=3)#, zorder=100)
ax_oo.plot(ss, ss*.5, color='k', lw=1, alpha=0.8)#, zorder=100)
ax_oo.plot(ss, ss*2., color='k', lw=1, alpha=0.8)#, zorder=100)
ax_oo.set_xlim(res_min, res_max)
ax_oo.set_ylim(res_min, res_max)
ax_oo.set_xlabel("Analytical")
ax_oo.set_ylabel("Emulator")
ax_oo.set_title("Modeled response function", loc='left')
ax_oo.text(0.05, 0.775, stat_label.format(rmse=rmse, mae=mae,
r2=r2, mre=mre, mre_std=mre_std),
transform=ax_oo.transAxes)
else: print "...skipping"
###########################################################
## 2) one-one plot for analytical vs each PCE, but number activated
print "Figure 3 - One-to-one plots for Nacts from experiment"
if PLOT_3:
fn = model_run.__dict__[exp_dict['function_name']]
zipped = zip(design.T, pce_lhs_results)
fn_nact = lambda z, smax : fn(*z, fn_toggle=smax)
pce_nacts = np.array([fn_nact(z, z_func(smax)) for z, smax in zipped])
if READ_CACHED_NACTS:
pce_nacts = np.load("%s_%s_pce_nact.npy" % (exp_name, run_name))
else:
pce_nacts = np.array([fn_nact(z, 10.**(smax)) for z, smax in zipped])
np.save("%s_%s_pce_nact.npy" % (exp_name, run_name), pce_nacts)
ss = np.linspace(10, 40000, 100)
ax_nact.set_xlabel("Analytical")
ax_nact.set_ylabel("Emulator")
ax_nact.set_title("Computed CDNC", loc='left')
mask = lhs_results > np.log10(0.01/100) # 0.05 % Smax
ax_nact.scatter(lhs_nacts[mask], pce_nacts[mask],
c=lhs_results[mask], marker='.', s=12, label=run_name,
edgecolor='none', cmap=plt.get_cmap("OrRd"))
#bins = np.logspace(1, 4.4, 21)
#ax_nact.hist2d(lhs_nacts, pce_nacts)
ax_nact.plot(ss, ss, color='grey', lw=3)#, zorder=100)
ax_nact.plot(ss, ss*.5, color='k', lw=1, alpha=0.8)#, zorder=100)
ax_nact.plot(ss, ss*2., color='k', lw=1, alpha=0.8)#, zorder=100)
ax_nact.semilogx()
ax_nact.semilogy()
ax_nact.set_xlim(10, 1000)
ax_nact.set_ylim(10, 1000)
rmse = np.sqrt(np.sum(pce_nacts - lhs_nacts)**2.)/n_tot
mae = skm.mean_absolute_error(lhs_nacts, pce_nacts)
r2 = skm.r2_score(lhs_nacts[mask], pce_nacts[mask])
rel_err = 100*(pce_nacts - lhs_nacts)/lhs_nacts
rel_err = np.ma.masked_greater(rel_err, 10)
mre = np.mean(rel_err[mask])
mre_std = np.std(rel_err[mask])
print """
diag CDNC eval
----------------
RMSE: {rmse}
MAE: {mae}
R^2: {r2}
mean rel err: {mre:2.2f}% ({mre_std:2.2f}%)
""".format(n_terms=run_dict['pce'].nterms, rmse=rmse, mae=mae, r2=r2,
mre=mre, mre_std=mre_std)
ax_nact.text(0.05, 0.775, stat_label.format(rmse=rmse, mae=mae, r2=r2,
mre=mre, mre_std=mre_std),
transform=ax_nact.transAxes)
else: print "...skipping"
###########################################################
## 3) one-one plot for analytical vs each PCE, but act frac
print "Figure 3 - One-to-one plots for act fracs from experiment"
if PLOT_3:
lhs_afs = lhs_nacts/Ns
pce_afs = pce_nacts/Ns
ss = np.linspace(0, 1, 100)
ax_af.set_xlim(0, 1)
ax_af.set_ylim(0, 1)
ax_af.set_xlabel("Analytical")
ax_af.set_ylabel("Emulator")
ax_af.set_title("Computed Act. Fraction", loc='left')
ax_af.scatter(lhs_afs, pce_afs,
c=lhs_results, marker='.', s=12, label=run_name,
edgecolor='none', cmap=plt.get_cmap("OrRd"))
ax_af.plot(ss, ss, color='grey', lw=3)#, zorder=100)
ax_af.plot(ss, ss*.5, color='k', lw=1, alpha=0.8)#, zorder=100)
ax_af.plot(ss, ss*2., color='k', lw=1, alpha=0.8)#, zorder=100)
else: print "...skipping"
##########################################################
## 3) binned plot of spread as a fcn of log(Smax)
print "Figure 5 - binned spread - log(Smax)"
if PLOT_5:
plt.rc('text', usetex=False)
log_smax_bins = np.linspace(res_min, res_max, 12+1)
bin_names = ["%2.1f - %2.1f\n%2d" % (l, r, i) for i, (l, r) \
in enumerate(zip(log_smax_bins[:-1], log_smax_bins[1:])) ]
bin_numbers = np.digitize(lhs_results, log_smax_bins[1:-1])
bin_df = pd.DataFrame(data={ "bin_numbers": bin_numbers,
"ana_lhs": lhs_results,
"pce_lhs": pce_lhs_results,
"ana_nacts": lhs_nacts,
"pce_nacts": pce_nacts,
"ana_afs": lhs_afs,
"pce_afs": pce_afs,
"rel_lhs_diff": (pce_lhs_results - lhs_results)/lhs_results,
"rel_nacts_diff": (pce_nacts - lhs_nacts)/lhs_nacts,
"rel_afs_diff": (pce_afs - lhs_afs)/lhs_afs })
Smaxes = z_func(bin_df.ana_lhs)*100.
rel_errs = bin_df.rel_lhs_diff*100.
mask = (Smaxes > 0.01) & (Vs > 0.2) & (Vs < 10.0)
print " Number of vals in rel err sample:", len(rel_errs[mask])
ax_oo_bin.scatter(Smaxes[mask], rel_errs[mask],
edgecolor='None', color='k', alpha=0.5, marker='.')
ax_oo_bin.semilogx()
ax_oo_bin.set_ylim(-50, 50)
ax_oo_bin.set_xlim(1e-2, 5.)
#ax_oo_bin.set_xticklabels(bin_names, rotation=90)
#ax_oo_bin.set_xlabel(fn_fix("bin_number"))
ax_oo_bin.set_xlabel("Smax ($\%$)")
ax_oo_bin.set_ylabel(fn_fix("Relative Error in Smax ($\%$)"))
sns.barplot(bin_numbers, ci=None, palette="OrRd", ax=ax_bins)
ax_bins.set_xlabel(fn_fix("bin_number"))
plt.rc('text', usetex=True)
else: print "...skipping"
##########################################################
## 4) binned plot of spread in nact as a fcn of log(Smax)
print "Figure 5 - binned spread - Nact"
if PLOT_5:
plt.rc('text', usetex=False)
Smaxes = z_func(bin_df.ana_lhs)*100.
rel_errs = bin_df.rel_nacts_diff*100.
mask = (Smaxes > 0.01) & (Vs > 0.2) & (Vs < 10.0)
ax_nact_bin.scatter(Smaxes[mask], rel_errs[mask],
edgecolor='None', color='k', alpha=0.5, marker='.')
ax_nact_bin.semilogx()
#ax_nact_bin.set_xticklabels(bin_names, rotation=90)
ax_nact_bin.set_ylim(-150, 150)
ax_nact_bin.set_xlim(1e-2, 5.)
#ax_nact_bin.set_xlabel(fn_fix("bin_number"))
ax_nact_bin.set_xlabel("Smax ($\%$)")
ax_nact_bin.set_ylabel(fn_fix("Relative Error in CDNC ($\%$)"))
plt.rc('text', usetex=True)
else: print "...skipping"
##########################################################
## 4) binned plot of spread in nact as a fcn of log(Smax)
print "Figure 5 - binned spread - act fracs"
if PLOT_5:
plt.rc('text', usetex=False)
Smaxes = z_func(bin_df.ana_lhs)*100.
rel_errs = bin_df.rel_afs_diff*100.
mask = (Smaxes > 0.01) & (Vs > 0.2) & (Vs < 10.0)
ax_af_bin.scatter(Smaxes[mask], rel_errs[mask],
edgecolor='none', color='k', alpha=0.5, marker='.')
ax_af_bin.semilogx()
#ax_af_bin.set_xticklabels(bin_names, rotation=90)
ax_af_bin.set_ylim(-150, 150)
ax_af_bin.set_xlim(1e-2, 5.)
#ax_af_bin.set_xlabel(fn_fix("bin_number"))
ax_af_bin.set_xlabel("Smax ($\%$)")
ax_af_bin.set_ylabel(fn_fix("Relative Error in act. frac. ($\%$)"))
plt.rc('text', usetex=True)
else: print "...skipping"
###########################################################
if PLOT_1 or PLOT_2 or PLOT_3 or PLOT_5:
sns.despine()
#plt.draw()
plt.tight_layout()
plt.pause(0.1)
#fig.tight_layout()
#break
c = raw_input("save/continue?")
if c == "y":
plt.savefig(plot_dir+exp_name+"_"+run_name[-1]+".pdf",
transparent=True, bbox_inches='tight')
###########################################################
## 3) RMSE in ln(response)
print "Figure 4 - Error"
if PLOT_4:
fig = plt.figure(num="error_change", figsize=(5,4))
ax_err = fig.add_subplot(111)
err_df = pd.DataFrame(data={'nterms': n_terms[::-1],
'RMSE': RMSEs[::-1]})
print err_df
err_df.plot(x='nterms', y="RMSE", ax=ax_err)
ax_err.scatter(n_terms, RMSEs)
plt.pause(0.1)
else: print "...skipping"
###########################################################
## 6) Matrix of scatterplots to evaluate LHS distribution
print "Figure 6 - LHS scatterplots"
if PLOT_6:
plt.rc('text', usetex=False)
## Not quite working at the moment due to bug in seaborn
#fig = plt.figure(num="LHS scatterplot")
var_names = [v[0] for v in exp_dict['variables']]
design_fix = design[:, ::50].copy()
labels = np.random.randint(1, 4, len(design_fix[0, :]))
for i, v in enumerate(var_names):
if v.startswith("log"):
var_names[i] = v[2:]
design_fix[i, :] = z_func(design_fix[i, :])
design_df = pd.DataFrame(design_fix.T, columns=var_names)
design_df['label'] = labels
#g = sns.PairGrid(design_df, hue='label', diag_sharey=False, size=1.5)
#g.map_lower(plt.scatter)
#g.map_diag(sns.kdeplot, lw=3)
from itertools import combinations
var_combos = np.array(list(combinations(var_names, 2)))
inds = np.random.choice(range(len(var_combos)), 4)
var_combos = var_combos[inds]
fig, axes = plt.subplots(2, 2, num="LHS scatterplot")
for i, ax in enumerate(axes.ravel()):
var_x, var_y = var_combos[i]
design_df.plot(var_x, var_y, kind='scatter', ax=ax)
plt.tight_layout()
plt.pause(0.1)
plt.rc('text', usetex=True)
else: print "...skipping"
'''
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import matplotlib as mpl
import platform
# for rendering graph on remote server.
# see: https://qiita.com/TomokIshii/items/3a26ee4453f535a69e9e
if platform.system() != "Darwin":
mpl.use('Agg')
import warnings
warnings.filterwarnings('ignore', category=FutureWarning, message="Conversion of the second")
warnings.filterwarnings('ignore', category=RuntimeWarning, message="invalid value encountered in sqrt")
import chainer
import chainer.functions as F
import chainer.links as L
from chainer.datasets import TransformDataset
import os
import numpy as np
from PIL import Image
import json
import argparse
from chainer import training
from chainer.training import extensions
class Block(chainer.Chain):
"""
畳み込み層
"""
def __init__(self, out_channels, ksize, stride=1, pad=1):
super(Block, self).__init__()
with self.init_scope():
self.conv = L.Convolution2D(None, out_channels, ksize, stride, pad)
self.bn = L.BatchNormalization(out_channels)
def __call__(self, x):
h = self.conv(x)
h = self.bn(h)
return F.relu(h)
class Mymodel(chainer.Chain):
def __init__(self, n_out):
super(Mymodel, self).__init__()
with self.init_scope():
            self.block1_1 = Block(64, 8, 2, 2)  # input: args.size^2 * 3 values (e.g. 300^2 * 3 = 270000)
self.block1_2 = Block(64, 5)
self.block2_1 = Block(128, 3)
self.block2_2 = Block(128, 3)
self.block3_1 = Block(256, 3)
self.block3_2 = Block(256, 3)
self.block4_1 = Block(512, 3)
self.block4_2 = Block(256, 3)
self.fc1 = L.Linear(4096)
self.fc2 = L.Linear(2048)
            # still investigating the commented-out layer below
#self.bn_fc1 = L.BatchNormalization(512)
self.fc3 = L.Linear(n_out)
def loss_func(self, x, t):
y = self.predict(x)
        # loss = F.sigmoid_cross_entropy(y, t)
        loss = F.bernoulli_nll(t.astype("f"), y) / len(y)
        # When a label is present (t_ == 1): -log(y_)  (y_ might be 0?)
        # When absent (t_ == 0): -log(1 - y_); here t_ and y_ are elements of t and y.
        # Sum all of the above and divide by the batch size.
chainer.reporter.report({'loss': loss}, self)
accuracy = self.accuracy(y.data, t)
        chainer.reporter.report({'accuracy': accuracy[0]}, self)  # fraction of samples whose whole label vector matches exactly
        chainer.reporter.report({'frequent_error': accuracy[1]}, self)  # label most often misclassified within the batch
        chainer.reporter.report({'acc_66': accuracy[2]}, self)  # accuracy on label 66
return loss
def accuracy(self, y, t):
y = chainer.cuda.to_cpu(y)
t = chainer.cuda.to_cpu(t)
y_binary = (y > 0).astype(int)
        accuracy1 = sum([1 if all(i) else 0 for i in (y_binary == t)]) / len(y)  # fraction of samples whose whole label vector matches exactly
acc_66 = np.sum((y_binary[:, 66] == t[:, 66]).astype(int)) / len(y)
return accuracy1, np.sum((y_binary != t).astype(int), 0).argsort()[-1] + 1, acc_66
def predict(self, x):
# 64 channel blocks:
h = self.block1_1(x)
h = F.dropout(h, ratio=0.3)
h = self.block1_2(h)
h = F.max_pooling_2d(h, ksize=2, stride=2)
# 128 channel blocks:
h = self.block2_1(h)
h = F.dropout(h, ratio=0.3)
h = self.block2_2(h)
h = F.max_pooling_2d(h, ksize=2, stride=2)
# 256 channel blocks:
h = self.block3_1(h)
h = F.dropout(h, ratio=0.3)
h = self.block3_2(h)
h = F.max_pooling_2d(h, ksize=2, stride=2)
# 512 channel blocks:
h = self.block4_1(h)
h = F.dropout(h, ratio=0.3)
h = self.block4_2(h)
h = F.max_pooling_2d(h, ksize=2, stride=2)
h = F.dropout(h, ratio=0.4)
h = self.fc1(h)
h = F.relu(h)
h = F.dropout(h, ratio=0.5)
h = self.fc2(h)
h = F.relu(h)
        h = F.dropout(h, ratio=0.5)  # too much dropout?
return self.fc3(h)
class Transform(object):
def __init__(self, args, json_data):
self.label_variety = args.label_variety
self.size = args.size
self.json_data = json_data
self.data_folder = 'data/' + args.object + '_images/'
def __call__(self, num):
img_data = Image.open(self.data_folder + str(num + 1) + '.jpg')
        img_data = img_data.resize([self.size] * 2, Image.ANTIALIAS)  # resize the image to a fixed size
        array_img = np.asarray(img_data).transpose(2, 0, 1).astype(np.float32) / 255.  # reorder to CHW and scale values into [0, 1]
        label = np.array([1 if i in self.json_data[num] else 0 for i in range(self.label_variety)])
        # multi-hot list: 1 if the label id is attached to this image, 0 otherwise
        #
        # e.g. with labels 1, 2 and 10 attached:
        # [1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, ...]
return array_img, label
def main():
parser = argparse.ArgumentParser(description='Linear iMaterialist_Challenge:')
parser.add_argument('--batchsize', '-b', type=int, default=128,
help='Number of images in each mini-batch')
parser.add_argument('--epoch', '-e', type=int, default=1,
help='Number of sweeps over the dataset to train')
parser.add_argument('--out', '-o', default='result',
help='Directory to output the result')
parser.add_argument('--resume', '-r', default='', # result/resume.npz',
help='Resume the training from snapshot')
parser.add_argument('--early-stopping', type=str,
help='Metric to watch for early stopping')
parser.add_argument('--frequency', '-f', type=int, default=20,
help='Frequency of taking a snapshot')
parser.add_argument('--gpu', '-g', type=int, default=-1,
help='GPU ID (negative value indicates CPU)')
parser.add_argument('--unit', '-u', type=int, default=256,
help='Number of units')
    parser.add_argument('--noplot', dest='plot', action='store_false',
                        help='Disable PlotReport extension')
    parser.add_argument('--size', type=int, default=128)  # side length in px after resizing
    parser.add_argument('--label_variety', type=int, default=228)  # total number of observed labels; predictions are made over these
    parser.add_argument('--total_photo_num', type=int, default=10000)  # number of photos to use
    parser.add_argument('--object', type=str, default='train')  # use either the train or the test data
args = parser.parse_args()
model = Mymodel(args.label_variety)
if args.gpu >= 0:
# Make a specified GPU current
chainer.cuda.get_device_from_id(args.gpu).use()
model.to_gpu() # Copy the model to the GPU
optimizer = chainer.optimizers.Adam()
optimizer.setup(model)
with open('input/train.json', 'r') as f:
json_data = np.array([[int(j) for j in i["labelId"]] for i in json.load(f)["annotations"][:args.total_photo_num]])
dataset = TransformDataset(range(args.total_photo_num), Transform(args, json_data))
train, test = chainer.datasets.split_dataset_random(dataset, int(args.total_photo_num * 0.8), seed=3110)
    # keep 20% of the data for validation
train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
test_iter = chainer.iterators.SerialIterator(test, args.batchsize,
repeat=False, shuffle=False)
stop_trigger = (args.epoch, 'epoch')
# Early stopping option
if args.early_stopping:
stop_trigger = chainer.training.triggers.EarlyStoppingTrigger(
monitor=args.early_stopping, verbose=True,
max_trigger=(args.epoch, 'epoch'))
updater = training.updaters.StandardUpdater(
train_iter, optimizer, device=args.gpu, loss_func=model.loss_func)
trainer = training.Trainer(updater, stop_trigger, out=args.out)
evaluator = extensions.Evaluator(test_iter, model, device=args.gpu, eval_func=model.loss_func)
evaluator.trigger = 50, 'iteration'
trainer.extend(evaluator)
trainer.extend(extensions.dump_graph('main/loss'))
frequency = args.epoch if args.frequency == -1 else max(1, args.frequency)
trainer.extend(extensions.snapshot(), trigger=(frequency, 'iteration'))
trainer.extend(extensions.LogReport(trigger=(1, 'iteration')))
if args.plot and extensions.PlotReport.available():
trainer.extend(
extensions.PlotReport(['main/loss', 'validation/main/loss'],
'epoch', trigger=(1, 'iteration'), file_name='loss.png'))
trainer.extend(
extensions.PlotReport(
['main/accuracy', 'validation/main/accuracy'],
'epoch', trigger=(1, 'iteration'), file_name='accuracy.png'))
trainer.extend(
extensions.PlotReport(
['main/frequent_error', 'validation/main/frequent_error'],
'epoch', trigger=(1, 'iteration'), file_name='frequent_error.png'))
trainer.extend(extensions.PrintReport(
['epoch', 'iteration', 'main/loss', 'validation/main/loss',
'main/accuracy', 'validation/main/accuracy',
'main/frequent_error', 'validation/main/frequent_error', 'main/acc_66', 'elapsed_time'
]))
trainer.extend(extensions.ProgressBar())
    if args.resume and os.path.isfile(args.resume):
pass
#chainer.serializers.load_npz("result/snapshot_iter_0", trainer)
#chainer.serializers.load_npz("result/snapshot_iter_0", model, path='updater/model:main/')
# Run the training
trainer.run()
    # chainer.serializers.save_npz("resume.npz", model)  # save the trained model
if __name__ == '__main__':
main()
|
from django.contrib.auth.base_user import BaseUserManager
from django.contrib.auth.models import AbstractUser
from django.db import models
class UserManager(BaseUserManager):
    def _create_user(self, email, password, **extra_fields):
        """
        Create and save a user with the given email and password.
        """
        if not email:
            raise ValueError('The email must be set')
        email = self.normalize_email(email)
        user = self.model(email=email, **extra_fields)
        user.set_password(password)
        user.save(using=self._db)
        return user
    def create_user(self, email, password, **extra_fields):
        extra_fields.setdefault('is_staff', False)
        extra_fields.setdefault('is_superuser', False)
        return self._create_user(email, password, **extra_fields)
    def create_superuser(self, email, password, **extra_fields):
        extra_fields.setdefault('is_staff', True)
        extra_fields.setdefault('is_superuser', True)
        if extra_fields.get('is_staff') is not True:
            raise ValueError('Superuser must have is_staff=True.')
        if extra_fields.get('is_superuser') is not True:
            raise ValueError('Superuser must have is_superuser=True.')
        return self._create_user(email, password, **extra_fields)
class User(AbstractUser):
email = models.EmailField(max_length=254, unique=True)
    password = models.CharField(max_length=128)  # long enough for Django's hashed passwords
objects = UserManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = []
def __str__(self):
return self.email
class Profile(models.Model):
picture = models.ImageField(null=True)
username = models.CharField(max_length=50)
user = models.OneToOneField('users.User', on_delete=models.CASCADE, null=True)
introduction = models.TextField()
def __str__(self):
return self.username
class Like(models.Model):
user = models.ForeignKey('users.User', on_delete=models.CASCADE)
image = models.ForeignKey('photos.Photo', on_delete=models.CASCADE)
liked_at = models.DateTimeField(auto_now_add=True)
|
import numpy as np
from ActivationFunctions import *
from LossFunctions import *
"""
Our Dense class creates the layer, taking as parameters the following:
- layerSize: the size of the actual layer
- activation: this specifies the activation function we'll use for this layer
- weightBounds: the interval in which all weights will be initialized
"""
class Dense:
def __init__(self, layerSize, activation="sigmoid", weightBounds=(-1, 1)):
self.length = layerSize
self.bounds = weightBounds
self.function = activation
self.activation = activation
self.derivative = activation
if self.activation == "relu":
self.derivative = ReLU_prime
self.activation = ReLU
elif self.activation == "tanh":
self.activation = tanh
self.derivative = tanh_prime
elif self.activation == "softmax":
self.activation = softmax
self.derivative = lambda x: 1
else:
self.activation = sigmoid
self.derivative = sigmoid_prime
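# A minimal usage sketch (hypothetical): it assumes the star-imported
# ActivationFunctions module defines sigmoid, ReLU, tanh, softmax and their
# derivatives, exactly as referenced in __init__ above. Note that Dense only
# stores configuration here; no weights or forward pass are defined in this file.
if __name__ == "__main__":
    hidden = Dense(64, activation="relu", weightBounds=(-0.5, 0.5))
    output = Dense(10, activation="softmax")
    print(hidden.length, hidden.bounds, hidden.function)
    print(output.length, output.bounds, output.function)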
|
l = []
n = int(input("Enter number of elements: "))
for i in range(n):
    element = int(input("Enter element: "))
    l.append(element)
print(max(l))
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import time
import pygame
import OpenGL.GL as gl
import OpenGL.GLU as glu
import numpy as np
import itertools
import fractions
import copy
#local imports
from common import SETTINGS, COLORS, VSYNC_PATCH_HEIGHT_DEFAULT, VSYNC_PATCH_WIDTH_DEFAULT, DEFAULT_FLASH_RATE
from common import UserEscape
from screen import Screen
from checkerboard import CheckerBoard
class CheckerBoardFlasherScreen(Screen):
def setup(self,
nrows,
check_width = None,
check_color1 = 'white',
check_color2 = 'black',
screen_background_color = 'neutral-gray',
fixation_dot_color = None,
flash_rate = DEFAULT_FLASH_RATE,
#rate_compensation = None,
vsync_patch = "bottom-right",
vsync_value = None,
):
Screen.setup(self,
background_color = screen_background_color,
vsync_patch = vsync_patch,
)
#run colors through filter to catch names and convert to RGB
check_color1 = COLORS.get(check_color1, check_color1)
check_color2 = COLORS.get(check_color2, check_color2)
# set checkerboard-related attributes
if check_width is None:
check_width = 2.0/nrows #fill whole screen
self.board_width = check_width*nrows
self.nrows = nrows
self.CB1 = CheckerBoard(nrows, check_width, color1 = check_color1, color2 = check_color2, fixation_dot_color = fixation_dot_color)
self.CB2 = CheckerBoard(nrows, check_width, color1 = check_color2, color2 = check_color1, fixation_dot_color = fixation_dot_color) #reversed pattern
#self.CB_cycle = itertools.cycle((self.CB1,self.CB2))
# set time-related attributes
self._last_CB_change_time = None
self.flash_rate = flash_rate
self.flash_interval = 1.0/flash_rate
#self.rate_compensation = rate_compensation
# get useful coordinate values for checkerboard rendering locations
self.xC, self.yC = (-0.5*self.board_width,-0.5*self.board_width)
def start_time(self,t):
# get start time and set current CB objects (and their change times)
Screen.start_time(self,t)
self._t0 = t
self._last_CB_change_time = t
self._current_CB = self.CB1
self._last_CB = self.CB2
def render(self):
# do general OpenGL stuff
Screen.render_before(self)
# translate to position of left board and render
gl.glLoadIdentity()
gl.glTranslatef(self.xC, self.yC, 0.0)
self._current_CB.render()
# do FixationCross and Vsync Patch if needed
Screen.render_after(self)
def update(self, t, dt):
Screen.update(self, t, dt) #important, this handles vsync updates
#we need to render if the vsync patch is ready
self.ready_to_render = self.vsync_patch.ready_to_render
# otherwise, only update a checkerboard if its flash_interval has elapsed
if (t - self._last_CB_change_time) >= self.flash_interval:
self._last_CB_change_time = t
#swap the checkerboards
self._current_CB, self._last_CB = (self._last_CB, self._current_CB)
self.ready_to_render = True
def run(self, **kwargs):
        # display loop rate is set very high so the loop effectively runs as fast as Python can
Screen.run(self, display_loop_rate = 10000, **kwargs)
################################################################################
class CheckerBoardFlasherColorFunctionScreen(CheckerBoardFlasherScreen):
def setup(self,
color_function,
**kwargs):
CheckerBoardFlasherScreen.setup(self,**kwargs)
self._color_function = color_function
def update(self, t, dt):
c1, c2 = self._color_function(t - self._t0)
self.CB1.color1 = c1
self.CB1.color2 = c2
self.CB2.color1 = c2
self.CB2.color2 = c1
CheckerBoardFlasherScreen.update(self, t, dt)
################################################################################
# TEST CODE
################################################################################
if __name__ == "__main__":
import sys, random, itertools
    #ensure that video mode is at the maximum FPS
if sys.platform.startswith("linux"):
from subprocess import call
call(["xrandr","-r","144"])
import neurodot_present
neurodot_present.settings['vsync_version'] = 2
RAMP_DURATION = 10.0
DWELL_TIME = 5.0
def contrast_ramp(duration, c_max = 1.0):
def cf(x):
y = x/duration
y = min(y,c_max)
y1 = 0.5*(1 + y)
y2 = 0.5*(1 - y)
c1 = (y1, y1, y1)
c2 = (y2, y2, y2)
return (c1, c2)
return cf
def brightness_ramp(duration, c_max = 1.0):
def cf(x):
c = x/(duration)
c = min(c,c_max)
c1 = (c , c , c )
c2 = (0.0,0.0,0.0)
return (c1, c2)
return cf
def exp_brightness_ramp(duration,
e_min = -5.0,
c_max = 1.0,
b = 10.0,
):
def cf(x):
c = b**(e_min*(1.0 - x/(duration)))
c = min(c,c_max)
c1 = (c , c , c )
c2 = (0.0,0.0,0.0)
return (c1, c2)
return cf
CBF = CheckerBoardFlasherColorFunctionScreen.with_pygame_display(
display_mode = (1024,512),
debug = True
)
CBF.setup(color_function = exp_brightness_ramp(RAMP_DURATION),
#color_function = contrast_ramp(RAMP_DURATION),
nrows = 128,
flash_rate = 13,
screen_background_color = COLORS['black'],
)
while True:
CBF.run(duration = RAMP_DURATION, vsync_value = 1)
|
# -*- coding:utf-8 -*-
from model.user import User
def load():
return User()
|
"""Treadmill commaand line helpers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
# Disable too many lines in module warning.
#
# pylint: disable=C0302
import codecs
import copy
import functools
import io
import logging
import logging.config
import os
import pkgutil
import re
import sys
import tempfile
import traceback
import click
import pkg_resources
import six
from six.moves import configparser
import treadmill
from treadmill import utils
from treadmill import context
from treadmill import plugin_manager
from treadmill import restclient
from botocore import exceptions
from treadmill import subproc
EXIT_CODE_DEFAULT = 1
# Disable unicode_literals click warning.
click.disable_unicode_literals_warning = True
def init_logger(name):
"""Initialize logger.
"""
# Logging configuration must be unicode file
utf8_reader = codecs.getreader('utf8')
log_conf_file = utf8_reader(
pkg_resources.resource_stream(
'treadmill',
'/logging/{name}'.format(name=name)
)
)
try:
logging.config.fileConfig(log_conf_file)
except configparser.Error:
with tempfile.NamedTemporaryFile(delete=False, mode='w') as f:
traceback.print_exc(file=f)
click.echo('Error parsing log conf: {name}'.format(name=name),
err=True)
def init_profile():
"""Initailize profile.
"""
default_aliases = ['aliases']
profile = context.GLOBAL.get_profile_name()
if profile:
default_aliases.append('aliases.{}'.format(profile))
subproc.ALIASES_PATH = os.environ.get(
'TREADMILL_ALIASES_PATH',
':'.join(default_aliases)
)
def make_commands(section, **click_args):
"""Make a Click multicommand from all submodules of the module."""
class MCommand(click.MultiCommand):
"""Treadmill CLI driver."""
def __init__(self, *args, **kwargs):
if kwargs and click_args:
kwargs.update(click_args)
click.MultiCommand.__init__(self, *args, **kwargs)
def list_commands(self, ctx):
"""Return list of commands in section."""
return sorted(plugin_manager.names(section))
def get_command(self, ctx, cmd_name):
try:
return plugin_manager.load(section, cmd_name).init()
except ImportError as import_err:
print(
'dependency error: {}:{} - {}'.format(
section, cmd_name, str(import_err)
),
file=sys.stderr
)
except KeyError:
raise click.UsageError('Invalid command: %s' % cmd_name)
return MCommand
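# Usage sketch (hypothetical section name): make_commands() returns a Click
# MultiCommand class whose subcommands are discovered via plugin_manager at
# invocation time, e.g.
#
#     @click.group(cls=make_commands('treadmill.cli'))
#     def run():
#         """Treadmill CLI."""
#
# Each plugin in the section is expected to expose an init() that returns a
# Click command.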
def _read_password(value):
"""Heuristic to either read the password from file or return the value."""
if os.path.exists(value):
with io.open(value) as f:
return f.read().strip()
else:
return value
def handle_context_opt(ctx, param, value):
"""Handle eager CLI options to configure context.
The eager options are evaluated directly during parsing phase, and can
affect other options parsing (like required/not).
The only side effect of consuming these options are setting attributes
of the global context.
"""
def parse_dns_server(dns_server):
"""Parse dns server string"""
if ':' in dns_server:
hosts_port = dns_server.split(':')
return (hosts_port[0].split(','), int(hosts_port[1]))
else:
return (dns_server.split(','), None)
if not value or ctx.resilient_parsing:
return None
if value == '-':
return None
opt = param.name
if opt == 'cell':
context.GLOBAL.cell = value
elif opt == 'dns_domain':
context.GLOBAL.dns_domain = value
elif opt == 'dns_server':
context.GLOBAL.dns_server = parse_dns_server(value)
elif opt == 'ldap':
context.GLOBAL.ldap.url = value
elif opt == 'ldap_suffix':
context.GLOBAL.ldap_suffix = value
elif opt == 'ldap_user':
context.GLOBAL.ldap.user = value
elif opt == 'ldap_pwd':
context.GLOBAL.ldap.password = _read_password(value)
elif opt == 'zookeeper':
context.GLOBAL.zk.url = value
elif opt == 'profile':
context.GLOBAL.set_profile_name(value)
else:
raise click.UsageError('Invalid option: %s' % param.name)
return value
class _CommaSepList(click.ParamType):
"""Custom input type for comma separated values."""
name = 'list'
def convert(self, value, param, ctx):
"""Convert command line argument to list."""
if value is None:
return []
try:
return value.split(',')
except AttributeError:
self.fail('%s is not a comma separated list' % value, param, ctx)
LIST = _CommaSepList()
class Enums(click.ParamType):
"""Custom input type for comma separated enums."""
name = 'enumlist'
def __init__(self, choices):
self.choices = choices
def get_metavar(self, param):
return '[%s]' % '|'.join(self.choices)
def get_missing_message(self, param):
return 'Choose from %s.' % ', '.join(self.choices)
def convert(self, value, param, ctx):
"""Convert command line argument to list."""
if value is None:
return []
choices = []
try:
for val in value.split(','):
if val in self.choices:
choices.append(val)
else:
self.fail(
'invalid choice: %s. (choose from %s)' %
(val, ', '.join(self.choices)),
param, ctx
)
return choices
except AttributeError:
self.fail('%s is not a comma separated list' % value, param, ctx)
class _KeyValuePairs(click.ParamType):
"""Custom input type for key/value pairs."""
name = 'key/value pairs'
def convert(self, value, param, ctx):
"""Convert command line argument to list."""
if value is None:
return {}
items = re.split(r'([\w\.\-]+=)', value)
items.pop(0)
keys = [key.rstrip('=') for key in items[0::2]]
values = [value.rstrip(',') for value in items[1::2]]
return dict(zip(keys, values))
DICT = _KeyValuePairs()
def validate_memory(_ctx, _param, value):
"""Validate memory string."""
if value is None:
return
if not re.search(r'\d+[KkMmGg]$', value):
raise click.BadParameter('Memory format: nnn[K|M|G].')
return value
def validate_disk(_ctx, _param, value):
"""Validate disk string."""
if value is None:
return
if not re.search(r'\d+[KkMmGg]$', value):
        raise click.BadParameter('Disk format: nnn[K|M|G].')
return value
def validate_cpu(_ctx, _param, value):
"""Validate cpu string."""
if value is None:
return
if not re.search(r'\d+%$', value):
raise click.BadParameter('CPU format: nnn%.')
return value
def validate_cpuset_cores(_ctx, _param, value):
"""Validate cpuset cores string."""
if value is None:
return
if not re.search(r'\d+\-?\d*(,\d+\-?\d*)*$', value):
raise click.BadParameter('CPU cores format: nnn[,nnn-[nnn]].')
return value
def validate_reboot_schedule(_ctx, _param, value):
"""Validate reboot schedule specification."""
if value is None:
return
try:
utils.reboot_schedule(value)
except ValueError:
raise click.BadParameter('Invalid reboot schedule. (eg.: "sat,sun")')
return value
def combine(list_of_values, sep=','):
"""Split and sum list of sep string into one list.
"""
combined = sum(
[str(values).split(sep) for values in list(list_of_values)],
[]
)
if combined == ['-']:
combined = None
return combined
def out(string, *args):
"""Print to stdout."""
if args:
string = string % args
click.echo(string)
def handle_exceptions(exclist):
"""Decorator that will handle exceptions and output friendly messages."""
def wrap(f):
"""Returns decorator that wraps/handles exceptions."""
exclist_copy = copy.copy(exclist)
@functools.wraps(f)
def wrapped_f(*args, **kwargs):
"""Wrapped function."""
if not exclist_copy:
f(*args, **kwargs)
else:
exc, handler = exclist_copy.pop(0)
try:
wrapped_f(*args, **kwargs)
except exc as err:
if isinstance(handler, six.string_types):
click.echo(handler, err=True)
elif handler is None:
click.echo(str(err), err=True)
else:
click.echo(handler(err), err=True)
sys.exit(EXIT_CODE_DEFAULT)
@functools.wraps(f)
def _handle_any(*args, **kwargs):
"""Default exception handler."""
try:
return wrapped_f(*args, **kwargs)
except click.UsageError as usage_err:
click.echo('Usage error: %s' % str(usage_err), err=True)
sys.exit(EXIT_CODE_DEFAULT)
except Exception as unhandled: # pylint: disable=W0703
with tempfile.NamedTemporaryFile(delete=False, mode='w') as f:
traceback.print_exc(file=f)
click.echo('Error: %s [ %s ]' % (unhandled, f.name),
err=True)
sys.exit(EXIT_CODE_DEFAULT)
return _handle_any
return wrap
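# Usage sketch (hypothetical handlers): entries are tried in list order, with
# the first entry becoming the outermost handler; each maps an exception type
# to a message string, a callable taking the error, or None (print str(err)).
#
#     @handle_exceptions([(IOError, 'I/O problem'), (ValueError, None)])
#     def run():
#         ...
#
# Anything not in the list falls through to _handle_any, which writes the
# traceback to a temp file and exits with EXIT_CODE_DEFAULT.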
OUTPUT_FORMAT = None
def make_formatter(pretty_formatter):
"""Makes a formatter."""
def _format(item, how=None):
"""Formats the object given global format setting."""
        if how is None:
            how = pretty_formatter if OUTPUT_FORMAT is None else OUTPUT_FORMAT
try:
fmt = plugin_manager.load('treadmill.formatters', how)
return fmt.format(item)
except KeyError:
return str(item)
return _format
def bad_exit(string, *args):
"""System exit non-zero with a string to sys.stderr.
The printing takes care of the newline"""
if args:
string = string % args
click.echo(string, err=True)
sys.exit(-1)
def echo_colour(colour, string, *args):
"""click.echo colour with support for placeholders, e.g. %s"""
if args:
string = string % args
click.echo(click.style(string, fg=colour))
def echo_green(string, *args):
"""click.echo green with support for placeholders, e.g. %s"""
echo_colour('green', string, *args)
def echo_yellow(string, *args):
"""click.echo yellow with support for placeholders, e.g. %s"""
echo_colour('yellow', string, *args)
def echo_red(string, *args):
"""click.echo yellow with support for placeholders, e.g. %s"""
echo_colour('red', string, *args)
def handle_not_authorized(err):
"""Handle REST NotAuthorizedExceptions"""
msg = str(err)
msgs = [re.sub(r'failure: ', ' ', line) for line in msg.split(r'\n')]
echo_red('Not authorized.')
click.echo('\n'.join(msgs), nl=False)
def handle_cli_exceptions(exclist):
"""Decorator that will handle exceptions and output friendly messages."""
def wrap(f):
"""Returns decorator that wraps/handles exceptions."""
exclist_copy = copy.copy(exclist)
@functools.wraps(f)
def wrapped_f(*args, **kwargs):
"""Wrapped function."""
if not exclist_copy:
f(*args, **kwargs)
else:
exc, handler = exclist_copy.pop(0)
try:
wrapped_f(*args, **kwargs)
except exc as err:
if handler is None:
raise click.UsageError(
err.response['Error']['Message']
)
                    elif isinstance(handler, str):
                        click.echo(handler, err=True)
sys.exit(EXIT_CODE_DEFAULT)
@functools.wraps(f)
def _handle_any(*args, **kwargs):
"""Default exception handler."""
try:
return wrapped_f(*args, **kwargs)
except Exception as unhandled: # pylint: disable=W0703
with tempfile.NamedTemporaryFile(delete=False, mode='w') as f:
traceback.print_exc(file=f)
click.echo('Error: %s [ %s ]' % (unhandled, f.name),
err=True)
sys.exit(EXIT_CODE_DEFAULT)
return _handle_any
return wrap
REST_EXCEPTIONS = [
(restclient.NotFoundError, 'Resource not found'),
(restclient.AlreadyExistsError, 'Resource already exists'),
(restclient.ValidationError, None),
(restclient.NotAuthorizedError, handle_not_authorized),
(restclient.BadRequestError, None),
(restclient.MaxRequestRetriesError, None)
]
CLI_EXCEPTIONS = [
(exceptions.ClientError, None),
    (exceptions.PartialCredentialsError, 'Partial Credentials'),
(exceptions.NoCredentialsError, 'No Creds'),
]
ON_REST_EXCEPTIONS = handle_exceptions(REST_EXCEPTIONS)
ON_CLI_EXCEPTIONS = handle_cli_exceptions(CLI_EXCEPTIONS)
|
from morphing_agents.mujoco.ant.designs import sample_uniformly
from morphing_agents.mujoco.ant.designs import DEFAULT_DESIGN
from morphing_agents.mujoco.ant.elements import LEG_UPPER_BOUND
from morphing_agents.mujoco.ant.elements import LEG_LOWER_BOUND
from morphing_agents.mujoco.ant.elements import LEG
from morphing_agents.mujoco.ant.env import MorphingAntEnv
from examples.instrument import generate_experiment_kwargs
from examples.development.variants import TOTAL_STEPS_PER_UNIVERSE_DOMAIN_TASK
from ray import tune
from math import floor
import argparse
import importlib
import ray
import multiprocessing
import tensorflow as tf
import numpy as np
if __name__ == '__main__':
parser = argparse.ArgumentParser('GenerateAntDataset')
parser.add_argument('--local-dir',
type=str,
default='./data')
parser.add_argument('--num-legs',
type=int,
default=4)
parser.add_argument('--dataset-size',
type=int,
default=1)
parser.add_argument('--num-samples',
type=int,
default=1)
parser.add_argument('--num-parallel',
type=int,
default=1)
parser.add_argument('--method',
type=str,
choices=['uniform', 'curated'])
args = parser.parse_args()
TOTAL_STEPS_PER_UNIVERSE_DOMAIN_TASK[
'gym']['MorphingAnt']['v0'] = 100000
ub = np.array(list(LEG_UPPER_BOUND))
lb = np.array(list(LEG_LOWER_BOUND))
scale = (ub - lb) / 2
designs = [DEFAULT_DESIGN]
while len(designs) < args.dataset_size:
try:
if args.method == 'uniform':
d = sample_uniformly(num_legs=args.num_legs)
elif args.method == 'curated':
d = [LEG(*np.clip(np.array(
leg) + np.random.normal(0, scale / 8), lb, ub))
for leg in DEFAULT_DESIGN]
else:
d = DEFAULT_DESIGN
MorphingAntEnv(fixed_design=d)
designs.append(d)
except Exception:
print(f"resampling design that errored: {d}")
def run_example(example_module_name, example_argv, local_mode=False):
"""Run example locally, potentially parallelizing across cpus/gpus."""
example_module = importlib.import_module(example_module_name)
example_args = example_module.get_parser().parse_args(example_argv)
variant_spec = example_module.get_variant_spec(example_args)
trainable_class = example_module.get_trainable_class(example_args)
experiment_kwargs = generate_experiment_kwargs(variant_spec, example_args)
experiment_kwargs['config'][
'dataset_id'] = tune.grid_search(list(range(args.dataset_size)))
experiment_kwargs['config'][
'environment_params'][
'training'][
'kwargs'][
'fixed_design'] = tune.sample_from(
lambda spec: designs[spec.config.dataset_id])
experiment_kwargs['config'][
'environment_params'][
'training'][
'kwargs'][
'expose_design'] = False
ray.init(
num_cpus=example_args.cpus,
num_gpus=example_args.gpus,
resources=example_args.resources or {},
local_mode=local_mode,
include_webui=example_args.include_webui,
temp_dir=example_args.temp_dir)
tune.run(
trainable_class,
**experiment_kwargs,
with_server=example_args.with_server,
server_port=example_args.server_port,
scheduler=None,
reuse_actors=True)
num_cpus = multiprocessing.cpu_count()
num_gpus = len(tf.config.list_physical_devices('GPU'))
run_example('examples.development', (
'--algorithm', 'SAC',
'--universe', 'gym',
'--domain', 'MorphingAnt',
'--task', 'v0',
'--exp-name', f'ant-dataset-{args.method}',
'--checkpoint-frequency', '10',
'--mode=local',
'--local-dir', args.local_dir,
'--num-samples', f'{args.num_samples}',
'--cpus', f'{num_cpus}',
'--gpus', f'{num_gpus}',
'--trial-cpus', f'{num_cpus // args.num_parallel}',
'--trial-gpus', f'{num_gpus / args.num_parallel}'))
|
#!/usr/bin/python
def get_data(filename):
f = open(filename, "r")
line = f.readline()
case_count = int(line)
case_list = []
for i in xrange(case_count):
button_list = []
line = f.readline()
pieces = line.split()
button_count = int(pieces.pop(0))
case_list.append((button_count, button_list))
for j in xrange(button_count):
button_list.append((pieces.pop(0),int(pieces.pop(0))))
f.close()
return case_list
def process_case(case):
    # Two robots, 'O' and 'B', each start at position 0 and must press their
    # buttons in the given global order; every second each robot may move one
    # step toward its next target, and the active robot presses once in place.
    button_count, button_list = case
step_count = 0
cur_pos = {'O':0, 'B':0}
for i in xrange(button_count):
button = button_list.pop(0)
other_index = 0
while other_index < len(button_list) and button[0] == button_list[other_index][0]:
other_index += 1
button_pressed = False
other_button = button_list[other_index] if other_index < len(button_list) else None
# print "button, other button", button, other_button
while not button_pressed:
next_pos = move(cur_pos[button[0]], button[1])
if next_pos is None:
button_pressed = True
else:
cur_pos[button[0]] = next_pos
if other_index < len(button_list):
other_next_pos = move(cur_pos[button_list[other_index][0]], button_list[other_index][1])
if other_next_pos is not None:
cur_pos[button_list[other_index][0]] = other_next_pos
step_count += 1
return step_count - 1
# return next pos
def move(cur_pos, target_pos):
next_pos = None
if cur_pos > target_pos:
next_pos = cur_pos - 1
elif cur_pos < target_pos:
next_pos = cur_pos + 1
return next_pos
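# Worked example (values are illustrative): with buttons
#     [('O', 2), ('B', 1), ('B', 2), ('O', 4)]
# robot 'O' walks toward position 2 while 'B' walks toward 1; each second both
# robots may advance one step, and process_case() counts the seconds until the
# last button in the sequence is pressed.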
|
#!/usr/bin/python
import argparse
from glob import glob
import json
import os
from shutil import copyfile
def parse_args():
info = "Combines FASC files from PyRosetta job distributor and converts \
them into a more convenient csv table. Also extracts best decoys by a \
given criterion if desired."
parser = argparse.ArgumentParser(description=info)
parser.add_argument("directory", type=str,
help="Read .fasc files from what directory?")
parser.add_argument("-n", "--num_decoys", type=int, default=0,
help="Collect the top [how many] decoys? By default, no top decoy \
selection will be taken.")
parser.add_argument("-od", "--out_directory", type=str,
help="Where should best decoys be copied? If num_decoys is given and \
there is no input for this option, by default a subfolder will be \
created in the given directory called \
[original_dir]_best_[number]_decoys.")
parser.add_argument("-s", "--selection_criterion", type=str,
default='total_score', help="By what property should best decoys be \
selected? Default is total_score.")
parser.add_argument("-m", "--merge_fascs", action='store_true',
help="If multiple .fasc files were created for a single set, this \
option allows them to be sorted together. It is assumed that the \
difference will only be in numbers after an underscore, and that the \
beginning name will be the same for all clustered .fasc files.")
args = parser.parse_args()
return args
def fasc_to_dict_list(fasc_file, sortby='total_score'):
"""
Reads in a fasc file and converts it to a list of dicts. Then sorts the list
by a given criterion ('total_score' by default) and returns it.
Note: does not work for files from older versions of Rosetta, which output
text strings that don't resemble dicts. Also adds several items to the decoy
dict: basename, set (which .fasc file or group the decoy was from), and rank
(based on the sorting criterion). Rank is one-indexed, not zero-indexed.
"""
fasc_lines = []
# Reading in FASC files
if isinstance(fasc_file, list):
fasc_base = os.path.basename(fasc_file[0])
fasc_cluster = fasc_base[:fasc_base.rfind('_')]
for f in fasc_file:
with open(f, 'r') as read:
fasc_lines += read.readlines()
else:
fasc_base = os.path.basename(fasc_file)
fasc_cluster = fasc_base[:fasc_base.rfind('.fasc')]
with open(fasc_file, 'r') as read:
fasc_lines += read.readlines()
# Collecting decoy data from FASC file as a list of dicts
decoys_data = [json.loads(fl) for fl in fasc_lines]
# Sorting list
decoys_data.sort(key=lambda x: x[sortby])
# Add basename, set, and rank to each decoy
for n, i in enumerate(decoys_data):
dec_base = os.path.basename(i['filename'])
i['decoy_basename'] = dec_base[:dec_base.index('.')] # Remove file extension
i['decoy_set'] = fasc_cluster
i['decoy_rank'] = n + 1
return decoys_data
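# Illustrative input (assumed format): the PyRosetta job distributor writes one
# JSON object per line in each .fasc file, e.g.
#     {"filename": "out/decoy_0001.pdb", "pdb_name": "in", "decoy": "d1",
#      "nstruct": 10, "total_score": -312.4}
# fasc_to_dict_list() sorts these dicts by `sortby` and tags each with
# decoy_basename, decoy_set and decoy_rank (one-indexed).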
def make_condensed_fasc_header(decoy_list):
"""
    For a given list of dicts representing decoys, creates a list to be
used as a header. Collects all keys from the dicts in the decoy_list,
including any that are not present in all decoys. Eliminates pdb_name and
decoy from the list, since they are redundant, and nstruct since that is
unnecessary when the list is converted to CSV. Moves filename, decoy_set,
and decoy_rank to the start of the list, followed by total_score and
anything else with 'total', followed by anything with 'constraint'.
Preappends a column for the decoy basename followed by a variable number of
columns for non-identical underscore-separated parts of the decoy basename
so it is easy to sort decoys later. Makes a masking list for the decoys'
names so that splitting them later, they can be appropriately tabulated with
the non-identical parts included and the identical parts excluded.
"""
# Initialize header
header = ['decoy_basename']
keylist = []
# Collect all keys
for decoy in decoy_list:
for k in decoy.keys():
if k not in keylist:
keylist.append(k)
keylist.remove('pdb_name')
keylist.remove('decoy')
keylist.remove('nstruct')
keylist.sort()
# Collect list of basenames split by underscore
basenames = [d['decoy_basename'].split('_') for d in decoy_list]
# Add columns to header for decoy name differences, make name mask
decoy_name_mask = []
for i in zip(*basenames):
if len(set(i)) > 1: # True if not all elements are identical
header.append('')
decoy_name_mask.append(1)
else:
decoy_name_mask.append(0)
# Moving properties from keylist to header
header += ['filename', 'decoy_set', 'decoy_rank', 'total_score']
# Moving any totals from keylist to header
header += [k for k in keylist if 'total' in k and k not in header]
# Moving any constraints from keylist to header
header += [k for k in keylist if 'constraint' in k and k not in header]
# Moving everything else from keylist to header
header += [k for k in keylist if k not in header]
return header, decoy_name_mask
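# Example (hypothetical names): if the decoy basenames are 'setA_relax_0001'
# and 'setA_relax_0002', only the last underscore-separated field varies, so
# decoy_name_mask == [0, 0, 1] and a single unlabeled header column is added
# for that varying token.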
def convert_dec_dict_to_csv_line(decoy_dict, header, mask):
"""
Takes in a decoy as a dict and populates a list of the values of that dict
in order matching the given header, filling in blanks if the decoy does not
have a given key. Uses the mask to populate columns with non-identical decoy
name features. Then converts the list to a comma-separated string.
"""
# Collect basename to initialize output line
outlist = [decoy_dict['decoy_basename']]
# Add non-identical name components to output line
basename_split = decoy_dict['decoy_basename'].split('_')
outlist += [i for i,j in zip(basename_split, mask) if j]
# Add all other values in order according to header
for score in header[header.index('filename'):]:
if score in decoy_dict.keys():
outlist.append(decoy_dict[score])
else:
outlist.append('')
# Convert to string
return ','.join([str(i) for i in outlist])
def check_make_folder(directory):
"""
Checks whether a directory exists, and creates it if it doesn't
"""
if not os.path.isdir(directory):
os.makedirs(directory)
return
def text_to_numbers(in_list):
"""
Takes a list that includes numbers and text all stored as strings.
Returns the same list with the numbers converted to floats.
"""
new_list = []
for i in in_list:
try:
new_list.append(float(i))
except ValueError:
new_list.append(i)
return new_list
def cleanup_mutations_section(report_lines, start_point):
"""
Some sets are missing mutable residues present in others. This function
    maintains column alignment across all sets by inserting what will appear
as blank cells where a mutable residue is missing.
"""
max_len = max([len(line) for line in report_lines])
res_columns = list(range(start_point, max_len, 4))
for c in res_columns:
mutated_residues = [int(line[c]) for line in report_lines if line[c] != "NONE"]
if len(mutated_residues) == 0:
return
first_des_res = min(mutated_residues)
        for line in report_lines:
            # Guard against "NONE" placeholders, which are not numeric.
            if line[c] != "NONE" and int(line[c]) != first_des_res:
                for i in range(4):
                    line.insert(c, '=""')
return
def main(args):
# Getting fasc files
folder = args.directory.rstrip('/')
base_name = os.path.basename(folder)
folder_search = os.path.join(folder, "*.fasc")
fasc_files = glob(folder_search)
fasc_files.sort()
# Group .fasc files if flag is given
if args.merge_fascs:
fasc_groups = {}
for f in fasc_files:
fasc_base = os.path.basename(f)
fasc_cluster = fasc_base[:fasc_base.rfind('_')]
if fasc_cluster in fasc_groups:
fasc_groups[fasc_cluster].append(f)
else:
fasc_groups[fasc_cluster] = [f]
fasc_files = list(fasc_groups.values())
# Collecting fasc lines
master_decoy_list = []
for f in fasc_files:
fasc_extract = fasc_to_dict_list(f, sortby=args.selection_criterion)
master_decoy_list += fasc_extract
# Making header line and decoy name mask
header, name_mask = make_condensed_fasc_header(master_decoy_list)
# Making combined report
report_name = os.path.join(folder, base_name + '_combined_reports.csv')
with open(report_name, 'w') as r:
# Writing header
        r.write(','.join(header) + '\n')
# Writing decoy lines
for decoy in master_decoy_list:
dec_line = convert_dec_dict_to_csv_line(decoy, header, name_mask)
r.write(dec_line + '\n')
print(report_name)
# Copying best decoys
if args.num_decoys:
# Name output directory
if args.out_directory:
outdir = args.out_directory
else:
foldername = '{}_best_{}_decoys'.format(base_name, args.num_decoys)
outdir = os.path.join(folder, foldername)
check_make_folder(outdir)
# Collect best decoys
decoys_to_collect = []
for decoy in master_decoy_list:
if decoy['decoy_rank'] <= args.num_decoys:
# Make name independent of file extension
decoyname = os.path.join(folder,
decoy['decoy_basename'] + '.*')
decoys_to_collect += glob(decoyname)
# Copy best decoys
for decoy in decoys_to_collect:
print(os.path.basename(decoy))
decoy_out = decoy.replace(folder, outdir)
copyfile(decoy, decoy_out)
if __name__ == '__main__':
args = parse_args()
main(args)
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from typing import Iterable
from pants.backend.build_files.utils import _get_build_file_partitioner_rules
from pants.core.goals.fmt import FmtFilesRequest
from pants.core.util_rules.partitions import PartitionerType
class FmtBuildFilesRequest(FmtFilesRequest):
partitioner_type = PartitionerType.CUSTOM
@classmethod
def _get_rules(cls) -> Iterable:
assert cls.partitioner_type is PartitionerType.CUSTOM
yield from _get_build_file_partitioner_rules(cls)
yield from super()._get_rules()
|
def move(srcStack, dstStack):
    # Move the top disc of srcStack onto dstStack, enforcing the rule that a
    # disc may only rest on a larger one.
    print('Before move:', srcStack, ',', dstStack)
    top = srcStack.pop(0)
    assert len(dstStack) == 0 or dstStack[0] > top
    dstStack.insert(0, top)
    print('After move:', srcStack, ',', dstStack)
def move_stack(stacks, size, src, dst, oth):
    # Classic recursion: park the top size-1 discs on the spare peg, move the
    # largest disc to the destination, then move the parked discs onto it.
    if size == 1:
        move(stacks[src], stacks[dst])
    else:
        move_stack(stacks, size-1, src, oth, dst)
        move(stacks[src], stacks[dst])
        move_stack(stacks, size-1, oth, dst, src)
def show_stacks(stacks):
for num, stack in enumerate(stacks):
print('Stack', num, ':', stack)
def init(stacks, num, src):
for i in range(3):
stacks.append([])
stack = stacks[src]
for i in range(num):
stack.append(i+1)
stacks = []
src = 0
num = 5
init(stacks, num, src)
show_stacks(stacks)
move_stack(stacks, num, src, 2, 1)
show_stacks(stacks)
|
import requests
import pandas as pd
import matplotlib.pyplot as plt
######################################################################################################
#Generic parameters for intrinio
api_key='xxxx---Use your own API Key----xxxx'
stock_quote='AAPL'
indicator='$indicator$'
hist_data_url=f'https://api-v2.intrinio.com/companies/{stock_quote}/historical_data/{indicator}?api_key={api_key}&frequency=yearly&start_date=01-01-2013'
data_point_url=f'https://api-v2.intrinio.com/companies/{stock_quote}/data_point/{indicator}/number?api_key={api_key}'
######################################################################################################
#Functions
#Get all listed companies in intrinio
def getAllListedCompanies():
    companies_url = f'https://api-v2.intrinio.com/companies?api_key={api_key}'
companies = (requests.get(companies_url).json())
return companies
#Get all the ticker symbols of a companies e.g. AAPL for Apple
def getAllListedCompanyTicker():
companies = getAllListedCompanies()
companyTicker = []
for comp in companies['companies']:
companyTicker.append(comp['ticker'])
return companyTicker
##############
#Generic function to get data from a indicator
def create_hist_df(indicator_str):
url=hist_data_url.replace(indicator,indicator_str)
req=requests.get(url)
data=req.json()
if len(data['historical_data'])>0:
panda_df=pd.DataFrame(data['historical_data'])
        #Reverse rows so dates are in ascending order
panda_df=panda_df.iloc[::-1]
#Cut months and days, only year stays as date
panda_df['date'] =panda_df['date'].str[0:4]
return panda_df
else:
print("Error receiving historical data")
return False
#Get data from intrinio without changes
def getHistoricalDataViaIndicator(indicator_str):
url = hist_data_url.replace(indicator, indicator_str)
req = requests.get(url)
dataJSON = req.json()
return dataJSON
def getDataPoint(indicator_str):
url = data_point_url.replace(indicator, indicator_str)
req = requests.get(url)
dataJSON = req.json()
return dataJSON
#################
#Output functions
#################
#Print plot of JSON (dataJSON)
def showPlotHistorical(dataJSON):
df = pd.DataFrame(dataJSON['historical_data'])
dividend_df = df.iloc[::-1]
dividend_df.head()
dividend_df.plot(x='date', y='value', title='Dividend per Share')
plt.show()
#Print in console X(number of results printed) of results
def printXheadOfDataframeOfHistorical(dataJSON,numberX):
df = pd.DataFrame(dataJSON['historical_data'])
df = df.iloc[::-1]
print(df.head(numberX))
######################################################################################################
######################################################################################################
#Strong and Consistent Return on Equity (net income/shareholder's equity)
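#Worked example: net income of $10M on shareholder's equity of $50M gives
#ROE = 10/50 = 0.20, i.e. 20%.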
roe_df = create_hist_df('roe')
print("###Return on Equity###")
print(roe_df.head().to_string())
print("#######################################")
ax=roe_df.plot(kind='line',x='date',y='value')
ax.set(xlabel='year',ylabel='Percentage',title='Return on Equity')
plt.savefig('roe.png')
plt.show()
|
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from parseridge.parser.modules.attention.positional_encodings import PositionalEncoder
from parseridge.parser.modules.attention.self_attention_layer import SelfAttentionLayer
from parseridge.parser.modules.data_parallel import Module
from parseridge.parser.modules.external_embeddings import ExternalEmbeddings
from parseridge.parser.modules.utils import get_mask
class InputEncoder(Module):
INPUT_ENCODER_MODES = ["lstm", "transformer", "none"]
def __init__(
self,
token_vocabulary,
token_embedding_size,
hidden_size=125,
layers=2,
self_attention_heads=10,
self_attention_layers=4,
dropout=0.33,
max_sentence_length=100,
sum_directions=True,
reduce_dimensionality=0,
mode="lstm",
**kwargs,
):
super().__init__(**kwargs)
self.token_vocabulary = token_vocabulary
self.input_size = token_embedding_size
self.hidden_size = hidden_size
self.max_sentence_length = max_sentence_length
self.sum_directions = sum_directions
self.reduce_dimensionality = reduce_dimensionality
self.mode = mode
# Initialize the embedding layer. If we use pre-trained embeddings, they will
# replace the weights after initialization using `load_external_embeddings()`.
self.token_embeddings = nn.Embedding(
num_embeddings=len(self.token_vocabulary),
embedding_dim=token_embedding_size,
padding_idx=self.token_vocabulary.get_id("<<<PADDING>>>"),
).to(self.device)
if self.mode == "lstm":
# Set up an LSTM as the input encoder.
self.rnn = nn.LSTM(
input_size=self.input_size,
hidden_size=hidden_size,
num_layers=layers,
dropout=dropout,
bidirectional=True,
batch_first=True,
)
self.output_size = hidden_size if self.sum_directions else 2 * hidden_size
elif self.mode == "transformer":
            # Use a transformer-style encoder for the input.
self.positional_encoder = PositionalEncoder(
model_size=self.input_size, max_length=1024
)
            # Register the layers in a ModuleList so their parameters are tracked.
            self.self_attention_layers = nn.ModuleList(
                [
                    SelfAttentionLayer(
                        model_size=self.input_size, num_heads=self_attention_heads
                    )
                    for _ in range(self_attention_layers)
                ]
            )
self.output_size = self.input_size
elif self.mode == "none":
# Do not change the input at all and don't use an encoder.
self.output_size = token_embedding_size
else:
raise ValueError(f"'{self.mode}' not in {self.INPUT_ENCODER_MODES}.")
if self.reduce_dimensionality:
self.dimensionality_reducer = nn.Sequential(
nn.Linear(self.output_size, self.reduce_dimensionality), nn.ReLU()
)
self.output_size = self.reduce_dimensionality
def load_external_embeddings(self, embeddings: ExternalEmbeddings):
self.logger.info("Loading external embeddings into the embedding layer...")
self.token_embeddings.weight = embeddings.get_weight_matrix(
self.token_vocabulary, self.device
)
def forward(self, sentence_batch, sentence_lengths):
tokens_embedded = self.token_embeddings(sentence_batch)
if self.mode == "lstm":
input_packed = pack_padded_sequence(
tokens_embedded, lengths=sentence_lengths, batch_first=True
)
packed_outputs, hidden = self.rnn(input_packed)
outputs, _ = pad_packed_sequence(packed_outputs, batch_first=True)
if self.sum_directions:
outputs = (
outputs[:, :, : self.hidden_size] + outputs[:, :, self.hidden_size :]
) # Sum bidirectional outputs
if self.reduce_dimensionality:
outputs = self.dimensionality_reducer(outputs)
return outputs, hidden
elif self.mode == "transformer":
# Get an inverted mask, where '1' indicates padding
mask = ~get_mask(
batch=sentence_batch, lengths=sentence_lengths, device=self.device
)
# Add positional encodings
sequence = self.positional_encoder(tokens_embedded)
layer_outputs = []
weights = []
for self_attention_layer in self.self_attention_layers:
attention_output, attention_weights = self_attention_layer(
sequence=sequence, mask=mask
)
layer_outputs.append(attention_output)
weights.append(attention_weights)
if self.reduce_dimensionality:
attention_output = self.dimensionality_reducer(layer_outputs[-1])
return attention_output, (layer_outputs, weights)
elif self.mode == "none":
return tokens_embedded, None
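# Shape sketch (assumed conventions): sentence_batch is a (batch, max_len)
# tensor of token ids and sentence_lengths is sorted in descending order, as
# pack_padded_sequence requires by default. forward() then returns:
#   * "lstm":        (batch, max_len, hidden_size) outputs if sum_directions,
#                    else (batch, max_len, 2 * hidden_size), plus the final
#                    LSTM hidden state;
#   * "transformer": (batch, max_len, token_embedding_size) outputs plus the
#                    per-layer outputs and attention weights;
#   * "none":        the raw embeddings and None.
# In every mode, reduce_dimensionality shrinks the last axis if it is set.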
|
"""
Created by Alex Wang on 2018-03-13
Image restoration and enhancement:
watermark removal with inpainting
"""
import cv2
import numpy as np
def test_image_inpaint():
"""
cv2.inpaint(src, inpaintMask, inpaintRadius, flags[, dst]) → dst
Parameters:
* src – Input 8-bit 1-channel or 3-channel image.
* inpaintMask – Inpainting mask, 8-bit 1-channel image. Non-zero pixels indicate the area that needs to be inpainted.
* dst – Output image with the same size and type as src .
* inpaintRadius – Radius of a circular neighborhood of each point inpainted that is considered by the algorithm.
* flags –
Inpainting method that could be one of the following:
* INPAINT_NS Navier-Stokes based method [Navier01]
* INPAINT_TELEA Method by Alexandru Telea [Telea04].
:return:
"""
img = cv2.imread('scenery.jpg')
print(img.shape)
img_black = img.copy()
black_block = np.zeros(shape=(20, 20, 3), dtype=np.uint8)
img_black[690:710, 100:120, :] = black_block
white_block = np.ones(shape=(20, 20), dtype=np.uint8)
mask_image = np.zeros(shape=(img.shape[0], img.shape[1]), dtype=np.uint8)
print(mask_image.shape)
mask_image[690:710, 100:120] = white_block
img_recovery = cv2.inpaint(img_black, mask_image, 3, cv2.INPAINT_NS)
cv2.imshow('img', img)
cv2.imshow('img_black', img_black)
cv2.imshow('img_recovery', img_recovery)
cv2.waitKey(0)
cv2.destroyAllWindows()
if __name__ == '__main__':
test_image_inpaint()
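# Note: swapping cv2.INPAINT_NS for cv2.INPAINT_TELEA in the cv2.inpaint()
# call above selects Telea's fast-marching method instead; both methods take
# the same (src, inpaintMask, inpaintRadius, flags) arguments.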
|
#2-3 exercise
name = "Nell"
print("Dear " + name + ", matter compilers are dream makers and world killers.")
#2-4 exercise
character = " sir finkle mcGraw "
print(character.title())
newChar = character.title()
print("\t*" + newChar.rstrip() + "*")
print("\t\t*" + character.lstrip() + "*")
print("\t\t\t*" + newChar.strip() + "*")
#2-5 exercise
print('"Any sufficiently advanced technology is indistinguishable from ~ magic ~," Arthur C. Clark.')
#2-6 exercise
famous_person = "Arthur C. Clark."
message = '"Any sufficiently advanced technology is indistinguishable from ~ magic ~," '
print(message + famous_person)
#2-7 exercise
title = " Sir "
first = " Finkle "
last = " McGraw "
print(title.rstrip() + "\n\t" + first.lstrip() + "\n\t\t" + last.strip())
|
#!/usr/bin/python
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def isSameTree(self, p, q):
pVals = self.trValueArray(p)
qVals = self.trValueArray(q)
if pVals == [] and qVals == []:
return True
elif pVals == [] and qVals != []:
return False
elif pVals != [] and qVals == []:
return False
if len(pVals) != len(qVals):
return False
for i in range(len(pVals)):
if pVals[i] != qVals[i]:
return False
return True
    def trValueArray(self, p):
        # Level-order serialization with None markers for absent children, so
        # two trees compare equal only when both values and structure match.
        if p is None or p == []:
            return []
nodes = [p]
vals = [p.val]
while len(nodes) > 0:
firstNode = nodes[0]
if firstNode.left != None and firstNode.right != None:
nodes.append(firstNode.left)
vals.append(firstNode.left.val)
nodes.append(firstNode.right)
vals.append(firstNode.right.val)
elif firstNode.left != None and firstNode.right == None:
nodes.append(firstNode.left)
vals.append(firstNode.left.val)
vals.append(None)
elif firstNode.left == None and firstNode.right != None:
vals.append(None)
nodes.append(firstNode.right)
vals.append(firstNode.right.val)
            nodes.pop(0)
return vals
solu = Solution()
p = TreeNode(1)
p.left = TreeNode(2)
p.right = TreeNode(3)
p.left.right = TreeNode(5)
p.right.left = TreeNode(6)
q = TreeNode(1)
q.left = TreeNode(2)
q.right = TreeNode(3)
q.left.right = TreeNode(5)
q.right.left = TreeNode(6)
print(solu.isSameTree(p, q))
|
from wtforms import TextAreaField, BooleanField, StringField, IntegerField, PasswordField, SubmitField, validators
from flask_wtf import FlaskForm
# NoneOf(values, message=None, values_formatter=None)
class AdminConfigForm(FlaskForm):
temp_chaudiere_failure = IntegerField(
'Temp Chaudiere Failure',
[validators.NumberRange(min=45, max=75, message="min=45, max=75")])
    chaudiere_db_rotate_hours = IntegerField(
'Rotate Chaudiere db every (hours)',
[validators.NumberRange(min=1, max=96, message="min=1, max=96")])
chaudiere_minute_db_rotate_days = IntegerField(
'Rotate ChaudiereMinute db every (days)',
[validators.NumberRange(min=1, max=60, message="min=1, max=60")])
alerts_enable = BooleanField('Enable alerts (SMS / Email)')
comment = TextAreaField('Comment', [validators.Length(min=0, max=300)])
# submit = SubmitField('Update')
|
#Program to find the electricity bill of a customer
#Slab rates: first 50 units @ 0.50/unit, next 100 @ 0.75, next 100 @ 1.20,
#anything above 250 units @ 1.50, plus a 20% surcharge on the whole amount.
units = int(input("Enter the units consumed in a month: "))
if units <= 50:
    amount = units * 0.50
elif units <= 150:
    amount = 25 + ((units - 50) * 0.75)
elif units <= 250:
    amount = 100 + ((units - 150) * 1.20)
else:
    amount = 220 + ((units - 250) * 1.50)
sur_charge = amount * 0.20
total_amt = amount + sur_charge
print("\nBill is", total_amt)
|
"""winss services management.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import errno
import os
import six
from treadmill import fs
from .. import _service_base
from .. import _utils
class LongrunService(_service_base.Service):
"""winss long running service.
"""
__slots__ = (
'_default_down',
'_finish_script',
'_log_run_script',
'_run_script',
'_timeout_finish',
'_env',
)
_TYPE = _service_base.ServiceType.LongRun
def __init__(self, directory, name,
run_script=None, finish_script=None, log_run_script=None,
timeout_finish=None, default_down=None, environ=None):
super(LongrunService, self).__init__(
directory,
name
)
self._default_down = default_down
self._finish_script = finish_script
self._log_run_script = log_run_script
self._run_script = run_script
self._timeout_finish = timeout_finish
self._env = environ
@property
def type(self):
return self._TYPE
@property
def data_dir(self):
"""Returns the data directory for the services.
:returns ``str``:
Full path to the service data directory.
"""
return os.path.join(self._dir, 'data')
@property
def env_dir(self):
"""Returns the environ directory for the services.
:returns ``str``:
Full path to the service environ directory.
"""
return os.path.join(self._dir, 'env')
@property
def logger_dir(self):
"""Returns the logger directory for the services.
:returns ``str``:
Full path to the service log directory.
"""
return os.path.join(self._dir, 'log')
@property
def default_down(self):
"""Is the default service state set to down?
"""
if self._default_down is None:
self._default_down = os.path.exists(
os.path.join(self._dir, 'down')
)
return self._default_down
@default_down.setter
def default_down(self, default_down):
self._default_down = bool(default_down)
@property
def _run_file(self):
return os.path.join(self._dir, 'run')
@property
def _finish_file(self):
return os.path.join(self._dir, 'finish')
@property
def _log_run_file(self):
return os.path.join(self.logger_dir, 'run')
@property
def run_script(self):
"""Service run script.
"""
if self._run_script is None:
self._run_script = _utils.script_read(self._run_file)
return self._run_script
@run_script.setter
def run_script(self, new_script):
self._run_script = new_script
@property
def finish_script(self):
"""Service finish script.
"""
if self._finish_script is None:
try:
self._finish_script = _utils.script_read(self._finish_file)
except IOError as err:
                if err.errno != errno.ENOENT:
raise
return self._finish_script
@finish_script.setter
def finish_script(self, new_script):
self._finish_script = new_script
@property
def log_run_script(self):
"""Service log run script.
"""
if self._log_run_script is None:
try:
self._log_run_script = _utils.script_read(self._log_run_file)
except IOError as err:
                if err.errno != errno.ENOENT:
raise
return self._log_run_script
@log_run_script.setter
def log_run_script(self, new_script):
self._log_run_script = new_script
@property
def timeout_finish(self):
"""Returns amount of milliseconds to wait for the finish script to
complete.
:returns ``int``:
Amount of milliseconds to wait. 0 means infinitely. Default 5000.
"""
if self._timeout_finish is None:
self._timeout_finish = _utils.value_read(
os.path.join(self._dir, 'timeout-finish'),
default=5000
)
return self._timeout_finish
@timeout_finish.setter
def timeout_finish(self, timeout_finish):
"""Service finish script timeout.
"""
if timeout_finish is not None:
if isinstance(timeout_finish, six.integer_types):
self._timeout_finish = timeout_finish
else:
self._timeout_finish = int(timeout_finish, 10)
@property
def environ(self):
"""Returns the environ dictionary for the services.
:returns ``dict``:
Service environ dictionary.
"""
if self._env is None:
self._env = _utils.environ_dir_read(self.env_dir)
return self._env
@environ.setter
def environ(self, new_environ):
self._env = new_environ
def write(self):
super(LongrunService, self).write()
fs.mkdir_safe(self.env_dir)
fs.mkdir_safe(self.data_dir)
if self._env is not None:
_utils.environ_dir_write(self.env_dir, self._env)
if self._run_script is None and not os.path.exists(self._run_file):
            raise ValueError('Invalid LongRun service: no run script')
elif self._run_script is not None:
_utils.script_write(self._run_file, self._run_script)
# Handle the case where the run script is a generator
if not isinstance(self._run_script, six.string_types):
self._run_script = None
# Optional settings
if self._finish_script is not None:
_utils.script_write(self._finish_file, self._finish_script)
# Handle the case where the finish script is a generator
if not isinstance(self._finish_script, six.string_types):
self._finish_script = None
if self._log_run_script is not None:
# Create the log dir on the spot
fs.mkdir_safe(os.path.dirname(self._log_run_file))
_utils.script_write(self._log_run_file, self._log_run_script)
            # Handle the case where the log run script is a generator
if not isinstance(self._log_run_script, six.string_types):
self._log_run_script = None
if self._default_down:
_utils.data_write(
os.path.join(self._dir, 'down'),
None
)
else:
fs.rm_safe(os.path.join(self._dir, 'down'))
if self._timeout_finish is not None:
_utils.value_write(
os.path.join(self._dir, 'timeout-finish'),
self._timeout_finish
)
# Disable W0613: Unused argument 'svc_type'
# pylint: disable=W0613
def create_service(svc_basedir, svc_name, svc_type, **kwargs):
"""Factory function instantiating a new service object from parameters.
:param ``str`` svc_basedir:
        Base directory in which to create the service.
:param ``str`` svc_name:
Name of the new service.
    :param ``supervisor.ServiceType`` svc_type:
        Type for the new service.
    :param ``dict`` kwargs:
        Additional arguments passed to the constructor of the new service.
    :returns ``Service``:
        New instance of the service.
"""
return LongrunService(svc_basedir, svc_name, **kwargs)
__all__ = (
'LongrunService',
'create_service',
)
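# Illustrative usage of the factory above (a sketch only: the base directory,
# service name, and run script below are hypothetical, and svc_type is unused
# by this factory, as the pylint disable above notes):
#
#     svc = create_service('/var/services', 'my-svc', None,
#                          run_script='exec sleep 3600')
#     svc.write()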
|
import tweepy
import datetime
consumer_key = 'placeholder_key_123'
consumer_secret = 'placeholder_secret_345'
access_token = 'placeholder_token_678'
access_token_secret = 'placeholder_token_secret_910'
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
def chirp():
if datetime.date.today().weekday() == 0:
msg = 'Oh man, Monday again!? Okay, we\'re gonna get through this. Drink some water, take deep breaths, and stay calm! #mondaymotivation #botgirlsummer #hotgirlsummer'
elif datetime.date.today().weekday() == 1:
msg = 'Taco Tuesday? Heck yeah! Nourish your body with your favorite food today, even if that isn\'t tacos (but let\'s be real... it\'s probably tacos). #tacotuesday #botgirlsummer #hotgirlsummer'
elif datetime.date.today().weekday() == 2:
msg = 'Hump day? More like bump day - play your fave jams today and dance like no one\'s watching... because probably no one is, unless you live in an apartment building and your blinds are open, in which case, who knows? #wednesdaywisdom #botgirlsummer #hotgirlsummer'
elif datetime.date.today().weekday() == 3:
        msg = 'It\'s THIRSTday, time to get hydrated! Grab your fave glass, mug, bottle, or even a freakin\' goblet if you want to, it\'s your life, just make sure you fill that sucker up all the way and enjoy some water! #thursdaythoughts #botgirlsummer #hotgirlsummer'
elif datetime.date.today().weekday() == 4:
msg = 'TGIF amiright?? Cheers to the freakin\' weekend, beautiful! You are a superstar and don\'t you dare forget it! #fridayfeeling #botgirlsummer #hotgirlsummer'
elif datetime.date.today().weekday() == 5:
msg = 'It\'s Saturday, you absolute legend. You know what you should do today? Go follow @dglewisofficial and read his latest blog post over on https://dglewisofficial.com. #saturYAY #botgirlsummer #hotgirlsummer'
elif datetime.date.today().weekday() == 6:
msg = 'It\'s self care sunday! Make sure to do something nice for yourself today, even if it\'s just reminding yourself what a wonderful person you are. :) #sundayfunday #botgirlsummer #hotgirlsummer'
try:
api.update_status(msg)
print('Successfully tweeted.')
except Exception as e:
print(e)
chirp()
def replyToTweets():
twtToSearch = api.search(q="#hotgirlsummer")
counter = 0
textToFind = ['#hotgirlsummer', '#Hotgirlsummer', '#HotGirlSummer', '#HOTGIRLSUMMER']
try:
for t in twtToSearch:
for i in textToFind:
                if i in t.text:
                    screenName = t.user.screen_name
                    response = "@%s Happy Hot Girl Summer! Don't forget to stay hydrated!" % (screenName)
                    api.update_status(response, in_reply_to_status_id=t.id)
                    counter += 1
                    break  # reply once per tweet, even if several hashtag variants match
print('Replied to ' + str(counter) + ' tweets.')
except Exception as e:
print(e)
replyToTweets()
def likeTweets():
searchFor = api.search(q="hot girl summer")
faved = []
errored = []
total = []
for s in searchFor:
try:
s.favorite()
faved.append(s)
total.append(s)
except Exception as e:
print(e)
errored.append(s)
total.append(s)
print('Favorited: ' + str(len(faved)) + ' tweets.')
print('Errored: ' + str(len(errored)) + ' tweets.')
print('Total: ' + str(len(total)) + ' tweets.')
likeTweets()
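# The calls above run each routine exactly once per invocation. A minimal
# daily scheduling sketch, assuming the third-party `schedule` package (not
# imported above); a cron entry would work just as well:
#
#     import schedule, time
#     schedule.every().day.at('09:00').do(chirp)
#     while True:
#         schedule.run_pending()
#         time.sleep(60)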
|
import sys
if len(sys.argv) == 1 or sys.argv[1] == '-v':
    print('Input filename:')
    f = str(sys.stdin.readline()).strip()
else:
    f = sys.argv[1]
verbose = sys.argv[-1] == '-v'  # parsed but currently unused
print('What is the image pixel width?')
w = int(sys.stdin.readline())
print('What is the image pixel height?')
h = int(sys.stdin.readline())
dim = w * h
canvas, zeros, ones, twos = ' ' * dim, [], [], []
with open(f, 'r') as fh:
    l = fh.read().strip()
for i in range(len(l), 0, -dim):
layer = l[i - dim:i]
canvas = [pixel if pixel != '2' else canvas[j] for j, pixel in enumerate(layer)]
zeros.append(layer.count('0'))
ones.append(layer.count('1'))
twos.append(layer.count('2'))
'''
Part 1
'''
mz = zeros.index(min(zeros))  # layers were scanned back-to-front, so this index counts from the last layer
print('1.a. Layer {0} (counting from the last layer) contains the fewest [0] digits.'.format(mz))
mult = ones[mz] * twos[mz]
print('1.b. Number of [1] digits * number of [2] digits ==', mult)
'''
Part 2
'''
print('\n2. ------- IMG ---------\n')
canvas = ''.join(canvas).replace('1', '*').replace('0', ' ')
for p in range(0, len(canvas), w):
print(''.join(canvas[p:p + w]))
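# A small worked example of the compositing rule above, on a hypothetical
# 2x2 image with data '0222112222120000': the layers are 0222, 1122, 2212,
# 0000; keeping the first non-'2' (non-transparent) digit from the top layer
# down yields 0110, which the rendering loop prints as ' *' over '* '.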
|
import unittest
from entity.category import Category
from entity.product import Product
from entity.manufacturer import Manufacture
class CategoryTestCase(unittest.TestCase):
def setUp(self) -> None:
self.category = Category("Category 1")
self.manufacturer = Manufacture()
self.manufacturer.set_name("Manufacturer No1")
def test_add_product(self):
        # must be empty
self.assertEqual(0, len(self.category.products))
product1 = Product(
sku="a001", name="Product 1", price=10, manufacture=self.manufacturer
)
product2 = Product(
sku="a002", name="Product 2", price=9, manufacture=self.manufacturer
)
# add product1
self.category.add_product(product1)
self.assertTrue(self.category.has_product(product1))
self.assertEqual(1, len(self.category.products))
self.assertFalse(self.category.has_product(product2))
self.assertTrue(product1.has_category(self.category))
self.assertEqual(1, len(product1.categories))
# add product2
self.category.add_product(product2)
self.assertTrue(self.category.has_product(product2))
self.assertEqual(2, len(self.category.products))
self.assertTrue(product2.has_category(self.category))
self.assertEqual(1, len(product2.categories))
def test_remove_product(self):
        # must be empty
self.assertEqual(0, len(self.category.products))
product1 = Product(
sku="a001", name="Product 1", price=10, manufacture=self.manufacturer
)
product2 = Product(
sku="a002", name="Product 2", price=9, manufacture=self.manufacturer
)
self.category.add_product(product1)
self.category.add_product(product2)
self.assertEqual(2, len(self.category.products))
# remove product2
self.category.remove_product(product2)
self.assertFalse(self.category.has_product(product2))
self.assertFalse(product2.has_category(self.category))
# assert that product1 is not affected
self.assertTrue(self.category.has_product(product1))
self.assertTrue(product1.has_category(self.category))
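# Standard entry point so the module can also be run directly as a script,
# in addition to test discovery.
if __name__ == "__main__":
    unittest.main()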
|
from __future__ import division
from layer import *
from neuron import *
import math
import png
class DeepNeuralNetwork:
def __init__(self, NEURONS, TYPE = "mainNet"):
self.layers = []
self.bias = Neuron()
self.type = TYPE
self.layers.append(InputLayer(NEURONS[0]))
for i in range(1,len(NEURONS)):
self.layers.append(HiddenLayer(self.layers[i-1], NEURONS[i], self.bias))
def activate(self, INPUTS):
# print "Neural network ", self.type, "activated with : ", INPUTS
output = []
self.layers[0].activate(INPUTS)
for i in range(1, len(self.layers)):
self.layers[i].activate()
for N in self.layers[len(self.layers)-1].neurons:
output.append(N.out)
return output
    def toPNG(self, ITER):
        # Sample the network over a 100x100 grid and write the activations
        # as a greyscale PNG, one image row per y value.
        M = []
        for y in range(100):
            row = []
            for x in range(100):
                SA = self.activate([x / 100, y / 100])  # , math.sin(x), math.sin(y), x*y])
                row.append(int(SA[0] * 255))
            M.append(row)
        f = open('./png/' + str(ITER) + '.png', 'wb')  # binary mode is important
        w = png.Writer(100, 100, greyscale=True)
        w.write(f, M)
        f.close()
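# Illustrative usage (a sketch; the layer sizes are arbitrary, and Layer/Neuron
# come from the local modules imported above):
#
#     net = DeepNeuralNetwork([2, 8, 8, 1])
#     out = net.activate([0.5, 0.25])  # forward pass over one 2-D input
#     net.toPNG(0)                     # samples the net and writes ./png/0.png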
|
__author__ = 'aoboturov'
from load_data import log, count_by_user_attribute
import pandas as pd
pd.set_option('display.width', 300)
pd.set_option('display.max_colwidth', 200)
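# count_by_user_attribute comes from the local load_data module; judging by
# the outputs quoted below, it presumably behaves like
# log.groupby(cols)['user_id'].count() (an assumption, since its
# implementation is not shown here).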
print(log.ua.unique())
print(count_by_user_attribute(log, ['ua']))
print(count_by_user_attribute(log, ['sex']))
"""
sex
female 58376
male 19731
"""
print(count_by_user_attribute(log, ['age']))
"""
age
20-34 21358
35-49 34886
50over 21863
"""
print(count_by_user_attribute(log, ['sex', 'age']))
"""
sex age
female 20-34 18150
35-49 24506
50over 15720
male 20-34 3208
35-49 10380
50over 6143
"""
print(count_by_user_attribute(log, ['lang']))
print(count_by_user_attribute(log, ['layer']))
"""
layer
cart 6472
category 38693
item 24585
order 508
top 7849
"""
print(count_by_user_attribute(log, ['url']))
url = count_by_user_attribute(log, ['url'])
print(url[url['user_id'] > 5].to_string(max_rows=None, formatters={'url': '{:<90}'.format}))
# layer=top
print(log.loc[log.url.isin(['/7b7e1f07/eee18990/32e09360']), ['user_id', 'layer']].groupby('layer').count())
# layer=category
print(log.loc[log.url.isin(['/7b7e1f07/eee18990/32e09360/36fca572']), ['user_id', 'layer']].groupby('layer').count())
# layer=order
print(log.loc[log.url.isin(['/c8beb388/eee18990/32e09360/8a8a46b5/5815ff1d/1996c792']), ['user_id', 'layer']].groupby('layer').count())
print(log.loc[log['layer'] == 'order', ['user_id', 'url']].groupby('url').count())
"""
url
/7b7e1f07/eee18990/32e09360/8a8a46b5/5815ff1d/1996c792 4
/c8beb388/eee18990/32e09360/8a8a46b5/5815ff1d/1996c792 499 -- OK
/c8beb388/eee18990/32e09360/8a8a46b5/5815ff1d/74a190dd 4
/c8beb388/eee18990/e95b4f39/8a8a46b5/5815ff1d/1996c792 1 -- OK
"""
print(count_by_user_attribute(log, ['referrer']))
url_vs_referrer = pd.merge(
    pd.DataFrame(log['url'], columns=['url']),
    pd.DataFrame(log['referrer'], columns=['referrer']),
    left_on='url', right_on='referrer', how='inner'
)['url'].unique()
assert(len(url_vs_referrer) == 4614)
referrer = log['referrer'].unique()
assert(len(referrer) == 7701)
# many internal references, some could be external
|
from django.db import models
class Cart(models.Model):
session = models.CharField(max_length=100, db_index=True, unique=True)
    created = models.DateTimeField(auto_now_add=True, verbose_name='Creation date')
    updated = models.DateTimeField(auto_now=True, verbose_name='Last modified')
def get_total(self):
result = sum([item.get_amount() for item in self.items.all()])
return result
    def __str__(self):
        return 'Cart {0}'.format(self.pk)
    class Meta:
        verbose_name = 'Cart'
        verbose_name_plural = 'Carts'
class CartItem(models.Model):
product = models.ForeignKey('catalog.Product', on_delete=models.CASCADE)
cart = models.ForeignKey(Cart, related_name='items', on_delete=models.CASCADE)
quantity = models.PositiveSmallIntegerField(default=1, blank=True)
def get_amount(self):
result = self.product.price * self.quantity
return result
    def __str__(self):
        return '{item.product.category}: {item.product.title} x {item.quantity} pcs = {amount} RUB'.format(
            item=self, amount=self.get_amount())
class Order(models.Model):
    name = models.CharField(max_length=255, verbose_name='Customer name')
    phone = models.CharField(max_length=255, verbose_name='Customer phone')
    address = models.CharField(max_length=255, verbose_name='Customer address')
    person = models.PositiveIntegerField(verbose_name='Number of persons')
    cart = models.OneToOneField(Cart, on_delete=models.CASCADE, verbose_name='Cart')
    send = models.BooleanField(default=False, verbose_name='Notification sent')
    created = models.DateTimeField(auto_now_add=True, verbose_name='Creation date')
    updated = models.DateTimeField(auto_now=True, verbose_name='Last modified')
    def __str__(self):
        return 'Order {0}'.format(self.pk)
    class Meta:
        verbose_name = 'Order'
        verbose_name_plural = 'Orders'
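# Illustrative shell usage (a sketch; `some_product` is a hypothetical
# catalog.Product instance):
#
#     cart = Cart.objects.create(session='abc123')
#     CartItem.objects.create(cart=cart, product=some_product, quantity=2)
#     cart.get_total()  # sums item.get_amount() over cart.items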
|