text stringlengths 8 6.05M |
|---|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from naoqi import ALProxy
def test(robot_IP,is_simulation):
    """Smoke-test basic NAOqi ALMemory/ALMotion calls on the robot at robot_IP.

    is_simulation is accepted for caller parity; only the commented-out
    text-to-speech block ever consulted it.
    """
    #if not is_simulation:
    #audioProxy = ALProxy("ALTextToSpeech",robot_IP,9559)
    #audioProxy.post.say("I am Tanaka") # .post make a parallel call.
    memProxy = ALProxy("ALMemory",robot_IP,9559)
    memProxy.insertData("myValueName", 0)
    proxyMo = ALProxy('ALMotion',robot_IP,9559)
    # Example showing how to interpolate to maximum stiffness in 1 second
    names = 'Body'
    stiffnessLists = 1.0 # NOTE: it seems not working in Choregraphe
    timeLists = 1.0
    proxyMo.stiffnessInterpolation(names, stiffnessLists, timeLists)
    # Example showing a single target angle for one joint
    # Interpolate the head yaw to 1.0 radian in 1.0 second
    names = ['HeadYaw', 'HeadPitch']
    # angles = [[1.0], [0.2]]
    # times = [[1.0], [1.0]]
    angles = [[1.0, 0.0], [-0.5, 0.5, 0.0]]
    times = [[1.0, 2.0], [ 1.0, 2.0, 3.0]]
    isAbsolute = True
    proxyMo.angleInterpolation(names, angles, times, isAbsolute)
    # Example showing how to set angles, using a fraction of max speed
    names = ['HeadYaw', 'HeadPitch']
    angles = [-1.0, -0.2]
    fractionMaxSpeed = 0.2
    #proxyMo.setAngles(names, angles, fractionMaxSpeed)
    # NOTE: does not work in Choregraphe
def switch_servo(robot_IP,stiff):
    """Ramp every joint ('Body') to stiffness *stiff* over one second."""
    motion = ALProxy('ALMotion', robot_IP, 9559)
    motion.stiffnessInterpolation('Body', stiff, 1.0)
def servo_on(robot_IP):
    """Stiffen all joints (servos on)."""
    switch_servo(robot_IP,1.0)

def servo_off(robot_IP):
    """Relax all joints (servos off)."""
    switch_servo(robot_IP,0.0)
|
from scipy.signal import iirdesign, lfiltic, lfilter
from au_defs import *
class Filter:
    """Stateful IIR band-pass filter.

    Band edges are given in Hz and normalized by the Nyquist frequency.
    An edge too close to DC or to Nyquist is dropped, degrading the design
    to a pure low-pass or high-pass. Filter state persists across calls so
    successive sample blocks are filtered continuously.
    """

    def __init__(self, band_start, band_stop):
        nyquist_frequency = float(SAMPLES_PER_SECOND) / 2.0
        band_start /= nyquist_frequency
        band_stop /= nyquist_frequency
        assert 0 <= band_start <= 1
        assert 0 <= band_stop <= 1
        assert band_stop >= band_start

        passband_edges = []
        stopband_edges = []
        # Low edge only when it is not at DC (otherwise LPF only).
        if band_start >= 0.05:
            passband_edges.append(band_start * 1.025)
            stopband_edges.append(band_start * 0.975)
        # High edge only when it is not at Nyquist (otherwise HPF only).
        if band_stop <= 0.95:
            passband_edges.append(band_stop * 0.975)
            stopband_edges.append(band_stop * 1.025)

        self.feedforward_taps, self.feedback_taps = iirdesign(
            passband_edges,
            stopband_edges,
            0.1,  # max attenuation (dB) in passband
            30)   # min attenuation (dB) in stopband
        # Zero initial conditions for lfilter's state vector.
        self.filter_state = lfiltic(self.feedforward_taps, self.feedback_taps, [])

    def __call__(self, samples):
        """Filter *samples*, carrying internal state over from the last call."""
        filtered_samples, self.filter_state = lfilter(
            self.feedforward_taps,
            self.feedback_taps,
            samples,
            zi=self.filter_state)
        return filtered_samples
|
# %load ../standard_import.txt
import pandas as pd
import numpy as np
import math
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
import seaborn as sns
from sklearn.preprocessing import scale
import sklearn.linear_model as skl_lm
from sklearn.metrics import mean_squared_error, r2_score
import statsmodels.api as sm
import statsmodels.formula.api as smf
plt.style.use('seaborn-white')  # NOTE(review): renamed to 'seaborn-v0_8-white' in matplotlib >= 3.6
# ==============================================
# Load Datasets
# ==============================================
# usecols skips each CSV's unnamed leading index column.
advertising = pd.read_csv('Ref/Data/Advertising.csv', usecols=[1,2,3,4])
advertising.info()
credit = pd.read_csv('Ref/Data/Credit.csv', usecols=list(range(1,12)))
# Numeric (0/1) copy of the Yes/No Student column for regression use.
credit['Student2'] = credit.Student.map({'No':0, 'Yes':1})
credit.head(3)
# '?' marks missing values in Auto.csv; drop the affected rows.
auto = pd.read_csv('Ref/Data/Auto.csv', na_values='?').dropna()
auto.info()
# ==============================================
# 3.1 Simple Linear Regression
# Figure 3.1 - Least squares fit
# ==============================================
sns.regplot(advertising.TV, advertising.Sales, order=1, ci=None, scatter_kws={'color':'r', 's':9})
plt.xlim(-10,310)
plt.ylim(ymin=0)
# ==============================================
# Figure 3.2 - Regression coefficients - RSS
# ==============================================
# Regression coefficients (Ordinary Least Squares)
regr = skl_lm.LinearRegression()
# Mean-center TV (no variance scaling) before fitting.
X = scale(advertising.TV, with_mean=True, with_std=False).reshape(-1,1)
y = advertising.Sales
regr.fit(X,y)
print(regr.intercept_)
print(regr.coef_)
# Create grid coordinates for plotting
B0 = np.linspace(regr.intercept_-2, regr.intercept_+2, 50)
B1 = np.linspace(regr.coef_-0.02, regr.coef_+0.02, 50)
xx, yy = np.meshgrid(B0, B1, indexing='xy')
Z = np.zeros((B0.size,B1.size))
# Calculate Z-values (RSS) based on grid of coefficients
for (i,j),v in np.ndenumerate(Z):
    # note:
    # X was reshaped from (200,) into (200,1)
    # X.ravel converts this back to (200,)
    ypred = xx[i,j]+X.ravel()*yy[i,j] # this has size (200,)
    Z[i,j] =( (y - ypred)**2 ).sum()/1000 # in units of thousands
# Minimized RSS
min_RSS = r'$\beta_0$, $\beta_1$ for minimized RSS'
min_rss = np.sum(( regr.intercept_ + regr.coef_*X - y.values.reshape(-1,1) )**2)/1000
print(min_rss)
fig = plt.figure(figsize=(15,6))
fig.suptitle('RSS - Regression coefficients', fontsize=20)
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122, projection='3d')
# Left plot
CS = ax1.contour(xx, yy, Z, cmap=plt.cm.Set1, levels=[2.15, 2.2, 2.3, 2.5, 3])
ax1.scatter(regr.intercept_, regr.coef_[0], c='r', label=min_RSS)
ax1.clabel(CS, inline=True, fontsize=10, fmt='%1.1f')
# Right plot
ax2.plot_surface(xx, yy, Z, rstride=3, cstride=3, alpha=0.3)
ax2.contour(xx, yy, Z, zdir='z', offset=Z.min(), cmap=plt.cm.Set1,
            alpha=0.4, levels=[2.15, 2.2, 2.3, 2.5, 3])
ax2.scatter3D(regr.intercept_, regr.coef_[0], min_rss, c='r', label=min_RSS)
ax2.set_zlabel('RSS')
ax2.set_zlim(Z.min(),Z.max())
ax2.set_ylim(0.02,0.07)
# settings common to both plots
for ax in fig.axes:
    ax.set_xlabel(r'$\beta_0$', fontsize=17)
    ax.set_ylabel(r'$\beta_1$', fontsize=17)
    ax.set_yticks([0.03,0.04,0.05,0.06])
    ax.legend()
# ===================================================================
# Confidence interval on page 67 & Table 3.1 & 3.2 using Statsmodels
# ===================================================================
est = smf.ols('Sales ~ TV', advertising).fit()
print(est.summary().tables[1])
# RSS with regression coefficients
Sales_pred = est.params[0] + est.params[1]*advertising.TV
RSS = ((advertising.Sales - Sales_pred)**2).sum()
# Residual standard error: sqrt(RSS / (n - 2)) for simple regression.
RSE = math.sqrt(RSS/( len(advertising.Sales) - 2 ))
print(RSE)
# NOTE(review): TSS is conventionally centered on the observed Sales mean;
# np.mean(Sales_pred) coincides with it for OLS fitted values.
TSS = ((advertising.Sales - np.mean(Sales_pred))**2).sum()
R2 = 1.0 - RSS/TSS
print(R2)
# ===================================================================
# Table 3.1 & 3.2 using Scikit-learn
# ===================================================================
regr = skl_lm.LinearRegression()
X = advertising.TV.values.reshape(-1,1)
y = advertising.Sales
regr.fit(X,y)
print(regr.intercept_)
print(regr.coef_)
# RSS with regression coefficients
RSS = ((advertising.Sales - (regr.intercept_ + regr.coef_*advertising.TV))**2).sum()
RSE = math.sqrt(RSS/( len(advertising.Sales) - 2 ))
print(RSE)
mean_sales = np.mean(advertising.Sales.values)
print("percent error = %f\n"%(RSE/mean_sales*100))
Sales_pred = regr.predict(X)
R2 = r2_score(y, Sales_pred)
print(R2)
|
'''
Given a linked list, remove the nth node from the end of list and return its head.
For example,
Given linked list: 1->2->3->4->5, and n = 2.
After removing the second node from the end, the linked list becomes 1->2->3->5.
Note:
Given n will always be valid.
Try to do this in one pass.
'''
# Definition for singly-linked list.
class ListNode(object):
    """A singly-linked list node: a value plus a pointer to the next node."""
    def __init__(self, x):
        self.val = x
        self.next = None  # successor node, or None at the tail
class Solution(object):
    def removeNthFromEnd(self, head, n):
        """
        Remove the n-th node from the end of the list (n is assumed valid).
        :type head: ListNode
        :type n: int
        :rtype: ListNode
        """
        # First pass: count the nodes.
        length = 0
        node = head
        while node is not None:
            length += 1
            node = node.next
        # Removing the very first node: the new head is simply head.next.
        if n == length:
            return head.next
        # Second pass: stop on the node just before the victim, which sits
        # at zero-based position length - n, then splice it out.
        prev = head
        for _ in range(length - n - 1):
            prev = prev.next
        prev.next = prev.next.next
        return head
test = Solution()
|
# coding:utf-8
from django.db import models
from model_utils import FieldTracker
from commons.models import CommonModel
import constants
class MassEmailOnChangeMixin(object):
    """Model mixin: queue a mass-email task whenever tracked fields changed.

    Requires the model to define a ``tracker = FieldTracker()`` attribute.
    """
    def save(self, *args, **kwargs):
        if self.tracker.changed():
            # Imported here rather than at module level — presumably to
            # avoid a circular import; confirm before hoisting.
            from suscription.tasks import send_mass_email
            send_mass_email.delay()  # asynchronous task dispatch
        return super(MassEmailOnChangeMixin, self).save(*args, **kwargs)
class Service(MassEmailOnChangeMixin, CommonModel):
    """A monitored service; subscribers are mass-emailed when it changes."""
    # Human-readable unique service name.
    name = models.CharField(max_length=255, unique=True, blank=False, null=False)
    # Current availability state, one of SERVICE_STATUS_CHOICES.
    status = models.SmallIntegerField(choices=constants.SERVICE_STATUS_CHOICES,
                                      blank=False, null=False)
    # Field-change detection used by MassEmailOnChangeMixin.save().
    tracker = FieldTracker()

    def __unicode__(self):
        return self.name
class ServiceStatus(MassEmailOnChangeMixin, CommonModel):
    """A status bulletin, optionally attached to a Service."""
    # Optional link to the affected service.
    service = models.ForeignKey(Service, blank=True, null=True)
    title = models.CharField(max_length=255, blank=False, null=False)
    text = models.TextField(blank=False)
    # Field-change detection used by MassEmailOnChangeMixin.save().
    tracker = FieldTracker()

    def __unicode__(self):
        return self.text

    class Meta:
        verbose_name_plural = u'Service statuses'
        ordering = ('-created_at',)  # newest bulletin first
|
import socket

# Address of the greeting server; adjust to the target host.
HOST = '10.2.24.13'
PORT = 12345

# BUG FIX: the original used the Python 2 print statement, a SyntaxError
# under Python 3; the socket is now also closed even if recv() fails.
s = socket.socket()
try:
    s.connect((HOST, PORT))
    # Read at most 1 KiB of the server's greeting and display it.
    print(s.recv(1024))
finally:
    s.close()
|
import numpy as np
def convert_to_child(parent, children):
    """Follow uniformly-random child links from *parent* until a node with
    no children is reached, and return that leaf."""
    node = parent
    while len(children[node]) > 0:
        node = np.random.choice(children[node])
    return node
def gen_ex(exposed_y, parents, children, noise_std = .1):
    '''
    Toy data generation function.

    For each label in exposed_y, walk to a random descendant leaf (true_y),
    build a scalar feature from the leaf's parent chain, then expose it as
    16 noisy nonlinear views.

    noise_std: standard deviation of the Gaussian observation noise.
    '''
    true_y = np.array([convert_to_child(i, children) for i in exposed_y])
    # Encode each leaf's ancestry as a base-4 fraction: the parent at depth j
    # contributes (parent_id mod 4) * 4**-j.
    true_x = np.stack([sum([k%4 * (1/4) ** j for j, k in enumerate(parents[i])]) for i in true_y])[:, None]
    # 16 channels: the scaled feature wrapped modulo a per-channel period in
    # [0.5, 1], plus Gaussian noise.
    exposed_x = np.random.normal((np.repeat(true_x, 16, 1) * 10) % np.linspace(0.5, 1, 16)[None, :], noise_std)
    return exposed_x
def interpret(rp, num_root, children, prob = 0.5):
    """Greedy descent through the hierarchy scored by *rp*.

    Start at the best-scoring root (first num_root entries), and repeatedly
    move to the best-scoring child while the current score exceeds *prob*.
    Returns the last confidently-reached node (a leaf short-circuits), or
    None when even the best root is below the threshold.
    """
    best = np.argmax(rp[:num_root])
    confidence = rp[best]
    chosen = None
    while confidence > prob:
        chosen = best
        kids = children[best]
        best = kids[np.argmax(rp[kids])]
        confidence = rp[best]
        if len(children[best]) == 0:
            chosen = best
            break
    return chosen
def contains_hackerrank(s):
    """Return True when 'hackerrank' occurs in *s* as a subsequence,
    consuming each matched character exactly once."""
    chars = iter(s)
    # Membership on an iterator consumes it, so every target letter must be
    # found strictly after the previous match.
    return all(letter in chars for letter in 'hackerrank')


def main():
    """Read q query strings and print YES/NO for each.

    BUG FIX: the original restarted the scan at the previous match index
    (``k = j`` instead of ``k = j + 1``), so one character could satisfy two
    consecutive target letters (e.g. 'hackerank' was wrongly accepted), and a
    stale ``result`` from the previous query could leak into the next one.
    """
    q = int(input().strip())
    for _ in range(q):
        print('YES' if contains_hackerrank(input().strip()) else 'NO')


if __name__ == '__main__':
    main()
|
# ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
from .java_types import *
from .attribute_info import Attribute
from .signature_parser import BaseType
from . import util
# Sentinel returned by the *Type.match() parsers on failure:
# (no value, zero characters consumed).
_UNPARSED = (None, 0)
class FieldInfoFlags(object):
    """Access flags of a Java class-file field_info structure.

    http://java.sun.com/docs/books/jvms/second_edition/html/ClassFile.doc.html#88358
    """
    ACC_PUBLIC = 0x0001
    ACC_PRIVATE = 0x0002
    ACC_PROTECTED = 0x0004
    ACC_STATIC = 0x0008
    ACC_FINAL = 0x0010
    ACC_VOLATILE = 0x0040
    ACC_TRANSIENT = 0x0080

    def __init__(self, data):
        # access_flags is a u2 at the start of *data*.
        self._flags = u2(data).get()

    def _has(self, mask):
        # Nonzero (truthy) when the given ACC_* bit is set.
        return self._flags & mask

    def public(self):
        return self._has(FieldInfoFlags.ACC_PUBLIC)

    def private(self):
        return self._has(FieldInfoFlags.ACC_PRIVATE)

    def protected(self):
        return self._has(FieldInfoFlags.ACC_PROTECTED)

    def static(self):
        return self._has(FieldInfoFlags.ACC_STATIC)

    def final(self):
        return self._has(FieldInfoFlags.ACC_FINAL)

    def volatile(self):
        return self._has(FieldInfoFlags.ACC_VOLATILE)

    def transient(self):
        return self._has(FieldInfoFlags.ACC_TRANSIENT)

    def __str__(self):
        # Render the set modifiers in canonical declaration order.
        modifiers = (
            ('public', self.public()),
            ('private', self.private()),
            ('protected', self.protected()),
            ('static', self.static()),
            ('final', self.final()),
            ('volatile', self.volatile()),
            ('transient', self.transient()),
        )
        return ' '.join(name for name, present in modifiers if present)
class ObjectType(object):
    """Parses 'L<classname>;' object field descriptors."""
    @staticmethod
    def match(data):
        if data[0] != 'L':
            return _UNPARSED
        # Consume through the ';' terminator; yield the bare class name.
        end = data.find(';')
        return data[1:end], end + 1
class ArrayType(object):
    """Parses '[<component>' array field descriptors."""
    @staticmethod
    def match(data):
        if data[0] == '[':
            # An array descriptor is '[' followed by any component type.
            component, offset = ComponentType.match(data[1:])
            # Render Java-style ('I' -> 'I[]'); +1 accounts for the '['.
            return component+'[]', offset + 1
        else:
            return _UNPARSED
class ComponentType(object):
    """Array component type — grammatically identical to FieldType."""
    @staticmethod
    def match(data):
        return FieldType.match(data)

class FieldDescriptor(object):
    """A field descriptor is just a FieldType (see FieldType's grammar)."""
    @staticmethod
    def match(data):
        return FieldType.match(data)
class FieldType(object):
    """http://java.sun.com/docs/books/jvms/second_edition/html/ClassFile.doc.html#1170
    FieldType:
        BaseType
        ObjectType
        ArrayType
    FieldDescriptor:
        FieldType
    ComponentType:
        FieldType
    BaseType: 'B' | 'C' | 'D' | 'F' | 'I' | 'J' | 'S' | 'Z'
    ObjectType:
        L <classname> ;
    ArrayType:
        [ ComponentType
    """
    @staticmethod
    def match(data):
        # Try each alternative in turn; a nonzero consumed-offset signals a
        # successful sub-parse.
        base_type, offset = BaseType.match(data)
        if offset: return base_type, offset
        object_type, offset = ObjectType.match(data)
        if offset: return object_type, offset
        array_type, offset = ArrayType.match(data)
        if offset: return array_type, offset
        return _UNPARSED
class FieldInfo(object):
    """One field_info structure parsed from class-file bytes, with its name
    and descriptor resolved ("synthesized") from the constant pool."""
    def __init__(self, data, constants):
        self._access_flags = FieldInfoFlags(data[0:2])
        # Three consecutive u2 items follow the access flags.
        (self._name_index, self._descriptor_index, self._attributes_count), data = \
            JavaNativeType.parse(data[2:], u2, u2, u2)
        self._name = constants[self._name_index] # synthesized
        self._descriptor = constants[self._descriptor_index] # synthesized
        self._parsed_descriptor, _ = FieldDescriptor.match(self._descriptor.bytes())
        self._attributes = []
        offset = 0
        for k in range(self._attributes_count):
            attribute = Attribute.parse(data[offset:], constants)
            offset += attribute.size()
            self._attributes.append(attribute)
        # 8 = the four leading u2 fields (access_flags, name index,
        # descriptor index, attributes count).
        self._size = offset + 8

    def size(self):
        """Total bytes this field_info occupied in the class file."""
        return self._size

    def __str__(self):
        base = '%s %s %s' % (
            self._access_flags,
            util.javaify(self._parsed_descriptor),
            self._name)
        if self._attributes:
            for attr in self._attributes:
                base += '\n %s: %s' % (attr.name(), attr)
            base += '\n'
        return base
|
import random as rn
import math as mt
import numpy as np
import copy
class reseau():
    """A small fully-connected neural network trained with SGD backprop.

    Parameters are stored as nested Python lists (no numpy matrices):
      pois_w[layer][neuron][input]  -- weights
      bier_b[layer][neuron]         -- biases
    Only the 'sigmoide' activation is implemented. The original (French,
    idiosyncratically spelled) identifiers are kept — they are the class's
    public interface; comments have been translated to English.

    BUG FIXES vs. the original:
      * prousentage(): 'if yt = 1 and yp = True:' was a SyntaxError.
      * predict(): every layer read the raw input instead of the previous
        layer's activations, and a dead duplicate definition shadowed a stub.
      * predict(): 'seuie == int()' compared the threshold to 0.
      * list-role detection now uses identity ('is'), not value equality.
    """

    def __init__(self):
        self.fonction_dactivasion = []      # per-layer activation names
        self.pois_w = []                    # weights
        self.bier_b = []                    # biases
        self.inisialisation_premier = True  # True until the first add()
        self.nerone_size = []               # neurons per layer

    def add(self, nerone, fonction, couche_dantre = 0):
        """Append a layer of *nerone* neurons using activation *fonction*.

        couche_dantre (the input width) must be non-zero for the first layer.
        """
        self.fonction_dactivasion.append(fonction)
        if self.inisialisation_premier:
            if couche_dantre != 0:
                self.couche_dantre = couche_dantre
            self.inisialisatuer(nerone, couche_dantre, self.pois_w)
            self.inisialisation_premier = False
        else:
            # Fan-in of a later layer is the previous layer's width.
            self.inisialisatuer(nerone, self.nerone_size[-1], self.pois_w)
        self.inisialisatuer(couche1 = nerone, couche2 = 1, listeBW = self.bier_b)
        self.nerone_size.append(nerone)

    def inisialisatuer(self, couche1, couche2, listeBW):
        """Append one layer of uniform(-0.3, 0.3) parameters to *listeBW*.

        The weight list gets couche1 nested rows of couche2 values; the bias
        list gets a flat row of couche1 values.
        """
        # BUG FIX: was 'listeBW == self.pois_w' — value equality can
        # misidentify the target list (e.g. two equal lists); identity is
        # what is meant.
        is_weights = listeBW is self.pois_w
        couche_list = []
        for l in range(couche1):
            inise = []
            for j in range(couche2):
                nonbreBW = rn.uniform(-0.3, 0.3)
                if is_weights:
                    inise.append(nonbreBW)
                else:
                    couche_list.append(nonbreBW)
            if is_weights:
                couche_list.append(inise)
        listeBW.append(couche_list)

    def inisialisatuer_zero(self, couche1, couche2, listeBW, desente_de_lereur_w):
        """Like inisialisatuer() but zero-filled (gradient accumulators).

        desente_de_lereur_w identifies which list plays the 'weights' role
        (nested rows); any other target gets a flat row.
        """
        # BUG FIX: identity check instead of value equality (see above).
        is_weights = listeBW is desente_de_lereur_w
        couche_list = []
        for l in range(couche1):
            inise = []
            for j in range(couche2):
                nonbreBW = 0
                if is_weights:
                    inise.append(nonbreBW)
                else:
                    couche_list.append(nonbreBW)
            if is_weights:
                couche_list.append(inise)
        listeBW.append(couche_list)

    def fit(self, entre_x, sorti_y, batch_size = 1, epochs = 100):
        """Train with mini-batch SGD; prints the epoch counter as it runs."""
        data_entrene = [(x, y) for x, y in zip(entre_x, sorti_y)]
        taie_x = len(data_entrene)
        for epc in range(epochs):
            rn.shuffle(data_entrene)
            list_batchs = [data_entrene[z:z+batch_size] for z in range(0, taie_x, batch_size)]
            for list_batch in list_batchs:
                self.mise_a_jour_du_resaus(list_batch, self.taus_darpentisage)
            print(epc)

    def mise_a_jour_du_resaus(self, dataxy, eta):
        """Apply one SGD step averaged over the mini-batch *dataxy*."""
        desente_de_lereur_w = []
        desente_de_lereur_b = []
        nerone_size_shape = len(self.nerone_size)
        # Zero accumulators shaped like pois_w / bier_b.
        for nss in range(nerone_size_shape):
            if desente_de_lereur_w == [] and desente_de_lereur_b == []:
                self.inisialisatuer_zero(self.nerone_size[nss], self.couche_dantre, desente_de_lereur_w, desente_de_lereur_w)
            else:
                self.inisialisatuer_zero(self.nerone_size[nss], self.nerone_size[nss-1], desente_de_lereur_w, desente_de_lereur_w)
            self.inisialisatuer_zero(self.nerone_size[nss], 1, desente_de_lereur_b, desente_de_lereur_w)
        refe_delta_b = copy.deepcopy(desente_de_lereur_b)
        refe_delta_w = copy.deepcopy(desente_de_lereur_w)
        for x, y in dataxy:
            delta_b = copy.deepcopy(refe_delta_b)
            delta_w = copy.deepcopy(refe_delta_w)
            delta_desente_b, delta_desente_w = self.retroPropagasion(x, y, delta_w, delta_b)
            # Accumulate this example's gradients into the batch totals.
            for i, desenteb in enumerate(desente_de_lereur_b):
                desb = []
                for indexb, ds in enumerate(desenteb):
                    desb.append(desente_de_lereur_b[i][indexb] + delta_desente_b[i][indexb])
                desente_de_lereur_b[i] = desb
            for i, desentew in enumerate(desente_de_lereur_w):
                desw = []
                for indexw, ds in enumerate(desentew):
                    dew = []
                    for indw, d in enumerate(ds):
                        dew.append(desente_de_lereur_w[i][indexw][indw] + delta_desente_w[i][indexw][indw])
                    desw.append(dew)
                desente_de_lereur_w[i] = desw
        # Gradient-descent update, averaged over the batch size.
        for i, (w_couche, ereur_couche_w) in enumerate(zip(self.pois_w, desente_de_lereur_w)):
            couchew = []
            for w_nerone, ereur_nerone_w in zip(w_couche, ereur_couche_w):
                neronew = []
                for w_sinapse, w_ereur in zip(w_nerone, ereur_nerone_w):
                    neronew.append(w_sinapse - (eta/len(dataxy))*w_ereur)
                couchew.append(neronew)
            self.pois_w[i] = couchew
        for i, (b_chouche, ereur_couche_b) in enumerate(zip(self.bier_b, desente_de_lereur_b)):
            coucheb = []
            for b_nerone, ereur_nerone_b in zip(b_chouche, ereur_couche_b):
                coucheb.append(b_nerone - (eta/len(dataxy))*ereur_nerone_b)
            self.bier_b[i] = coucheb

    def retroPropagasion(self, x, y, list_w, list_b):
        """Forward pass, then delegate to the cost/gradient routine.

        Returns (nabla_b, nabla_w), shaped like bier_b and pois_w.
        """
        x1 = [float(xa) for xa in x.tolist()]
        activasion = x1
        activasions = [x1]   # per-layer activations, input first
        zs = []              # per-layer pre-activations
        for e, (w, b) in enumerate(zip(self.pois_w, self.bier_b)):
            z = []
            a = []
            for intreireu_w, interireu_b in zip(w, b):
                mmini_z = 0
                for i, ner in enumerate(intreireu_w):
                    mmini_z += ner*activasion[i]
                mmini_z += interireu_b
                a.append(self.forncionActivastion(e, mmini_z))
                z.append(mmini_z)
            zs.append(z)
            activasion = a
            activasions.append(a)
        nabla_b, nabla_w = self.fonctionDeCous(zs, activasions, list_w, list_b, y)
        return nabla_b, nabla_w

    def fonctionDeCous(self, list_z, list_acti, list_w, list_b, y):
        """Dispatch to the configured optimiser's gradient routine."""
        if self.optimisation == "SGD":
            return self.retoSGD(list_z, list_acti, list_w, list_b, y)

    def retoSGD(self, zs, activasions, list_w, list_b, y):
        """Backpropagation: fill list_b / list_w with the cost gradients.

        Per-function deltas are cached in tablau_liste_foncsions so layers
        sharing an activation reuse the already-computed chain.
        """
        exisent_foncsion = {"sigmoide": False, "rampe": False, "heaviside": False, "tangente_hyperbolique": False, "arc_tangente": False, "signe": False, "ReLU": False, "PReLU": False, "ELU": False, "SoftPlus": False, "Identité_courbée": False, "soft_exponential": False, "Sinusoïde": False, "Sinc": False, "Gaussienne": False}
        tablau_liste_foncsions = {}
        for i, fonc in enumerate(self.fonction_dactivasion):
            index = -(i+1)
            if exisent_foncsion[fonc] == True:
                # Already back-propagated for this activation: reuse deltas.
                list_b[index] = tablau_liste_foncsions[fonc][0][index]
                list_w[index] = tablau_liste_foncsions[fonc][1][index]
            else:
                for j in range(len(self.fonction_dactivasion)):
                    iindex = -(j+1)
                    if iindex == -1:
                        # Output layer: delta = cost gradient * act'(z).
                        if len(activasions[-1]) == 1:
                            cost = activasions[-1]-y
                        else:
                            cost = [(a - ynuiq) for a, ynuiq in zip(activasions[-1], y)]
                        delta = [(cos*self.fonctionDeDerivasion(-1, prit)) for cos, prit in zip(cost, zs[-1])]
                        w_delta = []
                        for d in delta:
                            mini_delta_w = []
                            for ac in activasions[-2]:
                                mini_delta_w.append(d*ac)
                            w_delta.append(mini_delta_w)
                        tablau_liste_foncsions[fonc] = [[delta], [w_delta]]
                        if index == -1:
                            list_b[-1] = delta
                            list_w[-1] = w_delta
                    else:
                        # Hidden layer: delta = (W^T . delta_next) * act'(z).
                        z_list = zs[iindex]
                        sp = [self.fonctionDeDerivasion(index, z) for z in z_list]
                        couche_retro = self.pois_w[iindex+1]
                        w_transpose = []
                        for ji, ii in enumerate(couche_retro[0][:]):
                            list_retro = []
                            for cr, w in enumerate(couche_retro):
                                list_retro.append(couche_retro[cr][ji])
                            w_transpose.append(list_retro)
                        for w, trans in enumerate(w_transpose):
                            transpose = [(tr*delt) for tr, delt in zip(trans, delta)]
                            w_transpose[w] = transpose
                            w_unite = 0
                            for d in w_transpose[w]:
                                w_unite += d
                            w_transpose[w] = w_unite
                        delta = [(wtr*s) for wtr, s in zip(w_transpose, sp)]
                        tablau_liste_foncsions[fonc][0].insert(0, delta)
                        w_delta = []
                        for d in delta:
                            mini_delta_w = []
                            for ac in activasions[iindex-1]:
                                mini_delta_w.append(d*ac)
                            w_delta.append(mini_delta_w)
                        tablau_liste_foncsions[fonc][1].insert(0, w_delta)
                        itereb = len(list_b[index])
                        if index == iindex:
                            list_b[index] = [delta for itb in range(itereb)]
                            list_w[index] = w_delta
                exisent_foncsion[fonc] = True
        return list_b, list_w

    def compile(self, optimisation = "SGD", taus_darpentisage = 0.1):
        """Choose the optimiser ('SGD' only) and the learning rate."""
        self.optimisation = optimisation
        self.taus_darpentisage = taus_darpentisage

    def sigmoide(self, z):
        """Logistic sigmoid 1/(1+e^-z)."""
        return 1.0/(1.0+mt.exp(-z))

    def sigmoide_printe(self, z):
        """Derivative of the sigmoid: s(z)*(1-s(z))."""
        s = self.sigmoide(z)
        return s*(1-s)

    def fonctionDeDerivasion(self, index, z):
        """Derivative of layer *index*'s activation at z (sigmoide only)."""
        if self.fonction_dactivasion[index] == "sigmoide":
            return self.sigmoide_printe(z)

    def forncionActivastion(self, index, z):
        """Layer *index*'s activation at z (sigmoide only)."""
        if self.fonction_dactivasion[index] == "sigmoide":
            return 1.0/(1.0+mt.exp(-z))

    def predict(self, data_teste, seuie = None):
        """Forward-propagate each sample; binarise against *seuie* if given.

        BUG FIXES: each layer now consumes the previous layer's activations
        (the original fed the raw input to every layer); any non-None
        threshold now triggers binarisation (was 'seuie == int()', i.e. 0);
        the dead duplicate definition of predict() was removed.
        """
        resulte = []
        for x_teste in data_teste:
            copyx_teste = copy.deepcopy(x_teste)
            for e, (w_couche, b_chouche) in enumerate(zip(self.pois_w, self.bier_b)):
                activasions = []
                for nerone_w, nerone_b in zip(w_couche, b_chouche):
                    mmini_z = 0
                    for i, intreireu_w in enumerate(nerone_w):
                        mmini_z += intreireu_w*copyx_teste[i]
                    mmini_z += nerone_b
                    activasions.append(self.forncionActivastion(e, mmini_z))
                copyx_teste = activasions
            if len(copyx_teste) == 1:
                copyx_teste = copyx_teste[0]
            if seuie is not None:
                copyx_teste = (copyx_teste > seuie)
            resulte.append(copyx_teste)
        return resulte

    def prousentage(self, y_test, y_prede):
        """Print the confusion counts and accuracy; also return the accuracy.

        BUG FIX: the original 'if yt = 1 and yp = True:' was a SyntaxError
        and only ever counted true positives.
        """
        taie_data = len(y_test)
        predicton_vair0 = 0
        predicton_vair1 = 0
        predicton_fause1 = 0
        predicton_fause0 = 0
        for yt, yp in zip(y_test, y_prede):
            if yt == 1 and yp:
                predicton_vair1 += 1
            elif yt == 1 and not yp:
                predicton_fause0 += 1
            elif yt == 0 and yp:
                predicton_fause1 += 1
            else:
                predicton_vair0 += 1
        print("predicton_vair0", predicton_vair0)
        print("predicton_vair1", predicton_vair1)
        print("predicton_fause1", predicton_fause1)
        print("predicton_fause0", predicton_fause0)
        print("-----------")
        accuracy = (predicton_vair1+predicton_vair0)/taie_data
        print(accuracy)
        return accuracy
|
# -*- coding: utf-8 -*-
'''
1. GRU
2. CNN
3. GRU+CNN
4. Transformer
5. Star-Transformer
6. BT-Transformer
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
class RNN(nn.Module):
    '''
    Bidirectional GRU text classifier: embed -> GRU -> last time step -> linear.
    hyperparameters:
        batch_size=32, init_lr=2.5e-2, weight_decay=1e-5, lr_decay=0.1: 83.50%
    '''
    def __init__(self, vocab_size, nemb, nhid, nclass, nlayer=1, dropout=0.2):
        super(RNN, self).__init__()
        self.nemb = nemb
        self.nhid = nhid
        self.nclass = nclass
        self.nlayer = nlayer
        self.emb = nn.Embedding(num_embeddings=vocab_size, embedding_dim=nemb)
        self.dropout1 = nn.Dropout(dropout)
        # Inter-layer dropout only applies when the GRU is stacked.
        self.rnn = nn.GRU(input_size=nemb, hidden_size=nhid, num_layers=nlayer,
                          dropout=0 if nlayer == 1 else dropout, bidirectional=True)
        self.num_dir = 2  # bidirectional
        self.dropout2 = nn.Dropout(dropout)
        self.fc = nn.Linear(nhid * self.num_dir, nclass)

    def forward(self, x, hidden_state):
        embedded = self.dropout1(self.emb(x))
        rnn_out, hidden_state = self.rnn(embedded, hidden_state)
        # Classify from the final time step (holds both directions' states).
        final_step = rnn_out[-1, ...]
        return self.fc(self.dropout2(final_step))

    def init_hiddens(self, batch_size):
        """Zero initial state, shaped (num_layers*num_directions, batch, nhid)."""
        return torch.zeros([self.nlayer * self.num_dir, batch_size, self.nhid])
class Transformer(nn.Module):
    """Transformer-encoder text classifier: embedding -> positional encoding
    -> TransformerEncoder -> average-pool over time -> linear decoder.

    NOTE(review): relies on a PositionalEncoding class defined elsewhere in
    this project.
    """
    def __init__(self, vocab, nemb, nhead, nhid, nlayer, nclass, dropout=0.1):
        super(Transformer, self).__init__()
        try:
            from torch.nn import TransformerEncoder, TransformerEncoderLayer
        except:
            raise ImportError('TransformerEncoder module does not exist in PyTorch 1.1 or lower.')
        self.model_type = 'Transformer'
        self.ntoken = len(vocab)
        self.ninp = nemb
        self.nhead = nhead
        self.nhid = nhid
        self.nlayer = nlayer
        self.nclass = nclass
        self.dropout = dropout
        #self.encoder = nn.Embedding.from_pretrained(vocab.vectors, freeze=False)
        self.embedding = nn.Embedding(self.ntoken, self.ninp)
        #self.dropout1 = nn.Dropout(self.dropout)
        self.pos_encoder = PositionalEncoding(nemb, dropout)
        self.src_mask = None  # built lazily in forward() when has_mask=True
        encoder_layers = TransformerEncoderLayer(self.ninp, self.nhead, self.nhid, self.dropout)
        self.transformer_encoder = TransformerEncoder(encoder_layers, self.nlayer)
        self.dropout2 = nn.Dropout(self.dropout)
        #self.pool = nn.AdaptiveMaxPool1d(1)
        self.pool = nn.AdaptiveAvgPool1d(1)
        self.decoder = nn.Linear(self.ninp, self.nclass)
        #self.init_weights()

    def _generate_square_subsequent_mask(self, sz):
        # Causal mask: lower triangle (allowed) -> 0.0, future positions -> -inf.
        mask = torch.tril(torch.ones([sz, sz], dtype=torch.int))
        mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
        return mask

    def init_weights(self):
        # Currently unused: the call in __init__ is commented out.
        initrange = 0.2
        self.embedding.weight.data.uniform_(-initrange, initrange)
        self.decoder.bias.data.zero_()
        self.decoder.weight.data.uniform_(-initrange, initrange)

    def forward(self, src, hidden_state, has_mask=False):
        # hidden_state is accepted only for API parity with the RNN model;
        # it is never used in this body.
        if has_mask:
            device = src.device
            # Rebuild the cached mask only when the sequence length changes.
            if self.src_mask is None or self.src_mask.size(0) != len(src):
                mask = self._generate_square_subsequent_mask(len(src)).to(device)
                self.src_mask = mask
        else:
            self.src_mask = None
        src = self.embedding(src) #* math.sqrt(self.ninp)
        #src = self.dropout1(src)
        src = self.pos_encoder(src)
        output = self.transformer_encoder(src, self.src_mask)
        # (seq, batch, emb) -> (batch, emb, seq) for 1-d pooling over time.
        output = output.permute(1, 2, 0)
        output = self.pool(output).squeeze(-1)
        #output = self.dropout2(output)
        output = self.decoder(output)
        return output

    def init_hiddens(self, batch_size):
        # Dummy hidden state, for interface parity with recurrent models.
        return torch.zeros([1, batch_size, self.nhid])
|
# This is a test python file, which includes sensitive information from
# ROCKYOU password dataset, and some URLs. Add another password to test counter.
INTERNAL_URL = 'http://jira.agile.bns/'
POTENTIAL_PASSWORD_LIST = ['123456', 'shadow', 'monkey']

print("Internal URL is: " + INTERNAL_URL)
# BUG FIX: 'for(item in ...)' is not valid Python syntax, and concatenating
# the int counter to a str raised TypeError; enumerate handles both.
for i, item in enumerate(POTENTIAL_PASSWORD_LIST, start=1):
    print("Potential password " + str(i) + ": " + item)
print("This is a test script.")
|
from __future__ import division
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
from composition import closure
from metrics import variation_distance
from skbio.stats import subsample_counts
from skbio.stats.composition import closure
from skbio.diversity.alpha import robbins, lladser_pe
from composition import coverage_replacement, multiplicative_replacement
from scipy.stats import power_divergence, entropy
from cogent.parse.tree import DndParser
from cogent.maths.unifrac.fast_unifrac import fast_unifrac
from cogent.maths.unifrac.fast_tree import UniFracTreeNode
import biom
import os
import itertools
from mpl_toolkits.mplot3d import Axes3D
#######################################################################
# Distance One Urn sampled across entire simplex #
#######################################################################
np.random.seed(0)  # reproducible simplex draws
data_dir = "../data/tick/meshnick_tech_reps"
biom_file = "%s/373_otu_table.biom" % data_dir
meta_file = "%s/meta.txt" % data_dir
table = biom.load_table(biom_file)
# NOTE(review): _get_sparse_data() is a private biom API and may break
# across biom-format versions; `mat` is only consumed by the commented-out
# pvals line below.
mat = np.array(table._get_sparse_data().todense()).T
# Randomly sample simplex
num_dists = 10000
num_species = 1000
depths=[300, 3000, 30000]
relative_tvd = np.zeros((num_dists, len(depths)))
robbins_tvd = np.zeros((num_dists, len(depths)))
for u, depth in enumerate(depths):
    for i in range(num_dists):
        # Random composition: normalized exponential draws on the simplex.
        pvals = closure(-np.log(np.random.rand(num_species)))
        # pvals = closure(mat[i, :])
        samp_table = np.random.multinomial(n=depth, pvals=pvals)
        cx1 = coverage_replacement(np.atleast_2d(samp_table),
                                   uncovered_estimator=robbins)
        # Compare plain relative abundances vs Robbins coverage correction.
        relative_tvd[i, u] = variation_distance(closure(samp_table), pvals)
        robbins_tvd[i, u] = variation_distance(cx1, pvals)
fig, axes = plt.subplots(1, 3, figsize=(15, 4.5))
for u in range(len(depths)):
    axes[u].hist(relative_tvd[:, u], 20, label='Relative', alpha=0.5, color='b')
    axes[u].hist(robbins_tvd[:, u], 20, label='Robbins', alpha=0.5, color='r')
    axes[u].set_title('Depth=%d' % depths[u])
    if u == 0:
        axes[u].set_ylabel('Counts')
    if u == 1:
        axes[u].set_xlabel('Total Variation Distance')
    axes[u].locator_params(nbins=4)
plt.legend()
fig.savefig('../results/multiple_simplicial_hists.png')
#######################################################################
# Distance One Urn sampled across entire simplex #
#######################################################################
# Randomly sample simplex
num_dists = 10000
num_species = 1000
depths = np.linspace(1000, 2000000, 100)
relative_tvd = np.zeros((num_dists, len(depths)))
robbins_tvd = np.zeros((num_dists, len(depths)))
# BUG FIX: brive_tvd was assigned into below without ever being initialized.
brive_tvd = np.zeros((num_dists, len(depths)))
for u, depth in enumerate(depths):
    for i in range(num_dists):
        # Random composition: normalized exponential draws on the simplex.
        pvals = closure(-np.log(np.random.rand(num_species)))
        # multinomial needs an integer trial count; linspace yields floats.
        samp_table = np.random.multinomial(n=int(depth), pvals=pvals)
        cx1 = coverage_replacement(np.atleast_2d(samp_table),
                                   uncovered_estimator=robbins)
        # NOTE(review): `brive` is not imported anywhere in this script —
        # confirm which module provides it before running.
        cx2 = np.apply_along_axis(brive, 1, np.atleast_2d(samp_table),
                                  replace_zeros=True)
        relative_tvd[i, u] = variation_distance(closure(samp_table), pvals)
        robbins_tvd[i, u] = variation_distance(cx1, pvals)
        brive_tvd[i, u] = variation_distance(cx2, pvals)
fig, axes = plt.subplots()
width = 1000
depths = depths.astype(int)  # np.int was removed from modern NumPy
# For each depth: in how many distributions Robbins beats Brive on TVD.
robbins_wins = (robbins_tvd < brive_tvd).sum(axis=0)
axes.plot(depths, robbins_wins)
axes.fill_between(depths, robbins_wins, 10000,
                  where=10000 >= robbins_wins,
                  facecolor='blue', interpolate=True)
axes.fill_between(depths, robbins_wins, 0,
                  where=robbins_wins > 0,
                  facecolor='red', interpolate=True)
axes.set_title('Comparison of TVD vs Sampling Depth')
axes.set_ylabel('Number of distributions')
axes.set_xlabel('Sampling depth')
plt.legend(loc=3)
plt.xlim([0, 2000000])
plt.ylim([0, 10000])
fig.savefig('../results/simplical_sampling_depth.png')
#######################################################################
# Distance ratio One Urn sampled across entire simplex #
#######################################################################
fig, axes = plt.subplots()
width = 1000
depths = depths.astype(int)  # np.int was removed from modern NumPy
# BUG FIX: the ratio must use brive_tvd (the TVD array); `brive` is the
# estimator function itself, so the original raised a TypeError/NameError.
ratio = robbins_tvd / brive_tvd
num_dists, _ = ratio.shape
# One line per sampled distribution.
for i in range(num_dists):
    axes.plot(depths, ratio[i, :], '-b')
axes.set_title('Comparison of TVD vs Sampling Depth')
axes.set_ylabel('TVD(robbins) / TVD(brive)')
axes.set_xlabel('Sampling depth')
plt.legend(loc=3)
plt.xlim([0, 1000000])
fig.savefig('../results/simplical_brive_robbins_ratios.png')
|
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from . import views
# API routes for the salon/clinic backend; all endpoints live under /api/.
urlpatterns = [
    # --- Authentication ---
    path('api/login', views.UserLoginAPIView.as_view(), name='login'),
    path('api/logout', views.UserLogoutAPIView.as_view(), name='logout'),
    # --- Staff, services and shifts ---
    # NOTE(review): the routes above/below are inconsistent about trailing
    # slashes -- confirm intentional w.r.t. Django's APPEND_SLASH behaviour.
    path('api/employeebranchwise', views.EmployeeList.as_view(), name='employee-branch'),
    path('api/skills', views.ServicesListAPIView.as_view(), name='skills'),
    path('api/shiftlist', views.ShiftListAPIView.as_view(), name='shiftlist'),
    # --- Customers, bookings and branches ---
    path('api/customers/all/', views.CustomerListAPIView.as_view(), name='customer_all'),
    path('api/bookingstatus/', views.AppointmentBookingStatusList.as_view(), name='bookingstatus'),
    path('api/branchlist/', views.ItemSiteListAPIView.as_view(), name='branchlist'),
    path('api/branchlogin/', views.ItemSiteListAPIViewLogin.as_view(), name='branchlogin'),
    path('api/treatmentstock/<int:pk>/', views.StockDetail.as_view(), name='treatmentstock'),
    path('api/staffsavailable/', views.StaffsAvailable.as_view(), name='staffsavailable'),
    path('api/userlist/', views.UsersList.as_view(), name='userlist'),
    path('api/paytable/', views.PaytableListAPIView.as_view(), name='paytable'),
    path('api/customerreceiptprint/', views.CustomerReceiptPrintList.as_view(), name='customerreceiptprint'),
    path('api/source/', views.SourceAPI.as_view(), name='source'),
    path('api/securities/', views.SecuritiesAPIView.as_view(), name='securities'),
    path('api/schedulehour/', views.ScheduleHourAPIView.as_view(), name='schedulehour'),
    path('api/custappt/', views.CustApptAPI.as_view(), name='custappt'),
    path('api/appttype/', views.ApptTypeAPIView.as_view(), name='appttype'),
    path('api/focreason/', views.FocReasonAPIView.as_view(), name='focreason'),
    # path('api/updatetable/', views.UpdateTablesAPIView.as_view(), name='updatetable'),
    path('api/treatmentpackages/', views.TreatmentApptAPI.as_view(), name='treatmentpackages'),
    path('api/appointmentsort/', views.AppointmentSortAPIView.as_view(), name='appointmentsort'),
    # --- Metadata lookups (function-based views) ---
    path('api/meta/race/', views.meta_race, name='meta_race'),
    path('api/meta/nationality/', views.meta_nationality, name='meta_nationality'),
    path('api/meta/religion/', views.meta_religious, name='meta_religious'),
    path('api/meta/country/', views.meta_country, name='meta_country'),
    # --- Work schedules and skills ---
    path('api/WorkScheduleMonth/', views.MonthlyWorkSchedule.as_view(), name='WorkScheduleMonth'),
    path('api/MonthlyAllSchedule/', views.MonthlyAllSchedule.as_view(), name='MonthlyAllSchedule'),
    path('api/WorkScheduleHours/', views.schedule_hours, name='WorkScheduleHours'),
    path('api/SkillsItemTypeList/', views.SkillsItemTypeList, name='SkillsItemTypeList'),
    path('api/SkillsView/', views.SkillsView.as_view(), name='SkillsView'),
    path('api/PhotoDiagnosis/', views.PhotoDiagnosis.as_view(), name='PhotoDiagnosis'),
    path('api/DiagnosisCompare/', views.DiagnosisCompareView.as_view(), name='DiagnosisCompare'),
    path('api/EmployeeSkills/', views.EmployeeSkillView.as_view(), name='EmployeeSkillView'),
    # --- Settings and policies ---
    path('api/CustomerFormSettings/', views.CustomerFormSettingsView.as_view(), name='CustomerFormSettingsView'),
    path('api/CustomerFormSettings/details', views.CustomerFormSettings, name='CustomerFormSettingsDetails'),
    # path('api/RewardPolicy/', views.RewardPolicyView.as_view(), name='RewardPolicyView'),
    # path('api/RedeemPolicy/', views.RedeemPolicyView.as_view(), name='RedeemPolicyView'),
    path('api/EmployeeSecuritySettings/', views.EmployeeSecuritySettings.as_view(), name='EmployeeSecuritySettings'),
    path('api/IndividualEmpSettings/<int:emp_no>', views.IndividualEmpSettings.as_view(), name='IndividualEmpSettings'),
    path('api/MultiLanguage/', views.MultiLanguage, name='MultiLanguage'),
    path('api/EmployeeLevels/', views.EmployeeLevels, name='EmployeeLevels'),
    # --- Sales reporting ---
    path('api/DailySales/', views.DailySalesView.as_view(), name='DailySales'),
    path('api/DailySalesSummery/', views.DailySalesSummeryView.as_view(), name='DailySalesSummeryView'),
    path('api/MonthlySalesSummery/', views.MonthlySalesSummeryView.as_view(), name='MonthlySalesSummeryView'),
]+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) |
'''
Given a non-empty integer array, find the minimum number of moves required to make all array elements equal, where a move is incrementing a selected element by 1 or decrementing a selected element by 1.
You may assume the array's length is at most 10,000.
找到中间值即可,问题的转换很重要
Runtime: 52 ms
Your runtime beats 54.45 % of python submissions.
'''
class Solution(object):
    def minMoves2(self, nums):
        """
        :type nums: List[int]
        :rtype: int

        The sum of absolute deviations is minimised at the median, so the
        answer is the total distance of all elements to the median element.
        """
        # Sort in place (same side effect as the original implementation),
        # then pick the upper-median for even lengths -- either median
        # yields the same total.
        nums.sort()
        median = nums[len(nums) // 2]
        return sum(abs(value - median) for value in nums)
|
import Data_importer
import model
import random
import feature_engineering2 as feature_engineering
from sklearn.model_selection import train_test_split
def main():
    """Train separate booking and click classifiers on balanced samples.

    Builds two training sets -- all booking rows plus an equal number of
    random non-booking rows, and all click rows plus an equal number of
    random non-click rows -- then trains one model on each via model.model().
    """
    import pandas as pd  # local import: only needed for concat below

    train = Data_importer.load_train_set()
    train = feature_engineering.main(train)
    train_set = train.copy()

    book_trainset = train_set[train_set['booking_bool'] == 1]  # extract all bookings from training set
    book_rows = book_trainset.index.tolist()
    len_book = len(book_trainset.index)

    click_trainset = train_set[train_set['click_bool'] == 1]  # extract all clicks from training set
    click_rows = click_trainset.index.tolist()
    len_click = len(click_trainset.index)

    # Create two training sets of 50% booking/click rows and 50% random
    # other rows.  DataFrame.append was deprecated in pandas 1.4 and
    # removed in 2.0; pd.concat is the documented drop-in replacement.
    book_trainset = pd.concat([
        book_trainset,
        train_set.iloc[random.sample(list(train_set.drop(book_rows).index), len_book)],
    ])
    click_trainset = pd.concat([
        click_trainset,
        train_set.iloc[random.sample(list(train_set.drop(click_rows).index), len_click)],
    ])

    # Train the booking model (i == 0), then the click model (i == 1).
    for i in range(0, 2):
        if i == 0:
            model_name = "Booking"
            training_feature = "booking_bool"
            train_sample = book_trainset
            isBook = True
        else:
            model_name = "Click"
            training_feature = "click_bool"
            train_sample = click_trainset
            isBook = False
        print("Training the " + model_name + " Classifier...")
        feature_names = feature_engineering.get_features(train_sample, isBook)
        x = train_sample[feature_names].values
        y = train_sample[training_feature].values
        x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.33, random_state=42)
        model.model(x_train, x_test, y_train, y_test, isBook)
        # classifier.fit(x_train, y_train)
if __name__=="__main__":
main() |
import numpy as np
import pandas as pd
import os
from matplotlib import pyplot as plt
from matplotlib.ticker import ScalarFormatter
from experiments.util import collect_exp_results
# Plot styling: slightly smaller legend font.
params = {'legend.fontsize': 9.5,}
plt.rcParams.update(params)
# Directory of this script (not referenced again below).
DIR = os.path.dirname(os.path.abspath(__file__))
lines = []  # collected Line2D handles for the shared legend
fig, axes = plt.subplots(1, 2, figsize=(9, 3))
ALGO = 'pacoh-map'
# Overlay both algorithms on the same pair of axes:
# solid lines = pacoh-map, dashed lines = mll (see `linestyle` below).
for ALGO in ['pacoh-map', 'mll']:
    """ ------- sinusoid ------- """
    results_df = collect_exp_results('meta-overfitting-v2-%s-sin'%ALGO)
    # Restrict to runs with a fixed context size and >= 4 meta-train tasks.
    n_context_samples = 5
    results_df = results_df[results_df['n_context_samples'] == n_context_samples]
    results_df = results_df[results_df['n_train_tasks'] >= 4]
    n_train_tasks_list = sorted(list(set(results_df['n_train_tasks'])))
    if 'map' in ALGO:
        # pacoh-map: per task count, keep the weight-decay setting with the
        # lowest mean meta-test RMSE (oracle hyper-parameter selection).
        best_row_per_n_tasks = []
        n_tasks_list = sorted(list(set(results_df['n_train_tasks'])))
        for n_tasks in n_tasks_list:
            df_aggregated = results_df.groupby(['n_train_tasks', 'weight_decay']).aggregate(
                {'test_rmse_meta_train': [np.mean, np.std],
                 'test_rmse_meta_test': [np.mean, np.std],
                 }
            )
            df_aggregated_sub = df_aggregated.loc[n_tasks]
            # NOTE(review): Series.idxmin has no meaningful axis=1; newer
            # pandas may reject this argument -- confirm pinned version.
            best_result_row = df_aggregated_sub.loc[df_aggregated_sub['test_rmse_meta_test']['mean'].idxmin(axis=1)]
            best_row_per_n_tasks.append(best_result_row)
        df_aggregated =pd.concat(best_row_per_n_tasks, axis=1, keys=n_tasks_list).T
    else:
        # mll has no weight decay to tune: aggregate over task counts directly.
        df_aggregated = results_df.groupby(['n_train_tasks']).aggregate(
            {'test_rmse_meta_train': [np.mean, np.std],
             'test_rmse_meta_test': [np.mean, np.std],
             }
        )
    print(""" ----- Sinusoid %s (n_context_samples=%i) ------"""%(ALGO, n_context_samples))
    print(df_aggregated.to_string(), '\n')
    metrics = ['test_rmse_meta_train', 'test_rmse_meta_test']
    for metric in metrics:
        x = df_aggregated.index
        y_mean = df_aggregated[(metric, 'mean')]
        y_std = df_aggregated[(metric, 'std')]
        linestyle = '--' if ALGO=='mll' else '-'
        lines.append(axes[0].plot(y_mean, label=str(metric), linestyle=linestyle)[0])
        # 95% confidence band; the 1.96/sqrt(25) factor assumes 25 seeds.
        axes[0].fill_between(x, y_mean - y_std * (1.96/np.sqrt(25)), y_mean + y_std*(1.96/np.sqrt(25)), alpha=0.2)
    axes[0].set_title('Sinusoid')
    axes[0].set_ylabel('test RMSE')
    axes[0].set_xscale('log')
    #axes[0].set_yscale('log')
    axes[0].set_xlabel('number of tasks')
    """ ------- cauchy ------- """
    # Same procedure for the Cauchy environment, drawn on the right axes.
    results_df = collect_exp_results('meta-overfitting-v2-%s-cauchy'%ALGO)
    n_context_samples = 20
    results_df = results_df[results_df['n_context_samples'] == n_context_samples]
    results_df = results_df[results_df['n_train_tasks'] >= 4]
    if 'map' in ALGO:
        best_row_per_n_tasks = []
        n_tasks_list = sorted(list(set(results_df['n_train_tasks'])))
        for n_tasks in n_tasks_list:
            df_aggregated = results_df.groupby(['n_train_tasks', 'weight_decay']).aggregate(
                {'test_rmse_meta_train': [np.mean, np.std],
                 'test_rmse_meta_test': [np.mean, np.std],
                 }
            )
            df_aggregated_sub = df_aggregated.loc[n_tasks]
            best_result_row = df_aggregated_sub.loc[df_aggregated_sub['test_rmse_meta_test']['mean'].idxmin(axis=1)]
            best_row_per_n_tasks.append(best_result_row)
        df_aggregated =pd.concat(best_row_per_n_tasks, axis=1, keys=n_tasks_list).T
    else:
        df_aggregated = results_df.groupby(['n_train_tasks']).aggregate(
            {'test_rmse_meta_train': [np.mean, np.std],
             'test_rmse_meta_test': [np.mean, np.std],
             }
        )
    print(""" ----- Cauchy %s (n_context_samples=%i) ------"""%(ALGO, n_context_samples))
    print(df_aggregated.to_string(), '\n')
    metrics = ['test_rmse_meta_train', 'test_rmse_meta_test']
    for metric in metrics:
        x = df_aggregated.index
        y_mean = df_aggregated[(metric, 'mean')]
        y_std = df_aggregated[(metric, 'std')]
        linestyle = '--' if ALGO=='mll' else '-'
        lines.append(axes[1].plot(y_mean, label=str(metric), linestyle=linestyle)[0])
        axes[1].fill_between(x, y_mean - y_std * (1.96/np.sqrt(25)), y_mean + y_std * (1.96/np.sqrt(25)), alpha=0.2)
    axes[1].set_title('Cauchy')
    axes[1].set_ylabel('test RMSE')
    axes[1].set_xscale('log')
    #axes[1].set_yscale('log')
    axes[1].set_xlabel('number of tasks')
# axes[0].set_ylim((0.28, 0.9))
# axes[1].set_ylim((0.0, 0.4))
# Force plain (non-scientific) tick labels on the log-scaled axes.
for i in [0, 1]:
    axes[i].set_xticks(ticks=[5, 10, 20, 50, 100, 200, 500])
    for axis in [axes[i].xaxis, axes[i].yaxis]:
        axis.set_major_formatter(ScalarFormatter())
#axes[0].set_yticks(ticks=[0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
# axes[1].legend(axes[0].lines, ('pacoh-map (meta-train tasks)','pacoh-map (meta-test tasks)',
#                                'mll (meta-train tasks)','mll (meta-test tasks)') )
fig.suptitle('')
# Legend order relies on the plotting order inside the ALGO loop above.
lgd = axes[0].legend(axes[0].lines, ('PACOH-MAP (meta-train tasks)','PACOH-MAP (meta-test tasks)',
                                     'MLL (meta-train tasks)','MLL (meta-test tasks)'))
#fig.tight_layout(rect=[0.5, 0.2, 0.5, 1])
fig.show()
fig.savefig('meta_overfitting_v2_map_vs_mll.pdf', bbox_extra_artists=(lgd,), bbox_inches='tight')
|
import os
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
from scipy.spatial import Delaunay
from sklearn import linear_model, datasets
from torchvision import models
import torchvision.transforms as T
from PIL import Image
import torch
from sklearn.cluster import DBSCAN
from sklearn.cluster import KMeans
import copy
def segment(net, path):
    """Run segmentation network `net` on the image at `path`.

    Returns the composited output produced by decode_segmap for the
    arg-max class map of the network's 'out' head.
    """
    image = Image.open(path)
    # ImageNet normalisation, as expected by torchvision segmentation models.
    preprocess = T.Compose([
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406],
                    std=[0.229, 0.224, 0.225]),
    ])
    batch = preprocess(image).unsqueeze(0)  # add the batch dimension
    logits = net(batch)['out']
    class_map = torch.argmax(logits.squeeze(), dim=0).detach().cpu().numpy()
    return decode_segmap(class_map, path)
def decode_segmap(image, source, nc=21):
    """Colorise a class-index map and composite the photo over white.

    `image` is a 2-D array of Pascal-VOC class ids, `source` the path of
    the original photo.  The colorised map doubles as an alpha matte:
    non-background pixels keep the photo, background becomes white.
    Returns the blended image scaled to [0, 1].
    """
    # Pascal-VOC palette: one RGB triple per class id.
    label_colors = np.array([(0, 0, 0),  # 0=background
                             # 1=aeroplane, 2=bicycle, 3=bird, 4=boat, 5=bottle
                             (128, 0, 0), (0, 128, 0), (128, 128, 0), (0, 0, 128), (128, 0, 128),
                             # 6=bus, 7=car, 8=cat, 9=chair, 10=cow
                             (0, 128, 128), (128, 128, 128), (64, 0, 0), (192, 0, 0), (64, 128, 0),
                             # 11=dining table, 12=dog, 13=horse, 14=motorbike, 15=person
                             (192, 128, 0), (64, 0, 128), (192, 0, 128), (64, 128, 128), (192, 128, 128),
                             # 16=potted plant, 17=sheep, 18=sofa, 19=train, 20=tv/monitor
                             (0, 64, 0), (128, 64, 0), (0, 192, 0), (128, 192, 0), (0, 64, 128)])

    # Paint the three channels class by class.
    channels = [np.zeros_like(image).astype(np.uint8) for _ in range(3)]
    for cls in range(0, nc):
        mask = image == cls
        for c in range(3):
            channels[c][mask] = label_colors[cls, c]
    rgb = np.stack(channels, axis=2)

    plt.imshow(rgb)
    plt.axis('off')
    plt.show()

    # Load the foreground input image, convert BGR -> RGB and resize it
    # to match the shape of the colorised output map.
    foreground = cv.imread(source)
    foreground = cv.cvtColor(foreground, cv.COLOR_BGR2RGB)
    foreground = cv.resize(foreground, (rgb.shape[1], rgb.shape[0]))

    # White background of the same shape as the RGB output map.
    background = 255 * np.ones_like(rgb).astype(np.uint8)

    foreground = foreground.astype(float)
    background = background.astype(float)

    # Binary matte: any non-zero (non-background) map pixel becomes 255.
    th, alpha = cv.threshold(np.array(rgb), 0, 255, cv.THRESH_BINARY)
    plt.imshow(background)
    plt.axis('off')
    plt.show()

    # Soften the matte edges, then normalise it to [0, 1].
    alpha = cv.GaussianBlur(alpha, (7, 7), 0)
    alpha = alpha.astype(float) / 255

    # Standard alpha compositing: a*F + (1-a)*B.
    foreground = cv.multiply(alpha, foreground)
    background = cv.multiply(1.0 - alpha, background)
    outImage = cv.add(foreground, background)
    return outImage / 255
def PreProcessing1(img):
    """Histogram-equalise the luminance (Y) channel of a BGR image."""
    as_yuv = cv.cvtColor(img, cv.COLOR_BGR2YUV)
    as_yuv[:, :, 0] = cv.equalizeHist(as_yuv[:, :, 0])
    return cv.cvtColor(as_yuv, cv.COLOR_YUV2BGR)
def PreProcessing2(img):
    """Equalise the Y channel of a BGR image, then denoise the result."""
    as_yuv = cv.cvtColor(img, cv.COLOR_BGR2YUV)
    as_yuv[:, :, 0] = cv.equalizeHist(as_yuv[:, :, 0])
    balanced = cv.cvtColor(as_yuv, cv.COLOR_YUV2BGR)
    return cv.fastNlMeansDenoisingColored(balanced, None, 10, 10, 7, 21)
def PreProcessing3(img):
    """Denoise first, then equalise the Y channel (reverse of PreProcessing2)."""
    smoothed = cv.fastNlMeansDenoisingColored(img, None, 10, 10, 7, 21)
    as_yuv = cv.cvtColor(smoothed, cv.COLOR_BGR2YUV)
    as_yuv[:, :, 0] = cv.equalizeHist(as_yuv[:, :, 0])
    return cv.cvtColor(as_yuv, cv.COLOR_YUV2BGR)
def isValid(filepath):
    """Return True iff `filepath` names an existing regular file.

    os.path.isfile already returns False for paths that do not exist,
    so the original's separate os.path.exists check was redundant.
    """
    return os.path.isfile(filepath)
"""
# Check if a point is inside a rectangle
def rect_contains(rect, point) :
if point[0] < rect[0] :
return False
elif point[1] < rect[1] :
return False
elif point[0] > rect[2] :
return False
elif point[1] > rect[3] :
return False
return True
# Draw a point
def draw_point(img, p, color ) :
cv.circle( img, p, 2, color, cv.cv.CV_FILLED, cv.CV_AA, 0 )
# Draw delaunay triangles
def draw_delaunay(img, subdiv, delaunay_color ) :
triangleList = subdiv.getTriangleList();
size = img.shape
r = (0, 0, size[1], size[0])
for t in triangleList :
pt1 = (t[0], t[1])
pt2 = (t[2], t[3])
pt3 = (t[4], t[5])
if rect_contains(r, pt1) and rect_contains(r, pt2) and rect_contains(r, pt3) :
cv.line(img, pt1, pt2, delaunay_color, 1, cv.CV_AA, 0)
cv.line(img, pt2, pt3, delaunay_color, 1, cv.CV_AA, 0)
cv.line(img, pt3, pt1, delaunay_color, 1, cv.CV_AA, 0)
def otherDelaunay(img):
w = img.shape[1]
h = img.shape[0]
rectangle = (0,0,w,h)
subdivision = cv.Subdiv2D(rectangle)
triangles = subdivision.getTriangleList()
"""
def DelaunayTriangulation_old(img, keyPoints, imgEcrite):
    """Legacy copy-move detection pass.

    Triangulates `keyPoints`, keeps one centroid + size proxy + mean
    vertex colour per triangle, pairs centroids whose size and colour are
    close, then fits RANSAC on the match lists and draws the inlier
    chains on `img` (modified in place).  `imgEcrite` is only used by the
    dead drawing code below.  Blocks on cv.waitKey, so it needs a GUI.
    """
    tri = Delaunay(keyPoints)
    vm = []          # triangle centroids [x, y]
    vmRGB = []       # mean RGB over each kept triangle's three vertices
    vmDistance = []  # |centroid - first vertex| per axis, used as a size proxy
    matchX = []      # x coords of matched centroid pairs (RANSAC input)
    matchY = []      # y coords of matched centroid pairs
    threshold = 4    # max per-channel colour difference for a match
    thresholdD = 4   # max size-proxy difference for a match
    tailleMin = 0    # minimum triangle size ("taille" = size); 0 keeps everything
    '''
    for j in keyPoints[tri.simplices]:
        cpt = 0
        pt1 = (j[cpt][0], j[cpt][1])
        pt2 = (j[cpt+1][0], j[cpt+1][1])
        pt3 = (j[cpt+2][0], j[cpt+2][1])
        v1 = [j[cpt][0], j[cpt][1]]
        v2 = [j[cpt+1][0], j[cpt+1][1]]
        v3 = [j[cpt+2][0], j[cpt+2][1]]
        triangle = np.array([v1, v2, v3])
        cv.polylines(imgEcrite, [triangle], isClosed=True,
                     color=(0, 0, 255), thickness=1)
        cv.circle(imgEcrite, pt1, 2, (0, 0, 255), 1)
        cv.circle(imgEcrite, pt2, 2, (0, 0, 255), 1)
        cv.circle(imgEcrite, pt3, 2, (0, 0, 255), 1)
    '''
    # Show the triangles obtained.
    # cv.imshow("Triangles",imgEcrite)
    if cv.waitKey(0) & 0xff == 27:
        cv.destroyAllWindows()
    # For each triangle: centroid, size proxy and mean vertex colour.
    for i in keyPoints[tri.simplices]:
        cpt = 0
        v1 = [i[cpt][0], i[cpt][1]]
        v2 = [i[cpt+1][0], i[cpt+1][1]]
        v3 = [i[cpt+2][0], i[cpt+2][1]]
        x = round(float(v1[0])/3.0+float(v2[0])/3.0+float(v3[0])/3.0)
        y = round(float(v1[1])/3.0+float(v2[1])/3.0+float(v3[1])/3.0)
        xM = abs(x - v1[0])
        yM = abs(y - v1[1])
        if(xM >= tailleMin) and (yM >= tailleMin):
            vm.append([x, y])
            vmDistance.append([xM, yM])
            # img is indexed [row, col], hence [y, x] below.
            r = round(
                (img[i[cpt][1], i[cpt][0]][0])/3.0 +
                (img[i[cpt+1][1], i[cpt+1][0]][0])/3.0 +
                (img[i[cpt+2][1], i[cpt+2][0]][0])/3.0)
            g = round(
                (img[i[cpt][1], i[cpt][0]][1])/3.0 +
                (img[i[cpt+1][1], i[cpt+1][0]][1])/3.0 +
                (img[i[cpt+2][1], i[cpt+2][0]][1])/3.0)
            b = round((
                (img[i[cpt][1], i[cpt][0]][2])/3.0 +
                (img[i[cpt+1][1], i[cpt+1][0]][2])/3.0 +
                (img[i[cpt+2][1], i[cpt+2][0]][2])/3.0))
            vmRGB.append([r, g, b])
        cpt += 1
    # Pair centroids whose size proxy and image colour are both close.
    for i in range(0, len(vm)-1):
        for j in range(i+1, len(vm)-1):
            # vm holds [x, y]; swap into [row, col] order for img indexing.
            x = int(vm[i][1])
            y = int(vm[i][0])
            x2 = int(vm[j][1])
            y2 = int(vm[j][0])
            if(
                abs(vmDistance[i][0] - vmDistance[j][0]) <= thresholdD
                and (abs(vmDistance[i][1] - vmDistance[j][1]) <= thresholdD)
                and (abs((int(img[x, y][0]) - int(img[x2, y2][0]))) <= threshold)
                and (abs((int(img[x, y][1]) - int(img[x2, y2][1]))) <= threshold)
                and (abs((int(img[x, y][2]) - int(img[x2, y2][2]))) <= threshold)
            ):
                matchX.append([vm[i][0]])
                matchX.append([vm[j][0]])
                matchY.append([vm[i][1]])
                matchY.append([vm[j][1]])
    print("HERE")
    lw = 2
    ransac = linear_model.RANSACRegressor()
    tempArray = np.array(matchX)
    lengthArray = tempArray.size
    nbTest = 1
    dividende = lengthArray/(nbTest+1)  # slice size for the RANSAC passes
    plt.gca().invert_yaxis()
    # Fit RANSAC per slice of the match list and draw the inlier chains.
    for i in range(1, int(lengthArray/dividende)):
        firstSlice = int((i-1)*dividende)
        lastSlice = int(i*dividende)
        Xarray = np.array(matchX[firstSlice:lastSlice])
        Yarray = np.array(matchY[firstSlice:lastSlice])
        ransac.fit(Xarray, Yarray)
        inlier_mask = ransac.inlier_mask_
        outlier_mask = np.logical_not(inlier_mask)
        '''
        while(j > 100):
            Xarray = Xarray[outlier_mask]
            Yarray = Yarray[outlier_mask]
            ransac.fit(Xarray,Yarray)
            inlier_mask = ransac.inlier_mask_
            outlier_mask = np.logical_not(inlier_mask)
            j = Xarray[outlier_mask].size
        for j in range(0, Xarray[outlier_mask].size - 1):
            pt = (Xarray[outlier_mask][j][0], Yarray[outlier_mask][j][0])
            pt2 = (Xarray[outlier_mask][j+1][0], Yarray[outlier_mask][j+1][0])
            cv.circle(img, pt, 2, (255, 255, 0), -1)
            cv.circle(img, pt2, 2, (255, 255, 0), -1)
            cv.line(img, pt, pt2, (255, 255, 0), 1)
            j += 1
        '''
        # Connect consecutive inliers on the image.
        for j in range(0, Xarray[inlier_mask].size - 1):
            pt = (Xarray[inlier_mask][j][0], Yarray[inlier_mask][j][0])
            pt2 = (Xarray[inlier_mask][j+1][0], Yarray[inlier_mask][j+1][0])
            cv.circle(img, pt, 2, (255, 255, 0), -1)
            cv.circle(img, pt2, 2, (255, 255, 0), -1)
            cv.line(img, pt, pt2, (255, 0, 255), 1)
            j += 1
def DelaunayTriangulation(img, keyPoints, imgEcrite, imgMask):
    """Copy-move detection guided by a foreground mask.

    Like DelaunayTriangulation_old, but triangle centroids are only kept
    where `imgMask` (the AI segmentation output) is non-black; when the
    mask is entirely black the mask filter is bypassed.  Draws RANSAC
    inlier chains on `img` in place.  Returns -1 when no matches could
    be built, otherwise None.  `imgEcrite` is currently unused here.
    """
    #cv.imshow("DbScan", img)
    tri = Delaunay(keyPoints)
    vm = []          # kept triangle centroids [x, y]
    vmRGB = []       # mean vertex colour per kept triangle
    vmDistance = []  # centroid-to-first-vertex offsets (size proxy)
    matchX = []
    matchY = []
    threshold = 0    # strict equality matching (cf. 4 in the _old variant)
    thresholdD = 0
    tailleMin = 0
    # An all-black mask means the segmentation network found no foreground.
    if (cv.countNonZero(cv.cvtColor(imgMask, cv.COLOR_BGR2GRAY)) == 0):
        isImgBlack = True
        # (message: "the AI detection did not work, falling back to method 2")
        print("La détection de l'IA n'a pas fonctionnée, méthode 2 utilisée")
    else:
        isImgBlack = False
    # For each triangle: centroid, size proxy, mean colour -- kept only if
    # the centroid falls on a non-black mask pixel (or the mask is black).
    for i in keyPoints[tri.simplices]:
        cpt = 0
        v1 = [i[cpt][0], i[cpt][1]]
        v2 = [i[cpt+1][0], i[cpt+1][1]]
        v3 = [i[cpt+2][0], i[cpt+2][1]]
        x = round(float(v1[0])/3.0+float(v2[0])/3.0+float(v3[0])/3.0)
        y = round(float(v1[1])/3.0+float(v2[1])/3.0+float(v3[1])/3.0)
        xM = abs(x - v1[0])
        yM = abs(y - v1[1])
        if(xM >= tailleMin) and (yM >= tailleMin) and ( isImgBlack or (int(imgMask[y, x][0]) > 0) or (int(imgMask[y, x][1]) > 0) or (int(imgMask[y, x][2]) > 0)) :
            vm.append([x, y])
            vmDistance.append([xM, yM])
            # img is indexed [row, col], hence [y, x] below.
            r = round(
                (img[i[cpt][1], i[cpt][0]][0])/3.0 +
                (img[i[cpt+1][1], i[cpt+1][0]][0])/3.0 +
                (img[i[cpt+2][1], i[cpt+2][0]][0])/3.0)
            g = round(
                (img[i[cpt][1], i[cpt][0]][1])/3.0 +
                (img[i[cpt+1][1], i[cpt+1][0]][1])/3.0 +
                (img[i[cpt+2][1], i[cpt+2][0]][1])/3.0)
            b = round((
                (img[i[cpt][1], i[cpt][0]][2])/3.0 +
                (img[i[cpt+1][1], i[cpt+1][0]][2])/3.0 +
                (img[i[cpt+2][1], i[cpt+2][0]][2])/3.0))
            vmRGB.append([r, g, b])
        cpt += 1
    #
    # Pair centroids whose size proxy and image colour are both close.
    for i in range(0, len(vm)-1):
        for j in range(i+1, len(vm)-1):
            x = int(vm[i][1])
            y = int(vm[i][0])
            x2 = int(vm[j][1])
            y2 = int(vm[j][0])
            if(
                abs(vmDistance[i][0] - vmDistance[j][0]) <= thresholdD
                and (abs(vmDistance[i][1] - vmDistance[j][1]) <= thresholdD)
                and (abs((int(img[x, y][0]) - int(img[x2, y2][0]))) <= threshold)
                and (abs((int(img[x, y][1]) - int(img[x2, y2][1]))) <= threshold)
                and (abs((int(img[x, y][2]) - int(img[x2, y2][2]))) <= threshold)
            ):
                matchX.append([vm[i][0]])
                matchX.append([vm[j][0]])
                matchY.append([vm[i][1]])
                matchY.append([vm[j][1]])
    lw = 2
    ransac = linear_model.RANSACRegressor()
    tempArray = np.array(matchX)
    lengthArray = tempArray.size
    nbTest = 1
    dividende = lengthArray/(nbTest+1)  # slice size for the RANSAC passes
    plt.gca().invert_yaxis()
    if (int(dividende) == 0):
        # (message: "error, bad foreground detection")
        print("Erreur, mauvaise détection de foreground")
        return -1
    for i in range(1, int(lengthArray/dividende)):
        firstSlice = int((i-1)*dividende)
        lastSlice = int(i*dividende)
        Xarray = np.array(matchX[firstSlice:lastSlice])
        Yarray = np.array(matchY[firstSlice:lastSlice])
        ransac.fit(Xarray, Yarray)
        inlier_mask = ransac.inlier_mask_
        outlier_mask = np.logical_not(inlier_mask)
        '''
        if(isImgBlack):
            while(j > 100):
                Xarray = Xarray[outlier_mask]
                Yarray = Yarray[outlier_mask]
                ransac.fit(Xarray, Yarray)
                inlier_mask = ransac.inlier_mask_
                outlier_mask = np.logical_not(inlier_mask)
                j = Xarray[outlier_mask].size
            for j in range(0, Xarray[outlier_mask].size - 1):
                pt = (Xarray[outlier_mask][j][0], Yarray[outlier_mask][j][0])
                pt2 = (Xarray[outlier_mask][j+1][0], Yarray[outlier_mask][j+1][0])
                cv.circle(img, pt, 2, (255, 255, 0), -1)
                cv.circle(img, pt2, 2, (255, 255, 0), -1)
                cv.line(img, pt, pt2, (255, 255, 0), 1)
                j += 1
        else:
        '''
        # Connect consecutive inliers on the image.
        for j in range(0, Xarray[inlier_mask].size - 1):
            pt = (Xarray[inlier_mask][j][0], Yarray[inlier_mask][j][0])
            pt2 = (Xarray[inlier_mask][j+1][0], Yarray[inlier_mask][j+1][0])
            cv.circle(img, pt, 2, (255, 255, 0), -1)
            cv.circle(img, pt2, 2, (255, 255, 0), -1)
            cv.line(img, pt, pt2, (255, 0, 255), 1)
            j += 1
def siftDetector(image):
    """Detect SIFT keypoints and descriptors on the grayscale of `image`.

    Returns the (keypoints, descriptors) pair from detectAndCompute.
    """
    grayscale = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
    detector = cv.SIFT_create()
    return detector.detectAndCompute(grayscale, None)
def locateForgery(image,key_points,descriptors,radius=40,min_sample=2):
    """Cluster SIFT descriptors with DBSCAN to locate duplicated regions.

    Returns None when only noise is found; the annotated copy `forgery`
    when fewer than 5 points are available for triangulation; otherwise
    `image`, annotated in place by DelaunayTriangulation_old.
    """
    forgery = copy.deepcopy(image)
    clusters = DBSCAN(eps=radius, min_samples=min_sample).fit(descriptors)
    labels = clusters.labels_  # one label per keypoint
    size = np.unique(labels).shape[0]-1  # number of clusters, noise label excluded
    if (size==0) and (np.unique(labels)[0]==-1):  # nothing but noise
        print('No Forgery Found!!')
        return None
    if size==0:  # there is exactly one cluster (and no noise label)
        size=1
    cluster_list= [[] for i in range(size)]  # one bucket per cluster
    for idx in range(len(key_points)):  # walk every keypoint
        if labels[idx]!=-1:  # skip noise points
            # append the point's spatial coordinates to its cluster bucket
            cluster_list[labels[idx]].append((int(key_points[idx].pt[0]),int(key_points[idx].pt[1])))
    """
    for points in cluster_list:
        if len(points)>1:
            for idx1 in range(len(points)):
                cv.circle(forgery,points[idx1], 5, (0,255,255),-1)
    """
    # Flatten all multi-point clusters into one point list for Delaunay.
    points_for_delaunay = list()
    for points in cluster_list:
        if len(points)>1:
            for idx1 in range(len(points)):
                points_for_delaunay.append(points[idx1])
    imgEcrite = copy.deepcopy(image)  # a plain copy would risk imgEcrite and image
    # still sharing the same pixel values
    if(len(points_for_delaunay) > 4):
        DelaunayTriangulation_old(image, np.array(points_for_delaunay), imgEcrite)
        #cv.imshow("Delaunay Triangulation", image)
        if cv.waitKey(0) & 0xff == 27:
            cv.destroyAllWindows()
    for points in cluster_list:
        if len(points)>1:  # if the cluster holds more than one point
            for idx1 in range(1,len(points)):  # walk the cluster's points
                # draw a line from the first point to every other point
                cv.line(forgery,points[0],points[idx1],(255,0,0),4)
    """
    clusters_centroids = [(0,0) for i in range(size)]
    accu = 0
    for points in cluster_list:
        if len(points)>1: # s'il y a plus d'un point dans le cluster
            cx = points[0][0]
            cy = points[0][1]
            for idx1 in range(1,len(points)): # on parcourt les points du cluster
                # on trace une ligne entre le premier point et tous les autres
                cv.line(forgery,points[0],points[idx1],(255,0,0),4)
                cx = cx + points[idx1][0]
                cy = cy + points[idx1][1]
            clusters_centroids[accu] = (int(cx / len(points)),int(cy / len(points)))
            accu += 1
    centroids_clusters = DBSCAN(eps=radius, min_samples=min_sample).fit(clusters_centroids)
    centroids_labels = centroids_clusters.labels_
    print(centroids_labels)
    centroids_size = np.unique(centroids_labels).shape[0]-1
    for i in range(centroids_size):
        if(centroids_labels[i] != -1):
            cv.circle(forgery,clusters_centroids[i], 5, (0,255,255),-1)
    """
    if(len(points_for_delaunay) < 5):
        return forgery
    else:
        return image
'''
def main(filepath):
#fcn = models.segmentation.fcn_resnet101(pretrained=True).eval()
dlab = models.segmentation.deeplabv3_resnet101(pretrained=1).eval()
img = cv.imread(filepath, cv.IMREAD_UNCHANGED)
gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
imgEcrite = copy.deepcopy(img)
imgAndKeyFeatures = copy.deepcopy(img)
imgMask = segment(dlab,filepath)
#img = PreProcessing3(img)
sift = cv.SIFT_create()
keyPoints = sift.detect(gray)
descriptors = sift.compute(gray, keyPoints)
cv.drawKeypoints(img, keyPoints, imgAndKeyFeatures,
flags=cv.DRAW_MATCHES_FLAGS_DEFAULT)
#cv.imshow("Key Points", imgAndKeyFeatures)
if cv.waitKey(0) & 0xff == 27:
cv.destroyAllWindows()
points = list()
for keypoint in keyPoints:
x = int(keypoint.pt[0])
y = int(keypoint.pt[1])
points.append((x, y))
points = np.array(points)
DelaunayTriangulation(img, points, imgEcrite, imgMask)
cv.imshow("Delaunay Triangulation after Ransac", img)
if cv.waitKey(0) & 0xff == 27:
cv.destroyAllWindows()
'''
def mainDBSCAN(filepath):
    """Run the DBSCAN-based copy-move detection pipeline on one image."""
    source_img = cv.imread(filepath, cv.IMREAD_UNCHANGED)
    annotated = copy.deepcopy(source_img)  # unused, kept from the original flow
    kps, descs = siftDetector(source_img)
    result = locateForgery(source_img, kps, descs, 40, 2)
    cv.imshow("Forgery", result)
    # Close the window on ESC.
    if (cv.waitKey(0) & 0xff) == 27:
        cv.destroyAllWindows()
# Entry point: run the detector on a sample image if the path is valid.
filepath = "../CVIP/Dataset 0/im28_t.bmp"
if not isValid(filepath):
    print("file not valid")
else:
    #main(filepath)
    mainDBSCAN(filepath)
|
from wiki.general.classes import Car
"""
Objectives:
PCPP-32-101 1.1 – Understand and explain the basic terms and
programming concepts used in the OOP paradigm
- essential terminology: class, instance, object, attribute, method, type,
instance and class variables, superclasses and subclasses
- reflection: isinstance(), issubclass()
- the __init__() method
- creating classes, methods, and class and instance variables; calling
methods; accessing class and instance variables
PCPP-32-101 1.3 Understand and use the concepts of inheritance,
polymorphism, and composition
- duck typing
- inheritance vs. composition
- modelling real-life problems using the "is a" and "has a" relations
"""
class Lamborghini(Car):
    """A Car subclass demonstrating method overriding and private state."""

    def __init__(self):
        super(Lamborghini, self).__init__()
        # Name-mangled flag tracking whether nitrous injection is engaged.
        self.__nitrous_on = False

    # Method overriding - Having a new definition for the method in the super class.
    def get_manufacturer(self) -> str:
        return "Lamborghini"

    def set_nitrous_state(self, nitrous_on: bool) -> None:
        self.__nitrous_on = nitrous_on

    def get_nitrous_state(self) -> bool:
        return self.__nitrous_on

    def is_speed_possible(self, speed: float) -> bool:
        # TODO: Add check to see if it nitrous, which in case the max speed will go up by 100 kmph.
        # Try to reuse Car.is_speed_possible()
        return True
# Multilevel inheritance: Aventador -> Lamborghini -> Car.
class Aventador(Lamborghini):
    """Concrete Lamborghini model that sets its own top speed."""

    def __init__(self):
        # Zero-argument super() is equivalent to super(Aventador, self) here.
        super().__init__()
        self.max_speed_in_kmph = 240
if __name__ == '__main__':
    # One instance at each level of the hierarchy.
    generic_car = Car()
    lamborghini = Lamborghini()
    aventador = Aventador()
    # Compare engine state across the three objects before and after
    # switching only lamborghini's engine on.
    print(lamborghini.get_engine_on())
    print(generic_car.get_engine_on())
    print(aventador.get_engine_on())
    lamborghini.set_engine_on(True)
    print(lamborghini.get_engine_on())
    print(generic_car.get_engine_on())
    print(aventador.get_engine_on())
    # get_manufacturer is overridden in Lamborghini (inherited by Aventador).
    print(lamborghini.get_manufacturer())
    # print(generic_car.get_manufacturer())
    print(aventador.get_manufacturer())
    print(lamborghini.get_max_speed_in_kmph())
    print(generic_car.get_max_speed_in_kmph())
    print(aventador.get_max_speed_in_kmph())
    # Throws an error because the engine isn't ON!
    # aventador.set_speed_in_kmph(150)
    lamborghini.set_speed_in_kmph(100)
    # isinstance() -- instances are also instances of their superclasses.
    print("aventador isinstance Aventador: ", isinstance(aventador, Aventador))
    print("aventador isinstance Lamborghini: ", isinstance(aventador, Lamborghini))
    print("lamborghini isinstance Aventador: ", isinstance(lamborghini, Aventador))
    # issubclass() -- the relation is one-directional.
    print("Aventador issubclass Lamborghini: ", issubclass(Aventador, Lamborghini))
    print("Lamborghini issubclass Aventador: ", issubclass(Lamborghini, Aventador))
    print("Aventador issubclass Car: ", issubclass(Aventador, Car))
    print("Lamborghini issubclass Car: ", issubclass(Lamborghini, Car))
    print("Car issubclass Lamborghini: ", issubclass(Car, Lamborghini))
    print("Car issubclass Aventador: ", issubclass(Car, Aventador))
    # TODO: Try coming up with a multiple inheritance hierarchy. What happens if two of the parent classes contain a
    # method with the same name or a variable with the same name?
|
def fun():
    """Print a fixed greeting to stdout."""
    print("hello yanlp")
|
import pysc2
from pysc2.env import sc2_env
from pysc2.lib import features
from absl import app
import time
import tensorflow as tf
import numpy as np
import threading
import os
import pickle
import parameters_default
import parameters_custom
from reinforcement_learning.networks import AC_Network
from reinforcement_learning.workers import AC_Worker
from reinforcement_learning.tensorflow_functions import build_histo_summary
from exec_functions import clean_sc2_temp_folder, build_path, load_map_config
from context import tmp_maps_path
# --- Training configuration -------------------------------------------------
MAP_TO_TRAIN = "MoveToBeacon" #DefeatBanelings
custom_params = True
restore = True  # resume from a previously saved checkpoint
MAP_TO_RESTORE = "MoveToBeacon"
restored_policy_type = 'a3c' #Choose between ['a3c', 'random']
params, training_path = load_map_config(MAP_TO_TRAIN, custom_params, restore, MAP_TO_RESTORE, restored_policy_type)
# GPU memory fraction to reserve, keyed by the number of parallel workers.
dict_workers_gpu = {1:0.25, 2:0.35, 3:0.45, 4:0.55, 5:0.65, 6:0.75}
session_config = tf.ConfigProto(device_count = {'GPU': 1}, #On CPU => 'GPU': 0||On GPU => 'GPU': 1
                                gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=dict_workers_gpu[params['n_workers']]))
sess = tf.Session(config=session_config)
# Global actor-critic network -- presumably shared/updated by the workers
# created in main() below; confirm against AC_Worker's implementation.
master_network = AC_Network(scope='global', dict_params=params['network_params'])
saver = tf.train.Saver(max_to_keep=100)
if restore:
    # NOTE(review): backslash separators make this Windows-only -- consider os.path.join.
    saver.restore(sess, training_path+"sessions\\model_episode_10.cptk")
def main(unused_argv):
    """Create one AC_Worker per configured worker and run each in a thread.

    Threads are started two seconds apart and joined before returning.
    """
    agents = []
    for process in range(params['n_workers']):
        worker = AC_Worker(id=process, session=sess, map_name=MAP_TO_TRAIN, restore=restore,
                           dict_params=params['worker_params'], dict_network_params=params['network_params'])
        worker.episode = 0
        agents.append(worker)

    threads = []
    for thread_id in range(params['n_workers']):
        print("Starting worker_%s"%thread_id)
        worker_thread = threading.Thread(target=run_thread, args=(agents[thread_id], MAP_TO_TRAIN))
        threads.append(worker_thread)
        worker_thread.daemon = True
        worker_thread.start()
        # Stagger start-up so the SC2 processes do not launch simultaneously.
        time.sleep(2)

    for worker_thread in threads:
        worker_thread.join()
def run_thread(agent, map_name):
    """
    Worker-thread main loop: play SC2 episodes forever for one A3C agent.

    Each iteration resets per-episode state, plays one full episode in a
    fresh SC2Env, writes TensorBoard summaries, and checkpoints every 20
    episodes.  Relies on module globals (params, saver, sess, tf, ...).
    Indentation reconstructed -- original whitespace lost in extraction.
    """
    while True:
        try:
            print("\nStarting episode %s for agent %s ..."%(agent.episode, agent.id))
            # prune stale SC2 temp maps to bound disk usage
            clean_sc2_temp_folder(tmp_maps_path, 8, 90)
            # reset per-episode bookkeeping
            agent.rollouts_manager.empty_dict_rollouts()
            agent.episode_values = []
            agent.episode_cumulated_reward = 0
            agent.episode_step_count = 0
            agent.current_episode_actions = []
            agent.current_episode_rewards = []
            agent.current_episode_values = []
            L_players = [sc2_env.Agent(sc2_env.Race.terran)]
            with sc2_env.SC2Env(map_name=map_name, players=L_players,
                agent_interface_format=features.AgentInterfaceFormat(
                feature_dimensions=features.Dimensions(screen=params['resolution'], minimap=params['resolution']),
                use_feature_units=True), step_mul=params['step_mul'],
                game_steps_per_episode=0, visualize=False, disable_fog=True
                ) as env:
                agent.setup(env.observation_spec(), env.action_spec())
                timesteps = env.reset()
                agent.reset()
                global start_time
                start_time = time.time()
                # step until the environment emits the terminal timestep
                while True:
                    step_actions = [agent.step(timesteps[0])]
                    if timesteps[0].last():
                        break
                    timesteps = env.step(step_actions)
            print("\nEpisode over for agent %s ..."% agent.id)
            #Summary parameters :
            # NOTE(review): ZeroDivisionError if no actions were available this episode
            available_actions_ratio = len(agent.current_episode_unique_actions)/len(agent.current_episode_available_actions)
            summary = tf.Summary()
            summary.value.add(tag='Perf/1_Reward', simple_value=float(agent.episode_cumulated_reward))
            summary.value.add(tag='Perf/2_Distinct actions', simple_value=float(len(agent.current_episode_unique_actions)))
            summary.value.add(tag='Perf/3_Average advantage', simple_value=float(np.mean(agent.advantages)))
            summary.value.add(tag='Perf/4_Previous actions ratio', simple_value=float(agent.previous_actions_ratio))
            summary.value.add(tag='Perf/5_Average value', simple_value=float(agent.average_value))
            summary.value.add(tag='Perf/6_Available actions ratio', simple_value=float(available_actions_ratio))
            summary.value.add(tag='Perf/7_Average agent return', simple_value=float(np.mean(agent.agent_return)))
            summary.value.add(tag='Perf/8_Random policy', simple_value=float(agent.random_policy))
            summary.value.add(tag='Perf/9_Episode length', simple_value=float(agent.current_episode_step_count))
            summary.value.add(tag='Losses/1_Value loss', simple_value=float(agent.value_loss))
            summary.value.add(tag='Losses/2_Policy loss', simple_value=float(agent.global_policy_loss))
            summary.value.add(tag='Losses/3_Entropy loss', simple_value=float(agent.entropy))
            summary.value.add(tag='Losses/4_Network loss', simple_value=float(agent.network_loss))
            #summary.value.add(tag='Losses/5_Grad norm', simple_value=float(agent.grad_norms))
            #summary.value.add(tag='Losses/6_Var norm', simple_value=float(agent.var_norms))
            # one histogram per action-argument policy head
            for label in agent.dict_policy.keys():
                policy = agent.dict_policy[label][0]
                policy_len = len(policy)
                indexed_label = agent.index_label(label)+' | (%s)'%policy_len
                summary.value.add(tag=indexed_label, histo=build_histo_summary(policy, policy_len))
            agent.summary_writer.add_summary(summary, agent.episode)
            agent.summary_writer.flush()
            # checkpoint every 20 episodes
            if agent.episode > 0 and agent.episode % 20 == 0 :
                session_path = training_path+"sessions\\model_episode_%s.cptk"%(str(agent.episode))
                build_path(session_path)
                saver.save(sess, session_path)
                print("\nModel saved")
            agent.episode+=1
        except KeyboardInterrupt:
            break
        except pysc2.lib.remote_controller.RequestError:
            # NOTE(review): `env` may be unbound here if the failure happened
            # before the `with` statement; the with-block also already closed it.
            print("\n\npysc2.lib.remote_controller.RequestError for worker %s\n\n"%agent.name)
            env.close()
            print("\n\nenvironment closed for worker %s\n\n"%agent.name)
            time.sleep(2)
            pass
        except pysc2.lib.remote_controller.ConnectError:
            print()
        except pysc2.lib.protocol.ConnectionError:
            print("\n\npysc2.lib.protocol.ConnectionError for worker %s\n\n"%agent.name)
            #Picked from "https://github.com/inoryy/reaver-pysc2/blob/master/reaver/envs/sc2.py#L57-L69"
            # hacky fix from websocket timeout issue...
            # this results in faulty reward signals, but I guess it beats completely crashing...
            env.close()
if __name__ == "__main__":
    # absl entry point: parses command-line flags, then calls main(argv)
    app.run(main)
|
from flask import Flask, render_template, request, redirect, flash, session
app = Flask(__name__)
# Secret key signs the session cookie (flashed messages live in the session).
# NOTE(review): hard-coded secret -- acceptable for a demo, not for production.
app.secret_key = 'Macbook'
# our index route will handle rendering our form
@app.route('/')
def index():
    """Render the survey form."""
    return render_template("index.html")
# this route will handle our form submission
# notice how we defined which HTTP methods are allowed by this route
@app.route('/result', methods=['POST'])
def result():
    """Validate the submitted survey form.

    On any validation failure a message is flashed and the user is sent
    back to the form; otherwise the result page is rendered.
    """
    name = request.form['name']
    location = request.form['location']
    favLanguage = request.form['favLanguage']
    comment = request.form['comment']

    if len(name) < 2:
        flash('Name needs to be at least 2 characters.')
    if not 1 <= len(comment) <= 120:
        flash('Comment needs to be filled in and have less than 120 characters.')

    # Any flashed message means at least one check failed.
    if '_flashes' in session:
        return redirect('/')
    return render_template('result.html', name=name, location=location, favLanguage = favLanguage, comment=comment)
@app.route('/clear')
def clear():
    """Send the user back to the survey form.

    BUGFIX: the original returned redirect('') -- a redirect to an empty
    Location -- instead of the index page.
    """
    return redirect('/')
app.run(debug=True) # run our dev server (debug=True: auto-reload + interactive traceback; never in production)
|
Your input
4
Output
2
Expected
2
Your input
8
Output
2
Expected
2 |
"""
A simple Monte Carlo solver for Nim
http://en.wikipedia.org/wiki/Nim#The_21_game
"""
import random
try:
import codeskulptor
except ImportError:
import SimpleGUICS2Pygame.codeskulptor as codeskulptor
codeskulptor.set_timeout(20)
MAX_REMOVE = 3
TRIALS = 10000


def evaluate_position(num_items):
    """
    Monte Carlo evaluation method for Nim.

    For each legal first move, simulate TRIALS random games and return the
    move with the highest empirical win rate (ties favour the smaller move).
    """

    def simulate(first_move):
        """Play one random game after first_move; True if the mover wins."""
        total = first_move
        mover_wins = True
        while total < num_items:
            total += random.randrange(1, MAX_REMOVE + 1)
            # alternate between computer and player
            mover_wins = not mover_wins
        return mover_wins

    best_move = 0
    best_percentage = 0.0
    for candidate in range(1, MAX_REMOVE + 1):
        wins = sum(1 for _ in range(TRIALS) if simulate(candidate))
        win_rate = float(wins) / TRIALS
        if win_rate > best_percentage:
            best_percentage = win_rate
            best_move = candidate
    return best_move
def play_game(start_items):
    """
    Play game of Nim against Monte Carlo bot.

    Python 2 module (print statements, blocking console input).  Whoever
    reaches/passes zero items with their move is declared the winner.
    """
    current_items = start_items
    print "Starting game with value", current_items
    while True:
        # computer moves first each round, using the Monte Carlo evaluation
        comp_move = evaluate_position(current_items)
        current_items -= comp_move
        print "Computer choose", comp_move, ", current value is", current_items
        if current_items <= 0:
            print "Computer wins"
            break
        # NOTE(review): player's move is not validated against 1..MAX_REMOVE
        player_move = int(input("Enter your current move"))
        current_items -= player_move
        print "Player choose", player_move, ", current value is", current_items
        if current_items <= 0:
            print "Player wins"
            break

# play the classic 21 game on import
play_game(21)
|
from PyQt5 import QtCore
from PyQt5.QtWidgets import (QWidget, QVBoxLayout, QPushButton, QLabel, QApplication, QLineEdit)
from PyQt5.QtGui import (QFont, QPixmap)
class EntryWindow(QWidget):
    """Password-entry window: a lock image, two password fields with labels,
    a start button, and a (detached) mismatch-warning label."""

    def __init__(self):
        super().__init__()
        self.init_gui()

    def _make_centered_label(self, text, font):
        """Create a horizontally centered QLabel with the given font."""
        label = QLabel(text)
        label.setFont(font)
        label.setAlignment(QtCore.Qt.AlignHCenter)
        return label

    def init_gui(self):
        """Build all widgets and stack them vertically."""
        label_font = QFont()
        label_font.setFamily("MS Shell Dlg 2")
        label_font.setPointSize(10)

        # lock image at the top
        self.image_label = QLabel(self)
        self.pix = QPixmap("images/lock.png")
        self.image_label.setPixmap(self.pix)
        self.image_label.setAlignment(QtCore.Qt.AlignHCenter)

        self.entry_label = self._make_centered_label("Введите пароль программы:", label_font)
        self.verify_label = self._make_centered_label("Повторите пароль:", label_font)

        # password fields
        self.entry_entry = QLineEdit()
        self.entry_entry.resize(261, 21)  # size is superseded once the layout manages it
        self.entry_entry.setAlignment(QtCore.Qt.AlignHCenter)
        self.verify_entry = QLineEdit()
        self.verify_entry.setAlignment(QtCore.Qt.AlignHCenter)

        self.entry_button = QPushButton("Начать работу")

        # mismatch warning; deliberately NOT added to the layout (original behaviour)
        self.entry_warning = QLabel("Пароли не совпадают!")
        self.entry_warning.setFont(label_font)

        main_layout = QVBoxLayout()
        for widget in (self.image_label, self.entry_label, self.entry_entry,
                       self.verify_label, self.verify_entry, self.entry_button):
            main_layout.addWidget(widget)
        self.setLayout(main_layout)
if __name__ == "__main__":
    import sys
    app = QApplication(sys.argv)
    window = EntryWindow()
    window.resize(480, 340)
    window.show()
    # start the Qt event loop; its exit status becomes the process exit code
    sys.exit(app.exec_())
|
# --------------------------------------------------------------------
import os
import functools
# --------------------------------------------------------------------
# Lambda functions
def myfunc (n):
    """Return a closure that multiplies its argument by n
    (demonstrates functions-as-values / captured variables)."""
    print ("n: ", n)
    def multiplier(i):
        return i * n
    return multiplier
doubler = myfunc (2) # Creates 2 functions (basically function pointers)
tripler = myfunc (3)
val = 11
print ("Doubled: " + str (doubler (val)) + ". Tripled: " + str (tripler (val))) # Call the function via the func pointer
squared = lambda i: i ** 2
print (squared (4))
# --------------------------------------------------------------------
# Map
numlist = [2, 4, 5, 7, 11, 14]
squaredlist = list (map (squared, numlist)) # Map applies the function to each element of the list. More than one list is possible. They are synchronized
squaredlist2 = list (map (lambda i: i ** 2, numlist)) # No need to define a function separately. Noname, throwaway function
for i in squaredlist:
    print (i)
print ("")
for i in squaredlist2:
    print (i)
# --------------------------------------------------------------------
# Filter
print ("")
divbytwo = list (filter (lambda num: int (num / 2) * 2 == num, numlist)) # Returns those elements for which the function returns true
for i in divbytwo:
    print (i)
# --------------------------------------------------------------------
# Reduce
print ("")
maxval = functools.reduce (lambda a, b: a if a > b else b, numlist) # Reduce moved to functools module. Operates on first 2 then on result and 3rd, result and 4th, etc.
print ("Maxval: ", maxval)
# --------------------------------------------------------------------
# Windows-only: waits for a key press before the console closes
os.system ("pause")
|
from . import views
from django.urls import path
from django.contrib.auth import views as auth_views
app_name = 'clients'

# URL routes for the clients app (clients, employees, companies, projects,
# project modules, and auth).
# NOTE(review): the function-based django.contrib.auth.views.logout used below
# was removed in Django 2.1 (replaced by LogoutView) -- confirm Django version.
urlpatterns = [
    path('index', views.index, name='index'),
    path('<int:id>/',views.details, name='details'),
    path('e',views.empindex,name='e'),
    path('e/<int:id>/',views.empdetail,name='empdetail'),
    path('company/',views.company,name='company_html'),
    path('company1/<int:id>/',views.company1,name='company'),
    path('emp/<int:id>',views.employeeedit,name='employeeedit'),
    path('delete1/<int:id>',views.delete_empview,name='delete'),
    path('delete/<int:id>',views.deleteview,name='delete12'),
    path('p/<int:id>',views.projectindex,name='p'),
    path('proj/<int:id>',views.projectCreate.as_view(),name='project'),
    path('proje/<int:id>',views.projectdetails,name='projectdetails'),
    # class-based project edit/delete use the default <pk> URL kwarg
    path('project/<pk>/',views.projectedit.as_view(),name='projectedit'),
    path('project1/<pk>',views.projectdelete.as_view(),name='projectdelete'),
    path('pm',views.projectmodindex,name='pm'),
    path('pm/<int:id>',views.projectmoddetails,name='projectmoddetail'),
    path('projm/',views.addprojectmod,name='addprojectmod'),
    path('pm/edit/<int:id>',views.projectmodedit,name='editprojectmod'),
    path('pm/delete/<int:id>',views.projectmoddelete,name='deleteprojectmod'),
    # NOTE(review): register route has no name= -- cannot be reversed by name
    path('register/',views.UserFormView.as_view()),
    path('login/', views.auth_login,name='login'),
    path('logout/', auth_views.logout,{'template_name':'clients/logout.html'},name='logout'),
]
# -*- coding: utf-8 -*-
import math
import numpy as np
import chainer, os, collections, six, math, random, time, copy
from chainer import cuda, Variable, optimizers, serializers, function, optimizer, initializers
from chainer.utils import type_check
from chainer import functions as F
from chainer import links as L
from softplus import softplus
from params import Params
import sequential
class Object(object):
    """Empty attribute container used by to_object()."""
    pass

def to_object(dict):
    """Shallow-convert a mapping into an Object with one attribute per key.

    The parameter name `dict` shadows the builtin but is kept for
    interface compatibility.
    """
    obj = Object()
    # PORTABILITY FIX: iteritems() is Python-2-only; items() works in both 2 and 3.
    for key, value in dict.items():
        setattr(obj, key, value)
    return obj
class EnergyModelParams(Params):
    """Hyperparameters for the deep energy model (MNIST defaults: 784 -> 10)."""
    def __init__(self):
        self.ndim_input = 28 * 28
        self.ndim_output = 10
        self.num_experts = 128
        self.weight_init_std = 1
        self.weight_initializer = "Normal" # Normal or GlorotNormal or HeNormal
        self.nonlinearity = "elu"
        self.optimizer = "Adam"
        self.learning_rate = 0.001
        self.momentum = 0.5
        self.gradient_clipping = 10
        self.weight_decay = 0
class GenerativeModelParams(Params):
    """Hyperparameters for the generator (latent 10 -> 784 pixels)."""
    def __init__(self):
        self.ndim_input = 10
        self.ndim_output = 28 * 28
        self.distribution_output = "universal" # universal or sigmoid or tanh
        self.weight_init_std = 1
        self.weight_initializer = "Normal" # Normal or GlorotNormal or HeNormal
        self.nonlinearity = "relu"
        self.optimizer = "Adam"
        self.learning_rate = 0.001
        self.momentum = 0.5
        self.gradient_clipping = 10
        self.weight_decay = 0
class DDGM():
    """Deep Directed Generative Model: an energy model and a generative
    model trained jointly (Chainer implementation)."""
    def __init__(self, params_energy_model, params_generative_model):
        # deep-copy so later caller-side mutation cannot affect us
        self.params_energy_model = copy.deepcopy(params_energy_model)
        self.config_energy_model = to_object(params_energy_model["config"])
        self.params_generative_model = copy.deepcopy(params_generative_model)
        self.config_generative_model = to_object(params_generative_model["config"])
        self.build_network()
        self._gpu = False
    def build_network(self):
        self.build_energy_model()
        self.build_generative_model()
    def build_energy_model(self):
        # energy model = feature extractor + experts + bias net, each built
        # from its serialized sequential description
        params = self.params_energy_model
        self.energy_model = DeepEnergyModel()
        self.energy_model.add_feature_extractor(sequential.from_dict(params["feature_extractor"]))
        self.energy_model.add_experts(sequential.from_dict(params["experts"]))
        self.energy_model.add_b(sequential.from_dict(params["b"]))
        config = self.config_energy_model
        self.energy_model.setup_optimizers(config.optimizer, config.learning_rate, config.momentum, config.weight_decay, config.gradient_clipping)
    def build_generative_model(self):
        params = self.params_generative_model
        self.generative_model = DeepGenerativeModel()
        self.generative_model.add_sequence(sequential.from_dict(params["model"]))
        config = self.config_generative_model
        self.generative_model.setup_optimizers(config.optimizer, config.learning_rate, config.momentum, config.weight_decay, config.gradient_clipping)
    def to_gpu(self):
        self.energy_model.to_gpu()
        self.generative_model.to_gpu()
        self._gpu = True
    @property
    def gpu_enabled(self):
        if cuda.available is False:
            return False
        return self._gpu
    @property
    def xp(self):
        # array module matching the current device (cupy on GPU, numpy on CPU)
        if self.gpu_enabled:
            return cuda.cupy
        return np
    def to_variable(self, x):
        # wrap raw arrays in a chainer Variable, moving to GPU if enabled
        if isinstance(x, Variable) == False:
            x = Variable(x)
        if self.gpu_enabled:
            x.to_gpu()
        return x
    def to_numpy(self, x):
        if isinstance(x, Variable) == True:
            x.to_cpu()
            x = x.data
        if isinstance(x, cuda.ndarray) == True:
            x = cuda.to_cpu(x)
        return x
    def get_batchsize(self, x):
        if isinstance(x, Variable):
            return x.data.shape[0]
        return x.shape[0]
    def zero_grads(self):
        # NOTE(review): self.optimizer_energy_model / self.optimizer_generative_model
        # are never assigned in this class (optimizers live inside the models via
        # setup_optimizers) -- this method looks stale; confirm it is unused.
        self.optimizer_energy_model.zero_grads()
        self.optimizer_generative_model.zero_grads()
    # returns energy and product of experts
    def compute_energy(self, x_batch, test=False):
        x_batch = self.to_variable(x_batch)
        return self.energy_model(x_batch, test=test)
    def compute_energy_sum(self, x_batch, test=False):
        # mean energy over the batch
        energy, experts = self.compute_energy(x_batch, test)
        energy = F.sum(energy) / self.get_batchsize(x_batch)
        return energy
    def compute_entropy(self):
        return self.generative_model.compute_entropy()
    def sample_z(self, batchsize=1):
        config = self.config_generative_model
        ndim_z = config.ndim_input
        # uniform prior over [-1, 1]
        z_batch = np.random.uniform(-1, 1, (batchsize, ndim_z)).astype(np.float32)
        # gaussian
        # z_batch = np.random.normal(0, 1, (batchsize, ndim_z)).astype(np.float32)
        return z_batch
    def generate_x(self, batchsize=1, test=False, as_numpy=False):
        return self.generate_x_from_z(self.sample_z(batchsize), test=test, as_numpy=as_numpy)
    def generate_x_from_z(self, z_batch, test=False, as_numpy=False):
        z_batch = self.to_variable(z_batch)
        x_batch = self.generative_model(z_batch, test=test)
        if as_numpy:
            return self.to_numpy(x_batch)
        return x_batch
    def backprop_energy_model(self, loss):
        self.energy_model.backprop(loss)
    def backprop_generative_model(self, loss):
        self.generative_model.backprop(loss)
    def compute_kld_between_generator_and_energy_model(self, x_batch_negative):
        # KL estimate = mean negative-sample energy minus generator entropy
        energy_negative, experts_negative = self.compute_energy(x_batch_negative)
        entropy = self.generative_model.compute_entropy()
        return F.sum(energy_negative) / self.get_batchsize(x_batch_negative) - entropy
    def load(self, dir=None):
        if dir is None:
            raise Exception()
        self.energy_model.load(dir + "/energy_model.hdf5")
        self.generative_model.load(dir + "/generative_model.hdf5")
    def save(self, dir=None):
        if dir is None:
            raise Exception()
        try:
            os.mkdir(dir)
        except:
            # best-effort: directory may already exist
            pass
        self.energy_model.save(dir + "/energy_model.hdf5")
        self.generative_model.save(dir + "/generative_model.hdf5")
class DeepGenerativeModel(sequential.chain.Chain):
    """Generator network; its entropy is approximated from the gamma scale
    parameters of its BatchNormalization layers."""
    def compute_entropy(self):
        """Sum of Gaussian entropies log(2*pi*e*gamma^2)/2 over all
        BatchNormalization links (1e-8 guards log(0))."""
        entropy = 0
        # idiom fix: the enumerate() index was unused -- iterate links directly
        for link in self.sequence.links:
            if isinstance(link, L.BatchNormalization):
                entropy += F.sum(F.log(2 * math.e * math.pi * link.gamma ** 2 + 1e-8) / 2)
        return entropy
    def __call__(self, z, test=False):
        return self.sequence(z, test=test)
class DeepEnergyModel(sequential.chain.Chain):
    """Energy model: feature extractor feeding a product-of-experts term,
    plus a bias network b(x) and a squared-norm data term."""
    def add_feature_extractor(self, sequence):
        self.add_sequence_with_name(sequence, "feature_extractor")
        self.feature_extractor = sequence
    def add_experts(self, sequence):
        self.add_sequence_with_name(sequence, "experts")
        self.experts = sequence
    def add_b(self, sequence):
        self.add_sequence_with_name(sequence, "b")
        self.b = sequence
    def compute_energy(self, x, features):
        # energy(x) = |x|^2/sigma - b(x) + sum_i -softplus(expert_i(features))
        experts = self.experts(features)
        # avoid overflow
        # -log(1 + exp(x)) = -max(0, x) - log(1 + exp(-|x|)) = -softplus
        product_of_experts = -softplus(experts)
        sigma = 1.0
        if x.data.ndim == 4:
            # 4-D input (presumably N,C,H,W images) -- flatten for the norm term
            batchsize = x.data.shape[0]
            _x = F.reshape(x, (batchsize, -1))
            energy = F.sum(_x * _x, axis=1) / sigma - F.reshape(self.b(x), (-1,)) + F.sum(product_of_experts, axis=1)
        else:
            energy = F.sum(x * x, axis=1) / sigma - F.reshape(self.b(x), (-1,)) + F.sum(product_of_experts, axis=1)
        return energy, product_of_experts
    def __call__(self, x, test=False):
        self.test = test
        features = self.feature_extractor(x, test=test)
        energy, product_of_experts = self.compute_energy(x, features)
        return energy, product_of_experts
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 28 16:37:59 2013
bpath1
This is a direct re-implementation of Des Higham's SDE scripts. First up
is the Brownian path simulation
@author: ih3
"""
import numpy as np
# Brownian path over [0, T] with N steps (Higham's bpath1).
T = 1.0  # End time
N = 500  # Number of steps
dt = T / N

W = np.zeros(N)   # Brownian path
dW = np.zeros(N)  # increments
t = np.linspace(0,T,N)

# Each Brownian increment is N(0, dt), i.e. sqrt(dt) * standard normal.
# BUGFIX: the original first increment omitted the sqrt(dt) scaling, giving
# dW[0] a variance of 1 instead of dt (Higham's bpath1 scales every increment).
dW[0] = np.sqrt(dt) * np.random.randn()
W[0] = dW[0]
for j in range(1,N):
    dW[j] = np.sqrt(dt) * np.random.randn()
    W[j] = W[j-1] + dW[j]

import matplotlib.pyplot as plt
plt.plot(t,W)
plt.xlabel('t')
plt.ylabel('W(t)')
plt.show()
|
#-*- coding:utf8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import re
import hashlib
import inspect
import copy
import time
import datetime
import json
import urllib
import urllib2
from django.conf import settings
from django.core.cache import cache
from shopapp.weixin.models import WeiXinAccount
from common.utils import (randomString,
update_model_fields,
randomString,
getSignatureWeixin,
process_lock)
REFRESH_WX_TOKEN_CACHE_KEY = 'REFRESH_WX_TOKEN_KEY'
class WeiXinRequestException(Exception):
    """Raised when a WeiXin API call fails (non-zero errcode in the response)."""
    def __init__(self,code=None,msg=None):
        # errcode / errmsg as returned by the WeiXin API
        self.code = code
        self.message = msg
    def __str__(self):
        return u'微信API错误:(%s,%s)'%(str(self.code),self.message)
class WeiXinAPI(object):
    """Thin Python-2 wrapper around the WeiXin (WeChat) HTTP API: tokens,
    user/group management, menus, QR codes, the small-shop (merchant)
    endpoints, JS tickets and pay-signature helpers."""

    _token_uri = "/cgi-bin/token"
    _user_info_uri = "/cgi-bin/user/info"
    _create_groups_uri = "/cgi-bin/groups/create"
    # BUGFIX: was misspelled `_get_grounps_uri`, so getGroups() raised AttributeError.
    _get_groups_uri = "/cgi-bin/groups/get"
    # NOTE(review): missing leading '/' -- confirm the resulting URL is intended
    _get_user_group_uri = "cgi-bin/groups/getid"
    _update_group_uri = "/cgi-bin/groups/update"
    _update_group_member_uri = "/cgi-bin/groups/members/update"
    _get_user_info_uri = "/cgi-bin/user/info"
    _get_followers_uri = "/cgi-bin/user/get"
    _create_menu_uri = "/cgi-bin/menu/create"
    _get_menu_uri = "/cgi-bin/menu/get"
    _detele_menu_uri = "/cgi-bin/menu/delete"
    _create_qrcode_uri = "/cgi-bin/qrcode/create"
    _media_get_uri = "/cgi-bin/media/get"
    _js_ticket_uri = "/cgi-bin/ticket/getticket"
    #微信小店接口 (WeiXin small-shop endpoints)
    _merchant_get_uri = "/merchant/get"
    _merchant_getbystatus_uri = "/merchant/getbystatus"
    _merchant_stock_add_uri = "/merchant/stock/add"
    _merchant_stock_reduce_uri = "/merchant/stock/reduce"
    _merchant_order_getbyid_uri = "/merchant/order/getbyid"
    _merchant_order_getbyfilter_uri = "/merchant/order/getbyfilter"
    _merchant_order_setdelivery_uri = "/merchant/order/setdelivery"
    _merchant_modproductstatus_uri = "/merchant/modproductstatus"
    _merchant_category_getsku_uri = "/merchant/category/getsku"
    #微信原生支付URL (native-pay URL)
    _native_url = "weixin://wxpay/bizpayurl"
    _deliver_notify_url = "/pay/delivernotify"

    def __init__(self):
        self._wx_account = WeiXinAccount.getAccountInstance()

    def getAccountId(self):
        """Account id of the configured WeiXin account, or None if unset."""
        if self._wx_account.isNone():
            return None
        return self._wx_account.account_id

    def getAbsoluteUrl(self,uri,token):
        """Build the full API URL; `token` truthy appends ?access_token=..."""
        url = settings.WEIXIN_API_HOST + uri
        return token and '%s?access_token=%s'%(url,self.getAccessToken()) or url+'?'

    def checkSignature(self,signature,timestamp,nonce):
        """Verify a WeiXin callback signature (sha1 of sorted token/ts/nonce);
        reject requests older than 5 minutes."""
        import time
        import hashlib
        if time.time() - int(timestamp) > 300:
            return False
        sign_array = [self._wx_account.token,timestamp,nonce]
        sign_array.sort()
        sha1_value = hashlib.sha1(''.join(sign_array))
        return sha1_value.hexdigest() == signature

    def handleRequest(self,uri,params={},method="GET",token=True):
        """Issue a GET/POST request and return the decoded JSON response.

        Raises WeiXinRequestException on a non-zero errcode.
        NOTE(review): mutable default `params={}` is never mutated here, kept
        for interface compatibility.
        """
        absolute_url = self.getAbsoluteUrl(uri,token)
        if method.upper() == 'GET':
            url = '%s&%s'%(absolute_url,urllib.urlencode(params))
            req = urllib2.urlopen(url)
            resp = req.read()
        else:
            rst = urllib2.Request(absolute_url)
            # dict payloads are form-encoded; anything else (JSON strings) is sent raw
            req = urllib2.urlopen(rst,type(params)==dict and
                                  urllib.urlencode(params) or params)
            resp = req.read()
        content = json.loads(resp,strict=False)
        if content.has_key('errcode') and content['errcode'] != 0:
            raise WeiXinRequestException(content['errcode'],content['errmsg'])
        return content

    @process_lock
    def refresh_token(self):
        """Fetch a fresh access token and persist it (process-locked)."""
        if not self._wx_account.isExpired():
            return self._wx_account.access_token
        params = {'grant_type':'client_credential',
                  'appid':self._wx_account.app_id,
                  'secret':self._wx_account.app_secret}
        content = self.handleRequest(self._token_uri, params,token=False)
        self._wx_account.access_token = content['access_token']
        self._wx_account.expired = datetime.datetime.now()
        self._wx_account.expires_in = content['expires_in']
        # BUGFIX: update_fields listed 'expired_in' (a non-existent field name),
        # so the new expires_in value was never persisted.
        update_model_fields(self._wx_account,
                            update_fields=['access_token','expired','expires_in'])
        return content['access_token']

    def getAccessToken(self):
        if not self._wx_account.isExpired():
            return self._wx_account.access_token
        return self.refresh_token()

    def getCustomerInfo(self,openid,lang='zh_CN'):
        return self.handleRequest(self._user_info_uri, {'openid':openid,'lang':lang})

    def createGroups(self,name):
        # BUGFIX: `name = type(name)==unicode and name.encode('utf8') and name`
        # set name to False for plain str input and never used the encoded bytes.
        if isinstance(name, unicode):
            name = name.encode('utf8')
        return self.handleRequest(self._create_groups_uri, {'name':name}, method='POST')

    def getGroups(self):
        return self.handleRequest(self._get_groups_uri)

    def getUserGroupById(self,openid):
        return self.handleRequest(self._get_user_group_uri, {'openid':openid}, method='POST')

    def updateGroupName(self,id,name):
        # BUGFIX: same broken unicode-coercion pattern as createGroups.
        if isinstance(name, unicode):
            name = name.encode('utf8')
        return self.handleRequest(self._update_group_uri, {'id':id,'name':name}, method='POST')

    def updateGroupMember(self,openid,to_groupid):
        return self.handleRequest(self._update_group_member_uri, {'openid':openid,
                                                                  'to_groupid':to_groupid},
                                  method='POST')

    def getUserInfo(self,openid):
        return self.handleRequest(self._get_user_info_uri, {'openid':openid},method='GET')

    def getFollowersID(self,next_openid=''):
        return self.handleRequest(self._get_followers_uri, {'next_openid':next_openid},
                                  method='GET')

    def createMenu(self,params):
        """Create the custom menu; params is the menu structure as a dict."""
        assert type(params) == dict
        jmenu = json.dumps(params,ensure_ascii=False)
        return self.handleRequest(self._create_menu_uri, str(jmenu), method='POST')

    def getMenu(self):
        return self.handleRequest(self._get_menu_uri, {},method='GET')

    def deleteMenu(self):
        return self.handleRequest(self._detele_menu_uri, {},method='GET')

    def createQRcode(self,action_name,action_info,scene_id,expire_seconds=0):
        """Create a (temporary or permanent) QR code for a scene id."""
        # BUGFIX: same broken unicode-coercion pattern as createGroups.
        if isinstance(action_name, unicode):
            action_name = action_name.encode('utf8')
        params = {"action_name": action_name ,
                  "action_info": {"scene": {"scene_id": scene_id}}}
        if action_name=='QR_SCENE':
            # only temporary QR codes carry an expiry
            params.update(expire_seconds=expire_seconds)
        return self.handleRequest(self._create_qrcode_uri,
                                  params,method='POST')

    def getMerchant(self,product_id):
        params = json.dumps({'product_id':product_id})
        response = self.handleRequest(self._merchant_get_uri,
                                      str(params),
                                      method='POST')
        return response['product_info']

    def getMerchantByStatus(self,status):
        params = json.dumps({'status':status},
                            ensure_ascii=False)
        response = self.handleRequest(self._merchant_getbystatus_uri,
                                      str(params),
                                      method='POST')
        return response['products_info']

    def modMerchantProductStatus(self,product_id,status):
        params = json.dumps({'product_id':product_id,'status':status},
                            ensure_ascii=False)
        response = self.handleRequest(self._merchant_modproductstatus_uri,
                                      str(params),
                                      method='POST')
        return response

    def addMerchantStock(self,product_id,quantity,sku_info=''):
        params = json.dumps({'product_id':product_id,
                             'quantity':quantity,
                             'sku_info':sku_info},
                            ensure_ascii=False)
        return self.handleRequest(self._merchant_stock_add_uri,
                                  str(params),
                                  method='POST')

    def reduceMerchantStock(self,product_id,quantity,sku_info=''):
        params = json.dumps({'product_id':product_id,
                             'quantity':quantity,
                             'sku_info':sku_info},
                            ensure_ascii=False)
        return self.handleRequest(self._merchant_stock_reduce_uri,
                                  str(params),
                                  method='POST')

    def getOrderById(self,order_id):
        params = json.dumps({'order_id':str(order_id)},ensure_ascii=False)
        response = self.handleRequest(self._merchant_order_getbyid_uri,
                                      str(params),
                                      method='POST')
        return response['order']

    def getOrderByFilter(self,status=None,begintime=None,endtime=None):
        """List orders, optionally filtered by status and/or time range."""
        params = {}
        if status:
            params.update(status=status)
        if begintime:
            params.update(begintime=begintime)
        if endtime:
            params.update(endtime=endtime)
        params_str = json.dumps(params,
                                ensure_ascii=False)
        response = self.handleRequest(self._merchant_order_getbyfilter_uri,
                                      str(params_str),
                                      method='POST')
        return response['order_list']

    def getSkuByCategory(self,cate_id=None):
        params = {'cate_id':cate_id}
        params_str = json.dumps(params,
                                ensure_ascii=False)
        response = self.handleRequest(self._merchant_category_getsku_uri,
                                      str(params_str),
                                      method='POST')
        return response['sku_table']

    def deliveryOrder(self,order_id,delivery_company,delivery_track_no,need_delivery=1,is_others=0):
        params = json.dumps({'order_id':order_id,
                             'delivery_company':delivery_company,
                             'delivery_track_no':delivery_track_no,
                             'is_others':is_others,
                             'need_delivery':need_delivery},
                            ensure_ascii=False)
        return self.handleRequest(self._merchant_order_setdelivery_uri,
                                  str(params),
                                  method='POST')

    def deliverNotify(self,open_id,trans_id,out_trade_no,
                      deliver_status=1,deliver_msg="ok"):
        """Notify WeiXin pay that an order has been delivered (signed request)."""
        params = {"appid":self._wx_account.app_id,
                  "appkey":self._wx_account.pay_sign_key,
                  "openid":open_id,
                  "transid":trans_id,
                  "out_trade_no":out_trade_no,
                  "deliver_timestamp":"%.0f"%time.time(),
                  "deliver_status":deliver_status,
                  "deliver_msg":deliver_msg}
        params['app_signature'] = getSignatureWeixin(params)
        params['sign_method'] = 'sha1'
        # the appkey participates in the signature but must not be sent
        params.pop('appkey')
        return self.handleRequest(self._deliver_notify_url,
                                  str(json.dumps(params)),
                                  method='POST')

    def getJSTicket(self):
        if not self._wx_account.isJSTicketExpired():
            return self._wx_account.js_ticket
        return self.refreshJSTicket()

    def refreshJSTicket(self):
        """Fetch and persist a fresh JS-SDK ticket."""
        if not self._wx_account.isJSTicketExpired():
            return self._wx_account.js_ticket
        js_url = self.getAbsoluteUrl(self._js_ticket_uri, self.getAccessToken())+'&type=jsapi'
        req = urllib2.urlopen(js_url)
        content = json.loads(req.read())
        self._wx_account.js_ticket = content['ticket']
        self._wx_account.js_expired = datetime.datetime.now()
        update_model_fields(self._wx_account,
                            update_fields=['js_ticket','js_expired'])
        return content['ticket']

    def getShareSignParams(self,share_url):
        """Signature parameters for the JS-SDK share config of share_url."""
        sign_params = {"noncestr":randomString(),
                       "jsapi_ticket":self.getJSTicket(),
                       "timestamp":int(time.time()),
                       "url":share_url }
        key_pairs = ["%s=%s"%(k,v) for k,v in sign_params.iteritems()]
        key_pairs.sort()
        sign_params['signature'] = hashlib.sha1('&'.join(key_pairs)).hexdigest()
        sign_params['app_id'] = self._wx_account.app_id
        return sign_params

    def genNativeSignParams(self,product_id):
        """Signed parameters for a native-pay product link."""
        signString = {'appid':self._wx_account.app_id,
                      'timestamp':str(int(time.time())),
                      'noncestr':randomString(),
                      'productid':str(product_id),
                      'appkey':self._wx_account.app_secret
                      }
        # BUGFIX: was `signString.update(sign, ...)` -- `sign` is undefined and
        # dict.update does not take the key as a positional argument.
        signString['sign'] = getSignatureWeixin(signString)
        signString.pop('appkey')
        return signString

    def genPaySignParams(self,package):
        """Signed parameters for a pay request carrying `package`."""
        signString = {'appid':self._wx_account.app_id,
                      'timestamp':str(int(time.time())),
                      'noncestr':randomString(),
                      'package':package,
                      'appkey':self._wx_account.pay_sign_key
                      }
        # BUGFIX: same undefined-name `sign` misuse as genNativeSignParams.
        signString['sign'] = getSignatureWeixin(signString)
        signString.pop('appkey')
        return signString

    def genPackageSignParams(self,package):
        # NOTE(review): unimplemented stub, returns None
        return

    def getMediaDownloadUrl(self,media_id):
        return '%s%s?access_token=%s&media_id=%s'%(settings.WEIXIN_MEDIA_HOST,
                                                   self._media_get_uri,
                                                   self.getAccessToken(),
                                                   media_id)
|
#!/usr/bin/python3
"""Unittest for max_integer([..])
"""
import unittest
max_integer = __import__('6-max_integer').max_integer
class TestMaxInteger(unittest.TestCase):
    """Tests the function max_integer for correct output
    """
    def test_max_at_the_end(self):
        """Tests all positive numbers
        """
        self.assertEqual(max_integer([1, 2, 3, 4]), 4)
    def test_one_negative_number(self):
        """Test when there is a negative number in the list
        """
        self.assertEqual(max_integer([1, 2, -1, 3, 4]), 4)
    def test_only_negatives(self):
        """Tests a list of only negative numbers in the list
        """
        self.assertEqual(max_integer([-4, -3, -2, -1]), -1)
    def test_zero(self):
        """Tests a list size of 0
        """
        self.assertEqual(max_integer([]), None)
    def test_positive_and_negative(self):
        """Test a list with positive and negative numbers
        """
        self.assertEqual(max_integer([1, 6, 100, 4, 0, -1, 10]), 100)
    def test_max_in_the_middle(self):
        """Test when max is in the middle
        """
        self.assertEqual(max_integer([2, 3, 6, 4, 5]), 6)
    def test_max_at_the_beginning(self):
        """Test with max at the beginning of list
        """
        # FIX: assertEquals is a deprecated alias of assertEqual
        self.assertEqual(max_integer([4, 3, 2, 1]), 4)
    def test_list_with_one_element(self):
        """Test with a list that only has 1 element
        """
        self.assertEqual(max_integer([1]), 1)
if __name__ == '__main__':
    # discover and run the tests in this module
    unittest.main()
|
# -*- coding: utf-8 -*-
from rest_framework import permissions, viewsets
from rest_framework.response import Response
from posts.models import Post
from posts.permissions import IsAuthorOfPost
from posts.serializers import PostSerializer
class PostViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for posts, newest first."""
    queryset = Post.objects.order_by('-created_at')
    serializer_class = PostSerializer

    def get_permissions(self):
        """Read access for everyone; writes require an authenticated author."""
        if self.request.method in permissions.SAFE_METHODS:
            return (permissions.AllowAny(),)
        return (permissions.IsAuthenticated(), IsAuthorOfPost(),)

    def perform_create(self, serializer):
        """
        Called before the model for this view is saved; attach the
        requesting user as the post's author.

        BUGFIX: the original saved here AND then returned
        super().perform_create(serializer), which calls serializer.save()
        again -- persisting every new post twice.  Save exactly once.
        """
        serializer.save(author=self.request.user)
class AccountPostsViewSet(viewsets.ViewSet):
    """
    This viewset is used to list the posts associated with a specific account.
    """
    queryset = Post.objects.select_related('author').all()
    serializer_class = PostSerializer
    def list(self, request, account_username=None):
        """Return the serialized posts authored by `account_username`."""
        queryset = self.queryset.filter(author__username=account_username)
        serializer = self.serializer_class(queryset, many=True)
        return Response(serializer.data)
|
from pathlib import Path
class Bankomat:
    """An ATM at a given location.

    The cash balance starts at 1000 and is persisted as a transaction log
    in '<lokacija>.txt' (lines of the form "op,account,amount,balance");
    on construction the balance is restored from the last logged line.
    """
    def __init__(self, lokacija):
        self.lokacija = lokacija
        self.stanje = 1000  # default balance when no log exists yet
        if Path(str(self.lokacija) + ".txt").exists():
            with open(str(self.lokacija) + ".txt") as dat:
                for vrstica in dat:
                    # BUGFIX: the original used int(...[:-3]), which chopped the
                    # trailing newline PLUS the last two digits of the balance
                    # (e.g. "1100\n" -> 11).  int() tolerates the newline itself.
                    self.stanje = int(vrstica.split(",")[-1])
    def __str__(self):
        return "Bankomat, ki se nahaja na lokaciji: {}".format(self.lokacija)
    def __repr__(self):
        return "Bankomat {}".format(self.lokacija)
    def polnjenje(self, znesek):
        """Refill the ATM with `znesek` (not logged)."""
        self.stanje += znesek
    def dvig(self, racun, znesek):
        """Withdraw `znesek` for account `racun`; log and return True on success."""
        if 0 <= znesek <= self.stanje:
            self.stanje -= znesek
            with open(str(self.lokacija) + ".txt" , "a") as dat:
                print("dvig,{},{},{}".format(racun, znesek, self.stanje), file=dat)
            return True
        else:
            return False
    def polog(self, racun, znesek):
        """Deposit a non-negative `znesek`; log and return True on success."""
        if znesek >= 0:
            self.stanje += znesek
            with open(str(self.lokacija) + ".txt", "a") as dat:
                print("polog,{},{},{}".format(racun, znesek, self.stanje), file=dat)
            return True
        else:
            return False
|
import pandas as pd

# Merge all vaccine CSVs into one file.  The first file has no numeric
# suffix; the rest are VaccineData2.csv .. VaccineData15.csv.  A single
# pd.concat over the list is equivalent to the original chain of pairwise
# concats (indexes are preserved, not reset, exactly as before).
_vaccine_files = ["VaccineData.csv"] + ["VaccineData%d.csv" % i for i in range(2, 16)]
dataset = pd.concat([pd.read_csv(name) for name in _vaccine_files])
dataset.to_csv('AllVaccineData.csv')
|
# Trivial demo: bind a value and print it.
i = 5
print(i)
def minion_game(string):
    """Score HackerRank's Minion Game and print the winner.

    Every substring starting at a vowel scores for Kevin, at a consonant
    for Stuart; a substring starting at index i contributes len - i points.
    Prints 'Draw', 'Stuart <score>' or 'Kevin <score>'.
    """
    vowels = {'A', 'E', 'I', 'O', 'U'}
    total = len(string)
    kevin = 0
    stuart = 0
    for index, letter in enumerate(string):
        substrings_here = total - index
        if letter in vowels:
            kevin += substrings_here
        else:
            stuart += substrings_here
    if stuart == kevin:
        print('Draw')
    elif stuart > kevin:
        print('Stuart', stuart)
    else:
        print('Kevin', kevin)
|
# Build a {day: temperature} lookup from nyc_weather.csv, then report two days.
d = {}
with open('nyc_weather.csv', 'r') as f:
    for line in f:
        tokens = line.split(',')
        day = tokens[0]
        try:
            temp = int(tokens[1])
            d[day] = temp
        except (IndexError, ValueError):
            # Fix: the original bare `except:` swallowed every exception
            # (including KeyboardInterrupt/SystemExit). Only header rows or
            # malformed lines — missing or non-numeric temperature — are
            # expected here.
            print('invalid temperature, ignore line')
print(f'the temperature in jan 9 was {d["Jan 9"]}')
print(f'the temperature in jan 4 was {d["Jan 4"]}')
'''
Created on Nov 17, 2015
@author: Jonathan
'''
def bags(strength, food):
    """Return how many bags are needed to pack every item in `food`.

    A bag holds at most `strength` items and only one kind of food, so each
    distinct kind needs ceil(count / strength) bags.

    Fix: the original used true division, which under Python 3 returns
    floats and over-counts by computing count/strength + 1 (not ceil)
    whenever a kind does not divide evenly. It also shadowed the builtin
    `type`.
    """
    num_bags = 0
    for kind in set(food):
        count = food.count(kind)
        num_bags += -(-count // strength)  # integer ceiling division
    return num_bags
if __name__ == '__main__':
    # Placeholder entry point: the module is imported for bags() elsewhere.
    pass
from django.urls import path
from . import views
urlpatterns = [
    # Fixed, named routes are matched first.
    path('', views.home, name='home'),
    path('member-directory/', views.directory_of_members, name='member-directory'),
    path('current-sponsors/', views.current_sponsors, name='current-sponsors'),
    path('resources/', views.resources_page, name='resources'),
    path('who-are-we/', views.who_are_we, name='who-are-we'),
    # Catch-all slug route: must stay last or it would shadow the routes above.
    path('<page_path>/', views.page, name='pages'),
]
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
from flask import request, g, make_response, jsonify
from . import Resource
from .. import schemas
import ast
class Timelots(Resource):
    """GET endpoint that lists dentists whose status is 'available'."""

    def get(self):
        """Read dentists.txt and return the available entries.

        Each non-blank line is a Python-literal dict (ast.literal_eval is
        safe on untrusted text, unlike eval). Returns (body, 200, headers).

        Fixes: removed the redundant f.close() that ran inside the `with`
        block on every line, removed the leftover debug print, and filters
        in a single pass instead of building an intermediate list.
        """
        available = []
        with open('dentists.txt', 'r') as f:
            for line in f:
                if not line.strip():
                    continue
                entry = ast.literal_eval(line)
                if entry['status'] == 'available':
                    available.append(entry)
        return available, 200, None
|
# -*- coding: utf-8 -*-
'''
Created on 8 Jul 2015
@author: motasim
Script that loads the MNIST dataset as numpy arrays where the pixels are normalised to be between [0, 1].
If the data is not available it will attempt to download it.
There are 3 classes, all of which load the same data but returns them in different formats:
1. MNIST: returns the dataset as pure numpy arrays. I guess this won't be used much unless for plotting or
other work not related to lasagne/theano.
2. MNISTTheano: converts the numpy arrays into theano variables. Will be used for fully connected netweorks
(i.e. Dense layer)
3. MNISTTheano2D: returns 2D arrays for each image as theano vars. This will be used in convolutional nets.
Usage:
from data import MNIST
mnist = MNIST(validation_size=0.2)
train_imgs, train_lbl, train_n, valid_imgs, valid_lbls, valid_n = mnist.get_training()
test_imgs , test_lbls, test_n = mnist.get_testing()
# or get all the data in one object
dataset = mnist.load_dataset()
print dataset.keys()
print dataset["train"].keys()
print dataset["train"]["images"].shape
'''
import urllib2, gzip
import os
import struct
import logging
from array import array
from math import sqrt
import numpy as np
import theano
import theano.tensor as T
import lasagne
from sklearn.cross_validation import train_test_split
# the following logging mess is just to make it work with ipython's notebook
logger = logging.getLogger("mnist")
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s : %(levelname)s : %(message)s')
handler = logging.FileHandler("mnist")
handler.setLevel(logging.INFO)
handler.setFormatter(formatter)
logger.addHandler(handler)
def download_dataset():
    """Download (if missing) and unpack the four MNIST idx files into ../data/.

    Already-extracted files are skipped; each .gz archive is deleted after
    extraction. Python 2 only (urllib2, print statement).
    """
    file_names = [
        't10k-images-idx3-ubyte',
        't10k-labels-idx1-ubyte',
        'train-images-idx3-ubyte',
        'train-labels-idx1-ubyte'
    ]
    base_url = 'http://yann.lecun.com/exdb/mnist/'
    dist_dir = "../data/"
    for file_name in file_names:
        url = base_url + file_name + '.gz'
        # make sure we have the data directory
        if not os.path.exists(dist_dir):
            os.makedirs(dist_dir)
        archive_dist_path = dist_dir + file_name + '.gz'
        normal_dist_path = dist_dir + file_name
        # extracted on a previous run -- nothing to do
        if os.path.exists(normal_dist_path):
            continue
        # download the archive
        # for fun I'll copy this code to show download progress
        # http://stackoverflow.com/a/22776/408286
        u = urllib2.urlopen(url)
        f = open(archive_dist_path, 'wb')
        meta = u.info()
        file_size = int(meta.getheaders("Content-Length")[0])
        logger.info("Downloading: %s Bytes: %s" % (file_name, file_size))
        file_size_dl = 0
        block_sz = 8192
        while True:
            buffer = u.read(block_sz)
            if not buffer:
                break
            file_size_dl += len(buffer)
            f.write(buffer)
            # progress line; the chr(8) backspaces keep it on one terminal row
            status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl * 100. / file_size)
            status = status + chr(8)*(len(status)+1)
            print status,
        f.close()
        # extract the archive
        logger.info("Extracting the archive: %s to %s" % (archive_dist_path, normal_dist_path))
        archive = gzip.open(archive_dist_path, 'rb')
        unzipped= open(normal_dist_path , 'wb')
        unzipped.write(archive.read())
        archive.close()
        unzipped.close()
        # remove the archive
        os.remove(archive_dist_path)
class MNIST(object):
    """Loads MNIST as numpy arrays with pixels normalised to [0, 1].

    The dataset dict is populated lazily: "train"/"valid" on the first call
    to get_training(), "test" on the first call to get_testing().
    """
    def __init__(self, path='../data/', validation_size=0.2):
        # validation_size: fraction of the training set held out for
        # validation (passed straight through to train_test_split).
        self.path = path
        self.test_img_fname = 't10k-images-idx3-ubyte'
        self.test_lbl_fname = 't10k-labels-idx1-ubyte'
        self.train_img_fname = 'train-images-idx3-ubyte'
        self.train_lbl_fname = 'train-labels-idx1-ubyte'
        self.dataset = {
            "train": None,
            "valid": None,
            "test" : None
        }
        self.validation_size = validation_size
        download_dataset()
    def get_training(self):
        """Return (train_imgs, train_lbls, train_n, valid_imgs, valid_lbls, valid_n), loading lazily."""
        if not self.dataset["train"] or not self.dataset["valid"]:
            self.load_train_valid()
        return self.dataset["train"]["images"], self.dataset["train"]["labels"], self.dataset["train"]["num_examples"], \
            self.dataset["valid"]["images"], self.dataset["valid"]["labels"], self.dataset["valid"]["num_examples"]
    def get_testing(self):
        """Return (test_imgs, test_lbls, test_n), loading lazily."""
        if not self.dataset["test"]:
            self.load_testing()
        return self.dataset["test"]["images"], self.dataset["test"]["labels"], self.dataset["test"]["num_examples"]
    def load_dataset(self):
        """Force-load all splits and return the dataset dict."""
        self.get_training()
        self.get_testing()
        return self.dataset
    def load_testing(self):
        """Populate self.dataset["test"] from the idx files on disk."""
        test_images, test_labels, n = self.load(os.path.join(self.path, self.test_img_fname),
                                                os.path.join(self.path, self.test_lbl_fname))
        self.dataset["test"] = {
            "images": test_images,
            "labels": test_labels,
            "num_examples": n
        }
    def load_train_valid(self):
        """Populate self.dataset["train"] and ["valid"] (split size set by validation_size)."""
        train_imgs, train_lbls, train_n, valid_imgs, valid_lbls, valid_n = self.load(self.path + self.train_img_fname,
                                                                                     self.path + self.train_lbl_fname,
                                                                                     valid_size = self.validation_size)
        self.dataset["train"] = {
            "images": train_imgs,
            "labels": train_lbls,
            "num_examples": train_n
        }
        self.dataset["valid"] = {
            "images": valid_imgs,
            "labels": valid_lbls,
            "num_examples": valid_n
        }
    def load(self, path_img, path_lbl, valid_size=None):
        """Read idx-format image/label files and return numpy arrays.

        Returns a 6-tuple (train + valid) when valid_size is set, else a
        3-tuple. Pixels are float32 in [0, 1]; labels are int32.
        """
        # From https://github.com/sorki/python-mnist
        with open(path_lbl, 'rb') as file:
            magic, size = struct.unpack(">II", file.read(8))
            if magic != 2049:
                raise ValueError('Magic number mismatch, expected 2049,'
                    'got %d' % magic)
            labels = array("B", file.read())
        with open(path_img, 'rb') as file:
            # `size` is re-read here; the image header's count is what the
            # loops below use.
            magic, size, rows, cols = struct.unpack(">IIII", file.read(16))
            if magic != 2051:
                raise ValueError('Magic number mismatch, expected 2051,'
                    'got %d' % magic)
            image_data = array("B", file.read())
        images = []
        for i in xrange(size):
            images.append([0]*rows*cols)
        for i in xrange(size):
            images[i][:] = image_data[i*rows*cols : (i+1)*rows*cols]
        train_imgs = np.array(images, dtype=np.float32) / 255.0
        train_lbls = np.array(labels, dtype=np.int32)
        # check the class distribution
        counts = np.bincount(train_lbls)
        logger.info("Checking class distribution for the %s set." % ("train" if valid_size else "test",))
        for i, count in enumerate(counts.tolist()):
            logger.info("Class %d:\t%.2f%%" % (i, count*100.0/counts.sum()))
        # split into train/valid if needed
        valid_imgs, valid_lbls = None, None
        if valid_size:
            # fixed random_state keeps the split reproducible across runs
            train_imgs, valid_imgs, train_lbls, valid_lbls = train_test_split(train_imgs, train_lbls,test_size=valid_size, random_state=42)
        train_n = train_imgs.shape[0]
        if not valid_imgs is None and not valid_lbls is None:
            # convert the validation vars
            valid_n = valid_imgs.shape[0]
            return train_imgs, train_lbls, train_n, valid_imgs, valid_lbls, valid_n
        return train_imgs, train_lbls, train_n
class MNISTTheano(MNIST):
    """MNIST variant whose load() returns theano shared variables (flat images).

    Intended for fully-connected (Dense) networks.
    """
    def load(self, path_img, path_lbl, valid_size=None):
        '''
        Converts the numpy arrays to theano vars
        '''
        res = super(MNISTTheano, self).load(path_img, path_lbl, valid_size)
        converted = list()
        for elem in res:
            new_elem = None
            if hasattr(elem, 'ndim'):
                if elem.ndim == 1: # assume it's the labels
                    new_elem = T.cast(theano.shared(elem), 'int32')
                elif elem.ndim == 2: # inputs
                    new_elem = theano.shared(lasagne.utils.floatX(elem))
                else: # what's that?
                    assert False
            else: # not a numpy array so leave it as is
                # e.g. the example counts (plain ints) pass through unchanged
                new_elem = elem
            converted.append(new_elem)
        return tuple(converted)
class MNISTTheano2D(MNIST):
    """MNIST variant whose load() returns (n, 1, dim, dim) theano shared vars.

    Intended for convolutional networks; assumes square images.
    """
    def load(self, path_img, path_lbl, valid_size=None):
        '''
        Converts the input numpy arrays (i.e. images) to 2D arrays for use with convolutional nets
        '''
        res = super(MNISTTheano2D, self).load(path_img, path_lbl, valid_size)
        converted = list()
        for elem in res:
            new_elem = None
            if hasattr(elem, 'ndim'):
                if elem.ndim == 1: # assume it's the labels
                    new_elem = T.cast(theano.shared(elem), 'int32')
                elif elem.ndim == 2: # inputs
                    # assuming the images are squares
                    dim = int(sqrt(elem.shape[1]))
                    assert dim**2 == elem.shape[1], "The images don't seem to be squares %d != %d" % (dim**2, elem.shape[1])
                    # reshape flat rows into (batch, channel=1, height, width)
                    new_elem = elem.reshape(-1, 1, dim, dim)
                    new_elem = theano.shared(lasagne.utils.floatX(new_elem))
                else: # what's that?
                    assert False
            else: # not a numpy array so leave it as is
                new_elem = elem
            converted.append(new_elem)
        return tuple(converted)
|
requests
qhue
plotly
pandas
dash
dash-bootstrap-components
selenium
forex-python
yfinance
# forex_python removed: duplicate of forex-python above (pip normalizes '_' to '-')
"""
REST API Documentation for the NRS TFRS Credit Trading Application
The Transportation Fuels Reporting System is being designed to streamline
compliance reporting for transportation fuel suppliers in accordance with
the Renewable & Low Carbon Fuel Requirements Regulation.
OpenAPI spec version: v1
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from rest_framework import serializers
from rest_framework.relations import SlugRelatedField
from api.models.CompliancePeriod import CompliancePeriod
from api.models.ComplianceReport import \
ComplianceReportType, ComplianceReportStatus, ComplianceReport
from api.models.ComplianceReportSchedules import \
ScheduleCRecord, ScheduleC, ScheduleARecord, ScheduleA, \
ScheduleBRecord, ScheduleB
from api.serializers import \
OrganizationMinSerializer, CompliancePeriodSerializer
from api.serializers.ComplianceReportSchedules import \
ScheduleCDetailSerializer, ScheduleADetailSerializer, \
ScheduleBDetailSerializer, ScheduleBRecordSerializer
class ComplianceReportTypeSerializer(serializers.ModelSerializer):
    """Read-only serializer for a compliance report's type."""
    class Meta:
        model = ComplianceReportType
        fields = ('the_type', 'description')
        read_only_fields = ('the_type', 'description')
class ComplianceReportStatusSerializer(serializers.ModelSerializer):
    """Read-only serializer exposing just the status string."""
    class Meta:
        model = ComplianceReportStatus
        fields = ('status',)
        read_only_fields = ('status',)
class ComplianceReportListSerializer(serializers.ModelSerializer):
    """Compact representation used for compliance report listings."""
    # Slug fields render related rows as their human-readable identifier.
    status = SlugRelatedField(slug_field='status', read_only=True)
    type = SlugRelatedField(slug_field='the_type', read_only=True)
    organization = OrganizationMinSerializer(read_only=True)
    compliance_period = CompliancePeriodSerializer(read_only=True)
    class Meta:
        model = ComplianceReport
        fields = ('id', 'status', 'type', 'organization', 'compliance_period',
                  'update_timestamp')
class ComplianceReportDetailSerializer(serializers.ModelSerializer):
    """Full representation of one compliance report: nested schedules plus a
    computed fuel-volume summary."""
    status = ComplianceReportStatusSerializer(read_only=True)
    type = ComplianceReportTypeSerializer(read_only=True)
    organization = OrganizationMinSerializer(read_only=True)
    compliance_period = CompliancePeriodSerializer(read_only=True)
    schedule_a = ScheduleADetailSerializer(read_only=True)
    schedule_b = ScheduleBDetailSerializer(read_only=True)
    schedule_c = ScheduleCDetailSerializer(read_only=True)
    summary = serializers.SerializerMethodField()
    def get_summary(self, obj):
        """Aggregate fuel quantities from the report's Schedule B/C records.

        Renewable volumes are scaled by the fuel code's renewable percentage
        when one is set; otherwise the full quantity (100%) counts.
        """
        total_petroleum_diesel = 0
        total_petroleum_gasoline = 0
        total_renewable_diesel = 0
        total_renewable_gasoline = 0
        schedule_b_records = ScheduleBRecord.objects.filter(
            schedule=obj.schedule_b
        )
        for record in schedule_b_records:
            percentage = 100
            if record.fuel_code is not None and \
                    record.fuel_code.renewable_percentage and \
                    record.fuel_code.renewable_percentage > 0:
                percentage = record.fuel_code.renewable_percentage
            # Bucket by fuel-type name into the four summary totals.
            if record.fuel_type.name in [
                    "Biodiesel", "HDRD", "Renewable diesel"]:
                total_renewable_diesel += record.quantity * percentage/100
            elif record.fuel_type.name in ["Ethanol", "Renewable gasoline"]:
                total_renewable_gasoline += record.quantity * percentage/100
            elif record.fuel_type.name == "Petroleum-based diesel":
                total_petroleum_diesel += record.quantity
            elif record.fuel_type.name == "Petroleum-based gasoline":
                total_petroleum_gasoline += record.quantity
        schedule_c_records = ScheduleCRecord.objects.filter(
            schedule=obj.schedule_c
        )
        for record in schedule_c_records:
            # Heating-oil diesel reported on Schedule C also counts as
            # petroleum diesel.
            if record.fuel_type.name == "Petroleum-based diesel" and \
                    record.expected_use.description == "Heating Oil":
                total_petroleum_diesel += record.quantity
        return {
            "total_petroleum_diesel": total_petroleum_diesel,
            "total_petroleum_gasoline": total_petroleum_gasoline,
            "total_renewable_diesel": total_renewable_diesel,
            "total_renewable_gasoline": total_renewable_gasoline
        }
    class Meta:
        model = ComplianceReport
        fields = ['id', 'status', 'type', 'organization', 'compliance_period',
                  'schedule_a', 'schedule_b', 'schedule_c', 'summary']
class ComplianceReportCreateSerializer(serializers.ModelSerializer):
    """Creation serializer for compliance reports (Draft status only),
    including optional nested schedules A/B/C."""
    status = SlugRelatedField(
        slug_field='status',
        queryset=ComplianceReportStatus.objects.filter(status__in=['Draft'])
    )
    type = SlugRelatedField(
        slug_field='the_type', queryset=ComplianceReportType.objects.all()
    )
    compliance_period = SlugRelatedField(
        slug_field='description',
        queryset=CompliancePeriod.objects.all()
    )
    organization = OrganizationMinSerializer(read_only=True)
    schedule_c = ScheduleCDetailSerializer(allow_null=True, required=False)
    schedule_b = ScheduleBDetailSerializer(allow_null=True, required=False)
    schedule_a = ScheduleADetailSerializer(allow_null=True, required=False)

    def _attach_schedule(self, instance, schedule_data, schedule_model,
                         record_model, attr_name):
        """Create one schedule plus its records and attach it to the report.

        No-op when schedule_data is falsy or has no 'records' key, matching
        the original per-schedule branches. Factored out because the same
        logic was copy-pasted three times (schedules C, B, A).
        """
        if not (schedule_data and 'records' in schedule_data):
            return
        records_data = schedule_data.pop('records')
        schedule = schedule_model.objects.create(
            **schedule_data, compliance_report=instance
        )
        setattr(instance, attr_name, schedule)
        for record_data in records_data:
            record = record_model.objects.create(
                **record_data, schedule=schedule
            )
            schedule.records.add(record)
        schedule.save()

    def create(self, validated_data):
        """Create the report row first, then any nested schedules."""
        schedule_c_data = validated_data.pop('schedule_c', None)
        schedule_b_data = validated_data.pop('schedule_b', None)
        schedule_a_data = validated_data.pop('schedule_a', None)
        instance = ComplianceReport.objects.create(**validated_data)
        self._attach_schedule(instance, schedule_c_data, ScheduleC,
                              ScheduleCRecord, 'schedule_c')
        self._attach_schedule(instance, schedule_b_data, ScheduleB,
                              ScheduleBRecord, 'schedule_b')
        self._attach_schedule(instance, schedule_a_data, ScheduleA,
                              ScheduleARecord, 'schedule_a')
        instance.save()
        return instance

    class Meta:
        model = ComplianceReport
        fields = ('status', 'type', 'compliance_period', 'organization',
                  'schedule_a', 'schedule_b', 'schedule_c')
class ComplianceReportUpdateSerializer(serializers.ModelSerializer):
    """Update serializer for compliance reports: schedules are replaced
    wholesale; every other field is read-only (status may only be Draft)."""
    status = SlugRelatedField(
        slug_field='status',
        queryset=ComplianceReportStatus.objects.filter(status__in=['Draft'])
    )
    type = SlugRelatedField(slug_field='the_type', read_only=True)
    compliance_period = SlugRelatedField(slug_field='description', read_only=True)
    organization = OrganizationMinSerializer(read_only=True)
    schedule_a = ScheduleADetailSerializer(allow_null=True, required=False)
    schedule_b = ScheduleBDetailSerializer(allow_null=True, required=False)
    schedule_c = ScheduleCDetailSerializer(allow_null=True, required=False)

    def _replace_schedule(self, instance, schedule_data, schedule_model,
                          record_model, attr_name):
        """Delete the existing schedule (and its records), then rebuild it
        from schedule_data when a 'records' key is present.

        Factored out because the same delete-then-recreate logic was
        copy-pasted three times (schedules C, B, A).
        """
        existing = getattr(instance, attr_name)
        if existing:
            record_model.objects.filter(schedule=existing).delete()
            existing.delete()
        if 'records' not in schedule_data:
            return
        records_data = schedule_data.pop('records')
        schedule = schedule_model.objects.create(
            **schedule_data, compliance_report=instance
        )
        setattr(instance, attr_name, schedule)
        for record_data in records_data:
            record = record_model.objects.create(
                **record_data, schedule=schedule
            )
            schedule.records.add(record)
        schedule.save()

    def update(self, instance, validated_data):
        """Replace whichever schedules were supplied; save after each, as
        the original did, so partial updates persist identically."""
        for attr_name, schedule_model, record_model in (
                ('schedule_c', ScheduleC, ScheduleCRecord),
                ('schedule_b', ScheduleB, ScheduleBRecord),
                ('schedule_a', ScheduleA, ScheduleARecord)):
            if attr_name not in validated_data:
                continue
            schedule_data = validated_data.pop(attr_name)
            self._replace_schedule(instance, schedule_data, schedule_model,
                                   record_model, attr_name)
            instance.save()
        # all other fields are read-only
        return instance

    class Meta:
        model = ComplianceReport
        fields = ('status', 'type', 'compliance_period', 'organization',
                  'schedule_a', 'schedule_b', 'schedule_c')
class ComplianceReportDeleteSerializer(serializers.ModelSerializer):
    """
    Delete serializer for Compliance Reports
    """
    def destroy(self):
        """
        Soft-delete: mark the compliance report's status as Deleted.

        Raises serializers.ValidationError unless the report is still in
        Draft status.
        """
        compliance_report = self.instance
        # Compare the status value directly. The original tested membership
        # in ComplianceReportStatus.objects.filter(status__in=["Draft"]),
        # which fetched and iterated a queryset just to perform an equality
        # check.
        if compliance_report.status.status not in ["Draft"]:
            raise serializers.ValidationError({
                'readOnly': "Cannot delete a compliance report that's not a "
                            "draft."
            })
        compliance_report.status = ComplianceReportStatus.objects.get(
            status="Deleted"
        )
        compliance_report.save()
    class Meta:
        model = ComplianceReport
        fields = '__all__'
|
import tweepy
from tweepy import OAuthHandler
import os
import json

# SECURITY FIX: live API credentials were hard-coded in source control.
# Those values must be considered leaked and revoked; replacements are now
# supplied via environment variables (a missing variable fails fast with
# KeyError rather than silently authenticating with dead keys).
consumer_key = os.environ['TWITTER_CONSUMER_KEY']
consumer_secret = os.environ['TWITTER_CONSUMER_SECRET']
access_token = os.environ['TWITTER_ACCESS_TOKEN']
access_secret = os.environ['TWITTER_ACCESS_SECRET']

auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
api = tweepy.API(auth)

def process_or_store(tweet):
    """Placeholder sink: dump the raw tweet JSON to stdout."""
    print(json.dumps(tweet))

# Walk the most recent 200 home-timeline statuses.
for status in tweepy.Cursor(api.home_timeline).items(200):
    # Process a single status
    #print(status.text)
    process_or_store(status._json)
#
#for friend in tweepy.Cursor(api.friends).items(1):
#    process_or_store(friend._json)
|
from collections import namedtuple
def namedtuple_and_choices_from_kwargs(name, **kwargs):
    """Build (identity_namedtuple, choices) from keyword arguments.

    The namedtuple's fields are the sorted kwarg names, each bound to its
    own name (an identity enum); choices pairs every original kwarg name
    with its supplied value, in call order.
    """
    field_names = sorted(kwargs)
    identity = namedtuple(name, field_names)(*field_names)
    choices = [(key, value) for key, value in kwargs.items()]
    return identity, choices
|
import requests
from bs4 import BeautifulSoup
import urllib.request  # fix: 'import urllib' alone does not expose urllib.request
import re
import sys

# All subsequent print() output lands in file.txt.
sys.stdout = open('file.txt', 'w')
text_file = open("source.txt", "w")

def get_data(item_url):
    """Fetch item_url and dump the raw response body into source.txt."""
    f = urllib.request.urlopen(item_url)
    text_file.write(str(f.read()))

get_data('http://www.ted.com/talks/susan_cain_the_power_of_introverts')
text_file.close()

with open("source.txt", "r") as myfile:
    data = myfile.readlines()
string = "".join(data)

# Fix: '.' before 'mp4' is now escaped so e.g. 'xmp4' cannot match.
pattern = r'high":"http://.+\.mp4\?'
result = re.search(pattern, string)
# Fix: guard against no match -- result.group() on None raised AttributeError.
if result:
    print(result.group())
else:
    print("no high-quality mp4 URL found in page source")
|
#-*-coding:utf-8 -*-
#@author:wendy
def talk_with_daddy(is_cheap3,buy_amount3):
    # Mom reports to Dad: if the vegetables were cheap she says how many
    # jin (catties) she bought; otherwise that they were too expensive and
    # she bought nothing.
    if is_cheap3:
        print'老妈对老爸说菜便宜买了,买了%d斤'%(buy_amount3)
    else:
        print'老妈对老爸说菜贵了没买'
def money_account(is_cheap4,buy_amount4):
    # Bookkeeping: record the purchase amount in the notebook, or record
    # nothing when no purchase was made.
    if is_cheap4:
        print'老妈记账在本子上,写下买了%d斤'%(buy_amount4)
    else:
        print'老妈没有记账因为没买东西'
def buybuybuy():
    # Mom goes shopping: she buys (more, the cheaper it is, capped at 4 jin)
    # when the price is at or below her acceptable price, and walks away
    # otherwise. Returns (is_cheap, buy_amount).
    who='wendy的老妈'
    good_price=5  # vendor's asking price per jin
    good_description="西双版纳大白菜"  # vendor's sign (Xishuangbanna cabbage)
    is_cheap=False  # whether she judges the price acceptable
    reasonable_price=5  # the highest price she will accept
    buy_amount=2  # plan: buy 2 jin
    print "%s上街看到了%s,卖%d元/斤"%(who,good_description,good_price)
    if good_price<=reasonable_price:
        print'她认为便宜'
        is_cheap=True
        # buy extra for every yuan under the acceptable price, capped at 4
        buy_amount=2+(reasonable_price-good_price)
        if buy_amount>4:
            buy_amount=4
        print"她买了%d斤"%(buy_amount)
    else:
        print'她认为贵了'
        is_cheap=False
        print'她并没有买,扬长而去'
    return is_cheap,buy_amount
if __name__ == '__main__':
    # Run the scenario end to end: shop, tell Dad, then record the purchase.
    is_cheap2,buy_amount2=buybuybuy()
    talk_with_daddy(is_cheap2,buy_amount2)
    money_account(is_cheap2,buy_amount2)
#!/usr/bin/env python
""" Profiling functions in Python using '%run -p' to compare function speed """
__author__ = 'Saul Moore sm5911@imperial.ac.uk'
__version__ = '0.0.1'
def a_useless_function(x):
    """ Exploring the speed of 'xrange' vs 'range' """
    # Busy-loop 10^8 times; 'x' is unused and the return value is always 0.
    # The function exists solely to dominate the profiler output.
    y = 0
    for i in xrange(100000000): # Eight zeros!
        y = y + 1
    return 0
def a_less_useless_function(x):
    """ Exploring the speed of 'xrange' vs 'range' """
    # Same busy-loop, but only 10^5 iterations -- a cheap comparison point
    # next to a_useless_function in the profile.
    y = 0
    for i in xrange(100000): # Five zeros!
        y = y + 1
    return 0
def some_function(x): # A function to call the other two functions and print value of x
    """ Function to print the value of 'x' when called and run the two functions: 'a_useless_function' and 'a_less_useless_function' """
    print x
    a_useless_function(x)
    a_less_useless_function(x)
    return 0
some_function(1000) # Execute function cascade
# Run with '%run -p profileme.py', which lists internal time calculations for time taken to perform each function and outputs each result:
# OUTPUT:
# 56 function calls in 4.717 seconds
# ...
# Now try using xrange instead of range:
# OUTPUT:
# 54 function calls in 2.907 seconds
# ...
# When iterating over a large number of values, xrange, unlike range, does not create all the values before iteration, but creates them "on demand" eg. range(1000000) yields a 4Mb+ list.
# So we saved 1.81 secs!
|
from fabric.context_managers import cd, prefix
from fabric.operations import sudo, run
from fabric.state import env
# Deployment settings -- intentionally left blank in source control;
# fill in locally before running.
PROJECT_ROOT = ''
VENV_DIR = ''
UWSGI_APP_NAME = ''
def update():
    """Deploy the latest master: git pull, install prod requirements,
    collectstatic, migrate, then SIGHUP the uwsgi master to reload the app.

    NOTE(review): host/user/password are set inline and empty -- presumably
    filled in locally; confirm populated values are never committed.
    """
    env.host_string = ''
    env.user = ''
    env.password = ''
    with cd(PROJECT_ROOT):
        sudo('git pull origin master')
        with prefix('source ' + VENV_DIR + '/bin/activate'):
            run('pip install -r requirements/prod.txt')
            run('./manage.py collectstatic --noinput')
            run('./manage.py migrate')
    # kill -1 == SIGHUP: tells the uwsgi master to gracefully reload workers
    sudo('kill -1 `cat /var/run/uwsgi/app/%s/pid`' % UWSGI_APP_NAME)
import json
import os
import fnmatch
from parser import *
from preprocess_R import *
# debugging flags (1 = enabled): verbosity toggles passed into parser(),
# plus 'write' (dump graphs to disk) and 'check' (compare against saved
# .probgraph files).
line_print = 1
graph_print = 0
for_print = 0
if_print = 0
bracket_print = 0
write = 1
check = 0
# has the same name as functions and data types
invalid_models_type1 = []
invalid_graphs_type1 = []
# has op sign in the var name
invalid_models_type2 = []
invalid_graphs_type2 = []
# unconnected variables
invalid_models_type3 = []
invalid_graphs_type3 = []
# invalid data type
invalid_models_type4 = []
invalid_graphs_type4 = []
root = '../code'
files = []
# Collect every *_Stan.R model file under ../code.
# NOTE(review): the loop variable rebinds 'root' on each iteration --
# harmless here because os.walk was already called, but fragile.
for root, dirnames, filenames in os.walk(root):
    for filename in fnmatch.filter(filenames, '*_Stan.R'):
        files.append(os.path.join(root, filename))
#for f in files:
#    print f
#print len(files)
#files = ['/Users/emma/Projects/Bayesian/profiling/stan_BCM/code/ParameterEstimation/LatentMixtures/Cheating_Stan.R']
output = '../outputs/probgraph'
if check == 1:
    check_results = []
    not_passed = 0
# Parse every Stan model, build its probabilistic graph, optionally write it
# to disk (write == 1) and/or compare it to a previously saved .probgraph
# file (check == 1).
for modelfile in files:
    print modelfile
    model = open(modelfile,'r')
    if check == 1:
        # skip models that have no saved graph to compare against
        if not os.path.isfile(modelfile.replace('.stan', '.probgraph')):
            continue
        with open(modelfile.replace('.stan', '.probgraph')) as fin:
            [chk_graph, chk_attr, chk_var_type] = json.load(fin)
    lines = preprocess(model)
    model.close()
    #for line in lines:
    #    print line
    graph, attr, var_type, invalid= parser(lines, line_print, graph_print, for_print, if_print, bracket_print)
    # Bucket invalid models by failure category (see the lists defined above).
    if invalid == 1:
        invalid_models_type1.append(modelfile)
        invalid_graphs_type1.append(os.path.join(output, modelfile.split('/')[-1].replace('.R', '.probgraph')))
    if invalid == 2:
        invalid_models_type2.append(modelfile)
        invalid_graphs_type2.append(os.path.join(output, modelfile.split('/')[-1].replace('.R', '.probgraph')))
    if invalid == 3:
        invalid_models_type3.append(modelfile)
        invalid_graphs_type3.append(os.path.join(output, modelfile.split('/')[-1].replace('.R', '.probgraph')))
    if invalid == 4:
        invalid_models_type4.append(modelfile)
        invalid_graphs_type4.append(os.path.join(output, modelfile.split('/')[-1].replace('.R', '.probgraph')))
    print '\nGRAPH:'
    # JSON cannot encode sets/tuples-of-sets: convert each parent set to a list.
    for k,v in graph.iteritems():
        parents = []
        for p, dep in v:
            parents.append((list(p), dep))
        graph[k] = parents
        print k, attr[k], var_type[k], graph[k]
    print len(graph), len(attr), len(var_type)
    if write == 1:
        with open(os.path.join(output, modelfile.split('/')[-1].replace('.R', '.probgraph')), 'w') as fout:
            json.dump([graph, attr, var_type], fout)
    if check == 1:
        flag = 1
        if len(graph) <> len(chk_graph):
            flag = 0
            print 'Graph length is wrong: ', len(graph), len(chk_graph)
        for k,v in graph.iteritems():
            if len(graph[k]) <> len(chk_graph[k]):
                print "WRONG:\t", k
                print "GIVEN:\t", graph[k]
                print "CORRECT:\t", chk_graph[k]
                flag = 0
                continue
            for p,d in v:
                f = 0
                # NOTE(review): this inner loop searches graph[k] (i.e. v
                # itself) for the edge, so it always succeeds; presumably
                # chk_graph[k] was intended -- confirm before trusting
                # check mode.
                for a,b in graph[k]:
                    if a==p and d==b:
                        f = 1
                if f == 0:
                    print "WRONG:\t", k
                    print "GIVEN:\t", graph[k]
                    print "CORRECT:\t", chk_graph[k]
                    flag = 0
                    break
        if flag == 0:
            check_results.append((modelfile, 'NO'))
            print 'Does not pass correction check.'
            not_passed += 1
        else:
            check_results.append((modelfile, 'YES'))
            print 'Correction Check Passed.'
if check == 1:
    print '\n\nCheck results:'
    for i in check_results:
        print i[0],'\t', i[1]
    print 'Number of not passed file: ', not_passed, 'out of', len(check_results), 'files, ', not_passed*1.0/len(check_results)
print 'Total file:', len(files)
print '\nInvalid files:', len(invalid_models_type1) + len(invalid_models_type3) + len(invalid_models_type3)
print '1. Invalid file, variables have the same name as data types or functions:'
print invalid_models_type1
print invalid_graphs_type1
print '2. Invalid file, variables have op signs'
print invalid_models_type2
print invalid_graphs_type2
print '3. Invalid file, unconnected variables'
print invalid_models_type3
print invalid_graphs_type3
print '4. Invalid file, invalid data type'
print invalid_models_type4
print invalid_graphs_type4
|
import math
class ListNode:
    """A singly-linked-list node holding `val` and a `next` pointer."""
    def __init__(self, val):
        self.next = None
        self.val = val


class LinkedListOperations:
    """Classic linked-list routines (Leetcode practice problems)."""

    def createLinkedList(self, arr):
        """
        Create a Linked List with the values of `arr`, in order.

        Returns the head node, or None for an empty array.
        (Fix: this was an unimplemented stub that always returned None.)
        """
        dummy = ListNode(0)
        tail = dummy
        for val in arr:
            tail.next = ListNode(val)
            tail = tail.next
        return dummy.next

    def mergeTwoLists(self, l1, l2):
        """
        Leetcode 21. Merge Two Sorted Lists

        Splices the two already-sorted lists together node-by-node; stable
        (ties keep l1 nodes first). Returns the merged head.
        """
        dummy_head = ListNode(1)
        cur_node = dummy_head
        while l1 and l2:
            if l1.val <= l2.val:
                cur_node.next = l1
                l1 = l1.next
            else:
                cur_node.next = l2
                l2 = l2.next
            cur_node = cur_node.next
        # At most one list is non-empty now; append the remainder.
        if l1:
            cur_node.next = l1
        if l2:
            cur_node.next = l2
        return dummy_head.next

    def insertionSortList(self, head):
        """
        Leetcode 147. Insertion Sort List

        In-place insertion sort. The math.inf dummy head means prev_node
        never needs a head special-case. Returns the sorted head.
        """
        prev_node = dummy_node = ListNode(math.inf)
        dummy_node.next = head
        cur_node = head
        while cur_node and cur_node.next:
            # find the node that needs to be sorted
            sorting_val = cur_node.next.val
            if cur_node.val <= sorting_val:
                cur_node = cur_node.next
                continue
            # once found, locate the insertion point; only restart from the
            # dummy when the cached prev_node is already past the target
            if prev_node.val > sorting_val:
                prev_node = dummy_node
            while prev_node.next.val <= sorting_val:
                prev_node = prev_node.next
            tmp_node = cur_node.next
            cur_node.next = tmp_node.next
            tmp_node.next = prev_node.next
            prev_node.next = tmp_node
        return dummy_node.next

    def deleteNode(self, node):
        """
        Leetcode 237. Delete Node in a Linked List
        :type node: ListNode
        :rtype: void Do not return anything, modify node in-place instead.

        Works without access to the predecessor by shifting every value one
        node left and dropping the tail.
        """
        if not node:
            return
        node.val = node.next.val
        while node.next.next:
            node = node.next
            node.val = node.next.val
        node.next = None
        return

    def reverseList(self, head):
        """
        Leetcode 206. Reverse Linked List

        Iterative pointer reversal; returns the new head.
        """
        if not head:
            return head
        prev_node = head
        cur_node = head.next
        head.next = None
        return self.reverseListHelper(prev_node, cur_node)

    def reverseListHelper(self, prev_node, cur_node):
        """Reverse the chain starting at cur_node onto prev_node; return the new head."""
        while cur_node:
            next_node = cur_node.next
            cur_node.next = prev_node
            prev_node = cur_node
            cur_node = next_node
        return prev_node
|
from django.contrib import admin
from .models import Product,Item,Promotions,TimeDeal,ItemComment
# Register your models here.
# admin.site.register accepts a tuple, registering all five models with the
# default ModelAdmin in a single call.
admin.site.register((Product,Item,Promotions,TimeDeal,ItemComment))
# -*- coding:UTF-8 -*-
from rest_framework.routers import DefaultRouter
from . import views
# DefaultRouter generates the standard list/detail routes for the viewset.
router=DefaultRouter()
app_name='delivery'
# NOTE(review): 'base_name' was renamed 'basename' in DRF 3.9+; confirm the
# pinned DRF version still accepts the old keyword.
router.register('orderCallback',views.OrderCallbackViewSets,base_name='orderCallback')
urlpatterns=router.urls
# Script that takes code.bin as an input and
# decodes the emoji instruction in it to
# assembly like instructions and writes them
# to a output file
import sys
# Maps each emoji's raw UTF-8 byte sequence to its VM mnemonic.
# NOTE(review): the name carries a typo ("Insruction"); kept as-is because
# every consumer in this module spells it the same way.
emojiToInsruction = {
    b'\xf0\x9f\x92\xaa': "PUSH", #arm
    b'\xF0\x9F\x93\x96': "READ", #book
    b'\xe2\x9c\x8f\xef\xb8\x8f': "WRITE", #pen
    b'\xf0\x9f\xa6\xbe': "LD08", #mech arm
    b'\xf0\x9f\x94\x80': "XOR", #swich arrow
    b'\xe2\x9c\x85': "OR", #tick
    b'\xf0\x9f\xa4\x94': "JMP", #thinking face
    b'\xf0\x9f\x92\x80': "EXIT", #skull
    b'\xf0\x9f\x8c\xa0': "LD32", #falling star
    b'\xe2\x80\xbc\xef\xb8\x8f': "COPY", #douple exclamation
    b'\xe2\x9e\x95': "AND 0x1", #cross
    b'\xe2\x9e\xa1\xef\xb8\x8f': "SHR" #arrow
}
def HasParameter(symbol):
    """Return True if the opcode for `symbol` takes an inline numeric operand.

    PUSH, JMP, SHR and the LDxx loads are followed by a number-emoji
    sequence in the bytecode stream.
    """
    code = emojiToInsruction[symbol]
    # Membership/startswith replaces the original chain of bitwise '|' on
    # booleans, which only worked because every comparison was parenthesized
    # ('|' binds tighter than '==').
    return code in ("PUSH", "JMP", "SHR") or code.startswith("LD")
def UTF8decodeInt(byte):
    """Return the length (1-4 bytes) of the UTF-8 sequence starting with `byte`.

    Determined from the lead byte's high bits. Continuation bytes
    (0x80-0xBF) cannot start a sequence: an error is printed and None is
    returned implicitly (callers index with the result, so they will fail
    loudly -- TODO: consider raising instead).
    Fix: corrected the error-message typo ("can find" -> "can't find").
    """
    if (byte & 0xf0) == 0xf0:
        return 4
    elif (byte & 0xe0) == 0xe0:
        return 3
    elif (byte & 0xc0) == 0xc0:
        return 2
    elif (byte & 0x80) == 0:
        return 1
    else:
        print("ERROR can't find decode int for {}".format(byte))
def NumberEmojisToInt(bytes):
    """Parse six keycap-digit emojis starting at bytes[0].

    Each digit is an ASCII digit followed by two UTF-8 code points (the
    keycap combining sequence), so the loop advances past three code points
    per digit. Returns (offset, result): bytes consumed and the decoded
    value, folding the digits pairwise as d1**d0 + d3**d2 + d5**d4.

    NOTE(review): '**' looks deliberate for this puzzle VM's number
    encoding, but confirm against the encoder -- a decimal build-up would
    normally multiply by 10. Also `bytes` shadows the builtin.
    """
    integers = []
    offset = 0
    for i in range(6):
        integers.append(int(chr(bytes[offset])))
        offset += 1
        offset += UTF8decodeInt(bytes[offset])
        offset += UTF8decodeInt(bytes[offset])
    result = 0
    for i in range(3):
        result += integers[i*2+1] ** integers[i*2]
    return offset, result
def GetNextEmoji(offset, code):
    """Return (newOffset, symbol): the UTF-8 sequence starting at `offset`
    in `code` and the offset just past it.
    """
    char_len = UTF8decodeInt(code[offset])  # fix: was 'len', shadowing the builtin
    newOffset = char_len + offset
    symbol = code[offset:newOffset]
    return newOffset, symbol
def Decode(code, file):
    """Disassemble the emoji bytecode in `code`, writing one line per
    instruction ("<offset> - <MNEMONIC>[\t<operand>]") to `file`.

    NOTE(review): relies on the module-global `seg1` (the first 0x200 bytes
    of the input binary) to resolve LD operand values -- confirm Decode is
    only called after seg1 is initialised.
    """
    offset = 0
    while offset < len(code):
        s = "{0:04X} - ".format(offset)
        offset, symbol = GetNextEmoji(offset, code)
        try:
            s += emojiToInsruction[symbol]
        except KeyError:
            #maybe it's 2 utf-8 chars
            offset, sym = GetNextEmoji(offset, code)
            symbol += sym
            try:
                s += emojiToInsruction[symbol]
            except KeyError: #cant be 3 so it's a error
                print("ERROR: {0} not found in instructions. offset {1}".format(symbol, offset))
                return
        #if it has number emojis turn them to int and append
        if HasParameter(symbol):
            # 'os' here is a local byte count, not the os module
            os, parameter = NumberEmojisToInt(code[offset:])
            offset += os
            s += "\t {0:04X}".format(parameter)
            op = emojiToInsruction[symbol]
            if op == "JMP":
                # jumps are relative to the offset after the operand
                s += "\t({0:04X})".format(parameter + offset)
            elif op[:2] == "LD":
                # loads read int(op[2:]) big-endian bytes out of segment 1
                lad = int.from_bytes(seg1[parameter:parameter + int(op[2:])], byteorder='big')
                s += "\t({0:04X})".format(lad)
        s += "\n"
        file.write(s)
        #print(hex(offset))
# Command line: EmojiDecoder.py <code.bin> <outputFile>
if len(sys.argv) < 3:
    print("USAGE: EmojiDecoder.py <code.bin> <outputFile>")
    exit()
try:
    code = open(sys.argv[1], "rb")
except FileNotFoundError:
    print("ERROR: file {0} not found".format(sys.argv[1]))
    # Bug fix: the original kept going after the error, crashing below with
    # a NameError on `code`.
    exit()
try:
    output = open(sys.argv[2], 'w')
except FileNotFoundError:
    print("ERROR: file {0} not found".format(sys.argv[2]))
    exit()  # same fix as above for `output`
# First 0x200 bytes are the data segment used to resolve LD operands.
seg1 = code.read(0x200)
code.seek(0x200)
buf = code.read()
print(buf[:100])
code.close()
Decode(buf, output)
output.close()
|
import part1
import part2
from flask import Flask
from flask import request
from flask import render_template
import os
from werkzeug import secure_filename
# Flask app serving the TLB/paging simulator; uploaded traces are stored locally.
app = Flask(__name__, template_folder='templates')
Upload_Folder = './Upload_Folder'
app.config['Upload_Folder'] = Upload_Folder
@app.route('/', methods=['GET'])
def index():
    """Serve the client upload page (templates/client.html)."""
    return render_template('client.html')
@app.route('/upload', methods=['POST'])
def upload():
    """Accept an uploaded address trace, run the TLB/page-table simulation and
    render the generated HTML report.

    NOTE(review): the simulation always reads './Upload_Folder/addresses.txt',
    not the file just saved under its own (sanitized) name -- confirm the
    client is expected to upload a file named addresses.txt.
    """
    f = request.files['file']
    filename = secure_filename(f.filename)
    f.save(os.path.join(app.config['Upload_Folder'], filename))
    outputFile = open('./templates/output.html', 'w')
    outputFile.write("<html><body bgcolor=aqua font-color=white>")
    with open('./Upload_Folder/addresses.txt', 'r') as addressFile:
        physicalMemory = {}
        tlb = []
        pageTable = []
        pageFaultCounter = 0
        tlbHitCounter = 0
        addressReadCounter = 0
        for line in addressFile:
            tlbHit = 0
            pageTableTrue = 0
            logicalAddress = int(line)
            # 16-bit logical address: low 8 bits offset, high 8 bits page number.
            offset = logicalAddress & 255
            pageOriginal = logicalAddress & 65280
            pageNumber = pageOriginal >> 8
            # print("Logical address is: " + str(logicalAddress) + "\nPageNumber is: " + str(pageNumber) + "\nOffset: " + str(offset))
            addressReadCounter += 1
            # Try the TLB first; on a miss fall back to the page table.
            tlbHit = part1.checkTLB(pageNumber, physicalMemory, offset, logicalAddress, tlb, addressReadCounter, outputFile)
            if tlbHit == 1:
                tlbHitCounter += 1
            if tlbHit != 1:
                pageTableTrue = part1.checkPageTable(pageNumber, logicalAddress, offset, addressReadCounter, pageTable, physicalMemory, outputFile)
            if pageTableTrue != 1 and tlbHit != 1:
                # Miss in both structures: service the page fault and retry.
                stro='this is a page fault'
                print(stro)
                part2.pageFaultHandler(pageNumber, tlb, pageTable, physicalMemory)
                pageFaultCounter += 1
                part1.checkTLB(pageNumber, physicalMemory, offset, logicalAddress, tlb, addressReadCounter, outputFile)
        pageFaultRate = pageFaultCounter / addressReadCounter
        tlbHitRate = tlbHitCounter / addressReadCounter
        outStr = 'Number of translated address: ' + str(addressReadCounter) + '\n' + 'Number of page fault: ' + str(pageFaultCounter) + '\n' + 'Page fault rate: ' + str(pageFaultRate) + '\n' + 'Number of TLB hits: ' + str(tlbHitCounter) + '\n' + 'TLB hit rate: ' + str(tlbHitRate) + '<BR>'
        print(outStr)
        outputFile.write(outStr)
        # NOTE(review): closing tags are written in the wrong order -- should
        # be "</body></html>"; browsers tolerate it, but worth fixing.
        outputFile.write("</html></body>")
        outputFile.close()
        addressFile.close()
    return render_template('output.html')
if __name__ == "__main__":
    # Listen on all interfaces; debug mode is intended for development only.
    app.run(debug=True, port=8080, host='0.0.0.0')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Script that initializes 'cadaster' index in ElasticSearch so that
is also well supported by Kibana Visualization """
from src.utils.elasticsearch_utils import ElasticSearchUtils
if __name__ == "__main__":
    # Recreate the 'cadaster' index from scratch: drop it, then create it.
    ElasticSearchUtils.remove_index()
    ElasticSearchUtils.create_index()
|
import pandas as pd
# One student's scores keyed by (Korean) subject names.
student1 = pd.Series({'국어':100,"영어":80,'수학':90})
print(student1)
print()
print("# 학생의 과목별 점수를 200으로 나누기")
# Element-wise scalar division: every subject score divided by 200.
percentage = student1/200
print(percentage)
print(type(percentage))
student2 = pd.Series({'수학':80,'국어':90,'영어':80})
print('===================================')
print(student1)
print()
print(student2)
print()
print("# 두 학생의 과목별 점수로 사칙연산 수행")
# Series arithmetic aligns on index labels (subject names), not position.
addition = student1 + student2
subtraction = student1 - student2
multipication = student1 * student2
division = student1 / student2
print(type(division))
print("# 사칙연산 결과를 데이터 프레임으로 합치기 (시리즈 -> 데이터프레임)")
# Stack the four result Series as rows of a DataFrame, one row per operation.
result = pd.DataFrame([addition,subtraction,multipication,division], index=['덧셈','뺄셈','곱셈','나눗셈'])
print(result)
# -*- coding: utf-8 -*-
"""
Geometry of an artery bifurcation
Olga Mula 2019
Modified by Changqing Fu
"""
from dolfin import * # FEM solver
# from mshr import * # mesh
import numpy as np
class Artery():
    """Geometry of an artery bifurcation: a straight trunk that splits into a
    healthy vessel (upper branch) and a stenotic vessel (lower branch).

    NOTE(review): __domain() and mesh() use Polygon / generate_mesh, which come
    from mshr whose import is commented out at the top of this file -- confirm
    mshr is imported wherever this class is actually used.
    """
    def __init__(self, diam_steno_vessel=0.1, diam_narrow=0.04, theta_steno=np.pi/6, diam_healthy_vessel=0.1, theta_healthy=np.pi/6,length0 = .5,length = .3):
        # Geometric parameters: diameters/lengths in consistent units,
        # branch angles in radians.
        self.diam_steno_vessel = diam_steno_vessel
        self.diam_narrow = diam_narrow
        self.theta_steno = theta_steno
        self.diam_healthy_vessel = diam_healthy_vessel
        self.theta_healthy = theta_healthy
        self.length0 = length0
        self.length = length
        # Trunk diameter chosen so both rotated branches meet it flush.
        self.diam_trunk = diam_healthy_vessel * np.cos(theta_healthy) + diam_steno_vessel * np.cos(theta_steno)

    def __vessel_healthy(self):
        """
        Points for the
        Healthy vessel in the upper part of the bifurcation
        """
        D = self.diam_healthy_vessel # Diameter vessel
        n = 20 # Number of points to build domain (impact on mesh size)
        length = self.length
        # Bottom
        xref = np.linspace(-length, length, num=n)
        yref = np.zeros(n)
        points_bottom = [Point(x, y) for (x,y) in zip(xref,yref)]
        # Top (traversed right-to-left so the boundary stays a single loop)
        xref = np.linspace(length, -length, num=n)
        yref = D*np.ones(n)
        points_top = [Point(x, y) for (x,y) in zip(xref,yref)]
        vertices = points_bottom + points_top
        # Translate to origin
        vertices = [ Point(p[0]+length,p[1]) for p in vertices ]
        # Rotate
        theta = self.theta_healthy
        vertices = [ Point(np.cos(theta)*p[0]-np.sin(theta)*p[1],np.sin(theta)*p[0]+np.cos(theta)*p[1]) for p in vertices ]
        return vertices

    def __vessel_stenosis(self):
        """
        Points for the
        Stenotic vessel in the lower part of the bifurcation
        """
        D = self.diam_steno_vessel # Diameter vessel
        diam_narrow = self.diam_narrow # Narrowing in stenosis (diam_narrow < D/2)
        L = 2*D # Length of stenosis
        x0 = 0. # location of the center of the stenosis
        length = self.length
        def S(x,L):
            """
            Section of the stenosis following the paper
            "Direct numerical simulation of stenotic flows,
            Part 1: Steady flow" J. Fluid Mech.
            """
            # return D/2 * (1-diam_narrow*(1+np.cos(2*np.pi*(x-x0)/L)))
            return D/2 -diam_narrow/2*(1+np.cos(2*np.pi*(x-x0)/L))
        n = 30 # Number of points to build domain (impact on mesh size)
        # Bottom: cosine-shaped narrowing inside [-L/2, L/2], straight outside.
        xref = np.linspace(-length, length, num=n)
        yref = [ -S(x,L) if -L/2<= x and x <= L/2 else -D/2 for x in xref]
        points_bottom = [Point(x, y) for (x,y) in zip(xref,yref)]
        # Top
        xref = np.linspace(length, -length, num=n)
        yref = [ S(x,L) if -L/2<= x and x <= L/2 else D/2 for x in xref]
        points_top = [Point(x, y) for (x,y) in zip(xref,yref)]
        vertices = points_bottom + points_top
        # Translate to origin
        vertices = [ Point(p[0]+length,p[1]-D/2) for p in vertices ]
        # Rotate (negative angle: this branch points downward)
        theta = -self.theta_steno
        vertices = [ Point(np.cos(theta)*p[0]-np.sin(theta)*p[1],np.sin(theta)*p[0]+np.cos(theta)*p[1]) for p in vertices ]
        return vertices

    def __domain(self):
        """
        Construction of the bifurcation geometry as a Polygon object
        """
        vertices_stenosis = self.__vessel_stenosis()
        vertices_healthy = self.__vessel_healthy()
        length0 = self.length0
        n=10
        # Trunk: straight segments joining the inlet to both branches.
        xl = vertices_stenosis[0][0]
        yl = vertices_stenosis[0][1]
        xref = np.linspace(-length0, xl, num=n, endpoint=False)
        vertices_bottom_left = [ Point(x,yl) for x in xref ]
        xr = vertices_healthy[-1][0]
        yr = vertices_healthy[-1][1]
        xref = np.linspace(xr, -length0, num=n)
        vertices_top_left = [ Point(x,yr) for x in xref ]
        # Single closed boundary loop of the whole bifurcation.
        v = vertices_bottom_left + vertices_stenosis + vertices_healthy[1:] + vertices_top_left
        return Polygon(v)

    def mesh(self,mesh_precision = 40):
        """
        Create mesh of the geometry
        """
        # mesh_precision = 20
        return generate_mesh(self.__domain(), mesh_precision)
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    # diam_steno_vessel = 0.6
    # diam_narrow = 0.4
    # theta_steno = - np.pi /6
    # diam_healthy_vessel = 0.6
    # theta_healthy = + np.pi /6
    mesh_precision = 40
    # Build the default bifurcation geometry, mesh it and save a picture.
    # artery = Artery(diam_steno_vessel, diam_narrow, theta_steno, diam_healthy_vessel, theta_healthy)
    mesh = Artery().mesh(mesh_precision)
    plot(mesh, title='stenosis')
    plt.savefig('mesh.pdf')
#Objective: Use sckitit learn module (and its least squares model, LInearRegression) to solve simple linear regression coefficients
#The data set is the same data set used for the normal equation / matrix multiplication in a different code set
#import modules
import pandas as pd
from sklearn.linear_model import LinearRegression
#Extract regression data and place into dataframe
#create empty data frame object
#create empty data frame object
df = pd.DataFrame()
#add two columns to data frame (note there are multiple ways to accomplish this)
#assume all values are training data. For simplicity, no application of splitting data into test and train sub-sets
df["X"] = 4,5,7,9,10,12,6,9,11,13,3,5,6,7,10,14,4,5,8,8,12,13
df["y"] = 11.5,14.3,17.4,21.6,21.8,26.9,15.3,22.3,27.8,29.8,9.5,12.3,15.1,17.6,24.8,34.3,11.3,13.4,18.0,19.4,27.5,30.8
#create and fit a linear model object
#create "empty" object
lm_example = LinearRegression()
#"fit" linear model to data; reshape(-1,1) makes each a single-feature column
lm_example.fit(df["X"].values.reshape(-1,1),df["y"].values.reshape(-1,1))
#summarize the linear model coefficients
print() #using print empty line instead of \n
print(f"A linear regression model has been fitted for provided X and y values \nIts intercept (x0) is {lm_example.intercept_} and its only coefficient (x1) is {lm_example.coef_}")
#summarize the coefficients as a dataframe table
coef_df = pd.DataFrame()
# NOTE(review): intercept_ is a 1-d array and coef_ is 2-d for this single
# target/feature fit; assigning them as columns relies on pandas accepting
# those shapes -- confirm on the installed pandas version.
coef_df["intercept (xo)"] = lm_example.intercept_
coef_df["x1"] = lm_example.coef_
print("\nBesides printing, these values can be stored in a DataFrame table, lists, etc. See DataFrame table below:") #using \n to print empty line
print(coef_df)
#use linear model object to predict a value for a given set of x1 values
#create a list of x1 values
x1_forPrediction = [3,5,7.5]
#iterate through each value in prediction list
print()
print("Using these coefficients, predictions can be made. For example,")
for i in range(len(x1_forPrediction)):
    temp = lm_example.predict([[x1_forPrediction[i]]])
    print(f"For x1 value = {x1_forPrediction[i]}, the predicted y = {temp}")
|
import sys
input = sys.stdin.readline
from collections import deque
from copy import deepcopy
def main():
    """Longest-path variant of Bellman-Ford on a directed graph.

    Reads N vertices, M edges and a toll P from stdin; edge profits are
    c - P.  Prints the best achievable score at vertex N (clamped at 0),
    or -1 if a positive cycle on a path from 1 to N makes it unbounded.
    """
    N, M, P = map(int, input().split())
    E = []
    rE = [[] for _ in range(N)]
    for _ in range(M):
        a, b, c = map(int, input().split())
        a, b = a-1, b-1
        E.append((a, b, c-P))
        rE[b].append(a)
    # Vertices that can reach vertex N (BFS on the reversed graph).
    G = [False]*N
    G[N-1] = True
    q = deque([N-1])
    while q:
        v = q.popleft()
        for w in rE[v]:
            if not G[w]:  # was `G[w] == False`
                G[w] = True
                q.append(w)
    # Bellman-Ford maximizing distance; only relax edges into vertices
    # that can still reach N.
    V = [None]*N
    V[0] = 0
    I = sum(1 for i in range(N) if G[i])
    for _ in range(I-1):
        for s, t, cost in E:
            if V[s] is None or not G[t]:  # was `== None` / `== False`
                continue
            if V[t] is None:
                V[t] = V[s] + cost
                continue
            if V[s] + cost > V[t]:
                V[t] = V[s] + cost
    # Snapshot, then relax another full round: any further improvement
    # proves a positive cycle that propagates to N.
    W = V.copy()  # shallow copy suffices for a list of ints/None
    for _ in range(I-1):
        for s, t, cost in E:
            if V[s] is None or not G[t]:
                continue
            if V[t] is None:
                V[t] = V[s] + cost
                continue
            if V[s] + cost > V[t]:
                V[t] = V[s] + cost
    for i in range(N):
        if W[i] is None or not G[i]:
            continue
        if V[i] > W[i]:
            # Vertex i reaches N, so the positive cycle makes the answer unbounded.
            print(-1)
            return
    print(max(W[-1], 0))
if __name__ == '__main__':
    # Read the problem from stdin and print the answer.
    main()
|
import sys, os, cPickle, re, json, dircache, datetime, itertools, gzip
from multiprocessing import Pool
class AutoDict(dict):
    """A dict that transparently creates nested AutoDicts for missing keys,
    allowing arbitrary-depth assignment like d['a']['b']['c'] = 1."""

    def __getitem__(self, key):
        try:
            return dict.__getitem__(self, key)
        except KeyError:
            # Missing key: materialize a child of the same type, store it,
            # and hand it back so chained lookups keep working.
            child = type(self)()
            self[key] = child
            return child
class Logline(object):
    """A single parsed log line: device hash, timestamp, tag and JSON payload.

    Constructed from a regex match of LOGLINE_PATTERN_STRING (compiled with
    re.VERBOSE after substituting the tag alternation into %s).
    """
    LOGLINE_PATTERN_STRING = r"""^
    (?P<hashed_ID>\w{40})\s+
    (?P<datetime>\d+-\d+-\d+\s+\d+:\d+:\d+\.\d+)\s+
    (?P<process_id>\d+)\s+(?P<thread_id>\d+)\s+(?P<log_level>\w)\s+
    (?P<log_tag>%s):\s+(?P<json>.*)$"""

    def __init__(self, match, line):
        self.device = match.group('hashed_ID')
        self.datetime = datetime.datetime.strptime(match.group('datetime'), '%Y-%m-%d %H:%M:%S.%f')
        self.log_tag = match.group('log_tag').strip()
        self.line = line.strip()
        self.log_message = match.group('json').strip()
        try:
            self.json = json.loads(match.group('json').strip())
        # Bug fix: the bare `except:` also swallowed KeyboardInterrupt and
        # programming errors; json.loads signals bad input via ValueError
        # (JSONDecodeError is a subclass).
        except ValueError:
            self.json = None
        # Filled in later by the labeling pass.
        self.label = None

    def __str__(self):
        return self.line
class LogFilter(object):
    """Base class for a multiprocess log filtering/processing pipeline (Python 2).

    Subclasses are expected to provide TAGS (class attribute with the log tags
    to match), label_line(logline) and process_line(logline).  Filtering
    extracts matching Logline objects from gzipped '<n>.out.gz' logs into
    per-file '<n>.dat' pickles; processing then iterates those pickles.
    """
    DEFAULT_FILTER_PROCESSES = 4

    @classmethod
    def load(cls, **kwargs):
        # Load a previously processed pickle if present, otherwise build a
        # fresh instance; then make sure it is filtered + processed and stored.
        # NOTE(review): p.process() is not defined in this class (only
        # process_loop is) -- presumably supplied by subclasses; confirm.
        if os.path.exists(cls.get_pickle_path()):
            p = cPickle.load(open(cls.get_pickle_path(), 'rb'))
        else:
            p = cls(**kwargs)
        if not p.filtered:
            p.filter()
        if not p.processed:
            p.process()
            p.store()
        return p

    def store(self):
        # Persist the whole object so later runs can skip filtering/processing.
        cPickle.dump(self, open(self.get_pickle_path(), 'wb'), cPickle.HIGHEST_PROTOCOL)

    @classmethod
    def remove(cls):
        # Delete the cached pickle; ignore if it does not exist.
        try:
            os.remove(cls.get_pickle_path())
        except OSError:
            pass

    @classmethod
    def get_log_directory(cls):
        # Root directory of the raw logs, taken from the environment.
        return os.environ['MOBISYS13_DATA']

    @classmethod
    def get_data_directory(cls):
        # Per-subclass scratch directory, named after the (lowercased) class.
        return os.path.join(cls.get_log_directory(), cls.__name__.lower())

    @classmethod
    def get_pickle_path(cls):
        return os.path.join(cls.get_data_directory(), 'processed.pickle')

    @classmethod
    def get_log_files(cls):
        # All '<n>.out.gz' logs, sorted numerically by n.
        return sorted([os.path.join(cls.get_log_directory(), f) for f in dircache.listdir(cls.get_log_directory()) if f.endswith('.out.gz')],
                      key=lambda k: int(os.path.basename(k)[:-len('.out.gz')]))

    @classmethod
    def get_data_files(cls):
        # All filtered '<n>.dat' pickles, sorted numerically by n.
        return sorted([os.path.join(cls.get_data_directory(), f) for f in dircache.listdir(cls.get_data_directory()) if f.endswith('.dat')],
                      key=lambda k: int(os.path.basename(k)[:-len('.dat')]))

    @classmethod
    def log_file_to_data_file(cls, path):
        # Map a 'x.out.gz' log path to its 'x.dat' pickle path.
        return os.path.join(cls.get_data_directory(), os.path.basename(path)[:-len('.out.gz')] + '.dat')

    def __init__(self, tags, duplicates=False, verbose=False):
        # Worker-pool size, overridable via the environment.
        if os.environ.has_key('MOBISYS13_FILTER_PROCESSES'):
            self.filter_processes = int(os.environ['MOBISYS13_FILTER_PROCESSES'])
        else:
            self.filter_processes = self.DEFAULT_FILTER_PROCESSES
        self.tags = tags
        # NOTE(review): the pattern is built from self.TAGS (class attribute,
        # presumably set by subclasses), not from the `tags` argument -- confirm.
        self.pattern = re.compile(Logline.LOGLINE_PATTERN_STRING % ("|".join([r"""%s\s*""" % (tag,) for tag in self.TAGS]),), re.VERBOSE)
        self.reset()
        # NOTE(review): the `verbose` parameter is ignored; verbosity is forced on.
        self.verbose = True
        self.duplicates = duplicates
        self.filtered = False
        self.processed = False
        try:
            os.mkdir(self.get_data_directory())
        except OSError:
            pass

    def reset(self):
        # Clear processing state (devices seen and observed time span).
        self.devices = set([])
        self.start_time = None
        self.end_time = None
        self.processed = False

    def reset_filter(self):
        # Clear filtering counters and force a re-filter on the next run.
        self.labeled_count = 0
        self.duplicate_count = 0
        self.filtered = False
        self.reset()

    def summary(self):
        """One-line human-readable summary of the filtering results."""
        return "%s: %s -> %s. %d lines (%d duplicates) from %d devices." % \
            (self.__class__.__name__, self.start_time, self.end_time,
             self.labeled_count, self.duplicate_count, len(self.devices))

    def test_labels(self, line_count=10000):
        """Dry-run the labeler on the first `line_count` unique matching lines
        of the first log file, printing each label for manual inspection."""
        line_set = set([])
        labels = set([])
        for line in gzip.open(self.get_log_files()[0], 'rb'):
            m = self.pattern.match(line)
            if m == None:
                continue
            if line in line_set:
                continue
            l = Logline(m, line)
            label = self.label_line(l)
            if label == None:
                continue
            print label, line.strip()
            line_set.add(line)
            labels.add(label)
            if len(line_set) >= line_count:
                break
        print labels

    def filter(self, refilter=False):
        """Filter all log files in parallel into per-file .dat pickles."""
        if self.filtered and not refilter:
            return
        log_files = self.get_log_files()
        pool_size = min(self.filter_processes, len(log_files))
        pool = Pool(processes=pool_size)
        if self.verbose:
            print >>sys.stderr, "%s: Filtering with %d processes." % (self.__class__.__name__, pool_size)
        data_files = [self.log_file_to_data_file(f) for f in log_files]
        # do_filter_star unpacks each tuple; itertools.repeat broadcasts the
        # shared arguments to every worker task.
        results = pool.map(do_filter_star, zip(log_files, data_files,
                                               itertools.repeat(self.pattern),
                                               itertools.repeat(self.label_line),
                                               itertools.repeat(self.verbose),
                                               itertools.repeat(self.duplicates),
                                               itertools.repeat(self.__class__.__name__)))
        pool.close()
        pool.join()
        # Aggregate (labeled, duplicates) counts from all workers.
        self.labeled_count = sum([r[0] for r in results])
        self.duplicate_count = sum([r[1] for r in results])
        self.filtered = True
        self.processed = False
        self.store()

    def process_loop(self):
        """Iterate every filtered line, tracking devices and the covered time
        span, and delegate per-line work to process_line() (subclass hook)."""
        if self.processed:
            return
        if not self.filtered:
            self.filter()
        for data_file in self.get_data_files():
            if self.verbose:
                print >>sys.stderr, "Processing %s" % (data_file,)
            lines = cPickle.load(open(data_file, 'rb'))
            for line in lines:
                self.devices.add(line.device)
                if self.start_time == None:
                    self.start_time = line.datetime
                self.end_time = line.datetime
                self.process_line(line)
        self.processed = True
def do_filter_star(args):
    """Adapter for Pool.map: unpack one task tuple into do_filter's arguments."""
    return do_filter(*args)
def do_filter(log_file, data_file, pattern, label_line, verbose, duplicates, name):
    """Worker: extract labeled Logline objects from one gzipped log file and
    pickle them to data_file.  Returns (labeled_count, duplicate_count)."""
    if verbose:
        print >>sys.stderr, "%s: filtering %s" % (name, log_file,)
    lines = []
    device_lines = {}
    count = 0
    duplicate_count = 0
    log_f = gzip.open(log_file, 'rb')
    for line in log_f:
        m = pattern.match(line)
        if m == None:
            continue
        l = Logline(m, line)
        # Drop per-device consecutive duplicates (same timestamp + payload)
        # unless duplicates were explicitly requested.
        if not duplicates and device_lines.has_key(l.device):
            if device_lines[l.device].datetime == l.datetime and \
               device_lines[l.device].log_message == l.log_message:
                duplicate_count += 1
                continue
        device_lines[l.device] = l
        # Only keep lines the subclass labeler recognizes.
        label = label_line(l)
        if label == None:
            continue
        count += 1
        l.label = label
        lines.append(l)
    log_f.close()
    if not duplicates and verbose:
        print >>sys.stderr, "%s: %d duplicates, %d labeled." % (name, duplicate_count, count)
    data_f = open(data_file, 'wb')
    cPickle.dump(lines, data_f, cPickle.HIGHEST_PROTOCOL)
    data_f.close()
    # Free the per-file buffers promptly in the long-lived worker process.
    del(data_f)
    del(lines)
    return count, duplicate_count
|
import math
class Circle(object):
    """Circle geometry helpers; the radius is supplied on each call."""

    def area(self, radius):
        """Return the area of a circle with the given radius."""
        return math.pi * radius ** 2

    def circumference(self, radius):
        """Return the perimeter of a circle with the given radius."""
        return math.pi * radius * 2
c = Circle()
# NOTE(review): the return values are discarded -- presumably a smoke test.
c.area(3)
c.circumference(5)
|
# String-method practice (Portuguese strings): greeting, whitespace
# stripping and case-conversion helpers.
nome = 'jessica'
message = "Alô " + nome.title() + ", voce gostaria de aprender um pouco de Python Hoje?"
famoso = 'Albert Einstein'
message2 = famoso
print (message)
nome = " Jessica "
# lstrip/rstrip/strip remove leading / trailing / both-side whitespace.
x = nome.lstrip()
y = nome.rstrip()
z = nome.strip()
print(x + z + y)
print(nome.lower())
print(nome.upper())
print(nome.title())
print(famoso + " certa vez disse:\n\t 'Uma pessoa que nunca cometeu um erro jamais tentou nada novo.'")
|
from libra.ledger_info import LedgerInfo
from libra.validator_verifier import VerifyError
from libra.hasher import *
from libra.proof import verify_transaction_list
from libra.proof.signed_transaction_with_proof import SignedTransactionWithProof
from libra.proof.account_state_with_proof import AccountStateWithProof
from libra.proof.event_with_proof import EventWithProof
from libra.transaction import SignedTransaction, TransactionInfo
from libra.account_address import Address
from libra.proof import ensure, bail
from libra.account_resource import AccountResource
import canoser
def verify(validator_verifier, request, response):
    """Verify a full UpdateToLatestLedger response against its request."""
    verify_update_to_latest_ledger_response(
        validator_verifier,
        request.client_known_version,
        request.requested_items,
        response.response_items,
        response.ledger_info_with_sigs
    )
def verify_update_to_latest_ledger_response(
    validator_verifier,
    req_client_known_version,
    requested_items,
    response_items,
    ledger_info_with_sigs
):
    """Verify the ledger info, its validator signatures, and every response
    item against the corresponding request item.

    Raises VerifyError on any mismatch.
    """
    ledger_info_proto = ledger_info_with_sigs.ledger_info
    ledger_info = LedgerInfo.from_proto(ledger_info_proto)
    signatures = ledger_info_with_sigs.signatures
    # The returned ledger must not be older than what the client already knows.
    if ledger_info.version < req_client_known_version:
        raise VerifyError(f"ledger_info.version:{ledger_info.version} < {req_client_known_version}.")
    # Genesis (version 0 with no signatures) is the only unsigned ledger info;
    # anything else must carry a verifiable aggregated signature.
    # (idiom fix: len(signatures) instead of signatures.__len__())
    if ledger_info.version > 0 or len(signatures) > 0:
        validator_verifier.batch_verify_aggregated_signature(ledger_info.hash(), signatures)
    if len(response_items) != len(requested_items):
        raise VerifyError(f"{len(response_items)} != {len(requested_items)}")
    for req_item, resp_item in zip(requested_items, response_items):
        verify_response_item(ledger_info, req_item, resp_item)
def verify_response_item(ledger_info, requested_item, response_item):
    """Dispatch verification of one ResponseItem against its RequestItem.

    The protobuf oneof field names pair up as '<kind>_request' /
    '<kind>_response'; an unknown kind or a mismatched pair raises VerifyError.
    """
    req_type = requested_item.WhichOneof('requested_items')
    if not req_type.endswith("_request"):
        raise VerifyError(f"RequestItem type unknown{req_type}.")
    resp_type = req_type.replace("_request", "_response")
    resp_type2 = response_item.WhichOneof('response_items')
    if resp_type != resp_type2:
        raise VerifyError(f"RequestItem/ResponseItem types mismatch:{resp_type} - {resp_type2}.")
    if resp_type == "get_account_state_response":
        asp = response_item.get_account_state_response.account_state_with_proof
        AccountStateWithProof.verify(asp, ledger_info, ledger_info.version,
                                     requested_item.get_account_state_request.address)
    elif resp_type == "get_account_transaction_by_sequence_number_response":
        atreq = requested_item.get_account_transaction_by_sequence_number_request
        atresp = response_item.get_account_transaction_by_sequence_number_response
        verify_get_txn_by_seq_num_resp(
            ledger_info,
            atreq.account,
            atreq.sequence_number,
            atreq.fetch_events,
            atresp.signed_transaction_with_proof,
            atresp.proof_of_current_sequence_number
        )
    elif resp_type == "get_events_by_event_access_path_response":
        ereq = requested_item.get_events_by_event_access_path_request
        eresp = response_item.get_events_by_event_access_path_response
        verify_get_events_by_access_path_resp(
            ledger_info,
            ereq.access_path,
            ereq.start_event_seq_num,
            ereq.ascending,
            ereq.limit,
            eresp.events_with_proof,
            eresp.proof_of_latest_event
        )
    elif resp_type == "get_transactions_response":
        req = requested_item.get_transactions_request
        ver = req.start_version
        limit = req.limit
        fetch_events = req.fetch_events
        txp = response_item.get_transactions_response.txn_list_with_proof
        verify_get_txns_resp(ledger_info, ver, limit, fetch_events, txp)
    else:
        raise VerifyError(f"unknown response type:{resp_type}")
def verify_get_txn_by_seq_num_resp(
    ledger_info,
    account,
    sequence_number,
    fetch_events,
    signed_transaction_with_proof,
    proof_of_current_sequence_number
):
    """Verify a GetAccountTransactionBySequenceNumber response.

    Exactly one of the two protos must be populated: the transaction with its
    proof, or (when the transaction does not exist yet) an account-state
    proof of the current sequence number.
    """
    # NOTE(review): presence is detected via a non-empty str() of the proto --
    # presumably equivalent to oneof/HasField presence here; confirm.
    has_stx = len(signed_transaction_with_proof.__str__()) > 0
    has_cur = len(proof_of_current_sequence_number.__str__()) > 0
    if has_stx and not has_cur:
        # Transaction returned: event presence must match the request, and the
        # transaction proof must check out for this account/sequence number.
        ensure(
            fetch_events == signed_transaction_with_proof.HasField("events"),
            "Bad GetAccountTxnBySeqNum response. Events requested: {}, events returned: {}.",
            fetch_events,
            signed_transaction_with_proof.HasField("events")
        )
        SignedTransactionWithProof.verify(
            signed_transaction_with_proof,
            ledger_info,
            signed_transaction_with_proof.version,
            account,
            sequence_number
        )
    elif has_cur and not has_stx:
        # No transaction: the on-ledger sequence number must show the
        # requested one has not happened yet.
        sequence_number_in_ledger = AccountResource.get_account_resource_or_default(
            proof_of_current_sequence_number.blob).sequence_number
        ensure(
            sequence_number_in_ledger <= sequence_number,
            "Server returned no transactions while it should. Seq num requested: {}, latest seq num in ledger: {}.",
            sequence_number,
            sequence_number_in_ledger
        )
        AccountStateWithProof.verify(proof_of_current_sequence_number, ledger_info,
                                     ledger_info.version, account)
    else:
        # Neither or both present: malformed response.
        bail(
            "Bad GetAccountTxnBySeqNum response. txn_proof.is_none():{}, cur_seq_num_proof.is_none():{}",
            has_stx,
            has_cur
        )
def verify_get_events_by_access_path_resp(
    ledger_info,
    req_access_path,
    req_start_seq_num,
    req_ascending,
    req_limit,
    events_with_proof,
    proof_of_latest_event,
):
    """Verify a GetEventsByEventAccessPath response: the account-state proof,
    the expected event sequence numbers, and each individual event proof."""
    account_resource = AccountResource.get_account_resource_or_default(proof_of_latest_event.blob)
    AccountStateWithProof.verify(proof_of_latest_event, ledger_info, ledger_info.version,
                                 req_access_path.address)
    event_handle = account_resource.get_event_handle_by_query_path(req_access_path.path)
    expected_event_key = event_handle.key
    # The server must return exactly the sequence numbers implied by the
    # request window and the handle's current event count.
    expected_seq_nums = gen_events_resp_idxs(event_handle.count,
                                             req_start_seq_num, req_ascending, req_limit)
    ensure(
        len(expected_seq_nums) == len(events_with_proof),
        "Expecting {} events, got {}.",
        len(expected_seq_nums),
        len(events_with_proof)
    )
    zipped = zip(events_with_proof, expected_seq_nums)
    for event_with_proof, seq_num in zipped:
        EventWithProof.verify(
            event_with_proof,
            ledger_info,
            expected_event_key,
            seq_num,
            event_with_proof.transaction_version,
            event_with_proof.event_index
        )
def gen_events_resp_idxs(seq_num_upper_bound, req_start_seq_num, req_ascending, req_limit):
    """Compute the event sequence numbers a correct server should return for
    the requested window, given the handle's current event count."""
    # A descending query starting at Uint64::MAX means "from the latest event".
    if not req_ascending and req_start_seq_num == canoser.Uint64.max_value and seq_num_upper_bound > 0:
        cursor = seq_num_upper_bound - 1
    else:
        cursor = req_start_seq_num
    if cursor >= seq_num_upper_bound:
        # Start lies past the newest event: nothing can be returned.
        return []
    if req_ascending:
        # Ascending: run up to the limit or the newest event, whichever first.
        return list(range(cursor, min(cursor + req_limit, seq_num_upper_bound)))
    if cursor + 1 < req_limit:
        # Descending and hitting 0 before exhausting the limit.
        return list(range(cursor, -1, -1))
    # Descending and bounded by the limit.
    return list(range(cursor, cursor - req_limit, -1))
def verify_get_txns_resp(ledger_info, start_version, limit, fetch_events, txn_list_with_proof):
    """Verify a GetTransactions response: emptiness rules, event presence,
    transaction count, start version, and the transaction-list proofs."""
    # An empty response is only legal when nothing was requested or the
    # start version lies beyond the ledger.
    if limit == 0 or start_version > ledger_info.version:
        if txn_list_with_proof.SerializeToString() != b'':
            raise VerifyError(f"transactions should be empty.")
        return
    if fetch_events != txn_list_with_proof.HasField("events_for_versions"):
        raise VerifyError(f"fetch_events: {fetch_events} mismatch with events_for_versions")
    num_txns = len(txn_list_with_proof.transactions)
    # The server may return fewer than `limit` only when the ledger ends first.
    ret_num = min(limit, ledger_info.version - start_version + 1)
    if num_txns != ret_num:
        raise VerifyError(f"transaction number expected:{ret_num}, returned:{num_txns}.")
    verify_start_version(txn_list_with_proof, start_version)
    verify_transaction_list(txn_list_with_proof, ledger_info)
def verify_start_version(txn_list_with_proof, start_version):
    """Check that the returned transaction list starts at the requested version.

    Raises VerifyError when the first transaction's version differs.
    """
    actual = txn_list_with_proof.first_transaction_version.value
    if actual != start_version:
        raise VerifyError(f"transaction version mismatch:{start_version}, returned:{actual}.")
|
'''15. Write a Python program to filter a list of integers using Lambda. '''
nums = [1, 2, 3, 4, 5]
print(nums)
print("\nEven number:")
# Keep values whose remainder mod 2 is zero.
even = list(filter(lambda x: x%2 == 0, nums))
print(even)
print("\nOdd number:")
# Keep values whose remainder mod 2 is non-zero.
odd = list(filter(lambda x: x%2 != 0, nums))
print(odd)
|
#!/usr/bin/python
import datetime
import time
def onPageLoad(paramstr):
    """Return the current local time as a display string.

    *paramstr* is accepted for the page-hook signature but unused.
    """
    return "Current Time: " + datetime.datetime.now().strftime("%c")
|
import os
from os.path import join
# To use the code you should change hard_coded_out_dir and hard_coded_code_dir
# to fit your system
def which_computer():
    """
    Detect if we are working on Iain's laptop or on the cluster
    """
    here = os.getcwd()
    # Identify the machine by a username fragment in the working directory;
    # unknown machines yield None.
    if 'iaincarmichael' in here:
        return 'iain_laptop'
    if 'idc9' in here:
        return 'bayes'
    return None
    # raise ValueError('Not sure which comptuer we are on!')
if which_computer() == 'iain_laptop':
    # where all the simulation data is saved
    hard_coded_out_dir = '/Users/iaincarmichael/Dropbox/Research/mvmm/public_release/'
    # directory containing the simulation scripts
    hard_coded_code_dir = '/Users/iaincarmichael/Dropbox/Research/local_packages/python/mvmm_sim'
elif which_computer() == 'bayes':
    hard_coded_out_dir = '/home/guests/idc9/projects/mvmm/public_release'
    hard_coded_code_dir = '/home/guests/idc9/local_packages/mvmm_sim'
# NOTE(review): on any other machine neither name is defined, so the Paths
# class below fails with NameError -- confirm this hard failure is intended.
class Paths(object):
    """Filesystem layout for simulation output, results and scripts.

    Defaults come from the machine-specific hard-coded directories resolved
    at import time.
    """

    def __init__(self, out_dir=hard_coded_out_dir,
                 code_dir=hard_coded_code_dir):
        self.out_dir = out_dir
        self.code_dir = code_dir
        if which_computer() == 'bayes':
            self.home_dir = '/home/guests/idc9'
            # where to save the cluster printout
            self.cluster_out_dir = join(self.home_dir, 'cluster_out')
        self.results_dir = join(self.out_dir, 'results')
        self.out_data_dir = join(self.out_dir, 'out_data')
        self.sim_scripts_dir = join(self.code_dir, 'simulation_scripts')

    def make_dirs(self):
        """Create the output directories if they do not already exist."""
        to_make = [self.out_data_dir, self.results_dir]
        for folder in to_make:
            # Bug fix: the original passed the whole `to_make` list to
            # os.makedirs (TypeError); create each directory individually.
            os.makedirs(folder, exist_ok=True)
|
import tmdb_api
from pprint import pprint
import os
import cv2
import scenedetect
from scenedetect.video_manager import VideoManager
from scenedetect.scene_manager import SceneManager
from scenedetect.frame_timecode import FrameTimecode
from scenedetect.stats_manager import StatsManager
from scenedetect.detectors import ContentDetector
from database import Database
import shutil
import face_recognition
# stage1:
# get movie name (from user) and video file
# get cast and movie info
# open video file
# segment into shot
# store in db
# stage2:
# find actors in each shot
# store actors in each shot to db
# Toggle the expensive face-recognition pass (stage 2).
want_face_rec = True
db = Database()
# Hard-coded movie to process; change these to index a different film.
movie_name = '1917'
video_path = "videos/1917_1.mkv"
def main():
    """Stage 1: fetch movie metadata and cast from TMDB, segment the video
    into shots and store everything in the database.  Stage 2 (optional):
    run face recognition frame-by-frame and record which actors appear in
    each shot."""
    movie_info = tmdb_api.getMovie(movie_name)
    cast = tmdb_api.getCast(movie_name)
    # pprint(movie_info)
    # pprint(cast[0])
    # pprint(tmdb_api.getCastInfo(cast[0]))
    # for c in cast:
    # img = tmdb_api.getCastImage(c)
    # info = tmdb_api.getCastInfo(c)
    # pprint(info)
    # adding actors into db
    print('getting cast...')
    for actor in cast:
        actor_data = {
            'name': actor['name'],
            'gender': actor['gender'],
            'profile_path': actor['profile_path'],
            # 'image': tmdb_api.getCastImage(actor),
            # 'biography': tmdb_api.getCastInfo(actor)['biography'],
            'biography': ''
        }
        db.actors_insertone_unique(actor_data)
    db.actors_createindex()
    # Only process the movie if it is not already in the database.
    if db.movies_findone({
        'movie_name': movie_info['original_title'],
        'video_path': video_path,
    }).count() <= 0:
        print('processing video to shots...')
        video_manager = VideoManager([video_path])
        stats_manager = StatsManager()
        scene_manager = SceneManager(stats_manager)
        # Content-based shot detection; a lower threshold yields more cuts.
        scene_manager.add_detector(ContentDetector(threshold=20.0))
        base_timecode = video_manager.get_base_timecode()
        video_manager.set_downscale_factor(2)
        video_manager.start()
        scene_manager.detect_scenes(frame_source=video_manager, show_progress=True)
        shot_list = scene_manager.get_scene_list(base_timecode)
        # adding shots into db
        print('adding shots to db...')
        shot_data = {
            'movie_name': movie_info['original_title'],
            'video_path': video_path,
            'framerate': video_manager.get_framerate(),
            'encoding': video_path.split('.')[-1]
        }
        shot_list_data = []
        for shot in shot_list:
            # Each shot is a (start, end) timecode pair.
            shot_list_data.append({
                'start_frame' : shot[0].get_frames(),
                'end_frame' : shot[1].get_frames(),
                'start_time' : shot[0].get_seconds(),
                'end_time' : shot[1].get_seconds(),
                'start_timestamp' : shot[0].get_timecode(),
                'end_timestamp' : shot[1].get_timecode(),
                'actors_in_shot' : []
            }
            )
        shot_data['shot_list'] = shot_list_data
        shots_data_id = db.shots_insertone_unique(shot_data).inserted_id
        # adding movies into db
        print('adding movie to db...')
        movie_data = {
            'movie_name': movie_info['original_title'],
            'video_path': video_path,
            'overview': movie_info['overview'],
            'genres': [genre['name'] for genre in movie_info['genres']],
            'poster_path': movie_info['poster_path'],
            'shots_data': shots_data_id
        }
        db.movies_insertone_unique(movie_data)
        db.movies_createindex()
        if want_face_rec:
            print('starting face recog and identification...')
            # do face recog
            # get movie and video file name, get actor id and profile_path from db
            # get all images of all actors needed from tmdb
            # load images of actors into face recog
            # load video file
            # for each frame
            # get faces recognised there
            # find out which shot it belongs to
            # insert the ids of the same actors into the shots collection array
            known_faces_names = []
            known_faces_encodings = []
            # Build the gallery of known faces from cached or downloaded stills.
            for actor in cast:
                name = actor['name']
                path = './cast/' + name + '.png'
                if os.path.exists(path):
                    print('found ' + path)
                    img = face_recognition.load_image_file(path)
                    print('encoding ' + name)
                    encodings = face_recognition.face_encodings(img)
                    if len(encodings) > 0:
                        known_faces_encodings.append(encodings[0])
                        known_faces_names.append(name)
                    else:
                        print('failed encoding ' + name)
                else:
                    # Not cached yet: fetch the still from TMDB first.
                    tmdb_api.saveCastImage(actor)
                    print('getting actor image from tmdb ' + path)
                    if os.path.exists(path):
                        img = face_recognition.load_image_file(path)
                        print('encoding ' + name)
                        encodings = face_recognition.face_encodings(img)
                        if len(encodings) > 0:
                            known_faces_encodings.append(encodings[0])
                            known_faces_names.append(name)
                        else:
                            print('failed encoding ' + name)
            # load vid file and face recog
            input_movie = cv2.VideoCapture(video_path)
            input_fps = input_movie.get(cv2.CAP_PROP_FPS)
            input_width = int(input_movie.get(cv2.CAP_PROP_FRAME_WIDTH))
            input_height = int(input_movie.get(cv2.CAP_PROP_FRAME_HEIGHT))
            # Process and export at half resolution to speed up recognition.
            output_resolution = (int(input_width / 2.0),int(input_height / 2.0))
            length = int(input_movie.get(cv2.CAP_PROP_FRAME_COUNT))
            # Create an output movie file (make sure resolution/frame rate matches input video!)
            fourcc = cv2.VideoWriter_fourcc(*'XVID')
            output_movie = cv2.VideoWriter('videos/outputs/output_' + video_path.split('/')[-1] + '.avi', fourcc, input_fps, output_resolution)
            frame_number = 0
            while True:
                # Grab a single frame of video
                ret, frame = input_movie.read()
                frame_number += 1
                # Quit when the input video file ends
                if not ret:
                    break
                print('frame ' + str(frame_number) + '/' + str(length))
                frame = cv2.resize(frame,output_resolution,fx=0,fy=0, interpolation=cv2.INTER_CUBIC)
                # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
                rgb_frame = frame[:, :, ::-1]
                # Find all the faces and face encodings in the current frame of video
                face_locations = face_recognition.face_locations(rgb_frame)
                face_encodings = face_recognition.face_encodings(rgb_frame, face_locations)
                for face_encoding in face_encodings:
                    # See if the face is a match for the known face(s)
                    match = face_recognition.compare_faces(known_faces_encodings, face_encoding, tolerance=0.50)
                    for i in range(len(match)):
                        if match[i]:
                            known_name = known_faces_names[i]
                            print('found ' + known_name)
                            actor_id = db.actors_findone({'name': known_name})[0]['_id']
                            ###### draw rectanglesin frame ######
                            print('locations: ', face_locations)
                            # NOTE(review): every detected face in the frame is
                            # labeled with this one matched actor's name --
                            # looks like a labeling bug; confirm intent.
                            for location in face_locations:
                                (top, right, bottom, left) = location
                                cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
                                # Draw a label with a name below the face
                                cv2.rectangle(frame, (left, bottom - 25), (right, bottom), (0, 0, 255), cv2.FILLED)
                                font = cv2.FONT_HERSHEY_DUPLEX
                                cv2.putText(frame, known_name, (left + 6, bottom - 6), font, 0.5, (255, 255, 255), 1)
                            ###########
                            # find out which shot it belongs to
                            # insert into that one
                            for j in range(len(shot_list_data)):
                                if frame_number >= shot_list_data[j]['start_frame'] and frame_number < shot_list_data[j]['end_frame']:
                                    if actor_id not in shot_list_data[j]['actors_in_shot']:
                                        shot_list_data[j]['actors_in_shot'].append(actor_id)
                output_movie.write(frame)
            print('updating shots db with actor list...')
            shot_data['shot_list'] = shot_list_data
            db.shots_updateone(shots_data_id, shot_data)
            pass
            print('DONE FACEREC!!')
    else:
        print('movie already in database !!')
    return
if __name__ == "__main__":
    # Run the full indexing pipeline for the configured movie.
    main()
"""
SCHEMA ???
movie : {
_id,
original_title => movie_name,
video_name,
overview => description,
genres => tags
poster_path => poster_path
shots_data: _id
}
shots: {
_id,
movie_name,
frame_rate,
encoding,
shots_list : [
{
actors_in_shot: [ids],
start_frame: ,
end_frame ,
start_time: ,
end_time: ,
},
{
actors_in_shot: [ids],
start_frame: ,
end_frame ,
start_time: ,
end_time: ,
start_timestamp: ,
end_timestamp: ,
}
]
}
actors: {
_id: ,
name: ,
gender: ,
profile_path: ,
biography: ,
5edcacd309b3425e55f9e788
5edcacd409b3425e55f9e7c9
5edcacd309b3425e55f9e78c
5edcacd309b3425e55f9e7b1
}
""" |
#!/usr/bin/env python
#start zap daemon
# encoding=utf8
# -*- coding: utf-8 -*-
# Drive an OWASP ZAP baseline scan against a local target and export the
# XML report. NOTE(review): written for Python 2 (print statements) and
# relies on fixed sleeps rather than polling ZAP's status APIs -- confirm
# the timings suit the target before reuse.
import os
import subprocess
import time
from pprint import pprint
from zapv2 import ZAPv2
from shutil import copyfile
print 'Starting ZAP ...'
# Launch the ZAP daemon headless; its stdout is discarded.
subprocess.Popen(['/Applications/OWASP ZAP.app/Contents/Java/zap.sh','-daemon'],stdout=open(os.devnull,'w'))
print 'Waiting for ZAP to load, 10 seconds ...'
time.sleep(10)
# Here the target is defined and an instance of ZAP is created.
target = "http://localhost"
zap = ZAPv2()
# Use the line below if ZAP is not listening on 8090.
zap = ZAPv2(proxies={'http': 'http://127.0.0.1:8090', 'https': 'http://127.0.0.1:8090'})
# ZAP starts accessing the target.
print 'Accessing target %s' % target
zap.urlopen(target)
time.sleep(2)
# The spider starts crawling the website for URLs
print 'Spidering target %s' % target
zap.spider.scan(target)
# Progress of spider
time.sleep(2)
# NOTE(review): fixed 200s wait instead of polling zap.spider.status --
# may truncate or overshoot the crawl on other targets.
time.sleep(200)
print 'Spider completed'
# Give the passive scanner a chance to finish
time.sleep(5)
# The active scanning starts
print 'Scanning target %s' % target
zap.ascan.scan(target)
time.sleep(300)
print 'Scan completed'
# Report the results
print 'Hosts: ' + ', '.join(zap.core.hosts)
#print 'Alerts: '
#pprint(zap.core.alerts())
#export report to XML
#with open("zap_report.xml", 'w') as f:
#    f.write(zap.core.xmlreport())
report_type = 'xml'
report_file = 'results/zap_report.xml'
# NOTE(review): append mode ('a') accumulates multiple XML documents in one
# file across runs, which is not well-formed XML -- confirm 'w' isn't meant.
with open(report_file, 'a') as f:
    xml = zap.core.xmlreport()
    f.write(xml)
print('Success: {1} report saved to {0}'.format(report_file, report_type.upper()))
#os.system("mv zap_report.xml results/")
#
# To close ZAP:
zap.core.shutdown()
|
#1
#v 0.001
def _ERROR(Message,Function):
    """Print a formatted report of the exception currently being handled.

    Must be called from inside an `except` block (it relies on
    sys.exc_info()). Python 2 print-statement syntax.

    Arguments:
        Message: extra explanatory text appended to the report.
        Function: name of the calling function, echoed as a "Recall" hint.
    """
    import sys,traceback
    # T is the outermost traceback entry: (filename, line, func, source text).
    i=sys.exc_info();T=traceback.extract_tb(i[2])[0]
    print '-----'
    print 'Recall: '+Function
    print
    # Split on backslash to show only the Windows file basename.
    print 'File: '+T[0].split('\\')[-1]+', line '+str(T[1])
    print "Code: '"+T[3]+"'"
    print traceback.format_exception_only(i[0], i[1])[0]
    print Message
    print '-----'
#other errors should be thrown before this is reached
# Iteration cap for the loop watchdogs below. NOTE: a module-level `global`
# statement is a no-op; only the assignment matters here.
global maxInstanceCount; maxInstanceCount = 1000000
def IncreaseRange():
    """Raise the loop-watchdog iteration cap (favours safety over speed)."""
    global maxInstanceCount
    maxInstanceCount = 100000000
def DecreaseRange():
    """Lower the loop-watchdog iteration cap (favours speed over safety)."""
    global maxInstanceCount
    maxInstanceCount = 10000
# Iterations counted so far by the while-loop watchdog.
IWLDInstanceCount = 0

def IWLD(Bool):
    """Guard a `while` loop against running forever.

    Passes `Bool` through as True/False until more than `maxInstanceCount`
    consecutive truthy checks have been seen, then returns False to force
    the loop to stop. Every False result also resets the counter and
    restores the default cap of 1000000.
    """
    global IWLDInstanceCount, maxInstanceCount
    if not Bool:
        IWLDInstanceCount = 0
        maxInstanceCount = 1000000
        return False
    if IWLDInstanceCount > maxInstanceCount:
        # Cap exceeded: assume the loop is stuck and break it.
        IWLDInstanceCount = 0
        maxInstanceCount = 1000000
        return False
    IWLDInstanceCount += 1
    return True
def ResetIWLD():
    """Deprecated no-op kept for API compatibility; will be removed
    (resetting now happens inside IWLD itself)."""
    pass
IFLDInstanceCount=0 #detect infinite function loop (counter for the stub below)
# NOTE(review): IFLD is an unimplemented stub -- its body is only a `global`
# declaration, so calling it has no effect yet.
def IFLD(): global IFLDInstanceCount
def ResetIFLD():
    """Reset the function-loop watchdog counter back to zero."""
    global IFLDInstanceCount
    IFLDInstanceCount = 0
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import bs4
import time

# LinkedIn credentials -- intentionally redacted, fill in before running.
un="Redacted"
pw="Redacted"
# initialize browser, --incognito for cache/cookies
option = webdriver.ChromeOptions()
# BUG FIX: the switch was previously "— incognito" (an em dash plus a space),
# which Chrome silently ignores, so the session was never incognito.
option.add_argument("--incognito")
# replace 'C:/bin/chromedriver.exe' to where chromedriver is installed on your system
driver= webdriver.Chrome(executable_path='C:/bin/chromedriver.exe', options=option)
driver.get("https://www.linkedin.com/")
time.sleep(2)
# Log in: focus each field, type the credential, then submit.
# NOTE(review): these absolute XPaths are brittle and break whenever
# LinkedIn changes its markup -- prefer id/name locators if they exist.
driver.find_element_by_xpath("/html/body/nav/section[2]/form/div[1]/div[1]/input").click()
time.sleep(1)
driver.find_element_by_xpath("/html/body/nav/section[2]/form/div[1]/div[1]/input").send_keys(un)
driver.find_element_by_xpath("/html/body/nav/section[2]/form/div[1]/div[2]/input").click()
time.sleep(1)
driver.find_element_by_xpath("/html/body/nav/section[2]/form/div[1]/div[2]/input").send_keys(pw)
driver.find_element_by_xpath("/html/body/nav/section[2]/form/div[2]/button").click()
time.sleep(3)
# Search people: Google site-search restricted to linkedin.com/in/ profiles.
person = "Tuan Dau"
driver.get("https://www.google.com")
search_query = driver.find_element_by_xpath('//*[@id="tsf"]/div[2]/div[1]/div[1]/div/div[2]/input')
search_query.send_keys('site:linkedin.com/in/ AND "York University" ' + person)
search_query.send_keys(Keys.RETURN)
|
from zipfile import ZipFile

# Python Challenge #6: follow the "next nothing is N" chain through the
# extracted ch6/ text files and print the per-member comments from ch6.zip.
# Fixes: no longer shadows the builtin `zip`, closes file handles via `with`,
# and drops the stray semicolons.
archive = ZipFile('ch6.zip', 'r')
print(archive.getinfo('90052.txt').comment)
path_prefix = 'ch6/'
file_type = '.txt'
current = "90052"
with open(path_prefix + current + file_type, "r") as fh:
    contents = fh.read()
current = contents[contents.index('is') + 3:]
# str(bytes) renders as b'X...'; index 2 is the first comment character.
print(str(archive.getinfo(current + file_type).comment)[2], end="")
while 'nothing is' in contents:
    with open(path_prefix + current + file_type, "r") as fh:
        contents = fh.read()
    if 'comments' in contents:
        break
    current = contents[contents.index('is') + 3:]
    comment = str(archive.getinfo(current + file_type).comment)
    comment = comment[2:comment.index('\'', 2)]  # strip the b'...' wrapper
    if comment == '\\n':
        print()
    else:
        print(comment, end='')
|
# This is the file you'll use to submit most of Lab 0.
# Certain problems may ask you to modify other files to accomplish a certain
# task. There are also various other files that make the problem set work, and
# generally you will _not_ be expected to modify or even understand this code.
# Don't get bogged down with unnecessary work.
# Section 1: Problem set logistics ___________________________________________
# This is a multiple choice question. You answer by replacing
# the symbol 'fill-me-in' with a number, corresponding to your answer.
# You get to check multiple choice answers using the tester before you
# submit them! So there's no reason to worry about getting them wrong.
# Often, multiple-choice questions will be intended to make sure you have the
# right ideas going into the problem set. Run the tester right after you
# answer them, so that you can make sure you have the right answers.
# What version of Python do we *recommend* (not "require") for this course?
# 1. Python v2.3
# 2. Python v2.5 or Python v2.6
# 3. Python v3.0
# Fill in your answer in the next line of code ("1", "2", or "3"):
# Multiple-choice answer ("1", "2" or "3" per the options above); "2" selects
# Python v2.5 or v2.6.
ANSWER_1 = '2'
# Section 2: Programming warmup _____________________________________________
# Problem 2.1: Warm-Up Stretch
def cube(x):
    """Return the cube of x, i.e. x multiplied by itself twice."""
    squared = x * x
    return squared * x
def factorial(x):
    """Return x! for a non-negative integer x (0! == 1).

    Raises:
        Exception: if x is negative or not an int.
    """
    if x < 0 or not isinstance(x, int):
        # FIX: the old `raise Exception, "..."` form is a SyntaxError on
        # Python 3; the call form works on both 2 and 3.
        raise Exception("factorial: input must not be negative!")
    out = 1
    # range(2, 1) is empty, so x == 0 and x == 1 fall through to return 1
    # (xrange was Python-2-only; range behaves identically here).
    for i in range(2, x + 1):
        out *= i
    return out
def count_pattern(pattern, lst):
    """Count occurrences (including overlapping ones) of `pattern` as a
    contiguous run inside sequence `lst`. An empty pattern counts as 0.

    BUG FIX: the previous implementation reset its match index on a
    mismatch without re-examining the current element, so it missed
    matches such as ('a','b') in ('a','a','b'), and it could not count
    overlapping occurrences.
    """
    plen = len(pattern)
    if plen == 0:
        return 0
    pat = list(pattern)
    # Slide a window of length plen over lst and compare element-wise.
    return sum(
        1
        for start in range(len(lst) - plen + 1)
        if list(lst[start:start + plen]) == pat
    )
# Problem 2.2: Expression depth
def depth(expr):
    """Return the nesting depth of `expr`.

    A non-sequence leaf has depth 0; a tuple/list has depth
    1 + max(depth of its elements), so an empty sequence has depth 1.

    Fixes: removes the unused, typo'd `coutn` variable and the dead
    commented-out draft (which would have crashed on empty sequences).
    """
    if not isinstance(expr, (tuple, list)):
        return 0
    deepest = 1
    for element in expr:
        deepest = max(deepest, 1 + depth(element))
    return deepest
# Problem 2.3: Tree indexing
def tree_ref(tree, index):
    """Return the element of nested sequence `tree` addressed by `index`.

    `index` is a sequence of positions, one per nesting level.
    Generalized: an empty index now returns `tree` itself (the recursive
    original crashed with IndexError on an empty index).
    """
    for i in index:
        tree = tree[i]
    return tree
# Section 3: Symbolic algebra
# Your solution to this problem doesn't go in this file.
# Instead, you need to modify 'algebra.py' to complete the distributer.
from algebra import Sum, Product, simplify_if_possible
from algebra_utils import distribution, encode_sumprod, decode_sumprod
# Section 4: Survey _________________________________________________________
# Please answer these questions inside the double quotes.
# NOTE(review): these are left blank -- presumably the submission tester
# reads the strings verbatim; fill them in before submitting.
# When did you take 6.01?
WHEN_DID_YOU_TAKE_601 = ""
# How many hours did you spend per 6.01 lab?
HOURS_PER_601_LAB = ""
# How well did you learn 6.01?
HOW_WELL_I_LEARNED_601 = ""
# How many hours did this lab take?
HOURS = ""
|
# URL routes for the discussion app.
from django.conf.urls import include, url
#from . import chat.views
# NOTE(review): a bare `import views` is a Python-2-style implicit relative
# import; on Python 3 it only resolves if `views` is on sys.path.
import views
urlpatterns = [
    # Landing / about page.
    url(r'^$', views.about, name='about'),
    # Create a new discussion.
    url(r'^new/$', views.new_discussion, name='new_discussion'),
    # A single forum addressed by a slug-like label (up to 50 chars).
    url(r'^(?P<label>[\w-]{,50})/$', views.discussion_forum, name='discussion_forum'),
]
|
'''
1 获取2019年4月之前的新闻链接
2 存入csv
'''
import time
import sys
import os
import pymysql
from pymysql import Error
import requests
from multiprocessing import Pool
from bs4 import BeautifulSoup
from pandas.core.frame import DataFrame
# Fetch the scroll-news page for one date and append matching links to a CSV.
def get_url(date):
    """Scrape the chinanews.com scroll page for `date` ('YYYY/MMDD'), keep
    items whose category is in the selected set, and append them to
    newsCollection/chinanews00.csv.

    Network + filesystem side effects; returns None.
    """
    url = 'http://www.chinanews.com/scroll-news/' + date + '/news.shtml'
    res = requests.get(url)
    res.encoding = 'GBK'  # html: ISO-8859-1 (2012)
    soup = BeautifulSoup(res.text, 'html.parser')
    li_tag = soup.find('div', 'content_list').find_all('li')
    category_list = []
    title_list = []
    url_list = []
    for li in li_tag:
        try:
            info = li.find_all('a')
            category = info[0].text
            if category in ['军事', '娱乐', '台湾', '汽车', '教育', '健康']:
                category_list.append(category)
                news_title = info[1].text
                title_list.append(news_title)
                news_url = 'http://www.chinanews.com' + str(info[1].get('href'))
                url_list.append(news_url)
                print("have done!" + news_title + ":" + news_url)
        except Exception:
            # Malformed <li> entries (missing anchors) are skipped.
            # FIX: narrowed from a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.
            continue
    print()
    c = {'类别': category_list,
         '标题': title_list,
         'url': url_list
         }
    data = DataFrame(c)
    root = ".//newsCollection//"
    path = root + "chinanews00.csv"
    try:
        if not os.path.exists(root):
            os.mkdir(root)
            print('mkdir success')
        # NOTE: append mode repeats the header row on every call.
        data.to_csv(path, mode='a')
    except IOError:
        print('sorry, write failed')
    else:
        # BUG FIX: the success message previously named chinanews01.csv
        # although the data is written to chinanews00.csv.
        print("---chinanews00.csv have been added---")
def get_date():
    """Return the list of date path segments ('2012/MMDD') covering
    June through December 2012, in calendar order."""
    year = 2012
    days_in_month = {1: 31, 3: 31, 5: 31, 7: 31, 8: 31, 10: 31, 12: 31,
                     4: 30, 6: 30, 9: 30, 11: 30}
    dates = []
    for month in range(6, 13):
        for day in range(1, days_in_month[month] + 1):
            dates.append(str(year) + '/' + str(month).zfill(2) + str(day).zfill(2))
    return dates
def main():
    """Scrape every date in parallel with an 8-process pool."""
    pool = Pool(8)  # process pool of 8 scraper workers
    dates = get_date()
    res_list = []
    # Dispatch one async task per date; results are collected afterwards.
    for date in dates:
        res = pool.apply_async(func=get_url, args=(date,))
        res_list.append(res)
    record_list = []
    count = 0
    for res in res_list:
        count = count + 1
        try:
            result = res.get()  # block until this worker's task finishes
            print('第'+ str(count) + '页链接获取成功')
        except:
            # NOTE(review): bare except hides the failure reason; a worker
            # error is reported only as "page failed, trying the next one".
            print('第'+ str(count) + '页链接获取失败,正在尝试下一页')
            continue
        # NOTE: get_url returns None, so record_list only counts successes.
        record_list.append(result)
    pool.close()
    pool.join() # wait for all workers to finish, then tear the pool down
if __name__ == "__main__":
    main()
# -*- coding: utf-8 -*-
"""MRI pulse-design-specific linear operators.
"""
import sigpy as sp
from sigpy import backend
def PtxSpatialExplicit(sens, coord, dt, img_shape, b0=None, ret_array=False):
    """Explicit spatial-domain pulse design linear operator.
    Linear operator relates rf pulses to desired magnetization.
    Equivalent matrix has dimensions [Ns Nt].
    Args:
        sens (array): sensitivity maps. [nc dim dim]
        coord (None or array): coordinates. [nt 2]
        dt (float): hardware sampling dt.
        img_shape (None or tuple): image shape.
        b0 (array): 2D array, B0 inhomogeneity map.
        ret_array (bool): if true, return explicit numpy array.
            Else return linop.
    Returns:
        SigPy linop with A.repr_string 'pTx spatial explicit', or numpy array
        if selected with 'ret_array'
    References:
        Grissom, W., Yip, C., Zhang, Z., Stenger, V. A., Fessler, J. A.
        & Noll, D. C.(2006).
        Spatial Domain Method for the Design of RF Pulses in Multicoil
        Parallel Excitation. Magnetic resonance in medicine, 56, 620-629.
    """
    # 3D problem if the image has a third dimension; otherwise 2D.
    three_d = False
    if len(img_shape) >= 3:
        three_d = True
    # xp is numpy or cupy depending on where `sens` lives.
    device = backend.get_device(sens)
    xp = device.xp
    with device:
        nc = sens.shape[0]
        dur = dt * coord.shape[0]  # duration of pulse, in s
        # create time vector
        t = xp.expand_dims(xp.linspace(0, dur, coord.shape[0]), axis=1)
        # Spatial grids, centered on the image: row-major order,
        # x L to R, y T to B (note the sign flip on y).
        x_ = xp.linspace(
            -img_shape[0] / 2, img_shape[0] - img_shape[0] / 2, img_shape[0]
        )
        y_ = xp.linspace(
            img_shape[1] / 2, -(img_shape[1] - img_shape[1] / 2), img_shape[1]
        )
        if three_d:
            z_ = xp.linspace(
                -img_shape[2] / 2,
                img_shape[2] - img_shape[2] / 2,
                img_shape[2],
            )
            x, y, z = xp.meshgrid(x_, y_, z_, indexing="ij")
        else:
            x, y = xp.meshgrid(x_, y_, indexing="ij")
        # create explicit Ns * Nt system matrix, for 3d or 2d problem;
        # each entry is exp(1j * x·k), with an extra off-resonance phase
        # term 2*pi*b0*(t - dur) when a B0 map is supplied.
        if three_d:
            if b0 is None:
                AExplicit = xp.exp(
                    1j
                    * (
                        xp.outer(x.flatten(), coord[:, 0])
                        + xp.outer(y.flatten(), coord[:, 1])
                        + xp.outer(z.flatten(), coord[:, 2])
                    )
                )
            else:
                AExplicit = xp.exp(
                    1j * 2 * xp.pi * xp.transpose(b0.flatten() * (t - dur))
                    + 1j
                    * (
                        xp.outer(x.flatten(), coord[:, 0])
                        + xp.outer(y.flatten(), coord[:, 1])
                        + xp.outer(z.flatten(), coord[:, 2])
                    )
                )
        else:
            if b0 is None:
                AExplicit = xp.exp(
                    1j
                    * (
                        xp.outer(x.flatten(), coord[:, 0])
                        + xp.outer(y.flatten(), coord[:, 1])
                    )
                )
            else:
                AExplicit = xp.exp(
                    1j * 2 * xp.pi * xp.transpose(b0.flatten() * (t - dur))
                    + 1j
                    * (
                        xp.outer(x.flatten(), coord[:, 0])
                        + xp.outer(y.flatten(), coord[:, 1])
                    )
                )
        # add sensitivities to system matrix: stack one diagonally-weighted
        # copy of AExplicit per coil along the column axis.
        # NOTE(review): the xp.empty seed block holds garbage and is sliced
        # off below -- correct, but it costs one extra concatenate per coil.
        AFullExplicit = xp.empty(AExplicit.shape)
        for ii in range(nc):
            if three_d:
                tmp = xp.squeeze(sens[ii, :, :, :]).flatten()
            else:
                tmp = sens[ii, :, :].flatten()
            # D broadcasts the coil sensitivity over all time points.
            D = xp.transpose(xp.tile(tmp, [coord.shape[0], 1]))
            AFullExplicit = xp.concatenate(
                (AFullExplicit, D * AExplicit), axis=1
            )
        # remove 1st empty AExplicit entries
        AFullExplicit = AFullExplicit[:, coord.shape[0] :]
        A = sp.linop.MatMul((coord.shape[0] * nc, 1), AFullExplicit)
        # Finally, adjustment of input/output dimensions to be consistent with
        # the existing Sense linop operator. [nc x nt] in, [dim x dim] out
        Ro = sp.linop.Reshape(ishape=A.oshape, oshape=sens.shape[1:])
        Ri = sp.linop.Reshape(
            ishape=(nc, coord.shape[0]), oshape=(coord.shape[0] * nc, 1)
        )
        A = Ro * A * Ri
        A.repr_str = "pTx spatial explicit"
        # output a sigpy linop or a numpy array
        if ret_array:
            return A.linops[1].mat
        else:
            return A
|
from django.urls import path
from . import views
# URL routes for the personas app: list views plus a creation form.
urlpatterns = [
    path('', views.lista_personas, name='lista_personas'),
    path('persona/nueva', views.persona_nueva, name='persona_nueva'),
    path('tarjetas', views.lista_tarjetas, name='lista_tarjetas'),
    path('tarjetas_con_plata', views.tarjetas_con_plata, name='tarjetas_con_plata'),
]
import csv
import itertools

# Enumerate all ordered 6-tuples of distinct numbers from 1..49 and write
# them to Lotto_Kombis.csv. Fixes: the hand-built counting loop becomes
# range(), the six nested loops with a 15-clause distinctness test become
# itertools.permutations (same tuples, same order), and the loop variable no
# longer shadows the builtin `list`.
# NOTE(review): 49P6 is ~10 billion rows -- like the original, this cannot
# realistically finish or fit in memory; consider streaming straight to the
# CSV (and whether unordered combinations were actually intended).
num_list = list(range(1, 50))
neue = []
for combo in itertools.permutations(num_list, 6):
    print("{},{},{},{},{},{}".format(*combo))
    neue.append(list(combo))
with open('Lotto_Kombis.csv', 'w+', newline="") as csvfile:
    writer = csv.writer(csvfile)
    for row in neue:
        writer.writerow(row)
|
import logging
from furl import furl
from lxml import etree
from share.harvest import BaseHarvester
logger = logging.getLogger(__name__)
# TODO Could we use the OAI harvester instead, or is there something non-standard about NCAR?
class NCARHarvester(BaseHarvester):
    """Harvests metadata records from NCAR's Earth System Grid OAI-PMH
    repository using the DIF metadata format."""
    VERSION = 1
    # XML namespaces used by the OAI-PMH envelope and the DIF payload.
    namespaces = {
        'OAI-PMH': 'http://www.openarchives.org/OAI/2.0/',
        'dif': 'http://gcmd.gsfc.nasa.gov/Aboutus/xml/dif/'
    }
    url = 'https://www.earthsystemgrid.org/oai/repository'
    def do_harvest(self, start_date, end_date):
        """Yield (identifier, raw XML) pairs for records in the date window."""
        url = furl(self.url).set(query_params={
            'verb': 'ListRecords',
            'metadataPrefix': 'dif',
            'from': start_date.format('YYYY-MM-DD') + 'T00:00:00Z',
            'until': end_date.format('YYYY-MM-DD') + 'T00:00:00Z',
        })
        return self.fetch_records(url)
    def fetch_records(self, url):
        """Generator walking all result pages via OAI-PMH resumption tokens."""
        records, token = self.fetch_page(url, token=None)
        while True:
            for record in records:
                yield (
                    record.xpath('./OAI-PMH:header/OAI-PMH:identifier/node()', namespaces=self.namespaces)[0],
                    etree.tostring(record),
                )
            records, token = self.fetch_page(url, token=token)
            if not token or not records:
                break
    def fetch_page(self, url, token):
        """Fetch one page of records; returns (records, next_token_or_None)."""
        if token:
            # Per OAI-PMH, a resumptionToken request must carry ONLY the
            # token -- drop the original selective-harvest parameters.
            url.remove('from')
            url.remove('until')
            url.remove('metadataPrefix')
            url.args['resumptionToken'] = token
        logger.info('Making request to {}'.format(url))
        resp = self.requests.get(url.url)
        parsed = etree.fromstring(resp.content)
        records = parsed.xpath('//OAI-PMH:record', namespaces=self.namespaces)
        # Appending [None] makes the missing-token case yield None cleanly.
        token = (parsed.xpath('//OAI-PMH:resumptionToken/node()', namespaces=self.namespaces) + [None])[0]
        logger.info('Found {} records. Continuing with token {}'.format(len(records), token))
        return records, token
|
"""
This module takes care of starting the API Server, Loading the DB and Adding the endpoints
"""
import os
from flask import Flask, request, jsonify, url_for
from werkzeug.wrappers import response
from flask_cors import CORS
from utils import APIException, generate_sitemap
from datastructures import FamilyStructure
import json
#from models import Person
# Flask application setup: trailing-slash-agnostic routing and CORS enabled
# for all origins.
app = Flask(__name__)
app.url_map.strict_slashes = False
CORS(app)
# create the jackson family object
jackson_family = FamilyStructure("Jackson")
# Handle/serialize errors like a JSON object
@app.errorhandler(APIException)
def handle_invalid_usage(error):
    """Serialize APIException instances into a JSON body + their status code."""
    return jsonify(error.to_dict()), error.status_code
# generate sitemap with all your endpoints
@app.route('/')
def sitemap():
    """Autogenerated HTML index of every registered endpoint."""
    return generate_sitemap(app)
@app.route('/members', methods=['GET'])
def handle_hello():
    """Return all family members (200), or 404 when the list is empty."""
    # this is how you can use the Family datastructure by calling its methods
    members = jackson_family.get_all_members()
    response_body = members
    if response_body:
        return jsonify(response_body), 200
    else:
        msg={"msg": "Familia no encontrada"}
        return jsonify(msg), 404
@app.route('/member', methods=['POST'])
def handle_family():
    """Add a member from the JSON request body.

    Returns 400 with an error message when the body is empty/falsy,
    otherwise 200 with the updated family returned by add_member.
    """
    member = json.loads(request.data)
    # FIX: `not member or member == None` was redundant -- None is already
    # falsy, so a single truthiness test covers both cases.
    if not member:
        msg = {"msg": "Información ingresada de manera incorrecta"}
        return jsonify(msg), 400
    family = jackson_family.add_member(member)
    return jsonify(family), 200
@app.route('/member/<int:member_id>', methods=['GET'])
def member_byid(member_id):
    """Return the member with the given id (200), or 404 if not found."""
    member = jackson_family.get_member(member_id)
    if member:
        return jsonify(member), 200
    # FIX: the 404 branch previously returned a bare dict while every other
    # handler wraps its payload with jsonify -- made consistent.
    response_body = {"msg": 'Miembro de familia no encontrado'}
    return jsonify(response_body), 404
@app.route('/member/<int:member_id>', methods=['DELETE'])
def delete_member(member_id):
    """Delete the member with the given id; 400 when the id is unknown."""
    outcome = jackson_family.delete_member(member_id)
    if not outcome:
        return jsonify({"msg": "Id ingresado no coincide con ningún familiar"}), 400
    return jsonify(outcome), 200
@app.route('/member/<int:member_id>', methods=['PUT'])
def update_member(member_id):
    """Update a member from the JSON request body; 400 when the update fails."""
    member = json.loads(request.data)
    msg = jackson_family.update_member(member, member_id)
    # BUG FIX: this previously tested `if member:` (the request payload), so
    # a failed update still answered 200 whenever the body was non-empty.
    # The update's own result must decide the status code.
    if msg:
        return jsonify(msg), 200
    msg = {"msg": "Información ingresada de manera incorrecta"}
    return jsonify(msg), 400
# this only runs if `$ python src/app.py` is executed
if __name__ == '__main__':
    # PORT can be overridden via the environment; defaults to 3000.
    PORT = int(os.environ.get('PORT', 3000))
    app.run(host='0.0.0.0', port=PORT, debug=True)
|
from datetime import datetime
from dateutil.parser import isoparse
from functools import cached_property
from onegov.agency.collections import ExtendedPersonCollection
from onegov.agency.collections import PaginatedAgencyCollection
from onegov.agency.collections import PaginatedMembershipCollection
from onegov.api import ApiEndpoint, ApiInvalidParamException
from onegov.gis import Coordinates
# Recognised query-parameter names for filtering by modification timestamp
# (greater-than, less-than, equal, greater-equal, less-equal).
UPDATE_FILTER_PARAMS = ['updated_gt', 'updated_lt', 'updated_eq',
                        'updated_ge', 'updated_le']
def filter_for_updated(filter_operation, filter_value, result):
    """
    Applies filters for several 'updated' comparisons.
    Refer to UPDATE_FILTER_PARAMS for all filter keywords.
    :param filter_operation: the updated filter operation to be applied. For
    allowed filters refer to UPDATE_FILTER_PARAMS
    :param filter_value: the updated filter value to filter for
    :param result: the results to apply the filters on
    :return: filter result
    :raises ApiInvalidParamException: if the value is not a valid ISO stamp
    """
    try:
        # Only parse up to minute precision (YYYY-MM-DDTHH:MM).
        ts = isoparse(filter_value[:16])
    except Exception as ex:
        # BUG FIX: the adjacent f-string fragments joined without a space,
        # producing "...parameter'updated_gt'" in the error message.
        raise ApiInvalidParamException(f'Invalid iso timestamp for parameter '
                                       f'\'{filter_operation}\': {ex}') from ex
    return result.for_filter(**{filter_operation: ts})
class ApisMixin:
    """Lazily exposes the sibling agency/person/membership endpoints so that
    items can cross-link to each other."""
    @cached_property
    def agency_api(self):
        return AgencyApiEndpoint(self.app)
    @cached_property
    def person_api(self):
        return PersonApiEndpoint(self.app)
    @cached_property
    def membership_api(self):
        return MembershipApiEndpoint(self.app)
def get_geo_location(item):
    """Return the item's coordinates as a dict with lon/lat/zoom keys,
    falling back to default Coordinates when none are set."""
    geo = item.content.get('coordinates') or Coordinates()
    return {'lon': geo.lon, 'lat': geo.lat, 'zoom': geo.zoom}
def get_modified_iso_format(item):
    """
    Returns the ISO representation of the item's last modification, using
    the created timestamp when `modified` is not a datetime.
    :param item: db item e.g. agency, people, membership
    :return: str iso representation of item last modification
    """
    stamp = item.modified if isinstance(item.modified, datetime) else item.created
    return stamp.isoformat()
class PersonApiEndpoint(ApiEndpoint, ApisMixin):
    """Read-only API endpoint listing people, filterable by first/last name
    and by modification time."""
    endpoint = 'people'
    filters: list[str] = []
    @property
    def collection(self):
        """Build the paginated person collection, validating and applying
        the request's query-parameter filters."""
        result = ExtendedPersonCollection(
            self.session,
            page=self.page or 0
        )
        for key, value in self.extra_parameters.items():
            valid_params = self.filters + ['first_name',
                                           'last_name'] + UPDATE_FILTER_PARAMS
            if key not in valid_params:
                raise ApiInvalidParamException(
                    f'Invalid url parameter \'{key}\'. Valid params are: '
                    f'{valid_params}')
            # apply different filters
            if key == 'first_name':
                result = result.for_filter(first_name=value)
            if key == 'last_name':
                result = result.for_filter(last_name=value)
            if key in UPDATE_FILTER_PARAMS:
                result = filter_for_updated(filter_operation=key,
                                            filter_value=value,
                                            result=result)
        # Hidden people never appear in the public API.
        result.exclude_hidden = True
        result.batch_size = self.batch_size
        return result
    def item_data(self, item):
        """Serializable attributes of a person, excluding any field the
        organisation configured as hidden."""
        data = {
            attribute: getattr(item, attribute, None)
            for attribute in (
                'academic_title',
                'born',
                'email',
                'first_name',
                'function',
                'last_name',
                'location_address',
                'location_code_city',
                'notes',
                'parliamentary_group',
                'phone',
                'phone_direct',
                'political_party',
                'postal_address',
                'postal_code_city',
                'profession',
                'salutation',
                'title',
                'website',
            )
            if attribute not in self.app.org.hidden_people_fields
        }
        data['modified'] = get_modified_iso_format(item)
        return data
    def item_links(self, item):
        """Link-type attributes plus a link to the person's memberships."""
        result = {
            attribute: getattr(item, attribute, None)
            for attribute in (
                'picture_url',
                'website',
            )
            if attribute not in self.app.org.hidden_people_fields
        }
        result['memberships'] = self.membership_api.for_filter(
            person=item.id.hex
        )
        return result
class AgencyApiEndpoint(ApiEndpoint, ApisMixin):
    """Read-only API endpoint listing agencies, filterable by parent, title
    and modification time."""
    endpoint = 'agencies'
    filters = ['parent']
    @property
    def collection(self):
        """Build the paginated agency collection, validating and applying
        the request's query-parameter filters."""
        result = PaginatedAgencyCollection(
            self.session,
            page=self.page or 0,
            parent=self.get_filter('parent', None, False),
            joinedload=['organigram']
        )
        for key, value in self.extra_parameters.items():
            valid_params = self.filters + ['title'] + UPDATE_FILTER_PARAMS
            if key not in valid_params:
                raise ApiInvalidParamException(
                    f'Invalid url parameter \'{key}\'. Valid params are:'
                    f' {valid_params}')
            # apply different filters
            if key == 'title':
                result = result.for_filter(title=value)
            if key in UPDATE_FILTER_PARAMS:
                result = filter_for_updated(filter_operation=key,
                                            filter_value=value,
                                            result=result)
        result.batch_size = self.batch_size
        return result
    def item_data(self, item):
        """Serializable attributes of an agency."""
        return {
            'title': item.title,
            'portrait': item.portrait,
            'location_address': item.location_address,
            'location_code_city': item.location_code_city,
            'modified': get_modified_iso_format(item),
            'postal_address': item.postal_address,
            'postal_code_city': item.postal_code_city,
            'website': item.website,
            'email': item.email,
            'phone': item.phone,
            'phone_direct': item.phone_direct,
            'opening_hours': item.opening_hours,
            'geo_location': get_geo_location(item),
        }
    def item_links(self, item):
        """Links to the organigram, hierarchy neighbours and memberships."""
        return {
            'organigram': item.organigram,
            'parent': self.for_item(item.parent_id),
            'children': self.for_filter(parent=str(item.id)),
            'memberships': self.membership_api.for_filter(
                agency=str(item.id)
            )
        }
class MembershipApiEndpoint(ApiEndpoint, ApisMixin):
    """Read-only API endpoint listing agency memberships, filterable by
    agency, person and modification time."""
    endpoint = 'memberships'
    filters = ['agency', 'person']
    @property
    def collection(self):
        """Build the paginated membership collection, validating and
        applying the request's query-parameter filters."""
        result = PaginatedMembershipCollection(
            self.session,
            page=self.page or 0,
            agency=self.get_filter('agency'),
            person=self.get_filter('person'),
        )
        for key, value in self.extra_parameters.items():
            valid_params = self.filters + UPDATE_FILTER_PARAMS
            if key not in valid_params:
                raise ApiInvalidParamException(
                    f'Invalid url parameter \'{key}\'. Valid params are:'
                    f' {valid_params}')
            # apply different filters
            if key in UPDATE_FILTER_PARAMS:
                result = filter_for_updated(filter_operation=key,
                                            filter_value=value,
                                            result=result)
        result.batch_size = self.batch_size
        return result
    def item_data(self, item):
        """Serializable attributes of a membership."""
        return {
            'title': item.title,
            'modified': get_modified_iso_format(item),
        }
    def item_links(self, item):
        """Links to the agency and person this membership connects."""
        return {
            'agency': self.agency_api.for_item(item.agency),
            'person': self.person_api.for_item(item.person)
        }
|
# Apply gevent monkey-patching as early as possible so stdlib blocking
# primitives (socket, ssl, time, threading, ...) become cooperative.
from gevent.monkey import patch_all
patch_all()
|
class Solution(object):
    """House Robber III: pick node values of a binary tree maximising the
    sum without ever picking two directly-connected nodes."""

    def rob(self, root):
        """Return the maximum loot obtainable from the tree rooted at `root`."""
        skipped, taken = self.robSub(root)
        return max(skipped, taken)

    def robSub(self, root):
        """Return [best_without_root, best_with_root] for this subtree."""
        if not root:
            return [0, 0]
        left = self.robSub(root.left)
        right = self.robSub(root.right)
        # Skipping the root lets each child take its own best option;
        # taking the root forces both children to be skipped.
        without_root = max(left) + max(right)
        with_root = root.val + left[0] + right[0]
        return [without_root, with_root]
|
from django import forms
from .models import Layer, Messagetiers, Projet
from django.forms import ModelForm, ModelChoiceField
from django.utils.translation import ugettext_lazy as _
class UploadFileForm(forms.Form):
    """Upload form: a display name plus the file itself."""
    name = forms.CharField(label='Nom du fichier', max_length=100)
    file = forms.FileField()
class LayerForm(forms.Form):
    """Form offering one checkbox per available layer.

    The choice list is built dynamically from the `layers` iterable passed
    to the constructor; each choice value is the layer's id_layer.
    """
    def __init__(self,layers, *args, **kwargs):
        super(LayerForm, self).__init__(*args, **kwargs)
        self.fields['Layers'] = forms.MultipleChoiceField(choices=[ (o.id_layer, o) for o in layers], widget=forms.CheckboxSelectMultiple)
class ConnexionForm(forms.Form):
    """Login form: username plus masked password."""
    username = forms.CharField(label="Nom d'utilisateur", max_length=30)
    password = forms.CharField(label="Mot de passe", widget=forms.PasswordInput)
class demandeModifPWForm(forms.Form):
    """Password-reset request form: asks only for the username."""
    username = forms.CharField(label="Nom d'utilisateur", max_length=30)
class modifPWForm(forms.Form):
    """Password-change form: new password entered twice for confirmation."""
    password1 = forms.CharField(label="Nouveau mot de passe", widget=forms.PasswordInput)
    password2 = forms.CharField(label="Répétez votre nouveau mot de passe", widget=forms.PasswordInput)
class MessageForm(ModelForm):
    """ModelForm over Messagetiers with French field labels."""
    class Meta:
        model = Messagetiers
        fields = ['objet', 'sender', 'text', 'destinataire']
        labels = {'objet': _('Objet :'),'sender': _('Expéditeur :'), 'text': _('Message :'), 'destinataire': _('Projet concerné :')}
# Train an SVM classifier on the Housing dataset and report its accuracy.
import pandas as pd
from sklearn import svm, metrics
house = pd.read_csv('Housing.csv', sep=',', header=0)
# One-hot encode six categorical columns; columns 0,2,4,... of the dummies
# are then joined back (presumably one dummy per binary category -- confirm
# against the CSV's column layout).
k = pd.get_dummies(house.iloc[:,[6,7,8,9,10,12]])
house = house.join(k.iloc[:,[0,2,4,6,8,10]])
data = house.iloc[:,[2,3,4,5,11,13,14,15,16,17,18]]
label = house.iloc[:,1]
clf = svm.SVC(gamma='auto')
clf.fit(data, label)
pre = clf.predict(data)
# Compute accuracy (original comment: 정답률 구하).
# NOTE(review): accuracy is measured on the training data itself, so it
# overestimates generalisation; a train/test split would be more honest.
ok = 0; total = 0
for idx, answer in enumerate(label):
    p = pre[idx]
    if p == answer : ok += 1
    total += 1
print("정답률:", ok, "/", total, "=", ok/total)
import os
def path_split(path):
    """
    Decompose a relative path into (directory, base name, extension),
    replacing the combined use of os.path.split and os.path.splitext.
    """
    # rpartition yields head == "" when os.sep is absent, which matches the
    # original's "no directory part" behaviour.
    head, _, tail = path.rpartition(os.sep)
    dot_parts = tail.rsplit(".", 1)
    extension = dot_parts[1] if len(dot_parts) == 2 else ""
    return head, dot_parts[0], extension
def _read_raw(answer):
    """Read up to 32 bytes and append them to `answer`.

    Returns (accumulated_text, more_expected): more_expected is False on
    EOF or when the chunk ends with a newline (which is stripped).
    """
    # NOTE(review): reads from file descriptor 1 (stdout) -- presumably the
    # target runtime wires fd 1 bidirectionally; ordinarily stdin is fd 0.
    buf = os.read(1, 32)
    if len(buf) == 0:
        return answer, False
    # NOTE(review): on Python 3 os.read returns bytes, so buf[-1] is an int
    # and this comparison is always False -- Python 2 semantics assumed here.
    if buf[-1] == "\n":
        return answer + buf[:-1], False
    return answer + buf, True
def raw_input(msg=""):
os.write(1, msg)
answer, cont = _read_raw("")
while cont:
answer, cont = _read_raw(answer)
return answer
|
def checkio(number):
    """Return the smallest radix k (2..36) in which `number` is divisible
    by k - 1, or 0 when no such radix exists.

    Fixes: removes the leftover debug prints that polluted stdout and
    replaces the hand-rolled ord() digit arithmetic (which duplicated what
    int(s, base) already does) with the builtin conversion.
    """
    # int(ch, 36) maps '0'-'9' to 0-9 and 'A'-'Z' (case-insensitive) to 10-35.
    digit_values = [int(ch, 36) for ch in number]
    # The radix must exceed every digit; clamp to at least 2 (int() minimum).
    start = max(2, max(digit_values) + 1)
    for base in range(start, 37):
        if int(number, base) % (base - 1) == 0:
            return base
    return 0
if __name__ == '__main__':
    # Self-check asserts; not required by the auto-tester.
    assert checkio("18") == 10, "Simple decimal"
    assert checkio("1010101011") == 2, "Any number is divisible by 1"
    assert checkio("222") == 3, "3rd test"
    assert checkio("A23B") == 14, "It's not a hex"
    assert checkio("IDDQD") == 0, "k is not exist"
    print('Local tests done')
#学习大牛解法
# def checkio(number):
# k = 2
# while k < 37:
# try:
# cur_number = int(number, k)
# if cur_number % (k-1) == 0:
# return k
# except: pass
# finally:
# k +=1
# return 0
#对try: except:的应用 |
# Log into the BIT CAS portal and scrape the user's display name.
# SECURITY NOTE(review): the username and password are hardcoded below --
# move them to environment variables or a config file before sharing/committing.
from urllib import request,parse
from http import cookiejar
import re
url='https://login.bit.edu.cn/cas/login'
req=request.Request(url)
# Browser-like headers expected by the CAS login endpoint.
req.add_header('Host','login.bit.edu.cn')
req.add_header('Origin','https://login.bit.edu.cn')
req.add_header('Referer','https://login.bit.edu.cn/cas/login?service=http%3A%2F%2Fonline.bit.edu.cn%2Fccs%2Fehome%2Findex.do')
req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.80 Safari/537.36')
# Cookie-aware opener so the CAS session survives across requests.
cookie=cookiejar.CookieJar()
handler=request.HTTPCookieProcessor(cookie)
opener=request.build_opener(handler)
op=opener.open(req)
data=op.read().decode()
# Extract the CSRF-like hidden form tokens ("lt" and "execution") that CAS
# requires to accompany the credentials.
lt_re=re.compile('name="lt" value="(.*?)"')
x=lt_re.findall(data)
execution_re=re.compile('name="execution" value="(.*?)"')
y=execution_re.findall(data)
login_data=parse.urlencode([
    ('username','2120150113'),
    ('password','8800956guozichun'),
    ('lt',x[0]),
    ('execution',y[0]),
    ('_eventId','submit'),
    ('rmShown','1')])
# POST the login form, then fetch the profile page with the session cookie.
op=opener.open(req,login_data.encode())
op=opener.open('http://online.bit.edu.cn/ccs/euser/profile.do?content=campus')
data=op.read().decode()
# NOTE(review): the uid is hardcoded in the pattern, so this only matches
# one specific account's profile link.
name_re=re.compile('<a class="name"\r\n *href="/ccs/euser/profile.do\?uid=1349214">(.*?)</a>')
name=name_re.findall(data)
print(name)
|
"""
Profile page app admin module.
"""
#from django.contrib import admin
# Register your models here.
|
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
from locators import TestPageLocators as TPL
from base_page import BasePage
class TestPage(BasePage):
"""
A TestPage class ables to interact with the tested page
"""
def enter_login_email(self, email):
"""
Enters a email in the login form field "E-Mail"
Arguments:
email (str): email for input
"""
elem_login_email = self.find_element(TPL.AUTH_EMAIL_FIELD)
elem_login_email.clear()
elem_login_email.send_keys(email)
def enter_password(self, password):
"""
Enters a password in the login form field "Пароль"
Arguments:
password (str): password for input
"""
elem_login_password = self.find_element(TPL.AUTH_PASSWORD_FIELD)
elem_login_password.clear()
elem_login_password.send_keys(password)
def press_enter_in_password_field(self):
"""
Presses "Enter" in the login form field "Пароль"
"""
self.find_element(TPL.AUTH_PASSWORD_FIELD).send_keys(Keys.RETURN)
def click_on_login_button(self):
"""
Clicks on a button "Вход"
"""
self.find_element(TPL.AUTH_LOGIN_BUTTON).click()
def enter_input_email(self, email):
    """
    Enter an email into the input form field "E-Mail".

    Arguments:
        email (str): email to type into the field
    """
    elem_input_email = self.find_element(TPL.INPUT_EMAIL_FIELD)
    elem_input_email.clear()
    elem_input_email.send_keys(email)

def enter_name(self, name):
    """
    Enter a name into the input form field "Имя" (Name).

    Arguments:
        name (str): name to type into the field
    """
    elem_input_name = self.find_element(TPL.INPUT_NAME_FIELD)
    elem_input_name.clear()
    elem_input_name.send_keys(name)

def select_gender_male(self):
    """
    Select "Мужской" (Male) in the drop-down list "Пол" (Gender).
    """
    elem_gender = Select(self.find_element(TPL.INPUT_GENDER_SELECTOR))
    elem_gender.select_by_index(0)

def select_gender_female(self):
    """
    Select "Женский" (Female) in the drop-down list "Пол" (Gender).
    """
    elem_gender = Select(self.find_element(TPL.INPUT_GENDER_SELECTOR))
    elem_gender.select_by_index(1)

def click_check_1(self):
    """
    Click the check box "Вариант 1.1" (Option 1.1).
    """
    self.find_element(TPL.INPUT_CHECK_1).click()

def click_check_2(self):
    """
    Click the check box "Вариант 1.2" (Option 1.2).
    """
    self.find_element(TPL.INPUT_CHECK_2).click()

def select_radio_1(self):
    """
    Select the radio button "Вариант 2.1" (Option 2.1).
    """
    self.find_element(TPL.INPUT_RADIO_1).click()

def select_radio_2(self):
    """
    Select the radio button "Вариант 2.2" (Option 2.2).
    """
    self.find_element(TPL.INPUT_RADIO_2).click()

def select_radio_3(self):
    """
    Select the radio button "Вариант 2.3" (Option 2.3).
    """
    self.find_element(TPL.INPUT_RADIO_3).click()

def click_on_send_button(self):
    """
    Click the "Добавить" (Add) button.
    """
    self.find_element(TPL.INPUT_SEND_BUTTON).click()

def click_on_ok_button(self):
    """
    Click the "Ok" button of the confirmation dialog.
    """
    self.find_element(TPL.INPUT_DIALOG_BUTTON).click()
def take_row_from_table(self, row_num):
    """
    Return one row of the data table as a dict.

    Arguments:
        row_num (int): number of the row from which the data is taken.
            Row 0 is the header row. If row_num exceeds the number of
            rows in the table, a dict of blank columns is returned.
    Returns:
        dict: column texts in the form
            {
                'email' (str): "E-Mail" column,
                'name' (str): "Имя" column,
                'gender' (str): "Пол" column,
                'check' (str): "Выбор 1" column,
                'radio' (str): "Выбор 2" column
            }
    """
    elem_table = self.find_element(TPL.INPUT_DATA_TABLE)
    rows = elem_table.find_elements(By.TAG_NAME, "tr")
    result = {'email': '',
              'name': '',
              'gender': '',
              'check': '',
              'radio': ''}
    if row_num < len(rows):
        # Query the row's cells once instead of five separate DOM lookups;
        # zip also tolerates rows with fewer than five cells.
        cells = rows[row_num].find_elements(By.TAG_NAME, "td")
        for key, cell in zip(('email', 'name', 'gender', 'check', 'radio'), cells):
            result[key] = cell.text
    return result
def check_invalid_email_password_message(self):
    """
    Return True if the message "Неверный E-Mail или пароль" appeared.
    """
    try:
        self.driver.find_element_by_id("invalidEmailPassword")
        return True
    except Exception:  # was a bare except; only the element lookup can raise here
        return False

def check_email_format_error_message(self):
    """
    Return True if the message "Неверный формат E-Mail" appeared.
    """
    try:
        self.driver.find_element_by_id("emailFormatError")
        return True
    except Exception:  # was a bare except
        return False

def check_blank_name_error_message(self):
    """
    Return True if the message "Поле имя не может быть пустым" appeared.
    """
    try:
        self.driver.find_element_by_id("blankNameError")
        return True
    except Exception:  # was a bare except
        return False
def check_login_success(self):
    """
    Check login success by the visibility of the input form.

    Returns:
        True: if login was successful
        False: if login failed
    """
    elem_inputsPage = self.driver.find_element_by_id('inputsPage')
    display = elem_inputsPage.value_of_css_property('display')
    return display != 'none'

def check_send_message_opened(self):
    """
    Check whether the message window "Данные добавлены" is opened.

    Returns:
        True: if the message window is opened
        False: if the message window is not opened
    """
    try:
        # NOTE(review): an absolute XPath is brittle; prefer an id/class
        # locator in TPL if one exists.
        self.driver.find_element_by_xpath("/html/body/div[3]")
        return True
    except Exception:  # was a bare except
        return False
|
# _*_ coding:utf-8 _*_
__anthor__ = u'橘子来了'  # NOTE(review): likely a typo for __author__

import scrapy  # the module name is lowercase; "import Scrapy" fails at runtime


class ScrapySpider(scrapy.spiders.Spider):
    """Minimal spider skeleton for the "xs84" site (no parse logic yet)."""
    name = "xs84"
    # The stray bare `all` expression that was here did nothing and was removed.


if __name__ == "__main__":
    # Spiders are normally launched via `scrapy crawl xs84`; nothing to do here.
    pass
import sys
from PyQt5 import QtWidgets
from Designs import mainWindow
from Models import Data
# noinspection PyBroadException
class MainApp(QtWidgets.QMainWindow, mainWindow.Ui_MainWindow):
    """Main window: wires the generated Qt UI to the table's data model."""

    def __init__(self):
        # Inherit from parent class, set up the generated UI.
        super(self.__class__, self).__init__()
        self.setupUi(self)
        # Set up the data model backing the main table view.
        self.dataModel = Data.DataModel(self)
        self.mainTableView.setModel(self.dataModel)
        # Map button bindings.
        self.addButton.clicked.connect(self.add_button)
        self.removeButton.clicked.connect(self.remove_button)

    def get_entry(self):
        """Return (date, experiment_name, result) read from the input widgets."""
        date = self.dateEdit.text()
        experiment_name = self.experimentNameEdit.text()
        result = self.resultEdit.text()
        return date, experiment_name, result

    def add_button(self):
        """Append the current UI entry to the data model."""
        date, experiment_name, result = self.get_entry()
        self.dataModel.add_entry([date, experiment_name, result])

    def remove_button(self):
        """Remove the currently selected row; print a notice if none selected."""
        try:
            selected_trial = self.mainTableView.selectionModel().selectedRows()[0].row()
        except IndexError:
            # Was a bare except around the whole body, which also masked real
            # errors from remove_entry as "No entry selected".
            print('No entry selected')
            return
        self.dataModel.remove_entry(selected_trial)
# Back up the reference to the default exception hook.
sys._excepthook = sys.excepthook


def my_exception_hook(exctype, value, traceback):
    """Print the error, delegate to the original hook, then exit with code 1.

    NOTE(review): presumably installed because PyQt5 can abort on unhandled
    exceptions raised inside slots without printing a traceback — confirm.
    """
    # Print the error and traceback
    print(exctype, value, traceback)
    # Call the normal exception hook after
    sys._excepthook(exctype, value, traceback)
    sys.exit(1)


# Set the exception hook to our wrapping function.
sys.excepthook = my_exception_hook
def main():
    """Create the Qt application, show the main window, and run the event loop."""
    app = QtWidgets.QApplication(sys.argv)
    form = MainApp()
    form.show()
    # exec_() blocks until the window closes; its status becomes the exit code.
    sys.exit(app.exec_())


if __name__ == '__main__':
    main()
# class Hand:
# pass
# class Foot:
# pass
# class Trunk:
# pass
# class Head:
# pass
#
#
#
#
# class Person:
# def __init__(self,id_num,name):
# self.id_num = id_num
# self.name = name
# self.hand = Hand()
# self.foot = Foot()
# self.trunk = Trunk()
# self.head = Head()
#
# p1 = Person('121212','晓得')
# print(p1.__dict__)
class School:
    """A school campus: display name plus address."""

    def __init__(self, name, addr):
        # campus name / campus address
        self.name, self.addr = name, addr

    def zhaosheng(self):
        """Announce that this campus is currently enrolling students."""
        print("%s正在招生" % (self.name))
class Course:
    """A course offering: name, price, duration, and owning school."""

    def __init__(self, name, price, period, school):
        # course name / course price / course duration / school it belongs to
        self.name, self.price, self.period, self.school = name, price, period, school
class Teacher:
    """A teacher: name plus the school and course they belong to."""

    def __init__(self, name, school, course):
        # teacher name / employing school / course taught
        self.name, self.school, self.course = name, school, course
# Demo data: two campuses and one sample course.
s1 = School('千锋', '北京')
s2 = School('千锋', '南京')
c1 = Course('Linux', 10, '1h', s1)
# print(c1.school.name)

# Menu text shown before every prompt (campus chooser).
msg = """
1 前锋 北京校区
2 前锋 南京校区
"""

while True:
    print(msg)
    choice = input("用户选择:")
    menu = {
        '1': s1,
        '2': s2,
    }
    # Guard against an invalid menu choice instead of crashing with KeyError.
    if choice not in menu:
        continue
    school_obj = menu[choice]
    name = input("课程名称:")
    price = input('课程价格:')
    period = input('课程周期:')
    cr = Course(name, price, period, school_obj)
    print('[%s] 课程属于[%s]校区' % (cr.name, cr.school.name))
|
import mysql.connector as mc
import FoodFilter as foodpy
from datetime import datetime
'''
password is hidden for privacy purposes
'''
# Database password -- redacted for privacy (see module docstring above).
Password = '****************'

# Module-level accumulators shared by the helper functions below.
Cure = []              # deficiency names collected by SelectedDeficiency()
nutr = list()          # per-food nutrient breakdowns built by SelectedFood()
problem = list()       # per-problem food recommendations (SelectedDeficiency)
TotalEnergy = dict()   # summed nutrient totals across all selected foods
FoodValues = {}        # category display name -> list of food names (FetchFoods)

# Maps a deficiency name (as stored in the Deficiency table) to the
# FoodDetails column that addresses it.
DeficiencyToFoodMap = {
    "Water": "Water",
    "Protein": "Protein",
    "Fat": "TotalFat",
    "Fibre": "TotalFibre",
    "Carbohydrates": "Carbohydrates",
    "vitamin-b1": "VitB1",
    "vitamin-b2": "VitB2",
    "vitamin-b3": "VitB3",
    "vitamin-b5": "VitB5",
    "vitamin-b6": "VitB6",
    "vitamin-b7": "VitB7",
    "vitamin-b9": "VitB9",
    "calcium": "Calcium",
    "copper": "Copper",
    "iron": "Iron",
    "magnesium": "Magnesium",
    "potassium": "Potassium",
    "sodium": "Sodium",
    "zinc": "Zinc",
    "sugar": "TotalFreeSugars",
    "phosphorous": "Phosphorous"
}

# All nutrient columns of the FoodDetails table, in table order
# (row offsets 2..31; offsets 0 and 1 are FoodID and FoodName).
Columns = ["Water", "Protein", "TotalFat", "TotalFibre", "Carbohydrates", "Energy", "VitB1", "VitB2", "VitB3",
           "VitB5", "VitB6", "VitB7", "VitB9", "Arsenic", "Calcium", "Cobalt", "Copper", "Iron", "Leadd", "Magnesium",
           "Mercury", "Phosphorous", "Potassium", "Sodium", "Zinc", "TotalStarch", "Fructose", "Glucose",
           "Sucrose", "TotalFreeSugars"]

# Subset of columns used for the per-user "which food gave you most X" stats.
Analysis = ["Water", "Protein", "TotalFat", "TotalFibre", "Carbohydrates", "Energy", "VitB1", "VitB6", "Calcium", "Copper", "Iron"
            , "TotalStarch", "TotalFreeSugars"]

# FoodID prefix -> human-readable food category.
FoodMap = {
    "A": "Cereals and Millets",
    "B": "Grain Legumes",
    "C": "Green Leafy Vegetables",
    "D": "Other Vegetables",
    "E": "Fruits",
    "F": "Roots and Tubers",
    "G": "Condiments and Spices",
    "H": "Nuts and Oil seeds",
    "I": "Sugars",
    "J": "Mushrooms",
    "K": "Miscellaneous",
    "L": "Milk products",
    "M": "Egg products",
    "N": "Chicken",
    "O": "Animal Meats",
    "P": "Fishes",
    "Q": "Shell fishes",
    "R": "Marine Molluscs",
    "S": "Freshwater fish and shell fish",
    "T": "Soups,sauses and Gravies",
    "UB": "Baked Products",
    "US": "Snacks",
    "UC": "Chocolates and confectionaries",
    "UF": "Fast foods",
    "UD": "Beverages",
}

# Measurement unit for each FoodDetails column (used when formatting output).
Units = {"Water": "g",
         "Protein": "g",
         "TotalFat": "g",
         "TotalFibre": "g",
         "Carbohydrates": "g",
         "Energy": "kJ",
         "VitB1": "mg",
         "VitB2": "mg",
         "VitB3": "g",
         "VitB5": "mg",
         "VitB6": "mg",
         "VitB7": "micro g",
         "VitB9": "micro g",
         "Arsenic": "micro g",
         "Calcium": "mg",
         "Cobalt": "mg",
         "Copper": "mg",
         "Iron": "mg",
         "Leadd": "mg",
         "Magnesium": "mg",
         "Mercury": "micro g",
         "Phosphorous": "mg",
         "Potassium": "mg",
         "Sodium": "mg",
         "Zinc": "mg",
         "TotalStarch": "g",
         "Fructose": "g",
         "Glucose": "g",
         "Sucrose": "g",
         "TotalFreeSugars": "g"
         }
def CreateDatabase(cursor):
    """Create the `Dietcare` database if it does not already exist."""
    cursor.execute("CREATE DATABASE IF NOT EXISTS Dietcare")
def InsertUser(username, password, email, age):
    """Insert a new user account into the Login table.

    Arguments:
        username, password, email, age: values for the new Login row.
    """
    conn = mc.connect(user="root", host="localhost", database="dietcare", passwd=Password)
    try:
        cursor = conn.cursor()
        query = "INSERT INTO Login ( Username,Password,EmailID,Age ) VALUES (%s,%s,%s,%s);"
        cursor.execute(query, (username, password, email, age))
        conn.commit()
        cursor.close()
    finally:
        conn.close()  # the original never closed the connection
def isUser(username, password):
    """Return 1 if (username, password) matches a Login row, else 0."""
    conn = mc.connect(user="root", host="localhost", database="dietcare", passwd=Password)
    try:
        cursor = conn.cursor()
        query = "SELECT * FROM Login WHERE Username = %s and Password = %s"
        cursor.execute(query, (username, password))
        fetch = cursor.fetchone()
        cursor.close()
        # `is None` instead of `== None`; an empty row cannot occur but the
        # original's length guard is kept for safety.
        if fetch is None or len(fetch) == 0:
            return 0
        return 1
    finally:
        conn.close()  # the original never closed the connection
def SelectedDeficiencyRes():
    """Return the module-global results built by SelectedDeficiency()."""
    # Reading a module-level name needs no `global` declaration.
    return problem
def SelectedFoodResuts():
    """Return (nutr, TotalEnergy): per-food breakdowns and summed totals."""
    # Reading module-level names needs no `global` declaration.
    return nutr, TotalEnergy
def profileFetch(UserLogin):
    """Fetch the profile row for saved credentials.

    Arguments:
        UserLogin (sequence): [username, password]; empty means not logged in.
    Returns:
        list: [Username, Password, EmailID, Age], or [] when missing.
    """
    conn = mc.connect(user="root", host="localhost", database="dietcare", passwd=Password)
    try:
        user = UserLogin
        if len(user) != 0:
            cursor = conn.cursor()
            # Parameterized query: the original interpolated the credentials
            # directly into the SQL string (an injection vector).
            cursor.execute(
                "SELECT Username, Password, EmailID, Age FROM Login "
                "WHERE Username = %s and Password = %s;",
                (user[0], user[1]))
            fetch = cursor.fetchall()
            if len(list(fetch)) != 0:
                return list(fetch[0])
            return []
        return []
    finally:
        conn.close()
def UpdateValues(username, password, email, age, UserLoginuser, UserLoginpass):
    """Update any non-empty profile fields of the currently logged-in user.

    Empty strings (and "0" for age) mean "leave unchanged". The user is
    located via the saved login credentials.
    """
    conn = mc.connect(user="root", host="localhost", database="dietcare", passwd=Password)
    try:
        cursor = conn.cursor()
        userid2 = ""
        if UserLoginuser != "" and UserLoginpass != "":
            # Parameterized lookup: the original concatenated credentials
            # into the SQL string (an injection vector).
            cursor.execute(
                "SELECT UserID FROM Login WHERE Username = %s and Password = %s;",
                (UserLoginuser, UserLoginpass))
            userid2 = list(cursor.fetchone())[0]
        if userid2 != "":
            # Column names below are literals, so interpolating them is safe;
            # only the values are parameterized.
            for column, value in (("Username", username),
                                  ("Password", password),
                                  ("EmailID", email)):
                if value != "":
                    cursor.execute(
                        "UPDATE Login SET " + column + " = %s WHERE UserID = %s;",
                        (value, userid2))
                    conn.commit()
            if age != "0":
                cursor.execute("UPDATE Login SET Age = %s WHERE UserID = %s;",
                               (age, userid2))
                conn.commit()
    finally:
        conn.close()
def UserFoodStats(username):
    """For each nutrient in Analysis, find which of the user's logged foods
    contains the most of it.

    Arguments:
        username (str): the Login.Username to analyse.
    Returns:
        dict: nutrient column name -> food name ("" when no food qualifies).
    """
    conn = mc.connect(user="root", host="localhost", database="dietcare", passwd=Password)
    try:
        cursor = conn.cursor()
        # Parameterized queries: the original concatenated the username and
        # food names into the SQL strings (an injection vector).
        cursor.execute("SELECT UserID from Login WHERE Username = %s;", (username,))
        UserID = list(cursor.fetchone())[0]
        cursor.execute("SELECT FoodName FROM UserFood WHERE UserID = %s", (str(UserID),))
        foods = [list(row)[0] for row in cursor.fetchall()]
        ntr = dict()
        for nutrients in Analysis:
            max1 = 0
            maxFood = ""
            for food in foods:
                # The column name comes from the trusted Analysis constant,
                # so interpolating it is safe; the value is parameterized.
                cursor.execute(
                    "SELECT FoodName," + nutrients + " FROM FoodDetails WHERE FoodName = %s;",
                    (food,))
                for row in cursor.fetchall():
                    if list(row)[1] > max1:
                        max1 = list(row)[1]
                        maxFood = list(row)[0]
            ntr[nutrients] = maxFood
        return ntr
    finally:
        conn.close()
def FoodActivity(username):
    """Return the user's food log as a list of single-entry dicts.

    Arguments:
        username (str): the Login.Username whose log to fetch.
    Returns:
        list[dict]: each entry maps "date , time" to the food name.
    """
    conn = mc.connect(user="root", host="localhost", database="dietcare", passwd=Password)
    try:
        cursor = conn.cursor()
        # Parameterized queries (the original concatenated the values in).
        cursor.execute("SELECT UserID from Login WHERE Username = %s;", (username,))
        UserID = list(cursor.fetchone())[0]
        cursor.execute(
            "SELECT Date, Time, FoodName FROM UserFood WHERE UserID = %s;",
            (str(UserID),))
        data = []
        for date1, time1, food1 in cursor.fetchall():
            data.append({date1 + " , " + time1: food1})
        return data
    finally:
        conn.close()
def UserStatsInsert(foods, username):
    """Insert one UserFood log row per food, stamped with the current time.

    Arguments:
        foods (iterable): food names the user consumed.
        username (str): the Login.Username to log against.
    """
    conn = mc.connect(user="root", host="localhost", database="dietcare", passwd=Password)
    try:
        cursor = conn.cursor()
        now = datetime.now()
        date = now.strftime("%d/%m/%Y")
        time = now.strftime("%H:%M:%S")
        # Parameterized lookup (the original concatenated the username in).
        cursor.execute("SELECT UserID from Login WHERE Username = %s;", (username,))
        UserID = list(cursor.fetchone())[0]
        for food in foods:
            cursor.execute("INSERT INTO UserFood VALUES (%s,%s,%s,%s);",
                           (UserID, food, date, time))
            conn.commit()
    finally:
        conn.close()
def SelectedFood(foods):
    """Append per-food nutrient breakdowns to the global `nutr` list and
    recompute the global `TotalEnergy` sums over everything in `nutr`.

    Arguments:
        foods (iterable): food names to look up in FoodDetails.
    Returns:
        list: the (module-global) `nutr` list.
    """
    conn = mc.connect(user="root", host="localhost", database="dietcare", passwd=Password)
    cursor = conn.cursor()
    global nutr, TotalEnergy
    for food in foods:
        # Parameterized lookup (the original interpolated the food name,
        # an injection vector for user-chosen strings).
        cursor.execute("SELECT * FROM FoodDetails WHERE FoodName = %s;", (food,))
        fetch = cursor.fetchall()
        temp = dict()
        temp2 = dict()
        for var in range(len(Columns)):
            # Nutrients start at row offset 2 (after FoodID and FoodName).
            temp2[Columns[var] + " (" + Units[Columns[var]] + ") "] = list(fetch[0])[var + 2]
        # Largest amounts first.
        temp2 = dict(sorted(temp2.items(), key=lambda item: item[1], reverse=True))
        temp[food] = temp2
        nutr.append(temp)
    conn.close()
    # Rebuild TotalEnergy from scratch: zero every key, then sum per food.
    for i in nutr:
        for j, k in i.items():
            for l in k.keys():
                TotalEnergy[l] = 0
    for i in nutr:
        for j, k in i.items():
            for l in k.keys():
                TotalEnergy[l] = round(TotalEnergy[l] + k[l], 3)
    return nutr
def FetchFoods():
    """Group all food names by category into the global FoodValues map.

    Returns:
        dict: category display name -> list of food names.
    """
    conn = mc.connect(user="root", host="localhost", database="dietcare", passwd=Password)
    try:
        cursor = conn.cursor()
        for keys in FoodMap.keys():
            # FoodIDs are prefixed with the category code (e.g. 'A...'),
            # so a LIKE 'X%' pattern selects one category; the pattern is
            # bound as a parameter rather than concatenated.
            cursor.execute("SELECT FoodName FROM FoodDetails WHERE FoodID LIKE %s;",
                           (keys + "%",))
            FoodValues[FoodMap[keys]] = [val[0] for val in cursor.fetchall()]
        cursor.close()
        return FoodValues
    finally:
        conn.close()  # the original closed only the cursor and leaked the connection
def InsertFood(value, cursor, db):
    """Print (and, when the comments are enabled, insert) one FoodDetails row.

    Arguments:
        value (sequence): FoodID, FoodName, then 30 numeric nutrient fields.
        cursor, db: database cursor/connection, used only by the disabled
            insert below.
    """
    # 32 placeholders: FoodID, FoodName and 30 nutrient columns.
    query = "INSERT INTO FoodDetails VALUES (" + ",".join(["%s"] * 32) + ");"
    # The first two fields stay as strings; the rest are converted to float
    # (replaces 30 hand-written float(...) calls).
    data_val = (value[0], value[1]) + tuple(float(v) for v in value[2:32])
    print(data_val)
    print("===PLEASE DISABLE THE COMMENTS BELOW FOR INSERTING (CAUTION: THIS CAUSES CHANGES IN DATABASE) ===")
    # cursor.execute(query,data_val)
    # db.commit()
def InsertDisease(diseases, cursor, db):
    """Print (and, when the comments are enabled, insert) one Disease row.

    Arguments:
        diseases (sequence): (Disease, Medicine) values.
        cursor, db: database cursor/connection for the disabled insert below.
    """
    query = "INSERT INTO Disease VALUES (%s,%s);"
    data = tuple(diseases[:2])
    print(data)
    print("===PLEASE DISABLE THE COMMENTS BELOW FOR INSERTING (CAUTION: THIS CAUSES CHANGES IN DATABASE) ===")
    # cursor.execute(query,data)
    # db.commit()
def InsertDeficiency(data, cursor, db):
    """Print (and, when the comments are enabled, insert) one Deficiency row.

    Arguments:
        data (sequence): (Problem, OrganAffected, Deficiency) values.
        cursor, db: database cursor/connection for the disabled insert below.
    """
    query = "INSERT INTO Deficiency VALUES (%s,%s,%s);"
    values = (data[0], data[1], data[2])
    print(values)
    print("===PLEASE DISABLE THE COMMENTS BELOW FOR INSERTING (CAUTION: THIS CAUSES CHANGES IN DATABASE) ===")
    # NOTE(review): if re-enabled, this should pass `values`, not `data`
    # (a raw list argument may not match the placeholder tuple shape).
    # cursor.execute(query,data)
    # db.commit()
def FetchDeficiency1():
    """Map each affected organ to the list of problems recorded for it.

    Returns:
        dict: OrganAffected -> [Problem, ...]
    """
    conn = mc.connect(user="root", host="localhost", database="dietcare", passwd=Password)
    try:
        cursor = conn.cursor()
        cursor.execute("SELECT DISTINCT OrganAffected FROM Deficiency")
        fetch = cursor.fetchall()
        OrganAffected = dict()
        for organ in fetch:
            org = list(organ)[0]
            # Parameterized (the original interpolated the organ name, which
            # would break or be abusable for values containing quotes).
            cursor.execute("SELECT Problem FROM deficiency WHERE OrganAffected = %s;",
                           (str(org),))
            OrganAffected[org] = [list(probs)[0] for probs in cursor.fetchall()]
        return OrganAffected
    finally:
        conn.close()
def FetchTopNutFood(nutrition):
    """Return the ten foods richest in `nutrition`, formatted with units.

    Arguments:
        nutrition (str): a FoodDetails column name (must be a key of Units).
    Returns:
        list[str]: "FoodName  ( amount unit ) " strings, highest first.
    Raises:
        ValueError: if `nutrition` is not a known nutrient column.
    """
    # Column names cannot be bound as SQL parameters, so restrict the
    # interpolated name to the known set of columns to prevent injection.
    if str(nutrition) not in Units:
        raise ValueError("unknown nutrient column: %r" % (nutrition,))
    conn = mc.connect(user="root", host="localhost", database="dietcare", passwd=Password)
    try:
        cursor = conn.cursor()
        cursor.execute("SELECT FoodName," + str(nutrition) +
                       " FROM FoodDetails ORDER BY " + str(nutrition) + " DESC LIMIT 0,10")
        foo = []
        for food in cursor.fetchall():
            foo.append(list(food)[0] + " " + " ( " + str(food[1]) + " " + Units[str(nutrition)] + " ) ")
        return foo
    finally:
        conn.close()
def SelectedDeficiency(deficiency):
    """For each selected problem, record its deficiency and top foods.

    Appends the deficiency names to the module-global `Cure` and one
    {problem: {food-column: top-food-list}} dict per problem to `problem`.

    Arguments:
        deficiency (iterable): problem descriptions chosen by the user.
    """
    conn = mc.connect(user="root", host="localhost", database="dietcare", passwd=Password)
    cursor = conn.cursor()
    global problem, Cure
    for probs in deficiency:
        # Parameterized (the original interpolated user-chosen text into SQL).
        cursor.execute("SELECT Deficiency FROM deficiency WHERE Problem = %s;", (probs,))
        fetch = cursor.fetchall()
        for var in fetch:
            Cure.append(list(var)[0])
        temp = dict()
        temp2 = dict()
        # Look up the FoodDetails column for the first matching deficiency.
        column = DeficiencyToFoodMap[str(list(fetch[0])[0])]
        temp2[column] = FetchTopNutFood(column)
        temp[probs.title()] = temp2
        problem.append(temp)
    conn.close()
def CreateTables(cursor):
    """Create the application's tables if they do not already exist.

    Creates Login, FoodDetails, UserFood and Deficiency; the Consultation
    and Disease tables are currently disabled.
    """
    cursor.execute("CREATE TABLE IF NOT EXISTS Login ( "
                   "UserID INT AUTO_INCREMENT PRIMARY KEY NOT NULL,"
                   "Username VARCHAR(25),"
                   "Password VARCHAR(30),"
                   "EmailID VARCHAR (30),"
                   "Age TINYINT );")
    # cursor.execute("CREATE TABLE IF NOT EXISTS Consultation ( "
    # "ProfessionalID INT AUTO_INCREMENT PRIMARY KEY NOT NULL,"
    # "ProfessionalUsername VARCHAR(25),"
    # "ProfessionalPassword VARCHAR(30),"
    # "ProfessionalQualification VARCHAR (30),"
    # "ProfessionalAge TINYINT,"
    # "NoofSessionAttended INT,"
    # "ConsultedUser VARCHAR(25) );")
    # One row per food; the 30 nutrient columns mirror the Columns constant.
    cursor.execute("CREATE TABLE IF NOT EXISTS FoodDetails ( "
                   "FoodID VARCHAR(10) PRIMARY KEY,"
                   "FoodName VARCHAR(150),"
                   "Water FLOAT,"
                   "Protein FLOAT,"
                   "TotalFat FLOAT,"
                   "TotalFibre FLOAT,"
                   "Carbohydrates FLOAT,"
                   "Energy FLOAT,"
                   "VitB1 FLOAT,"
                   "VitB2 FLOAT,"
                   "VitB3 FLOAT,"
                   "VitB5 FLOAT,"
                   "VitB6 FLOAT,"
                   "VitB7 FLOAT,"
                   "VitB9 FLOAT,"
                   "Arsenic FLOAT,"
                   "Calcium FLOAT,"
                   "Cobalt FLOAT,"
                   "Copper FLOAT,"
                   "Iron FLOAT,"
                   "Leadd FLOAT,"
                   "Magnesium FLOAT,"
                   "Mercury FLOAT,"
                   "Phosphorous FLOAT,"
                   "Potassium FLOAT,"
                   "Sodium FLOAT,"
                   "Zinc FLOAT,"
                   "TotalStarch FLOAT,"
                   "Fructose FLOAT,"
                   "Glucose FLOAT,"
                   "Sucrose FLOAT,"
                   "TotalFreeSugars FLOAT );")
    # Per-user food log (what was eaten, and when).
    cursor.execute("CREATE TABLE IF NOT EXISTS UserFood ( "
                   "UserID INT ,"
                   "FoodName VARCHAR(150),"
                   "Date VARCHAR(12),"
                   "Time VARCHAR(10) );")
    # cursor.execute("CREATE TABLE IF NOT EXISTS Disease("
    # "Disease VARCHAR(100),"
    # "Medicine VARCHAR(100) );")
    cursor.execute("CREATE TABLE IF NOT EXISTS Deficiency("
                   "Problem VARCHAR(100),"
                   "OrganAffected VARCHAR(20),"
                   "Deficiency VARCHAR(30) );")
# Module-level initialisation: open the root connection and select the DB.
conn = mc.connect( user="root", host="localhost",passwd=Password)
cursor = conn.cursor()
# One-time setup (disabled): create the database, then bulk-load the food,
# disease and deficiency data from FoodFilter.
'''data = foodpy.csv_Fooddata
disease = foodpy.diseases
CreateDatabase(cursor)'''
cursor.execute("use dietcare")
'''
CreateTables(cursor)
for food in data:
InsertFood(food,cursor,conn)
data_val=[]
for key in disease.keys():
for value in disease[key]:
temp=[]
temp.append(key)
temp.append(value)
data_val.append(tuple(temp))
for value in data_val:
InsertDisease(value,cursor,conn)
data_val=[]
deficit = foodpy.deficit
for value in deficit:
InsertDeficiency(value,cursor,conn)
'''
# Pre-populate the category -> foods map at import time.
foodfun = FetchFoods()
import torch
import torch.nn as nn
import torch.nn.functional as F
class RNNNaive(nn.Module):
    """A hand-rolled recurrent cell: Linear(input+hidden -> hidden) with a
    ReLU nonlinearity, plus a linear readout of the hidden state."""

    def __init__(self, input_size, hidden_size, output_size):
        super(RNNNaive, self).__init__()
        self.hidden_size = hidden_size
        self.i2h = nn.Linear(input_size + hidden_size, hidden_size)
        self.h2o = nn.Linear(hidden_size, output_size)

    def forward(self, input, hidden):
        # A size-1 hidden state is broadcast across the batch dimension.
        if hidden.shape[0] < input.shape[0]:
            hidden = hidden.expand([input.shape[0], self.hidden_size])
        hidden = F.relu(self.i2h(torch.cat((input, hidden), 1)))
        return self.h2o(hidden), hidden

    def initHidden(self):
        """Return a zero hidden state of shape (1, hidden_size)."""
        return torch.zeros(1, self.hidden_size)
class RNN(nn.Module):
    """Two-layer bidirectional GRU with a per-time-step linear readout."""

    def __init__(self, input_size, hidden_size, output_size):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        # batch_first: inputs arrive as (batch, seq, feature).
        self.rnn = nn.GRU(input_size, self.hidden_size, num_layers=2,
                          batch_first=True, bidirectional=True)
        # A bidirectional GRU doubles the feature dimension of its output.
        self.h2o = nn.Linear(self.hidden_size * 2, output_size)

    def forward(self, inputs):
        hiddens, _ = self.rnn(inputs)
        return self.h2o(hiddens)
|
# -*- coding: utf-8 -*-
# flake8: noqa
# Generated by Django 1.11 on 2017-05-27 09:05
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration: adds the ProductFeatureMain model and
    the `hint` field (plus en/fr/ru translations) on ProductFeature.
    Generated code -- do not hand-edit the operations below."""

    dependencies = [
        ('catalog', '0005_productcategory_seo_block_image'),
    ]

    operations = [
        migrations.CreateModel(
            name='ProductFeatureMain',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='Создано')),
                ('updated', models.DateTimeField(auto_now=True, verbose_name='Обновлено')),
                ('ordering', models.IntegerField(db_index=True, default=0, verbose_name='Порядок')),
                ('status', models.SmallIntegerField(choices=[(0, 'Черновик'), (1, 'Публичный'), (2, 'Скрытый')], default=1, verbose_name='Статус')),
                ('value', models.CharField(max_length=255, verbose_name='Значение')),
                ('hint', models.CharField(blank=True, max_length=255, null=True, verbose_name='Подсказка')),
                ('feature', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='product_features_main', to='catalog.Feature', verbose_name='Характеристика')),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='features_main', to='catalog.Product', verbose_name='Товар')),
            ],
            options={
                'verbose_name_plural': 'Главные характеристики товара',
                'verbose_name': 'Главная характеристика товара',
            },
        ),
        migrations.AddField(
            model_name='productfeature',
            name='hint',
            field=models.CharField(blank=True, max_length=255, null=True, verbose_name='Подсказка'),
        ),
        migrations.AddField(
            model_name='productfeature',
            name='hint_en',
            field=models.CharField(blank=True, max_length=255, null=True, verbose_name='Подсказка'),
        ),
        migrations.AddField(
            model_name='productfeature',
            name='hint_fr',
            field=models.CharField(blank=True, max_length=255, null=True, verbose_name='Подсказка'),
        ),
        migrations.AddField(
            model_name='productfeature',
            name='hint_ru',
            field=models.CharField(blank=True, max_length=255, null=True, verbose_name='Подсказка'),
        ),
    ]
|
import os
import glob
from collections import defaultdict
import langid
from generate_multi import ParallelWriter
#from ilmulti.segment import Segmenter
#from ilmulti.sentencepiece import SentencePieceTokenizer
# Languages to process against English.
reqs = ['hi', 'ml', 'ta', 'ur', 'te', 'bn', 'mr', 'gu', 'or']


def dirname(xx):
    """Return the canonical 'aa-bb' pair-directory name for (xx, 'en')."""
    fst, snd = sorted([xx, 'en'])
    return '{}-{}'.format(fst, snd)


for lang in reqs:
    mkb = defaultdict(list)
    pib = defaultdict(list)
    fpath = './mkb-filt/'
    fname = 'mkb-filt'
    pwriter = ParallelWriter(fpath, fname)
    dxx = dirname(lang)
    # Load the filtered MKB and PIB parallel corpora for this pair.
    # (The original passed spurious extra arguments to .format; the
    # resulting paths are unchanged.)
    with open('./mkb/{}/mkb.filt.{}'.format(dxx, lang)) as src1,\
         open('./mkb/{}/mkb.filt.en'.format(dxx)) as tgt1,\
         open('./mkb/{}/pib.en-{}.{}.filt.txt'.format(dxx, lang, lang)) as src2,\
         open('./mkb/{}/pib.en-{}.en.filt.txt'.format(dxx, lang)) as tgt2:
        for s1, t1 in zip(src1, tgt1):
            mkb[s1.strip()].append(t1.strip())
        for s2, t2 in zip(src2, tgt2):
            pib[s2.strip()].append(t2.strip())
    # Emit only MKB pairs whose source sentence never occurs in PIB,
    # i.e. deduplicate MKB against PIB (sets built once, not per line).
    for k in set(mkb).difference(set(pib)):
        pwriter.write(lang, 'en', k, mkb[k][0])
|
import pytest
import transaction
from onegov.core.orm import Base, SessionManager
from onegov.core.orm.types import UUID
from onegov.pay.models import Payable, Payment, PaymentProvider, ManualPayment
from onegov.pay.collections import PaymentCollection
from sqlalchemy import Column
from sqlalchemy import Text
from sqlalchemy.ext.declarative import declarative_base
from uuid import uuid4
from sqlalchemy.exc import IntegrityError
def test_payment_with_different_bases(postgres_dsn):
    """Payments attach to Payables from two different declarative bases and
    can be shared between records of either base."""
    MyBase = declarative_base()

    class Order(MyBase, Payable):
        __tablename__ = 'orders'
        id = Column(UUID, primary_key=True, default=uuid4)
        title = Column(Text)

    class Subscription(Base, Payable):
        __tablename__ = 'subscriptions'
        id = Column(UUID, primary_key=True, default=uuid4)
        title = Column(Text)

    mgr = SessionManager(postgres_dsn, Base)
    mgr.bases.append(MyBase)
    mgr.set_current_schema('foobar')
    session = mgr.session()

    provider = PaymentProvider()
    apple = Order(title="Apple")
    pizza = Order(title="Pizza")
    kebab = Order(title="Kebab")
    times = Subscription(title="Times")

    apple.payment = provider.payment(amount=100)
    pizza.payment = provider.payment(amount=200)
    # One payment can back several records, even across bases.
    kebab.payment = apple.payment
    times.payment = pizza.payment

    session.add_all((apple, pizza, kebab, times))
    session.flush()

    # Only two distinct payments exist despite four payable records.
    assert session.query(Payment).count() == 2
    assert session.query(Order).count() == 3
    assert session.query(Subscription).count() == 1

    apple = session.query(Order).filter_by(title="Apple").one()
    pizza = session.query(Order).filter_by(title="Pizza").one()
    kebab = session.query(Order).filter_by(title="Kebab").one()
    times = session.query(Subscription).filter_by(title="Times").one()

    assert apple.payment.amount == 100
    assert pizza.payment.amount == 200
    assert kebab.payment.amount == 100
    assert times.payment.amount == 200

    mgr.dispose()
@pytest.mark.skip("Analyze missing reservations table")
def test_payment_referential_integrity(postgres_dsn):
    """Providers/payables cannot be deleted while a payment references them;
    deleting the payment first makes the deletions possible."""
    MyBase = declarative_base()

    class Order(MyBase, Payable):
        __tablename__ = 'orders'
        id = Column(UUID, primary_key=True, default=uuid4)
        title = Column(Text)

    mgr = SessionManager(postgres_dsn, Base)
    mgr.bases.append(MyBase)
    mgr.set_current_schema('foobar')
    session = mgr.session()

    apple = Order(title="Apple", payment=PaymentProvider().payment(amount=100))
    session.add(apple)
    transaction.commit()

    # The provider is still referenced by its payment.
    with pytest.raises(IntegrityError):
        session.delete(session.query(PaymentProvider).one())
        session.flush()
    transaction.abort()

    # as a precaution we only allow deletion of elements after the payment
    # has been explicitly deleted
    with pytest.raises(IntegrityError):
        session.delete(session.query(Order).one())
        session.flush()
    transaction.abort()

    # Deleting the order before its payment still violates the constraint.
    with pytest.raises(IntegrityError):
        session.delete(session.query(Order).one())
        session.delete(session.query(Payment).one())
        session.flush()
    transaction.abort()

    # Payment first, then order: succeeds and leaves all tables empty.
    session.delete(session.query(Payment).one())
    session.delete(session.query(Order).one())
    transaction.commit()

    assert not list(session.execute("select * from orders"))
    assert not list(session.execute("select * from payments"))
    assert not list(session.execute(
        "select * from payments_for_orders_payment"
    ))

    mgr.dispose()
def test_backref(postgres_dsn):
    """Each Payable class gets its own linked_* backref on Payment, and the
    `links` query aggregates all linked records of any type."""
    MyBase = declarative_base()

    class Product(MyBase, Payable):
        __tablename__ = 'products'
        id = Column(UUID, primary_key=True, default=uuid4)
        title = Column(Text)

    class Part(MyBase, Payable):
        __tablename__ = 'parts'
        id = Column(UUID, primary_key=True, default=uuid4)
        title = Column(Text)

    mgr = SessionManager(postgres_dsn, Base)
    mgr.bases.append(MyBase)
    mgr.set_current_schema('foobar')
    session = mgr.session()

    provider = PaymentProvider()
    car = Product(title="Car", payment=provider.payment(amount=10000))
    nut = Part(title="Nut", payment=provider.payment(amount=10))
    session.add_all((car, nut))
    session.flush()

    payments = session.query(Payment).all()

    # Separate payments: each only links its own record type.
    assert [t.title for p in payments for t in p.linked_products] == ["Car"]
    assert [t.title for p in payments for t in p.linked_parts] == ["Nut"]

    assert len(car.payment.linked_products) == 1
    assert len(car.payment.linked_parts) == 0
    assert len(nut.payment.linked_products) == 0
    assert len(nut.payment.linked_parts) == 1

    assert car.payment.links.count() == 1
    assert car.payment.links.first().title == "Car"

    assert nut.payment.links.count() == 1
    assert nut.payment.links.first().title == "Nut"

    assert len(PaymentCollection(session).payment_links_by_batch()) == 2

    # Point both records at one shared payment and re-check the backrefs.
    session.delete(nut.payment)
    nut.payment = car.payment
    session.flush()

    assert len(car.payment.linked_products) == 1
    assert len(car.payment.linked_parts) == 1
    assert len(nut.payment.linked_products) == 1
    assert len(nut.payment.linked_parts) == 1

    assert car.payment.links.count() == 2
    assert {r.title for r in car.payment.links} == {"Car", "Nut"}

    assert len(PaymentCollection(session).payment_links_by_batch()) == 1

    mgr.dispose()
def test_manual_payment(postgres_dsn):
    """A ManualPayment (no provider) links to its payable like any payment."""
    MyBase = declarative_base()

    class Product(MyBase, Payable):
        __tablename__ = 'products'
        id = Column(UUID, primary_key=True, default=uuid4)
        title = Column(Text)

    mgr = SessionManager(postgres_dsn, Base)
    mgr.bases.append(MyBase)
    mgr.set_current_schema('foobar')
    session = mgr.session()

    car = Product(title="Car", payment=ManualPayment(amount=10000))
    session.add(car)
    session.flush()

    payments = session.query(Payment).all()
    assert [t.title for p in payments for t in p.linked_products] == ["Car"]

    mgr.dispose()
|
from utils import readTrainLabels,readTrainData
import classModel
import glob
def main():
    """Restore the CNN from its checkpoint and continue training on the
    XML-labelled training data."""
    directory = classModel.Directories()
    files = glob.glob(directory.xmlPath)
    para = classModel.Parameters()
    # 7 appears to be the number of output classes -- TODO confirm in classModel
    modelPara = classModel.ModelParameters(len(files), para.lx, para.ly, 7)
    CNNModel = classModel.Model()
    CNNModel.model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    # Resume from the last saved weights rather than training from scratch.
    CNNModel.model.load_weights(directory.checkpoint_path)
    y_train = readTrainLabels(directory, modelPara)
    x_train = readTrainData(directory, modelPara, para)
    # 20% of samples are held out for validation; cp_callback saves checkpoints.
    CNNModel.model.fit(x_train, y_train, batch_size=modelPara.batch_size, epochs=20, validation_split=0.2, callbacks=[directory.cp_callback])


if __name__ == '__main__': main()
from flask import jsonify
from werkzeug.http import HTTP_STATUS_CODES
def error_response(status_code, message=None):
    """Build a (payload, status_code) error tuple for a Flask view.

    Arguments:
        status_code (int): HTTP status code for the response.
        message: explicit error payload; when falsy, the standard HTTP
            reason phrase for the status code is used instead.
    Returns:
        tuple: ({"errors": ...}, status_code)
    """
    scoreload = {}
    if message:
        scoreload["errors"] = message
    else:
        # The original line ended with a stray trailing comma, which wrapped
        # the reason phrase in a 1-tuple, e.g. {"errors": ("Not Found",)}.
        scoreload["errors"] = HTTP_STATUS_CODES.get(status_code, 'Unknown Error')
    #end if
    response = scoreload
    return response, status_code
#end def
#end def
"""
4xx Client Error
"""
def request_success(message=None) :
return error_response(200, message)
#end def
def bad_request(message=None):
return error_response(400, message)
#end def
def request_not_found(message=None):
return error_response(404, message)
#end def
def insufficient_scope(message=None):
return error_response(403, message)
#end def
def method_not_allowed(message=None):
return error_response(405, message)
#end def
"""
5xx Server Error
"""
def internal_error(message=None):
return error_response(500, message)
#end def
def bad_gateway(message=None):
return error_response(502, message)
#end def
def service_unavailable(message=None):
return error_response(503, message)
#end def
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.