# -*- coding: utf-8 -*-
"""
Created on Wed May 15 16:35:41 2019
@author: Administrator
"""
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
import rcos
import gold
import wave
import struct
SIG_FREQ = 500
SAMPLE_FREQ = 4000
# PN_CODE = np.array([1,1,1,1,1,-1,-1,1,1,-1,1,-1,1])
# PN_CODE = np.array([1,1,1,1,1,0,0,1,1,0,1,0,1])  # 13-bit Barker code
# PN_CODE = np.ones(127)
# PN_CODE = np.array([1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0])#M CODE
#PN_CODE = np.array([1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0])
# PN_CODE = np.array([1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0])
# PN_CODE = np.array([1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0])
PN_CODE = np.array([1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0])
# PN_CODE = np.array([0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1])
# PN_CODE = np.random.randint(0,2,16)#RANDOM CODE
# PN_CODE = np.kron(np.random.randint(0,2,20),np.ones(20))
class iq_mod:
def __init__(self,sig_freq=1000,sample_freq=32000,rep_N=8,beta=0.3,sps=4,span=1):
i_wave = np.kron(np.ones(rep_N),np.cos(2*np.pi*sig_freq*np.arange(sample_freq/sig_freq)/sample_freq))
q_wave = np.kron(np.ones(rep_N),np.sin(2*np.pi*sig_freq*np.arange(sample_freq/sig_freq)/sample_freq))
self.wave = np.vstack((i_wave,q_wave))
self.period = int(sample_freq*rep_N/sig_freq)
def apl_mod(self,d_iq,mod=0):
if mod==1:
din = d_iq*2 - 1
return np.vstack((np.kron(din[0],self.wave[0]),np.kron(din[1],self.wave[1])))
def mix(self,d_iq,phase=0):
return d_iq*np.tile(np.roll(self.wave,phase,axis=1),int(np.ceil(d_iq.shape[1]/self.wave.shape[1])))
def spread(self,din,code):
return np.kron((din-0.5)*2,code)
def despread(self,din,code):
out = np.zeros(din.shape[0])
code_p = code*2 -1
intp_code = np.kron(code_p,np.ones(self.period))
print("cor len:%d\n" % intp_code.shape[0])
for i in range(intp_code.shape[0],din.shape[0]):
out[i] = np.dot(din[i-intp_code.shape[0]:i],intp_code)
return out
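# Example of the spreading step in isolation (toy values, not the PN_CODE defined above):
# (np.array([1, 0]) - 0.5) * 2 gives [ 1, -1], and np.kron([1, -1], [1, -1, 1]) gives
# [ 1, -1, 1, -1, 1, -1], i.e. each data bit is repeated chip-by-chip over the spreading code.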
#def rrc(beta, filter_width, Ts):
# """
# https://en.wikipedia.org/wiki/Root-raised-cosine_filter
# :param beta: roll-off factor
# :param filter_width: The width of the filter, samples
# :param Ts: The width of a symbol, samples
# :return: impulse response of the filter, the tuple of filter_width float numbers coefficients
# """
# rrc_out = []
# for i in range(0, filter_width):
# rrc_out.append(0.0)
# if beta != 0.0:
# t1 = Ts/(4*beta)
# else:
# t1 = Ts
#
# for p in range(0, filter_width):
# t = (p - filter_width / 2)
# if t == 0.0:
# rrc_out[p] = (1 + beta*(4/np.pi - 1))
# elif t == t1 or t == -t1:
# if beta != 0.0:
# arg = np.pi/(4*beta)
# s = (1 + 2/np.pi)*np.sin(arg)
# c = (1 - 2/np.pi)*np.cos(arg)
# rrc_out[p] = (s + c) * (beta/np.sqrt(2))
# else:
# rrc_out[p] = 0
# else:
# pts = np.pi*t/Ts
# bt = 4*beta*t/Ts
# s = np.sin(pts*(1-beta))
# c = np.cos(pts*(1+beta))
# div = pts*(1 - bt*bt)
# rrc_out[p] = (s + bt*c)/div
# return tuple(rrc_out)
class my_filter:
def __init__(self,N,filt_zone=[0.2],filt_type='lowpass'):
self.b,self.a = signal.butter(N, filt_zone, filt_type)
        self.z = np.zeros(max(len(self.a), len(self.b)) - 1, dtype=float)
def filt(self,din):
dout, self.z = signal.lfilter(self.b, self.a, din, zi=self.z)
return dout
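# Note: keeping the filter state `zi` in self.z between calls lets the same filter be applied
# to a signal in chunks (as done with z1..z5 below) while producing the same output as
# filtering the whole signal in a single lfilter call.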
def my_fft(din):
fftx = np.fft.rfft(din)/din.shape[0]
xfp = np.abs(fftx)*2
return xfp
iq_mod_inst = iq_mod(SIG_FREQ,SAMPLE_FREQ,rep_N=1)
lpf_inst_i = my_filter(3,[0.15],'lowpass')
lpf_inst_q = my_filter(3,0.15,'lowpass')
din1 = np.tile(np.vstack((PN_CODE,PN_CODE)),4)
#din2 = np.tile(np.vstack((PN_CODE1,PN_CODE1)),4)
din2 = din1
din = din1 + din2
dm = iq_mod_inst.apl_mod(din,mod=1)
noise = np.random.randn(dm.shape[0],dm.shape[1])
# noise = np.random.randn(dm.shape)
dmn = dm + noise*1
dmn[1]=dmn[0]
dmm = iq_mod_inst.mix(dmn,1)
print("di len:%d\n" % din.shape[0])
b, a = signal.butter(3, [0.15], 'lowpass')
df = dmm[0]
zt = signal.filtfilt(b,a,df)
z1 = lpf_inst_i.filt(df[0:20])
z2 = lpf_inst_i.filt(df[20:40])
z3 = lpf_inst_i.filt(df[40:60])
z4 = lpf_inst_i.filt(df[60:80])
z5 = lpf_inst_i.filt(df[80:])
zo = np.concatenate((z1,z2,z3,z4,z5))
cor_i = iq_mod_inst.despread(zo,PN_CODE)
df = dmm[1]
zt = signal.filtfilt(b,a,df)
z1 = lpf_inst_q.filt(df[0:20])
z2 = lpf_inst_q.filt(df[20:40])
z3 = lpf_inst_q.filt(df[40:60])
z4 = lpf_inst_q.filt(df[60:80])
z5 = lpf_inst_q.filt(df[80:])
zo = np.concatenate((z1,z2,z3,z4,z5))
cor_q = iq_mod_inst.despread(zo,PN_CODE)
cor = np.vstack((cor_i,cor_q))
print("zi len:%d\n" % zo.shape[0])
fig = plt.figure()
ax = fig.add_subplot(411)
bx = fig.add_subplot(412)
cx = fig.add_subplot(413)
dx = fig.add_subplot(414)
x = np.arange(dm.shape[1])/SAMPLE_FREQ
xh = np.arange(dm.shape[1]/2 + 1)*SAMPLE_FREQ/dm.shape[1]
ax.plot(x,dmn[1],'g',label='qdm')
ax.plot(x,dm[0],'r',label='dm')
ax.legend()
bx.plot(x,cor[0],label='cor_i')
bx.plot(x,cor[1],label='cor_q')
bx.plot(x,np.linalg.norm(cor,axis=0),label='norm')
bx.grid(True, linestyle='-.')
bx.legend()
cx.plot(x,dmm[1],label='di')
cx.plot(x,zo,label='zo')
cx.plot(x,zt,'r',label='zt')
cx.legend()
#dx.plot(x,dm[0],label="di")
#dx.plot(x,dm[1],label="dq")
idff = my_fft(dmn[0])
dx.plot(xh,idff,label="i_freq/amp")
dx.legend()
plt.show()
|
#encoding: utf-8
import circulo
import random
import sys
import subprocess
def circuloRandom():
    #Returns a random Circulo(x1, y1, r) object
x1 = random.randint(1,399)
y1 = random.randint(1,399)
rmax = min(400-x1,400-y1)
    r = random.randint(1, rmax)
data = str(x1)+','+str(y1)
return circulo.Circulo(x1,y1,r)
def leerCirculos(nombre_arch) :
    #Returns a list of Circulo objects read from a file with lines in x1,y1,r format
arch = open(nombre_arch,'r')
tmp = []
for line in arch :
tmp.append(line.split(','))
for c in tmp :
c[0] = int (c[0])
c[1] = int (c[1])
c[2] = int (c[2])
return [circulo.Circulo(c[0],c[1],c[2]) for c in tmp]
def crearArchivoPlot(nombre_arch) :
    #Creates a circulos.gnu file to plot the circles described in the file nombre_arch;
    #that file must have the format x1,y1,r
    outfile = open('circulos.gnu', 'w')
outfile.write("set term x11 \nset size ratio - 1 \nset parametric \nset xrange [0:400] \nset yrange [0:400] \nunset key \nset multiplot\n")
outfile.write("x(t) = r*cos(t) + x0\ny(t) = r*sin(t) + y0\n")
circulos = leerCirculos(nombre_arch)
    #for each circle object, write its data to the file so it gets plotted.
i = j = 0
while i < len(circulos) :
outfile.write("r = {}\n".format(circulos[i].r))
outfile.write("x0 = {}\n".format(circulos[i].x1))
outfile.write("y0 = {}\n".format(circulos[i].y1))
        # decide here whether to plot with "w l" or with "w lt 2"
j = 0
flag = 0
while j < len(circulos) :
if circulos[i] != circulos [j] :
if circulos[i].compara(circulos[j]) == 0 :
                    flag = 1 #at least one circle collides with this one
j += 1
if flag == 1 :
outfile.write("plot x(t),y(t) w l lt 2\n")
else :
outfile.write("plot x(t),y(t) w l\n")
i += 1
outfile.write("unset multiplot\npause -1")
outfile.close()
def main() :
if len(sys.argv) != 2 :
print 'Args: número_de_círculos'
sys.exit(2)
try :
nCirculos = int(sys.argv[1])
except ValueError:
print 'El argumento debe ser un número.'
sys.exit(3)
listaC = []
for i in range(nCirculos):
listaC.append(circuloRandom())
    arch = open('lista_circulos.dat', 'w')
i = 0
while i < nCirculos :
arch.write("{},{},{}\n".format(listaC[i].x1,listaC[i].y1,listaC[i].r))
i += 1
arch.close()
circulos = leerCirculos('lista_circulos.dat')
crearArchivoPlot('lista_circulos.dat')
try :
subprocess.check_output(['gnuplot','circulos.gnu'])
except KeyboardInterrupt:
#plot.terminate()
print "Bye!"
sys.exit(0)
main()
|
from requests import get, post
import os
from dotenv import load_dotenv
load_dotenv()
import pandas as pd
# Module variables to connect to moodle api
KEY = os.getenv("MOODLE_API_KEY")
URL = os.getenv("URL")
ENDPOINT="/webservice/rest/server.php"
def rest_api_parameters(in_args, prefix='', out_dict=None):
"""Transform dictionary/array structure to a flat dictionary, with key names
defining the structure.
Example usage:
>>> rest_api_parameters({'courses':[{'id':1,'name': 'course1'}]})
{'courses[0][id]':1,
'courses[0][name]':'course1'}
"""
if out_dict==None:
out_dict = {}
if not type(in_args) in (list,dict):
out_dict[prefix] = in_args
return out_dict
if prefix == '':
prefix = prefix + '{0}'
else:
prefix = prefix + '[{0}]'
if type(in_args)==list:
for idx, item in enumerate(in_args):
rest_api_parameters(item, prefix.format(idx), out_dict)
elif type(in_args)==dict:
for key, item in in_args.items():
rest_api_parameters(item, prefix.format(key), out_dict)
return out_dict
def call(fname, **kwargs):
"""Calls moodle API function with function name fname and keyword arguments.
Example:
>>> call_mdl_function('core_course_update_courses',
courses = [{'id': 1, 'fullname': 'My favorite course'}])
"""
parameters = rest_api_parameters(kwargs)
parameters.update({"wstoken": KEY, 'moodlewsrestformat': 'json', "wsfunction": fname})
response = post(URL+ENDPOINT, parameters).json()
if type(response) == dict and response.get('exception'):
raise SystemError("Error calling Moodle API\n", response)
return response
class CourseList():
"""Class for list of all courses in Moodle and order them by id and idnumber."""
def __init__(self):
# TODO fullname atribute is filtered
# (no <span class="multilang" lang="sl">)
courses_data = call('core_course_get_courses')
self.courses = []
for data in courses_data:
self.courses.append(Course(**data))
self.id_dict = {}
self.idnumber_dict = {}
for course in self.courses:
self.id_dict[course.id] = course
if course.idnumber:
self.idnumber_dict[course.idnumber] = course
def __getitem__(self, key):
if 0<= key < len(self.courses):
return self.courses[key]
else:
raise IndexError
def by_id(self, id):
"Return course with given id."
return self.id_dict.get(id)
def by_idnumber(self, idnumber):
"Course with given idnumber"
return self.idnumber_dict.get(idnumber)
    @staticmethod
    def update_courses(courses_to_update, fields):
        "Update a list of courses in one go."
if not ('id' in fields):
fields.append('id')
courses = [{k: c.__dict__[k] for k in fields} for c in courses_to_update]
return call("core_course_update_courses",
courses = courses)
class Course():
"""Class for a single course.
Example:
>>> Course(name="Example course", shortname="example", categoryid=1, idnumber=123)
"""
def __init__(self, **data):
self.__dict__.update(data)
    def create(self):
        "Create this course on Moodle."
        res = call('core_course_create_courses', courses=[self.__dict__])
        if type(res) == list:
            self.id = res[0].get('id')
    def update(self):
        "Update this course on Moodle."
        r = call('core_course_update_courses', courses=[self.__dict__])
def i18n_set(self, **data):
"Transform given field to multilang string with <span class=\"multilang\""
template = "<span class=\"multilang\" lang=\"{}\">{}</span>"
for field in data:
value = data[field]
new_value = ""
if type(value) == dict:
if len(value) == 1:
for lang in value:
new_value += value[lang]
else:
for lang in value:
if value[lang]:
new_value += template.format(lang, value[lang])
self.__dict__[field] = new_value
|
# coding=utf-8
import unittest
import warnings
from mock import patch, Mock
from . import interface_factory
from . import api_details
from paypal.exceptions import PayPalAPIResponseError, PayPalConfigError
from paypal.interface import PayPalInterface
from paypal.response import PayPalResponse
interface = interface_factory.get_interface_obj()
class TestExpressCheckout(unittest.TestCase):
def setUp(self):
self.returnurl = 'http://www.paypal.com'
self.cancelurl = 'http://www.ebay.com'
def test_sale(self):
"""
Tests the first part of a sale. At this point, this is a partial unit
test. The user has to login to PayPal and approve the transaction,
which is not something we have tackled in the unit test yet. So we'll
just test the set/get_express_checkout methods.
A call to `SetExpressCheckoutDetails`.
A call to `DoExpressCheckoutPayment`.
A call to `GetExpressCheckoutDetails`.
"""
setexp_response = interface.set_express_checkout(
amt='10.00',
returnurl=self.returnurl, cancelurl=self.cancelurl,
paymentaction='Order',
email=api_details.EMAIL_PERSONAL
)
self.assertTrue(setexp_response)
token = setexp_response.token
getexp_response = interface.get_express_checkout_details(token=token)
# Redirect your client to this URL for approval.
redirect_url = interface.generate_express_checkout_redirect_url(token)
# Once they have approved your transaction at PayPal, they'll get
# directed to the returnurl value you defined in set_express_checkout()
# above. This view should then call do_express_checkout_payment() with
# paymentaction = 'Sale'. This will finalize and bill.
def test_authorize_and_delayed_capture(self):
"""
Tests a four-step checkout process involving the following flow::
One or more calls to `SetExpressCheckout`.
--- User goes to PayPal, logs in, and confirms shipping, taxes,
and total amount. ---
A call to `GetExpressCheckoutDetails`.
A call to `DoExpressCheckoutPayment`.
A call to `DoAuthorization`.
A call to `DoCapture`.
"""
pass
def test_authorize_and_void(self):
"""
Tests a four-step checkout process involving the following flow::
One or more calls to `SetExpressCheckout`.
--- User goes to PayPal, logs in, and confirms shipping, taxes,
and total amount. ---
A call to `GetExpressCheckoutDetails`.
A call to `DoExpressCheckoutPayment`.
A call to `DoAuthorization`.
A call to `DoVoid`.
"""
pass
class UrlGenerationTest(unittest.TestCase):
def test_no_useraction(self):
redirect_url = interface.generate_express_checkout_redirect_url(
'token-abc')
self.assertTrue(redirect_url.endswith(
'/webscr?cmd=_express-checkout&token=token-abc'))
def test_renders_useraction_commit(self):
redirect_url = interface.generate_express_checkout_redirect_url(
'token-abc', useraction='commit')
redirect_path = ('/webscr?cmd=_express-checkout&token=token-abc'
'&useraction=commit')
self.assertTrue(redirect_url.endswith(redirect_path))
def test_renders_useraction_continue(self):
redirect_url = interface.generate_express_checkout_redirect_url(
'token-abc', useraction='continue')
redirect_path = ('/webscr?cmd=_express-checkout&token=token-abc'
'&useraction=continue')
self.assertTrue(redirect_url.endswith(redirect_path))
def test_renders_any_useraction_with_warning(self):
with warnings.catch_warnings(record=True) as warning_context:
redirect_url = interface.generate_express_checkout_redirect_url(
'token-abc', useraction='some_action')
self.assertTrue(issubclass(warning_context[0].category,
RuntimeWarning))
redirect_path = ('/webscr?cmd=_express-checkout&token=token-abc'
'&useraction=some_action')
self.assertTrue(redirect_url.endswith(redirect_path))
class CallParamsTest(unittest.TestCase):
def setUp(self):
self.configs_3token = {'API_USERNAME': 'test_username',
'API_PASSWORD': 'test_password',
'API_SIGNATURE': 'test_signature',
'API_AUTHENTICATION_MODE': '3TOKEN'}
self.configs_certificate = {
'API_USERNAME': 'test_username',
'API_PASSWORD': 'test_password',
'API_CERTIFICATE_FILENAME': 'test_cert_filename',
'API_KEY_FILENAME': 'test_key_filename',
'API_AUTHENTICATION_MODE': 'CERTIFICATE'}
def test_returns_3token_call_params(self):
interface = PayPalInterface(**self.configs_3token)
call_kwargs = {'param_a': 'a1', 'param_b': 'b2'}
call_params = interface._get_call_params('some_method', **call_kwargs)
version = interface.config.API_VERSION
expected_call_params = {'data': {'USER': 'test_username',
'PWD': 'test_password',
'SIGNATURE': 'test_signature',
'PARAM_A': 'a1',
'PARAM_B': 'b2',
'METHOD': 'some_method',
'VERSION': version},
'cert': None,
'url': interface.config.API_ENDPOINT,
'timeout': interface.config.HTTP_TIMEOUT,
'verify': interface.config.API_CA_CERTS}
self.assertEqual(expected_call_params, call_params)
def test_returns_unipay_call_params(self):
interface = PayPalInterface(**self.configs_3token)
interface.config.API_AUTHENTICATION_MODE = 'UNIPAY'
interface.config.UNIPAY_SUBJECT = 'test_subject'
call_kwargs = {'param_a': 'a1', 'param_b': 'b2'}
call_params = interface._get_call_params('some_method', **call_kwargs)
version = interface.config.API_VERSION
expected_call_params = {'data': {'SUBJECT': 'test_subject',
'PARAM_A': 'a1',
'PARAM_B': 'b2',
'METHOD': 'some_method',
'VERSION': version},
'cert': None,
'url': interface.config.API_ENDPOINT,
'timeout': interface.config.HTTP_TIMEOUT,
'verify': interface.config.API_CA_CERTS}
self.assertEqual(expected_call_params, call_params)
def test_returns_certificate_call_params(self):
interface = PayPalInterface(**self.configs_certificate)
call_kwargs = {'param_a': 'a1', 'param_b': 'b2'}
call_params = interface._get_call_params('some_method', **call_kwargs)
version = interface.config.API_VERSION
expected_call_params = {'data': {'USER': 'test_username',
'PWD': 'test_password',
'PARAM_A': 'a1',
'PARAM_B': 'b2',
'METHOD': 'some_method',
'VERSION': version},
'cert': ('test_cert_filename',
'test_key_filename'),
'url': interface.config.API_ENDPOINT,
'timeout': interface.config.HTTP_TIMEOUT,
'verify': interface.config.API_CA_CERTS}
self.assertEqual(expected_call_params, call_params)
def test_raises_error_for_single_none_config(self):
interface = PayPalInterface(**self.configs_certificate)
interface.config.API_USERNAME = None
with self.assertRaisesRegexp(PayPalConfigError, 'USER'):
interface._get_call_params('some_method', some_param=123)
def test_raises_error_for_multiple_configs(self):
interface = PayPalInterface(**self.configs_certificate)
interface.config.API_USERNAME = None
interface.config.API_PASSWORD = None
with self.assertRaisesRegexp(PayPalConfigError, r'PWD.*USER'):
interface._get_call_params('some_method', some_param=123)
class CallTest(unittest.TestCase):
def test_posts_params(self):
with patch('paypal.interface.requests.post') as post_mock:
post_mock.return_value = Mock(text='ACK=SUCCESS')
paypal_response = interface._call('some_method',
param_a='a1',
param_b='b2')
expected_data = interface._get_call_params('some_method',
param_a='a1',
param_b='b2')
post_mock.assert_called_once_with(**expected_data)
self.assertIsInstance(paypal_response, PayPalResponse)
self.assertTrue(paypal_response.success)
def test_raises_configerror_on_error_response(self):
with patch('paypal.interface.requests.post') as post_mock:
post_mock.return_value = Mock(text='ACK=NO_SUCCESS')
with self.assertRaises(PayPalAPIResponseError):
interface._call('some_method', param='a')
|
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides the data access object (DAO)."""
import json
from google.cloud.security.common.data_access import dao
from google.cloud.security.common.data_access.sql_queries import select_data
from google.cloud.security.common.gcp_type import project
from google.cloud.security.common.gcp_type import resource
from google.cloud.security.common.gcp_type import resource_util
from google.cloud.security.common.util import log_util
LOGGER = log_util.get_logger(__name__)
class ProjectDao(dao.Dao):
"""Data access object (DAO)."""
# pylint: disable=arguments-differ
@staticmethod
def map_row_to_object(row):
"""Instantiate a Project from database row.
TODO: Make this go away when we start using an ORM.
ProjectDao has a special case because the database schema doesn't
match the GCP API fields.
Args:
row (dict): The database row to map.
Returns:
Project: A Project, created from the row.
"""
return project.Project(
project_id=row['project_id'],
project_number=row['project_number'],
display_name=row['project_name'],
lifecycle_state=row['lifecycle_state'],
parent=resource_util.create_resource(
resource_id=row['parent_id'],
resource_type=row['parent_type']))
# pylint: enable=arguments-differ
def get_project_numbers(self, resource_name, timestamp):
"""Select the project numbers from a projects snapshot table.
Args:
resource_name (str): The resource name.
timestamp (str): The timestamp, formatted as YYYYMMDDTHHMMSSZ.
Returns:
list: A list of project numbers.
Raises:
MySQLError: An error with MySQL has occurred.
"""
project_numbers_sql = select_data.PROJECT_NUMBERS.format(timestamp)
rows = self.execute_sql_with_fetch(
resource_name, project_numbers_sql, ())
return [row['project_number'] for row in rows]
def get_project(self, project_id, timestamp):
"""Get a project from a particular snapshot.
Args:
project_id (str): The id of the project.
timestamp (str): The snapshot timestamp.
Returns:
Project: A Project, if found.
"""
project_query = select_data.PROJECT_BY_ID.format(timestamp)
rows = self.execute_sql_with_fetch(
resource.ResourceType.PROJECT, project_query, (project_id,))
if rows:
return self.map_row_to_object(rows[0])
return None
def get_project_by_number(self, project_number, timestamp):
"""Get a project from a particular snapshot.
Args:
project_number (int): The number of the project.
timestamp (str): The snapshot timestamp.
Returns:
Project: A Project, if found.
"""
project_query = select_data.PROJECT_BY_NUMBER.format(timestamp)
rows = self.execute_sql_with_fetch(
resource.ResourceType.PROJECT, project_query, (project_number,))
if rows:
return self.map_row_to_object(rows[0])
return None
def get_projects(self, timestamp):
"""Get projects from a particular snapshot.
Args:
timestamp (str): The snapshot timestamp.
Returns:
list: A list of Projects.
"""
projects_query = select_data.PROJECTS.format(timestamp)
rows = self.execute_sql_with_fetch(
resource.ResourceType.PROJECT, projects_query, ())
return [self.map_row_to_object(row) for row in rows]
def get_project_policies(self, resource_name, timestamp):
"""Get the project policies.
This does not raise any errors on database or json parse errors
because we want to return as many projects as possible.
Args:
resource_name (str): The resource type.
timestamp (str): The timestamp of the snapshot.
Returns:
dict: A dict containing the projects (gcp_type.project.Project)
and their iam policies (dict).
"""
project_policies = {}
query = select_data.PROJECT_IAM_POLICIES_RAW.format(
timestamp, timestamp)
rows = self.execute_sql_with_fetch(
resource_name, query, ())
for row in rows:
try:
proj = self.map_row_to_object(row)
project_policies[proj] = json.loads(row['iam_policy'])
except ValueError:
LOGGER.warn('Error parsing json:\n %s', row['iam_policy'])
return project_policies
def get_project_raw_data(self, resource_name, timestamp, **kwargs):
"""Select the project raw data from a projects snapshot table.
Args:
resource_name (str): The resource name.
timestamp (str): Snapshot timestamp, formatted as YYYYMMDDTHHMMSSZ.
**kwargs (dict): Additional args.
Returns:
list: List of project raw data.
"""
project_id = kwargs.get('project_id')
project_number = kwargs.get('project_number')
if project_id is not None:
project_raw_sql = select_data.PROJECT_RAW.format(timestamp)
rows = self.execute_sql_with_fetch(
resource_name, project_raw_sql, (project_id,))
elif project_number is not None:
project_raw_sql = select_data.PROJECT_RAW_BY_NUMBER.format(
timestamp)
rows = self.execute_sql_with_fetch(
resource_name, project_raw_sql, (project_number,))
else:
project_raw_sql = select_data.PROJECT_RAW_ALL.format(timestamp)
rows = self.execute_sql_with_fetch(
resource_name, project_raw_sql, ())
return [row['raw_project'] for row in rows]
|
def convert(s, numRows):
n = len(s)
dic = dict()
incr = 0
flag = 1
for i in range(n):
if incr not in dic.keys():
dic[incr] = ''
dic[incr] += s[i]
incr += flag
if incr == 0 or incr == numRows-1:
flag *= -1
ans = ''
for value in dic.values():
ans += value
return ans
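# Example: convert("PAYPALISHIRING", 3) == "PAHNAPLSIIGYIR"
# (the string is written in a 3-row zigzag and read back row by row).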
|
#
# carkov markov chain library
# © Copyright 2021 by Aldercone Studio <aldercone@gmail.com>
# This is free software, see the included LICENSE for terms and conditions.
#
"""
Various filter functions that may be useful for processing certain kinds of corpora.
"""
from typing import Optional
# from unidecode import unidecode # fixme asciifying filter
# All of these filters operate on string tokens
def str_abstractize_numbers(token: str) -> Optional[str]:
"""Replace all numbers with a Number abstract."""
return None
def str_abstractize_roman(token: str) -> Optional[str]:
"""Replace roman numerals with a Number abstract."""
return None
def str_strip_punct(token: str) -> Optional[str]:
"""Remove any punctuation characters."""
return None
def str_asciify(token: str) -> Optional[str]:
"""Convert all characters to an ascii approximation."""
return None
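# A self-contained sketch, separate from the stubs above, of what a punctuation-stripping
# filter could look like; example_strip_punct is a hypothetical name used only here.
import string

def example_strip_punct(token: str) -> Optional[str]:
    """Illustrative only: drop ASCII punctuation; return None if nothing remains."""
    stripped = token.translate(str.maketrans('', '', string.punctuation))
    return stripped if stripped else None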
|
import codecademylib3_seaborn
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
from sklearn.cluster import KMeans
digits = datasets.load_digits()
#print(digits.target[100])
plt.gray()
plt.matshow(digits.images[100])
plt.show()
model = KMeans(n_clusters = 10, random_state = 43)
model.fit(digits.data)
fig = plt.figure(figsize=(8,3))
fig.suptitle('Cluster Center Images', fontsize=12, fontweight='bold')
for i in range(10):
ax=fig.add_subplot(2,5,1+i)
ax.imshow(model.cluster_centers_[i].reshape((8,8)), cmap=plt.cm.binary)
plt.show()
new_samples=np.array([
[0.00,0.15,4.10,6.56,6.86,6.38,0.82,0.00,0.07,5.47,7.62,5.37,4.11,7.63,3.87,0.00,2.36,7.62,3.87,0.00,0.00,5.95,6.76,0.08,3.81,7.32,0.23,0.00,0.00,3.13,7.62,0.76,3.74,7.47,0.46,0.00,0.00,2.52,7.62,0.76,2.14,7.62,3.84,0.00,0.18,5.01,7.47,0.30,0.00,5.77,7.50,5.18,6.46,7.62,4.53,0.00,0.00,0.92,5.03,6.10,5.72,3.31,0.05,0.00],
[0.00,0.79,6.89,7.62,7.62,4.23,0.00,0.00,0.00,4.42,7.55,2.85,4.93,7.62,2.04,0.00,0.53,7.17,5.19,0.00,0.59,7.32,5.19,0.00,3.05,7.62,1.91,0.00,0.00,4.42,7.47,0.38,4.12,7.17,0.00,0.00,0.00,2.21,7.62,1.53,4.57,6.86,0.13,0.00,0.00,2.21,7.62,1.53,3.18,7.62,5.01,2.36,1.75,6.00,7.24,0.30,0.08,4.60,7.55,7.62,7.62,7.60,3.23,0.00],
[0.00,0.87,4.42,4.57,4.57,4.57,2.34,0.00,0.00,5.01,7.55,6.10,6.10,6.10,3.26,0.00,0.00,6.71,5.01,0.00,0.00,0.00,0.00,0.00,0.00,6.79,6.79,5.34,5.34,3.00,0.00,0.00,0.00,2.16,5.26,5.34,6.56,7.55,1.68,0.00,0.00,0.31,0.30,0.00,0.79,7.63,3.81,0.00,0.00,5.11,7.40,6.86,7.17,7.62,3.16,0.00,0.00,1.50,3.81,3.81,3.81,2.64,0.00,0.00],
[0.00,4.17,7.62,7.62,7.62,7.40,3.03,0.00,0.00,3.89,7.17,3.05,3.13,3.81,1.50,0.00,0.00,4.65,7.24,4.95,5.34,4.35,0.62,0.00,0.00,4.99,7.62,6.94,5.79,7.60,5.06,0.00,0.00,0.23,0.69,0.00,0.00,5.22,6.10,0.00,0.23,4.35,2.04,2.21,3.05,6.81,5.95,0.00,0.38,7.28,7.62,7.62,7.62,6.86,2.14,0.00,0.00,0.89,1.52,1.37,0.76,0.00,0.00,0.00]
])
new_labels = model.predict(new_samples)
# Map each cluster index to the digit it appears to represent, then print the guesses.
cluster_to_digit = {0: 4, 1: 1, 2: 7, 3: 9, 4: 6, 5: 3, 6: 0, 7: 8, 8: 5, 9: 2}
for label in new_labels:
    print(cluster_to_digit[label], end='')
print(new_labels)
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author:hua
a = 'hello word'
b = a.replace('word','python')
print(b)
|
listRange=list(range(4)) # 0 to 3
print(listRange)
listRange1=list(range(-6,7,2)) # -6 to +6 by 2
print(listRange1)
list1=[[x**2,x**3] for x in range(4)]
print(list1)
list2 = [[x, x / 2, x * 2] for x in range(-6, 7, 2) if x > 0]
print(list2)
|
# Problem name: Coin Piles
# Description: You have two coin piles containing a and b coins.
# On each move, you can either remove one coin from the left pile and two coins from the right pile, or two coins from the left pile and one coin from the right pile.
# Your task is to efficiently find out if you can empty both the piles.
# Strategy: if a nd b are two given numbers then let x=2 from a and 1 from b and y=2 from b and 1 from a(the ways).
# Form equations for a and b--> a=(2*x)+(1*y) and b=(2*y)+(1*x) , now while solving both the equations we get--> 2a-b=3x and 2b-a=3y, which means, 2a-b and 2b-a should be a multiple of 3
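# Quick check of the condition: a=2, b=1 gives 2a-b=3 and 2b-a=0, both non-negative multiples of 3,
# so the answer is YES (one move: take two coins from the left pile and one from the right).
# a=2, b=2 gives 2a-b=2, which is not a multiple of 3, so the answer is NO.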
# Optimized code:
n=int(input())
for i in range(n):
li=list(map(int,input().split()))
a=li[0]
b=li[1]
if(((2*a)-b)%3==0 and ((2*a)-b)>=0 and ((2*b)-a)%3==0 and ((2*b)-a)>=0):
print("YES")
else:
print("NO")
#Brute-Force approach:
"""n=int(input())
for i in range(n):
li=list(map(int,input().split()))
if(li[0]+li[1])%3!=0:
print("NO")
else:
flag=0
n=li[0]+li[1]
while(n>0):
if(li[0]==0 or li[1]==0):
print("NO")
flag=1
break
if(n%3==0):
if(li[0]>li[1]):
li[0]-=2
li[1]-=1
n=li[0]+li[1]
else:
li[0]-=1
li[1]-=2
n=li[0]+li[1]
else:
flag=1
print("NO")
break
if(flag==0):
print("YES")
"""
|
import cv2
import numpy as np
import sys
import os
ASCII_CHARS = ['.',',',':',';','+','*','?','%','S','#','@']
ASCII_CHARS = ASCII_CHARS[::-1]
def asciify(frame):
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frame = frame // 25
frame = frame.tolist()
text = [[ASCII_CHARS[px] for px in line[::10]] for line in frame[::10]]
return text
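# asciify maps each 8-bit gray value to one of the 11 ASCII_CHARS (value // 25 lies in 0..10)
# and keeps only every 10th row and column, so one character stands in for a 10x10 pixel block.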
if __name__ == '__main__':
video = cv2.VideoCapture(sys.argv[1])
width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = video.get(cv2.CAP_PROP_FPS)
name, ext = os.path.splitext(sys.argv[1])
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
writer = cv2.VideoWriter('asciified-'+name+'.mp4', fourcc, fps, (width, height))
print('Writing W*H {}x{}, FPS {}'.format(width, height, fps))
while(video.isOpened()):
ret, frame = video.read()
if ret:
text = asciify(frame)
out = np.zeros((height, width, 3), np.uint8)
for i in range(len(text)):
for j in range(len(text[i])):
cv2.putText(img=out,
text=''.join(text[i][j]),
org=(j*10, i*10),
fontFace=1,
fontScale=0.6,
color=(255, 255, 255))
writer.write(out)
## TO CHECK RESULTS
##cv2.imshow('test', out)
else:
break
print('done')
writer.release()
video.release()
cv2.waitKey(0)
cv2.destroyAllWindows()
|
from django.db import models
from django.conf import settings
from django.core.exceptions import ValidationError
from django.utils import timezone
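# Note: dateStartValidator only records the most recent start value on the function object,
# and dateEndValidator compares against it. This relies on Django validating dateStart before
# dateEnd on the same instance, so it is fragile (e.g. with concurrent requests).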
def dateStartValidator(value):
dateStartValidator.lastValue = value
def dateEndValidator(value):
if dateStartValidator.lastValue > value:
raise ValidationError(
'End Date should be later than start date'
)
class ScheduleInfo(models.Model):
name = models.CharField(max_length=300)
dateStart = models.DateTimeField(validators=[dateStartValidator,])
dateEnd = models.DateTimeField(validators=[dateEndValidator,])
def __str__(self):
return timezone.localtime(self.dateStart).strftime("[%Y-%m-%d]") + "~" + timezone.localtime(self.dateEnd).strftime("[%Y-%m-%d]") + " " + self.name
def __eq__(self, other):
return self.name==other.name and self.dateStart==other.dateStart and self.dateEnd==other.dateEnd
|
# Generated by Django 2.2.2 on 2019-09-06 15:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('goods', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='comment',
name='is_anonymity',
field=models.IntegerField(default=0),
),
]
|
import modules as m
#from modules import Add
print(m.Add.__doc__)
print(m.Add(2,5))
|
# http://www.practicepython.org/exercise/2014/03/26/08-rock-paper-scissors.html
from random import randint
puntj = puntia = 0
print ("Al primero que gane tres!")
while True:
while puntj<3 and puntia<3:
jugador = "inicial"
while (jugador != "piedra" and jugador != "papel" and jugador != "tijeras" and jugador != "tijera"):
jugador = input("Un, dos, tres, piedra, papel o tijera! ").lower()
if (jugador == "piedra"):
j = 1
break
elif (jugador == "papel"):
j = 2
break
elif (jugador == "tijera" or jugador == "tijeras"):
j = 3
break
#
ia = randint(1,3)
if (ia == 1):
iastr = "piedra"
elif (ia == 2):
iastr = "papel"
elif (ia == 3):
iastr = "tijera"
else:
print ("Error inesperado")
# print("Jugador: {}, IA: {}".format(j, ia))
if (j == ia):
print ("Los dos hemos sacado {}, empate!".format(jugador))
if ((j == 1 and ia == 2) or (j == 2 and ia == 3) or (j == 3 and ia == 1)):
print ("Yo saqué {}, gané!".format(iastr))
puntia += 1
if ((j == 2 and ia == 1) or (j == 3 and ia == 2) or (j == 1 and ia == 3)):
print ("Yo saqué {}, ganaste...".format(iastr))
puntj += 1
if (puntia<3 and puntj<3):
if (puntia>puntj):
print("Gano {} a {}!".format(puntia, puntj))
elif (puntia<puntj):
print("Vas ganando {} a {}!".format(puntj, puntia))
else:
print("Estamos empatados a {}".format(puntia))
elif(puntia == 3):
print("He ganado!!")
elif(puntj == 3):
print("Has ganado...")
#
while True:
op = input("Otra partida? (s,n): ").lower()
if (op=="s" or op == "n" or op == "y"):
break
if (op == "n"):
break
else:
puntj = 0
puntia = 0
#
|
from time import sleep
from serial import Serial
import sys
sys.path.append('/home/pi/tracer/python')
from tracer import Tracer, TracerSerial, QueryCommand
port = Serial('/dev/ttyAMA0', 9600, timeout = 1)
port.flushInput()
port.flushOutput()
tracer = Tracer(0x16)
t_ser = TracerSerial(tracer, port)
query = QueryCommand()
try:
while 1:
try:
t_ser.send_command(query)
data = t_ser.receive_result()
except (IndexError, IOError) as e:
print(e)
port.flushInput()
port.flushOutput()
sleep(4)
continue
print('Battery Voltage: {0:0.1f}V'.format(data.batt_voltage))
print('Solar Panel Voltage: {0:0.1f}V'.format(data.pv_voltage))
print('Charging Current: {0:0.2f}A'.format(data.charge_current))
print('Load Current: {0:0.2f}A\n'.format(data.load_amps))
sleep(4)
except KeyboardInterrupt:
print ("\nCtrl-C pressed. Closing serial port and exiting...")
finally:
port.close()
|
class Solution:
def threeSum(self, nums):
res = []
nums.sort()
for i in range(len(nums) - 2):
if i > 0 and nums[i] == nums[i - 1]:
continue
# two sum
j = i + 1
k = len(nums) - 1
while j < k:
if nums[i] + nums[j] + nums[k] == 0:
res.append([nums[i],nums[j],nums[k]])
while j < k and nums[j] == nums[j + 1]:
j += 1
while j < k and nums[k] == nums[k - 1]:
k -= 1
j += 1
k -= 1
elif nums[i] + nums[j] + nums[k] > 0:
k -= 1
else:
j += 1
# end two sum
return res
def threeSumSlow(self,nums):
res = set()
nums.sort()
for i in range(len(nums) - 2):
for j in range(i + 1, len(nums) - 1):
for k in range(j + 1, len(nums)):
if nums[i] + nums[j] + nums[k] == 0:
res.add((nums[i],nums[j],nums[k]))
return list(res)
print(Solution().threeSum([-1, 0, 1, 2, -1, -4]))
|
import numpy as np
def calculate(list):
if len(list) == 9:
calculations = dict()
np_list = np.array(list)
matrix = np.reshape(np_list, (3, 3))
calculations['mean'] = (np.mean(matrix, axis=0), np.mean(matrix, axis=1), np.mean(matrix))
calculations['variance'] = (np.var(matrix, axis=0), np.var(matrix, axis=1), np.var(matrix))
calculations['standard deviation'] = (np.std(matrix, axis=0), np.std(matrix, axis=1), np.std(matrix))
calculations['max'] = (np.max(matrix, axis=0), np.max(matrix, axis=1), np.max(matrix))
calculations['min'] = (np.min(matrix, axis=0), np.min(matrix, axis=1), np.min(matrix))
calculations['sum'] = (np.sum(matrix, axis=0), np.sum(matrix, axis=1), np.sum(matrix))
# calculations = np.mean(matrix, axis=0)
return calculations
else:
raise ValueError('list length is not nine')
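# Example (quick sanity check, values as returned by numpy):
# calculate([0, 1, 2, 3, 4, 5, 6, 7, 8])['mean']
# -> (array([3., 4., 5.]), array([1., 4., 7.]), 4.0)   # column means, row means, overall mean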
|
"""Test tailrecursion.py."""
from tailrecursion import Recursion
from tailrecursion import tail_recursive
@tail_recursive
def fib(seq_num: int, _i: int = 0, _j: int = 1) -> int:
"""Return the nth Fibonacci number."""
if seq_num == 0:
return _i
else:
raise Recursion(seq_num - 1, _i=_j, _j=(_i + _j))
def test_correctness():
"""Test we get the correct result."""
assert fib(0) == 0
assert fib(1) == 1
assert fib(5) == 5
assert fib(100) == 354224848179261915075
def test_function_meta():
"""Test we don't destroy the function by decorating it."""
@tail_recursive
def my_fn():
"""TEST."""
pass
assert my_fn.__doc__ == 'TEST.'
|
import streamlit as st
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb
def load_clean_data():
data = pd.read_csv('kc_house_data.csv')
data.date = pd.to_datetime(data.date, infer_datetime_format=True)
return data
data = load_clean_data()
def show_EDA_page():
st.title('EDA of the KC House Sales Price Data')
st.write("""In this page we present a brief EDA of the used data set, for more detailed EDA please refer
to this [kernel](https://www.kaggle.com/hamzaboulahia/eda-kc), and the data set is available publicly
on [Kaggle](https://www.kaggle.com/harlfoxem/housesalesprediction)""")
st.write("Top 5 rows of the King County House Sales Price data set:")
st.write(data.head())
st.markdown(""" ## Features description:
**id :** The identification number of a house
**date:** The date when a house was sold
**price:** Price is prediction target
**bedrooms:** Number of bedrooms
**bathrooms:** Number of bathrooms
**sqft_living:** Square footage of the home
**sqft_lot:** Square footage of the lot
**floors:** Total floors (levels) in house
**waterfront:** House which has a view to a waterfront
**view:** Number of views in the house
**condition:** How good the house condition is overall
**grade:** Overall grade given to the housing unit, based on King County grading system
**sqft_above:** Square footage of house apart from basement
**sqft_basement:** Square footage of the basement
**yr_built:** Built Year
**yr_renovated :** Year when house was renovated
**zipcode:** Zip code
**lat:** Latitude coordinate
**long:** Longitude coordinate
**sqft_living15:** Living room area in 2015(implies-- some renovations)
**sqft_lot15:** LotSize area in 2015(implies-- some renovations)""")
Years = list(pd.DatetimeIndex(data.date).year)
Months = list(pd.DatetimeIndex(data.date).month)
fig1 = plt.figure(figsize=(20, 6))
grid = plt.GridSpec(2, 2, width_ratios=(1, 2), height_ratios=(1, 5), hspace=0.2, wspace=0.2)
Left_ax = fig1.add_subplot(grid[:, 0])
Right_top = fig1.add_subplot(grid[0, 1])
    Right_bot = fig1.add_subplot(grid[1, 1], xticklabels=['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug',
                                                          'Sep', 'Oct', 'Nov', 'Dec'])
sb.countplot(x=Years, palette='mako', ax=Left_ax)
Left_ax.set_title('House sales count by Year', fontdict={'fontsize': 15})
sb.countplot(x=Months, palette='mako', ax=Right_bot)
sb.boxplot(x=Months, ax=Right_top)
Right_top.set_title('House sales count by Month', fontdict={'fontsize': 15})
st.write("""## Univariate Data Exploration:""")
st.write("Sales count by Year & Month")
st.pyplot(fig1)
st.write("House Price distribution")
fig2 = plt.figure(figsize=(20, 8))
plt.subplot(121)
sb.histplot(data=data.price, bins=140)
plt.title('Distribution of the house prices', fontdict={'fontsize': 15})
plt.subplot(122)
sb.boxplot(x=data.price)
plt.title('Boxplot of the house prices', fontdict={'fontsize': 15})
st.pyplot(fig2)
st.write("Bedrooms & Bathrooms distribution")
fig3 = plt.figure(figsize=(20, 6))
plt.subplot(121)
sb.countplot(x=data.bedrooms, palette='mako')
plt.title('Number of bedrooms distribution', fontdict={'fontsize': 15})
plt.subplot(122)
sb.countplot(y=data.bathrooms, palette='mako')
plt.title('Number of bathrooms distribution', fontdict={'fontsize': 15})
st.pyplot(fig3)
st.write("House area distribution")
fig4 = plt.figure(figsize=(20, 15))
sb.histplot(x=data.sqft_living, kde=True, bins=110)
sb.histplot(x=data.sqft_living15, kde=True, bins=110, color='red')
plt.legend(['sqft_living', 'sqft_living15'])
plt.title('Living area distribution', fontdict={'fontsize': 15})
st.pyplot(fig4)
st.write("Classes representation for some categorical features")
fig5 = plt.figure(figsize=(20, 20))
plt.subplot(321)
sb.countplot(x=data.floors, palette='mako')
plt.title('Distribution of houses with respect to floor count', fontdict={'fontsize': 15})
plt.subplot(322)
sb.countplot(x=data.waterfront, palette='mako')
plt.title('Number of houses with/without a water front', fontdict={'fontsize': 15})
plt.subplot(323)
sb.countplot(x=data.view, palette='mako')
plt.title('Distribution of the views count', fontdict={'fontsize': 15})
plt.subplot(324)
sb.countplot(x=data.condition, palette='mako')
plt.title('Houses condition distribution', fontdict={'fontsize': 15});
st.pyplot(fig5)
st.write("""## Multivariate Data Exploration:""")
st.write("Feature correlation heatmap")
fig6 = plt.figure(figsize=(18, 13))
plt.title('Heatmap correlation of the most important features', fontsize=18)
sb.heatmap(data=data.iloc[:, 1:].corr(), annot=True)
st.pyplot(fig6)
st.write("Price Vs Categorical variables")
fig7 = plt.figure(figsize=(20, 20))
plt.subplot(421)
sb.barplot(x=data.bedrooms, y=data.price, palette='mako')
plt.subplot(422)
sb.barplot(x=data.waterfront, y=data.price, palette='mako')
plt.subplot(423)
sb.barplot(x=data.grade, y=data.price, palette='mako')
plt.subplot(424)
sb.barplot(x=data.floors, y=data.price, palette='mako')
plt.subplot(425)
sb.barplot(x=data.condition, y=data.price, palette='mako')
plt.subplot(426)
sb.barplot(x=data.view, y=data.price, palette='mako')
plt.subplot(414)
sb.barplot(x=data.bathrooms, y=data.price, palette='mako')
st.pyplot(fig7)
st.subheader("You can predict the price of a house using a Linear Regression model on the Predict page")
return None
|
import numpy as np
import matplotlib.pyplot as plt
def getrsr(x1, x2, x3):
return (0.4 * x1 + 0.4 * x2 + 0.2 * x3) / 30.0
plt.rcParams['font.family'] = 'SimHei'
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
lables = np.array(['查找算法', '排序算法', '树结构', '数字操作', '数组', '图结构', '线性表', '字符串'])
nAttr = 8
data = np.array([getrsr(234.8785317352681, 319.99504802754655, 211.4628228646248),
getrsr(274.4375825834956, 430.18565746895456, 260.71252816399806),
getrsr(253.0781729934846, 278.0895107579895, 167.4751304818027),
getrsr(275.9241742209988, 306.6459105668957, 234.05743064924715),
getrsr(250.177204156439, 193.0810938975884, 157.7242012487926),
getrsr(239.71326425818452, 275.46388971831186, 235.96362690214892),
getrsr(251.74271582577097, 278.39941102547346, 181.1483751175285),
getrsr(213.70544631243826, 323.2956824038571, 182.98785226908083)])
angles = np.linspace(0, 2 * np.pi, nAttr, endpoint=False)
data = np.concatenate((data, [data[0]]))
angles = np.concatenate((angles, [angles[0]]))
fig = plt.figure(facecolor="white")
plt.subplot(111, polar=True)
plt.plot(angles, data, 'bo-', color='b', linewidth=2)
plt.fill(angles, data, facecolor='g', alpha=0.25)
# print(angles * 180 / np.pi)
plt.thetagrids([0., 45., 90., 135., 180., 225., 270., 315.], lables)
plt.figtext(0.52, 0.95, '各类题型能力值雷达图(49823)', ha='center')
plt.grid(True)
plt.savefig('user49823.jpg')
plt.show()
|
# Generated by Django 3.0.3 on 2020-03-18 21:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dimensoes', '0005_dimensaomodel_data'),
]
operations = [
migrations.AlterField(
model_name='dimensaomodel',
name='profundidade_media',
field=models.FloatField(default=0, max_length=5),
),
]
|
"""
Example conversion of ical to csv
"""
from csv_ical import Convert
convert = Convert()
convert.CSV_FILE_LOCATION = 'examples/BostonCruiseTerminalSchedule.csv'
convert.SAVE_LOCATION = 'examples/arrive.ics'
convert.read_ical(convert.SAVE_LOCATION)
convert.make_csv()
convert.save_csv(convert.CSV_FILE_LOCATION)
|
#Query the Google Books API by ISBN
import requests as req
ISBN = input()
response = req.get(
'https://www.googleapis.com/books/v1/volumes',
params={
"q": "isbn=" + str(ISBN)
})
BookInfo = dict(response.json()).get("items")[0]["volumeInfo"]
BookTitle = BookInfo["title"]
BookAuthor = BookInfo["authors"][0]
print("Title: " + BookTitle)
print("Author: " + BookAuthor)
|
n1 = int(input('Digite o primeiro número inteiro: '))
n2 = int(input('Digite o segundo número inteiro: '))
if n1 > n2:
print('O primeiro valor é maior do que o segundo')
elif n2 > n1:
print('O segundo valor é maior do que o primeiro')
else:
print('Não há valor maior, os dois são iguais')
|
import csv
reader = csv.reader(file)
writer = csv.writer(file)
writer.writerow(data) # write one line
writer.writerows(data) # write multi line
# Dictionary
reader = csv.DictReader(file)
writer = csv.DictWriter(file, fieldnames=fieldnames)  # fieldnames: list of column names
writer.writeheader() # file header
writer.writerow(data) # write one line
writer.writerows(data) # write multi line
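# A minimal end-to-end sketch of the calls above; the file name "people.csv" and its
# columns are made up for illustration.
rows = [{"name": "Ada", "age": 36}, {"name": "Linus", "age": 54}]

with open("people.csv", "w", newline="") as f:
    writer = csv.DictWriter(f, fieldnames=["name", "age"])
    writer.writeheader()      # header row: name,age
    writer.writerows(rows)    # one line per dict

with open("people.csv", newline="") as f:
    for row in csv.DictReader(f):   # each row comes back as a dict of strings
        print(row["name"], row["age"])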
|
import pygame
from GameParameter import display
def drawing_text(text, x, y, font_color=pygame.Color('black'), font_size=30):
font_type = pygame.font.Font('lost.ttf', font_size)
text = font_type.render(text, True, font_color)
display.blit(text, (x, y))
|
import asyncio
import os
import logging
from colorlog import ColoredFormatter
from tonga.stores.persistency.shelve import ShelvePersistency, StoreKeyNotFound
from tonga.stores.persistency.memory import MemoryPersistency
from tonga.stores.persistency.rocksdb import RocksDBPersistency
def setup_logger():
"""Return a logger with a default ColoredFormatter."""
formatter = ColoredFormatter(
"%(log_color)s[%(asctime)s]%(levelname)s: %(name)s/%(module)s/%(funcName)s:%(lineno)d"
" (%(thread)d) %(blue)s%(message)s",
datefmt=None,
reset=True,
log_colors={
'DEBUG': 'cyan',
'INFO': 'green',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'red',
}
)
logger = logging.getLogger('tonga')
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
async def main():
print('start')
for i in range(0, 1):
print("Set toto -> b'titi'")
await persistency.set('toto', b'titi')
print("Get toto")
r = await persistency.get('toto')
print('return = ', r)
print("Delete toto")
await persistency.delete('toto')
try:
r = await persistency.get('toto')
print('r = ', r)
except StoreKeyNotFound:
print('logic')
if __name__ == '__main__':
logger = setup_logger()
print('Start memory')
persistency = MemoryPersistency()
persistency.__getattribute__('_set_initialize').__call__()
print(persistency.is_initialize())
asyncio.run(main())
print('Start shelve')
persistency = ShelvePersistency(os.path.join(os.path.dirname(os.path.abspath(__file__)),
'local_store.db'))
persistency.__getattribute__('_set_initialize').__call__()
asyncio.run(main())
print('Start rocksDB')
persistency = RocksDBPersistency(os.path.join(os.path.dirname(os.path.abspath(__file__)),
'local_store'))
persistency.__getattribute__('_set_initialize').__call__()
asyncio.run(main())
|
n=input()
list1=[int(x) for x in raw_input().split(" ")]
list2=[]
for i in list1:
list2.append([i])
Count=0
|
""" script to create hepevt files (as input for JUNO detsim) containing IBD signals.
IBD signal:
- positron with initial momentum between 10 MeV and 100 MeV
- neutron with initial momentum corresponding to the positron energy
format of hepevt file (momentum and mass in GeV):
number particles in event
1 PDGID 0 0 px py pz mass
1 PDGID 0 0 px py pz mass
for example:
2
1 -11 0 0 px py pz 0.000511
1 2112 0 0 px py pz 0.939570
"""
import numpy as np
from matplotlib import pyplot as plt
def calculate_neutron_momentum(mom_positron, theta, mass_p, delta, mass_pos):
"""
function to calculate the neutron momentum as function of the positron momentum and the angle theta
from paper "Angular distribution of neutron inverse beta decay" of Beacom and Vogel (PhysRevD.60.053003.pdf),
page 7, equation 29.
In the case of large positron energies (E_pos > 10 MeV): momentum of positron = energy of positron
(mass can be neglected) and therefore velocity_positron = momentum / energy = 1.
For example: mom_pos(E_pos = 10 MeV) = 9.987 MeV, mom_pos(E_pos = 100 MeV) = 99.9987 MeV
With this approximation equation 29 of the paper becomes function below.
:param mom_positron: momentum of positron in GeV
:param theta: angle theta in degree
:param mass_p: mass of proton in GeV
:param delta: mass neutron - mass proton in GeV
:param mass_pos: mass of positron in GeV
:return: momentum of neutron in GeV
"""
mom_neutron = ((mom_positron - delta) * mom_positron / mass_p * (1 - np.cos(np.deg2rad(theta))) +
(delta**2 - mass_pos**2)/(2 * mass_p))
return mom_neutron
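# Quick numeric check of the formula above: for mom_positron = 0.010 GeV (a 10 MeV positron)
# and theta = 180 degrees, mom_neutron comes out to roughly 1.9e-4 GeV (about 0.19 MeV),
# dominated by the (1 - cos(theta)) term.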
# mass of positron in GeV (float constant):
MASS_POSITRON = 0.51099892/1000.0
# mass of proton in GeV (float constant):
MASS_PROTON = 938.27203/1000.0
# mass of neutron in GeV (float constant):
MASS_NEUTRON = 939.56536/1000.0
# difference MASS_NEUTRON - MASS_PROTON in GeV (float):
DELTA = MASS_NEUTRON - MASS_PROTON
# set minimum and maximum momentum of positron in GeV:
E_min = 0.010
E_max = 0.100
# set path, where the output files (hepevt files should be saved):
output_path = "/home/astro/blum/juno/IBD_events/IBD_hepevt_files/"
# set the number of events, that should be stored in one hepevt file:
number_events_per_file = 100
# set the number of files that should be created:
number_of_files = 200
# parameters for the hepevt file:
# PDG ID of positron:
pdg_positron = str(-11)
# mass of positron in GeV:
mass_positron = str(0.000511)
# PDG ID of neutron:
pdg_neutron = str(2112)
# mass of neutron in GeV:
mass_neutron = str(0.93957)
# preallocate array to check positron momentum:
array_pos = []
array_pos_test = []
array_neutron = []
# loop over the number of files:
for filenumber in range(number_of_files):
# preallocate hepevt array:
hepevt_file = []
# loop over the number of events per file:
for event in range(number_events_per_file):
# generate total momentum of positron in GeV:
momentum_positron = np.random.uniform(E_min, E_max)
# calculate square of the total momentum in GeV**2:
momentum_positron_square = momentum_positron**2
# generate square of x-momentum of positron in GeV**2:
momentum_positron_x_square = np.random.uniform(0.0, momentum_positron_square)
# generate square of y-momentum of positron in GeV**2:
momentum_positron_y_square = np.random.uniform(0.0, (momentum_positron_square - momentum_positron_x_square))
# calculate square of z-momentum of positron in GeV**2:
momentum_positron_z_square = momentum_positron_square - momentum_positron_x_square - momentum_positron_y_square
# calculate the momentum in x, y, z direction in GeV (consider that it can also be negative):
# generate random 0 or 1:
sign_x = np.random.randint(2)
if sign_x == 0:
momentum_positron_x = np.sqrt(momentum_positron_x_square)
else:
momentum_positron_x = -np.sqrt(momentum_positron_x_square)
sign_y = np.random.randint(2)
if sign_y == 0:
momentum_positron_y = np.sqrt(momentum_positron_y_square)
else:
momentum_positron_y = -np.sqrt(momentum_positron_y_square)
sign_z = np.random.randint(2)
if sign_z == 0:
momentum_positron_z = np.sqrt(momentum_positron_z_square)
else:
momentum_positron_z = -np.sqrt(momentum_positron_z_square)
# calculate momentum of the generated x,y,z momenta in GeV as cross-check:
momentum_positron_crosscheck = np.sqrt(momentum_positron_x**2 + momentum_positron_y**2 + momentum_positron_z**2)
# generate angle theta in degree:
theta_degree = np.random.uniform(0.0, 180.0)
# calculate the momentum of neutron in GeV:
momentum_neutron = calculate_neutron_momentum(momentum_positron, theta_degree, MASS_PROTON, DELTA,
MASS_POSITRON)
# calculate square of neutron momentum in GeV**2:
momentum_neutron_square = momentum_neutron**2
# generate square of x-momentum of neutron in GeV**2:
momentum_neutron_x_square = np.random.uniform(0.0, momentum_neutron_square)
# generate square of y-momentum of neutron in GeV**2:
momentum_neutron_y_square = np.random.uniform(0.0, (momentum_neutron_square - momentum_neutron_x_square))
# calculate square of z-momentum of neutron in GeV**2:
momentum_neutron_z_square = momentum_neutron_square - momentum_neutron_x_square - momentum_neutron_y_square
# calculate the momentum in x, y, z, direction in GeV (consider that it can also be negative):
# generate random 0 or 1:
sign_x = np.random.randint(2)
if sign_x == 0:
momentum_neutron_x = np.sqrt(momentum_neutron_x_square)
else:
momentum_neutron_x = -np.sqrt(momentum_neutron_x_square)
sign_y = np.random.randint(2)
if sign_y == 0:
momentum_neutron_y = np.sqrt(momentum_neutron_y_square)
else:
momentum_neutron_y = -np.sqrt(momentum_neutron_y_square)
sign_z = np.random.randint(2)
if sign_z == 0:
momentum_neutron_z = np.sqrt(momentum_neutron_z_square)
else:
momentum_neutron_z = -np.sqrt(momentum_neutron_z_square)
""" fill array as cross-check: """
array_pos.append(momentum_positron)
array_pos_test.append(momentum_positron_crosscheck)
array_neutron.append(momentum_neutron)
""" append information to hepevt_file: """
# 2 particles in the event:
hepevt_file.append("2")
# append positron information:
hepevt_file.append("1\t{0} 0 0 {1} {2} {3} {4}".format(pdg_positron, momentum_positron_x, momentum_positron_y,
momentum_positron_z, mass_positron))
# append neutron information to file:
hepevt_file.append("1\t{0} 0 0 {1} {2} {3} {4}".format(pdg_neutron, momentum_neutron_x, momentum_neutron_y,
momentum_neutron_z, mass_neutron))
# open file:
MyFile = open(output_path + "IBD_hepevt_{1:d}events_file{0:d}.txt".format(filenumber, number_events_per_file), 'w')
for element in hepevt_file:
        MyFile.write(element + "\n")
MyFile.close()
bins = np.arange(0.005, 0.120, 0.001)
h1 = plt.figure(1)
plt.hist(array_pos, bins, label='positron momentum in GeV')
plt.legend()
h2 = plt.figure(2)
plt.hist(array_pos_test, bins, label='cross-check positron momentum in GeV')
plt.legend()
h3 = plt.figure(3)
plt.hist(array_neutron, np.arange(0.0, 0.100, 0.001), label='neutron momentum in GeV')
plt.legend()
plt.show()
|
import turtle
#turtle.shape('turtle')
#square=turtle.clone()
#square.shape('square')
#square.goto(100,100)
#square.goto(300,300)
#square.stamp()
#square.goto(100,100)
#turtle.mainloop()
UP_ARROW='Up'
LEFT_ARROW='Left'
DOWN_ARROW='Down'
RIGHT_ARROW='Right'
SPACEBAR='space'
UP=0
DOWN=1
LEFT=2
RIGHT=3
direction=UP
def up():
global direction
direction=UP
print('you pressed up!')
def down():
global direction
direction=DOWN
print ('you pressed down!')
def left():
global direction
direction= LEFT
    print ('you pressed left!')
def right():
global direction
direction= RIGHT
print ('you pressed right!')
turtle.onkeypress(up,UP_ARROW)
turtle.onkeypress(down,DOWN_ARROW)
turtle.onkeypress(left,LEFT_ARROW)
turtle.onkeypress(right,RIGHT_ARROW)
turtle.listen()
|
import argparse
import re
import glob
import os
import numpy as np
import matplotlib.pyplot as plt
import sys
args = dict()
data = dict()
origDir = os.getcwd()
#plt.style.use('ggplot')
## plt.style.use('grayscale')
## plt.style.use('fivethirtyeight')
#print plt.style.available
numAccesses = re.compile('Total Accesses: (\d+)')
numHits = re.compile('Total Hits: (\d+)')
numMisses = re.compile('Total Misses: (\d+)')
hitRate = re.compile('Hit Rate: (.+)')
missRate = re.compile('Miss Rate: (.+)')
memInfo = re.compile('(.+) (IFETCH|READ|WRITE)')
cacheType = re.compile('Cache Type: (.+)')
footprintInfo = re.compile(' *([a-z+]+) * (\d+) Bytes * ([0-9\.]+) KB')
gcSectionStart = re.compile('Start GC Section Info: Type = (FULL|YOUNG)')
gcSectionEnd = re.compile('End GC Section Info')
finalSection = re.compile('Start Overall Info')
cacheSection = re.compile('CACHE INFO')
footprintSection = re.compile('FOOTPRINT INFO')
filenameInfo = re.compile('(.+)-pin\.log')
# options are mem info, cache info, footprint info
info_type = 'mem info'
# options are Loads, Stores, Instructions, Everything
cache_type = 'Loads'
# options are mem_eval, gc, overall
stage = 'mem_eval'
# used for reading the memory values
# TODO will have to re-initialize this occasionally
chunksize = 64
addressesSeen = dict()
# used for cache info
# this changes occasionally
gcCount = 0
overallCacheData = dict()
gcCacheData = dict()
gcFootprintData = dict()
totalOverallCacheData = dict()
totalGcCacheData = dict()
totalGcFootprintData =dict()
totalFootprintSize =dict()
gcCollectionOverallCacheData = dict()
gcCollectionGcCacheData = dict()
gcCollectionGcFootprintData =dict()
gcCollectionFootprintSize = dict()
gcCollectionTypes = ['serial_serial_gc', 'parallel_both_gc']
# used for footprintInfo
# this is the current set
# overallFootprint = dict()
def readCommandline():
global args
parser = argparse.ArgumentParser(prog='Plot generator')
parser.add_argument('folder', help='example')
parser.add_argument('-output', help='output name')
parser.add_argument('-colors', dest='colors', help='example')
args = parser.parse_args()
def searchForMatch(line, query):
result = query.search(line)
if result:
return result
return None
def finishProcessingGC(gc_type):
totalOverallCacheData[gc_type] = gcCollectionOverallCacheData
totalGcCacheData[gc_type] = gcCollectionGcCacheData
totalGcFootprintData[gc_type] = gcCollectionGcFootprintData
totalFootprintSize[gc_type] = gcCollectionFootprintSize
def finishProcessingFile(filename):
global gcCollectionOverallCacheData
global gcCollectionGcCacheData
global gcCollectionGcFootprintData
global gcCollectionFootprintSize
result = searchForMatch(filename, filenameInfo)
if result:
name = result.group(1)
gcCollectionOverallCacheData[name] = overallCacheData
gcCollectionGcCacheData[name] = gcCacheData
gcCollectionGcFootprintData[name] = gcFootprintData
gcCollectionFootprintSize[name] = len(addressesSeen)
else:
print "unable to parse filename"
sys.exit(1)
# this is for everything that needs to be initialized at the beginning
# of the file
def processNewFile(filename):
global overallCacheData, gcCacheData, gcFootprintData
global info_type, cache_type, stage
#clearing per gc info
addressesSeen.clear()
overallCacheData = {}
gcCacheData = {}
gcFootprintData = {}
# resetting the fsm state
info_type = 'mem info'
cache_type = 'Loads'
stage = 'mem_eval'
def processNewGC(gc_type):
global gcCollectionOverallCacheData, gcCollectionGcCacheData
global gcCollectionGcFootprintData
gcCollectionOverallCacheData = {}
gcCollectionGcCacheData = {}
gcCollectionGcFootprintData = {}
def performMemEval(line):
global addressesSeen
result = searchForMatch(line, memInfo)
if result:
# print result.group(1)
value = int(result.group(1), 16)
# print "int value ", value
# TODO need to make sure this is correct
value = value & (~(chunksize - 1))
# print "masked value ", value
addressesSeen[value] = 1
def getCacheDict():
global gcCacheData, overallCacheData
temp_dict = None
if stage == 'gc':
temp_dict = gcCacheData[gcCount]
elif stage == 'overall':
temp_dict = overallCacheData
else:
print "problem in getCacheDict"
sys.exit(1)
if cache_type not in temp_dict.keys():
temp_dict[cache_type] = dict()
return temp_dict[cache_type]
def performCacheEval(line):
global cache_type
cacheData = getCacheDict()
result = searchForMatch(line, cacheType)
if result:
cache_type = result.group(1)
category = 'accesses'
result = searchForMatch(line, numAccesses)
if result:
cacheData[category] = result.group(1)
category = 'hits'
result = searchForMatch(line, numHits)
if result:
cacheData[category] = result.group(1)
category = 'misses'
result = searchForMatch(line, numMisses)
if result:
cacheData[category] = result.group(1)
category = 'hit_rate'
result = searchForMatch(line, hitRate)
if result:
cacheData[category] = result.group(1)
def retrieveGCCount(isFull, data):
count = 0
expectedType = ("FULL" if isFull else "YOUNG")
for key in data.keys():
gcEntry = data[key]
if gcEntry["gc_type"] == expectedType:
count += 1
return count
def retrieveFootprint(isFull, data):
sumFootprint = float(0)
numValues = 0
expectedType = ("FULL" if isFull else "YOUNG")
for key in data.keys():
gcEntry = data[key]
if gcEntry["gc_type"] != expectedType:
continue
individualSum = float(0)
for keyTwo in gcEntry.keys():
if keyTwo == "gc_type":
continue
individualSum += float(gcEntry[keyTwo])
sumFootprint += individualSum
numValues += 1
result = "N/A"
if numValues != 0:
result = sumFootprint / numValues
return result
def printGCCountData():
f = sys.stdout
f.write('\ngc count info\n')
gc_types = gcCollectionTypes
f.write('gc_types')
for key in gc_types:
for x in range(0,3):
f.write(' & ')
f.write(key)
f.write('\n')
f.write('benchmarks')
for key in gc_types:
vals = ['young', 'full', 'total']
for v in vals:
f.write(' & ')
f.write(v)
f.write('\n')
benchmarks = totalFootprintSize[gc_types[0]].keys()
for bench in sorted(benchmarks):
f.write(bench)
for gc in gc_types:
young = retrieveGCCount(False, totalGcFootprintData[gc][bench])
old = retrieveGCCount(True, totalGcFootprintData[gc][bench])
total = young + old
result = " & " + str(young) + " & " + str(old) + " & " + str(total)
f.write(result)
f.write('\n')
def printFootprintInfo():
f = sys.stdout
f.write('\nmemory footprint info\n')
gc_types = gcCollectionTypes
f.write('gc_types')
for key in gc_types:
for x in range(0,3):
f.write(' & ')
f.write(key)
f.write('\n')
f.write('benchmarks')
for key in gc_types:
vals = ['young', 'full', 'overall']
for v in vals:
f.write(' & ')
f.write(v)
f.write('\n')
benchmarks = totalFootprintSize[gc_types[0]].keys()
for bench in sorted(benchmarks):
f.write(bench)
for gc in gc_types:
overall = float(chunksize) * totalFootprintSize[gc][bench]
young = retrieveFootprint(False, totalGcFootprintData[gc][bench])
old = retrieveFootprint(True, totalGcFootprintData[gc][bench])
# TODO probably convert bytes to KBs or MBs here
overall = overall if float(overall) != 0.0 else "N/A"
result = " & " + str(young) + " & " + str(old) + " & " + str(overall)
f.write(result)
f.write('\n')
def retrieveCache(isFull, data):
sumFootprint = float(0)
numValues = 0
expectedType = ("FULL" if isFull else "YOUNG")
for key in data.keys():
gcEntry = data[key]
if gcEntry["gc_type"] != expectedType:
continue
individualSum = gcEntry['Everything']['hit_rate']
sumFootprint += float(individualSum)
numValues += 1
result = "N/A"
if numValues != 0:
result = sumFootprint / numValues
return result
def printCacheInfo():
f = sys.stdout
f.write('\ncache hit rate info\n')
gc_types = gcCollectionTypes
f.write('gc_types')
for key in gc_types:
for x in range(0,3):
f.write(' & ')
f.write(key)
f.write('\n')
f.write('benchmarks')
for key in gc_types:
vals = ['young', 'full', 'overall']
for v in vals:
f.write(' & ')
f.write(v)
f.write('\n')
benchmarks = totalFootprintSize[gc_types[0]].keys()
for bench in sorted(benchmarks):
f.write(bench)
for gc in gc_types:
# TODO need to finish this
# print "gc : ", gc, " bench: ", bench
overall = totalOverallCacheData[gc][bench]['Everything']['hit_rate']
if overall == "-nan":
overall = "N/A"
young = retrieveCache(False, totalGcCacheData[gc][bench])
old = retrieveCache(True, totalGcCacheData[gc][bench])
# TODO probably convert bytes to KBs or MBs here
result = " & " + str(young) + " & " + str(old) + " & " + str(overall)
f.write(result)
f.write('\n')
def getFootprintDict():
global gcFootprintData
return gcFootprintData[gcCount]
def performFootprintEval(line):
footprintData = getFootprintDict()
category = ''
result = searchForMatch(line, footprintInfo)
if result:
# the type
key = result.group(1)
# the amount of bytes
value = result.group(2)
footprintData[key] = value
def checkForTransition(line):
global info_type, stage
global cacheData, footprintData
global gcCacheData, gcFootprintData, gcCount, overallCacheData
result = searchForMatch(line, gcSectionStart)
if result:
stage = 'gc'
gcCacheData[gcCount] = dict()
gcFootprintData[gcCount] = dict()
gc_type = result.group(1)
gcCacheData[gcCount]['gc_type'] = gc_type
gcFootprintData[gcCount]['gc_type'] = gc_type
if searchForMatch(line, gcSectionEnd):
stage = 'mem_eval'
info_type = 'mem info'
gcCount += 1
if searchForMatch(line, finalSection):
stage = 'overall'
info_type = 'cache info'
if searchForMatch(line, cacheSection):
info_type = 'cache info'
if searchForMatch(line, footprintSection):
info_type = 'footprint info'
def getEvalRoutine():
return {
'mem info': performMemEval,
'cache info': performCacheEval,
'footprint info': performFootprintEval
}[info_type]
def processData(line):
checkForTransition(line)
routine = getEvalRoutine()
routine(line)
def main():
readCommandline()
os.chdir(args.folder)
parDir = os.getcwd()
for gc_type in glob.glob("*"):
print "gc type ", gc_type
os.chdir(gc_type)
processNewGC(gc_type)
for filename in glob.glob("*.log"):
print "filename: ", filename
processNewFile(filename)
with open(filename) as f:
for line in f:
processData(line)
#print "addresses seen" , len(addressesSeen)
finishProcessingFile(filename)
finishProcessingGC(gc_type)
# going back to parent dir
os.chdir(parDir)
print totalOverallCacheData
print totalGcCacheData
#print totalGcFootprintData
#print totalFootprintSize
printGCCountData()
printFootprintInfo()
printCacheInfo()
if __name__ == '__main__':
main()
# print data
|
# created by Ryan Spies
# 2/19/2015
# Python 2.7
# Description: parses a summary file of USGS site info obtained from the website
# and splits out individual cardfiles for each site. Also creates a summary csv file
# with calculated valid data points and percent of total, used for display in ArcMap.
import os
import datetime as dt
from dateutil.relativedelta import relativedelta
import dateutil
import dateutil.parser
import glob
import numpy as np
import collections
os.chdir("../..")
maindir = os.getcwd()
################### user input #########################
RFC = 'SERFC_FY2016'
state = 'GA'
station_plot = 'on' # creates a summary bar plot for each station -> choices: 'on' or 'off'
workingdir = maindir + os.sep + 'Calibration_NWS'+ os.sep +RFC[:5] + os.sep + RFC + os.sep +'station_data'
variable = 'ptpx' # choices: 'ptpx', 'tamn', or 'tamx'
timestep = 'daily' # choices: 'hourly' or 'daily'
dim = 'L'; unit = 'IN'
summer_thresh = 12; winter_thresh = 12 #precip thresholds (inches) to flag and set missing
############# files/dir below must exist ####################
station_file = workingdir + os.sep + 'usgs_' + timestep +os.sep + 'usgs_site_locations_' + timestep + '_' + state + '.txt'
#daily_obs_file = workingdir + os.sep + 'nhds_' + timestep +os.sep + 'nhds_site_obs_time_' + state + '.csv'
data_dir = workingdir + os.sep + 'usgs_' + timestep +os.sep + variable + os.sep + 'download_data' + os.sep + state.upper()
out_dir = workingdir + os.sep + 'usgs_' + timestep +os.sep + variable + os.sep + 'cardfiles' + os.sep + state.upper() + os.sep
bad_ptpx_file = workingdir + os.sep + 'usgs_' + timestep +os.sep + 'questionable_ptpx_check_' + timestep + '_' + state + '.txt'
user_bad_data_list = workingdir + os.sep + 'usgs_' + timestep +os.sep + 'CHPS_suspect_map.csv'
#################### end user input ########################
if station_plot == 'on':
import matplotlib.pyplot as plt
import pandas as pd
plt.ioff()
import matplotlib.dates
if variable == 'tamn':
ext = '.tmn'; taplot = 'usgs_' + variable + '.taplot'; tap_open = open(workingdir + os.sep + 'usgs_' + timestep + os.sep + variable + os.sep + taplot, 'wb')
if variable == 'tamx':
ext = '.tmx'; taplot = 'usgs_' + variable + '.taplot'; tap_open = open(workingdir + os.sep + 'usgs_' + timestep + os.sep + variable + os.sep + taplot, 'wb')
if variable == 'ptpx':
ext = '.ptp'
bad_ptpx_summary = open(bad_ptpx_file,'wb')
check_chps = open(user_bad_data_list,'r')
set_miss_dates = []
for line in check_chps: # check csv file with dates of suspect MAP data (from CHPS)
date_chps = dateutil.parser.parse(line)
set_miss_dates.append(date_chps.date())
if timestep == 'hourly':
year_factor = float(24*365)
if timestep == 'daily':
year_factor = float(365)
### parse summary file for station info ###
summary_file = open(workingdir + os.sep + 'station_summaries' + os.sep + 'usgs_summary_' + variable + '_' + timestep + '_' + state + '.csv','w')
summary_file.write('NAME,SITE_ID,LAT,LON,ELEV,MISSING_DATA,TOTAL_DATA,YEARS_DATA,PCT_AVAIL,YEAR_START,YEAR_END\n')
station_summary = {}; elev_list = []
read_stations = open(station_file,'r')
for line in read_stations:
if line[0] != '#':
name = line[13:40].strip() # find the station name
number = line[40:47].strip() # find the station id num (6 digit)
site_id = number.split()[1] # find the station id num (4 digit)
split = filter(None,line[47:].strip().split(' ')) # filter out blank entries in list
lat = split[0]; lon = '-' +split[1]; elev = split[2].strip(); types = split[5]
station_summary[site_id] = [name,number,lat,lon,elev]
elev_list.append(float(elev)) # used to find max/min for taplot header line
### taplot header line ###
if variable == 'tamn' or variable == 'tamx':
if len(station_summary) <= 26:
total_stations = len(station_summary)
else:
total_stations = 26
units = 'ENGL'
desc = "'Rio Grande'"
max_elev = max(elev_list); min_elev = min(elev_list)
tap_open.write('@A ')
tap_open.write('{:2d} {:4s} {:30s} {:4.0f} {:4.0f}'.format(total_stations,units,desc,max_elev,min_elev))
tap_open.write('\n')
### parse data and create individual datacard files ###
for data_file in glob.glob(data_dir+'/*.txt'):
print os.path.basename(data_file)
name = os.path.basename(data_file)[:-4] # get the actual file name from the path
site_num = name.split('.')[0]
read_data = open(data_file,'r')
count_all = 0; count_missing = 0
site_data = {}; site_data_daily = {}
site_tamx_monthly = {}; site_tamn_monthly = {}
print 'Parsing raw data file...'
for each in read_data:
if each[:4] == 'USGS':
line = each.split('\t')
data = line[3]
date_time = dateutil.parser.parse(line[2])
changemin = date_time.minute
# round sub-hourly data points up to nearest hour
if int(changemin) != 0:
changet = dt.timedelta(minutes=(60-int(changemin)))
round_dt = date_time + changet
else:
round_dt = date_time
if str(data) != 'M' and str(data) != '': # ignore missing data -> filled in below (-999)
if variable == 'ptpx' and float(data) < 12.0 and float(data) >= 0.0: # QA/QC remove unrealistic precip values
if round_dt in site_data:
site_data[round_dt].append(float(data))
else:
site_data[round_dt] = [float(data)]
if variable == 'temp':
if round_dt in site_data:
site_data[round_dt].append(float(data))
else:
site_data[round_dt] = [float(data)]
### also store temp data in daily lists within site_data_daily dictionary -> tmax and tmin calculations
if variable == 'temp':
if round_dt.replace(hour=1,minute=0) in site_data_daily:
site_data_daily[round_dt.replace(hour=1,minute=0)].append(float(data))
else:
site_data_daily[round_dt.replace(hour=1,minute=0)] = [float(data)]
read_data.close()
print 'Writing data to cardfile...'
# NOTE: UNIX requires a binary file to properly read end line formats - 'wb'
print 'Creating -> ' + variable + ' file'
min_date = min(site_data); max_date = max(site_data); iter_date = min_date
# need to be sure that the first data point starts on day 1 hour 1
if iter_date.day != 1 or iter_date.hour != 1:
iter_date = iter_date + relativedelta(months=+1)
iter_date = dt.datetime(iter_date.year,iter_date.month,1,0,0)
min_date = iter_date
month_count = 0; previous_month = 13 # use these for calculating line number for month/year lines
if timestep == 'hourly':
site_label = state + '-' + site_num + '-HLY'
if timestep == 'daily':
site_label = state + '-' + site_num + '-DLY'
if timestep == 'hourly':
step_time = 1
year_factor = float(24*365)
if timestep == 'daily': # daily tmax and tmin cardfiles
step_time = 24
year_factor = float(365)
#cardfile = open(out_dir + site_label + '_ASOS.' + str(min_date.month) + str(min_date.year) + '.' + str(max_date.month) + str(max_date.year) + ext,'wb')
cardfile = open(out_dir + site_label + ext,'wb')
###### header info ######
cardfile.write('$ Data downloaded from http://maps.waterdata.usgs.gov/mapper\n')
cardfile.write('$ Data processed from downloaded text files\n')
cardfile.write('$ Ryan Spies rspies@lynkertech.com\n')
cardfile.write('$ Data Generated: ' + str(dt.datetime.now())[:19] + '\n')
cardfile.write('$ Symbol for missing data = -999\n')
cardfile.write('{:12s} {:4s} {:4s} {:4s} {:2d} {:12s} {:12s}'.format('datacard', 'PTPX', dim,unit,int(step_time),site_label,name.upper()+'USGS'))
cardfile.write('\n')
cardfile.write('{:2d} {:4d} {:2d} {:4d} {:2d} {:8s}'.format(int(min_date.month), int(min_date.year), int(max_date.month),int(max_date.year),1,'F9.2'))
cardfile.write('\n')
###### write formatted data #####
valid_count = 0; miss_count = 0; plot_dict = {}
plot_dict = collections.OrderedDict(plot_dict) # ordered dictionary
while iter_date <= max_date:
if int(iter_date.month) == previous_month:
month_count += 1
else:
month_count = 1
if ext == '.ptp' or ext == '.tpt':
if iter_date in site_data:
valid_count += 1
if ext == '.ptp':
out_data = max(site_data[iter_date])
if ext == '.tpt':
out_data = np.mean(site_data[iter_date])
else:
out_data = -999
miss_count += 1
if out_data != -999 :
plot_dict[iter_date] = float(out_data) # append data to plot dictionary
if ext == '.tmx' or ext == '.tmn':
if iter_date in site_data_daily and len(site_data_daily[iter_date]) >= 20:
valid_count += 1
if ext == '.tmx':
out_data = np.max(site_data_daily[iter_date])
if int(iter_date.month) in site_tamx_monthly:
site_tamx_monthly[int(iter_date.month)].append(out_data)
else:
site_tamx_monthly[int(iter_date.month)] = [out_data]
if ext == '.tmn':
out_data = np.min(site_data_daily[iter_date])
if int(iter_date.month) in site_tamn_monthly:
site_tamn_monthly[int(iter_date.month)].append(out_data)
else:
site_tamn_monthly[int(iter_date.month)] = [out_data]
else:
out_data = -999
miss_count += 1
cardfile.write('{:12s}{:2d}{:02d}{:4d}{:9.2f}'.format(site_label,int(iter_date.month),int(str(iter_date.year)[-2:]),month_count,float(out_data)))
cardfile.write('\n')
previous_month = int(iter_date.month)
iter_date = iter_date + dt.timedelta(hours=step_time)
cardfile.close()
### save precip data to pandas dataframe, resample, and plot
if ext == '.ptp' and station_plot == 'on':
print 'Creating plot of daily and monthly station data... '
df = pd.DataFrame(plot_dict.items(), columns=['Date_Time', 'ptp'])
resample_df_daily = df.set_index('Date_Time')['ptp'].resample('D', how='sum')# resample to daily
resample_df_monthly = df.set_index('Date_Time')['ptp'].resample('M', how='sum')# resample to monthly
plot_dates_daily = resample_df_daily.index.to_pydatetime(); plot_data_daily = resample_df_daily.values.tolist()
plot_dates_monthly = resample_df_monthly.index.to_pydatetime(); plot_data_monthly = resample_df_monthly.values.tolist()
fig = plt.subplots(figsize=(16,10))
ax1 = plt.subplot(211)
ax1.bar(plot_dates_daily, plot_data_daily, color ='k') # plot data
ax1.set_ylabel('Daily Precip (in)')#; ax1.set_xlabel('Date')
ax1.xaxis.set_major_locator(matplotlib.dates.YearLocator())
plt.xticks(rotation='vertical')
ax1.grid(True)
plt.title('USGS: ' + str(site_num) + ' (' + str(site_label) + ')', fontsize=16)
ax2 = plt.subplot(212)
ax2.bar(plot_dates_monthly, plot_data_monthly, color ='k') # plot data
ax2.set_ylabel('Monthly Precip (in)'); ax2.set_xlabel('Date')
plt.xticks(rotation='vertical')
ax2.xaxis.set_major_locator(matplotlib.dates.YearLocator())
mean_annual_ppt = 'Mean Annual Precip: ' + "%.2f" % (np.nanmean(plot_data_monthly)*12) + ' in'
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
ax2.text(0.75, 0.95, mean_annual_ppt, fontsize=13, transform=ax2.transAxes,
verticalalignment='top', bbox=props)
#ax.xaxis.set_minor_locator(matplotlib.dates.MonthLocator())
#ax.xaxis.set_minor_formatter(matplotlib.dates.DateFormatter('%m'))
#ax.tick_params(axis='x',labelsize=8, which = 'minor')
ax2.grid(True)
plt.savefig(workingdir + os.sep + 'usgs_' + timestep +os.sep + 'station_data_plots' + os.sep + state + os.sep + site_label)#, bbox_inches='tight')
plt.close()
### write to summary csv files and taplot files ###
if variable == 'ptpx':
summary_file.write(station_summary[name][0]+','+station_summary[name][1]+','+station_summary[name][2]+','+station_summary[name][3]+','+station_summary[name][4]+','+str(miss_count)+','+str(valid_count)+','+str(round((valid_count/year_factor),2))+','+str((float(valid_count)/(miss_count+valid_count))*100)+','+str(min_date.year)[-4:] +','+ str(max_date.year)[-4:] + '\n')
# if variable == 'tmx':
# taplot.write('{:2s} {:20s} {:6.2f} {:6.2f} {:2d} {:4d}'.format('@F',"'"+str(name + ' ASOS')+"'",abs(float(lat)),abs(float(lon)),obs_time,int(site_elev[site_id])))
# taplot.write('\n')
# taplot.write('{:2s} {:3.0f} {:4.1f} {:4.1f} {:4.1f} {:4.1f} {:4.1f} {:4.1f} {:4.1f} {:4.1f} {:4.1f} {:4.1f} {:4.1f} {:4.1f}'.format('@G',weighting_factor,np.average(site_tamx_monthly[1]),np.average(site_tamx_monthly[2]),np.average(site_tamx_monthly[3]),np.average(site_tamx_monthly[4]),np.average(site_tamx_monthly[5]),np.average(site_tamx_monthly[6]),
# np.average(site_tamx_monthly[7]),np.average(site_tamx_monthly[8]),np.average(site_tamx_monthly[9]),np.average(site_tamx_monthly[10]),np.average(site_tamx_monthly[11]),np.average(site_tamx_monthly[12])))
# taplot.write('\n')
# summary_tmx.write(str(name)+','+str(site_id)+','+str(lat)+','+str(lon)+','+str(site_elev[site_id])+','+str(miss_count)+','+str(valid_count)+','+str(round((valid_count/year_factor),2))+','+str((float(valid_count)/(miss_count+valid_count))*100)+','+str(min_date.year)[-4:] +','+ str(max_date.year)[-4:] + '\n')
# if variable == 'tmn':
# taplot.write('{:2s} {:3.0f} {:4.1f} {:4.1f} {:4.1f} {:4.1f} {:4.1f} {:4.1f} {:4.1f} {:4.1f} {:4.1f} {:4.1f} {:4.1f} {:4.1f}'.format('@H',weighting_factor,np.average(site_tamn_monthly[1]),np.average(site_tamn_monthly[2]),np.average(site_tamn_monthly[3]),np.average(site_tamn_monthly[4]),np.average(site_tamn_monthly[5]),np.average(site_tamn_monthly[6]),
# np.average(site_tamn_monthly[7]),np.average(site_tamn_monthly[8]),np.average(site_tamn_monthly[9]),np.average(site_tamn_monthly[10]),np.average(site_tamn_monthly[11]),np.average(site_tamn_monthly[12])))
# taplot.write('\n')
# summary_tmn.write(str(name)+','+str(site_id)+','+str(lat)+','+str(lon)+','+str(site_elev[site_id])+','+str(miss_count)+','+str(valid_count)+','+str(round((valid_count/year_factor),2))+','+str((float(valid_count)/(miss_count+valid_count))*100)+','+str(min_date.year)[-4:] +','+ str(max_date.year)[-4:] + '\n')
#
#if taplot != 'na':
# taplot.close()
#if summary_tmx != 'na' and summary_tmn != 'na':
# summary_tmx.close(); summary_tmn.close()
summary_file.close()
print 'Completed!'
|
#!/usr/bin/python
import subprocess
import sys
import time
#import StringIO
import os
import io
from PIL import Image
# Original code written by brainflakes and modified to exit the
# image-scanning loop as soon as the sensitivity value is exceeded.
# This can speed up taking the larger photo when motion is detected early in the scan.
#
# Motion detection settings:
# need future changes to read values dynamically via command line parameter
# or xml file
# --------------------------
# Threshold:
# How much a pixel has to change by to be marked as "changed".
#
# Sensitivity:
# How many pixels must change before an image is captured; needs to be
# higher for a noisy view.
threshold = 18
sensitivity = 180
forceCapture = True
forceCaptureTime = 60 * 60 # Once an hour
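# Example with the values above: the 100x75 test image has 7,500 pixels, so
# sensitivity = 180 means roughly 2.4% of them must change by more than
# `threshold` grey levels in the green channel before motion() is triggered.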
# Capture a small test image (for motion detection)
def captureTestImage():
command = "raspistill -w {0} -h {1} -t 1 -e bmp -o /home/pi/viadoxic-spypi/buffer.bmp".format(100, 75)
subprocess.call(command, shell=True)
im = Image.open('/home/pi/viadoxic-spypi/buffer.bmp')
buffer = im.load()
#imageData.close()
return im, buffer
def motion():
print('Motion detected. Starting recording.')
cmd_str = '/home/pi/viadoxic-spypi/dsv60.sh'
proc = subprocess.Popen([cmd_str], shell=True, stdin=None, stdout=None, stderr=None)
sys.exit()
def main():
print('Detecting motion')
os.chdir('/home/pi/viadoxic-spypi/')
# Get first image
image1, buffer1 = captureTestImage()
# Reset last capture time
lastCapture = time.time()
while (True):
# Get comparison image
image2, buffer2 = captureTestImage()
# Count changed pixels
changedPixels = 0
for x in range(0, 100):
# Scan one line of image then check sensitivity for movement
for y in range(0, 75):
# Check green as it's the highest quality channel
pixdiff = abs(buffer1[x, y][1] - buffer2[x, y][1])
if pixdiff > threshold:
changedPixels += 1
# Changed logic - If movement sensitivity exceeded then
# Save image and Exit before full image scan complete
if changedPixels > sensitivity:
lastCapture = time.time()
motion()
break
continue
# Check force capture
if forceCapture:
if time.time() - lastCapture > forceCaptureTime:
changedPixels = sensitivity + 1
# Swap comparison buffers
image1 = image2
buffer1 = buffer2
main()
|
from unittest.case import TestCase
from exercicios_basicos.ex_01_calculadora_soma import efetua_soma
class TestSoma(TestCase):
def test_soma_esta_correta(self):
self.assertEqual(0, efetua_soma())
def test_soma_esta_correta_quando_tem_negativos(self):
self.assertEqual(10, efetua_soma(-1, 1, 10))
|
test_cases = int(input())
case_list = []
for case in range(test_cases):
booking_list = {1:{}}
booking_num = int(input())
for booking in range(booking_num):
start , end , cost = input().split()
start ,end ,cost = int(start) ,int(end),int(cost)
try :
booking_list[start][end] = max(booking_list[start][end],cost)
except :
try:
booking_list[start][end] = cost
except :
booking_list[start] = {end:cost}
booking_list[48] = {49:0}
case_list.append(booking_list)
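# Approach (implemented below): keep, for every (start, end) pair, only the
# best-paying booking; add zero-cost edges so the hours form a connected DAG in
# time order (connexe_graph); then repeatedly collapse the earliest outgoing
# edge while propagating accumulated costs (compress_graph). The value printed
# per test case is the maximum total cost achievable.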
def exist_in(prev,dic_arcs , index):
try:
dic_arcs[prev][index]
return True
except :
return False
def bigger_next(value,bookings):
for i in range(value+1,max(bookings)+1):
try :
tmp = bookings[i]
return i
except :
pass
return -1
def connexe_graph(bookings):
prev = -1
tmp = dict(bookings)
for index , values in bookings.items() :
for value in values :
try :
tmp[value]
except:
i = bigger_next(value,tmp)
if i!=-1:
tmp[value] = {}
tmp[value][i] = 0
if prev != -1:
if not exist_in(prev,tmp,index) :
tmp[prev][index] = 0
prev = index
return tmp
def find_all_paths(graph, start, path=[],value=0):
global paths
path = path + [start]
value += start[1]
try :
graph[start[0]]
except :
return value
for node in graph[start[0]]:
print(1)
path_node = [node,graph[start[0]][node]]
if path_node not in path:
newpath = find_all_paths(graph,path_node,path,value)
if newpath != None:
paths.append(newpath)
def compress_graph(graph):
root = min(graph)
max_value = 0
finish = False
while True:
finish = False
try :
min_node = min(graph[root])
except :
return max_value
try :
inser_node = graph[min_node]
except :
if len(graph[root]) == 0:
return max_value
max_value = max(max_value,graph[root][min_node])
del graph[root][min_node]
finish = True
if not finish :
for i in inser_node:
try :
graph[root][i] = max(graph[root][i] , graph[root][min_node] + graph[min_node][i])
except :
graph[root][i] = graph[root][min_node] + graph[min_node][i]
max_value = max(max_value,graph[root][min_node])
del graph[min_node]
del graph[root][min_node]
from collections import OrderedDict
for case in case_list :
paths = []
case = OrderedDict(sorted(case.items(), key=lambda t: t[0]))
case = connexe_graph(case)
print(compress_graph(case))
|
from rest_framework import serializers
from .models import ExerciseAndReporting,\
SwimStats,\
BikeStats,\
Steps,\
Sleep,\
Food,\
Alcohol,\
Grades,\
UserQuickLook
class ExerciseAndReportingSerializer(serializers.ModelSerializer):
user_ql= serializers.PrimaryKeyRelatedField(read_only = True)
class Meta:
model = ExerciseAndReporting
fields = ('__all__')
read_only_fields = ('record_date',)
class SwimStatsSerializer(serializers.ModelSerializer):
user_ql= serializers.PrimaryKeyRelatedField(read_only = True)
class Meta:
model = SwimStats
fields = ('__all__')
read_only_fields = ('record_date',)
class BikeStatsSerializer(serializers.ModelSerializer):
user_ql = serializers.PrimaryKeyRelatedField(read_only = True)
class Meta:
model = BikeStats
fields = ('__all__')
read_only_fields = ('record_date',)
class StepsSerializer(serializers.ModelSerializer):
user_ql = serializers.PrimaryKeyRelatedField(read_only = True)
class Meta:
model = Steps
fields = ('__all__')
read_only_fields = ('record_date',)
class SleepSerializer(serializers.ModelSerializer):
user_ql = serializers.PrimaryKeyRelatedField(read_only = True)
class Meta:
model = Sleep
fields = ('__all__')
read_only_fields = ('record_date',)
class FoodSerializer(serializers.ModelSerializer):
user_ql = serializers.PrimaryKeyRelatedField(read_only = True)
class Meta:
model = Food
fields = ('__all__')
read_only_fields = ('record_date',)
class AlcoholSerializer(serializers.ModelSerializer):
user_ql = serializers.PrimaryKeyRelatedField(read_only = True)
class Meta:
model = Alcohol
fields = ('__all__')
read_only_fields = ('record_date',)
class GradesSerializer(serializers.ModelSerializer):
user_ql = serializers.PrimaryKeyRelatedField(read_only = True)
class Meta:
model = Grades
fields = ('__all__')
read_only_fields = ('record_date',)
class UserQuickLookSerializer(serializers.ModelSerializer):
user = serializers.PrimaryKeyRelatedField(read_only=True)
grades_ql = GradesSerializer()
exercise_reporting_ql = ExerciseAndReportingSerializer()
swim_stats_ql = SwimStatsSerializer()
bike_stats_ql = BikeStatsSerializer()
steps_ql = StepsSerializer()
sleep_ql = SleepSerializer()
food_ql = FoodSerializer()
alcohol_ql = AlcoholSerializer()
class Meta:
model = UserQuickLook
fields = ('user','created_at','updated_at','grades_ql','exercise_reporting_ql','swim_stats_ql',
'bike_stats_ql','steps_ql','sleep_ql','food_ql','alcohol_ql')
read_only_fields = ('created_at','updated_at',)
def _update_helper(self, instance, validated_data):
'''
This function will iterate all fields of given instance
and update them with new data (if present) otherwise
with old data
'''
fields = [f.name for f in instance._meta.get_fields()]
for f in fields:
setattr(instance,f,
validated_data.get(f,getattr(instance,f)))
instance.save()
def create(self, validated_data):
user = self.context['request'].user
grades_data = validated_data.pop('grades_ql')
exercise_reporting_data = validated_data.pop('exercise_reporting_ql')
swim_data = validated_data.pop('swim_stats_ql')
bike_data = validated_data.pop('bike_stats_ql')
steps_data = validated_data.pop('steps_ql')
sleep_data = validated_data.pop('sleep_ql')
food_data = validated_data.pop('food_ql')
alcohol_data = validated_data.pop('alcohol_ql')
user_ql = UserQuickLook.objects.create(user=user, **validated_data)
Grades.objects.create(user_ql=user_ql, **grades_data)
ExerciseAndReporting.objects.create(user_ql = user_ql,**exercise_reporting_data)
SwimStats.objects.create(user_ql = user_ql,**swim_data)
BikeStats.objects.create(user_ql = user_ql,**bike_data)
Steps.objects.create(user_ql = user_ql,**steps_data)
Sleep.objects.create(user_ql = user_ql,**sleep_data)
Food.objects.create(user_ql = user_ql,**food_data)
Alcohol.objects.create(user_ql = user_ql,**alcohol_data)
return user_ql
def update(self,instance,validated_data):
grades_data = validated_data.pop('grades_ql')
exercise_reporting_data = validated_data.pop('exercise_reporting_ql')
swim_data = validated_data.pop('swim_stats_ql')
bike_data = validated_data.pop('bike_stats_ql')
steps_data = validated_data.pop('steps_ql')
sleep_data = validated_data.pop('sleep_ql')
food_data = validated_data.pop('food_ql')
alcohol_data = validated_data.pop('alcohol_ql')
grades_obj = instance.grades_ql
self._update_helper(grades_obj,grades_data)
exercise_reporting_obj = instance.exercise_reporting_ql
self._update_helper(exercise_reporting_obj, exercise_reporting_data)
swim_obj = instance.swim_stats_ql
self._update_helper(swim_obj,swim_data)
bike_obj = instance.bike_stats_ql
self._update_helper(bike_obj,bike_data)
steps_obj = instance.steps_ql
self._update_helper(steps_obj,steps_data)
sleep_obj = instance.sleep_ql
self._update_helper(sleep_obj,sleep_data)
food_obj = instance.food_ql
self._update_helper(food_obj,food_data)
alcohol_obj = instance.alcohol_ql
self._update_helper(alcohol_obj,alcohol_data)
|
##encoding=utf-8
"""
Import Command
--------------
from archives.htmlparser import htmlparser
"""
from bs4 import BeautifulSoup as BS4
import re
class HTMLParser():
def get_total_number_of_records(self, html):
"""get how many results returns
"""
s = re.findall(r"(?<=>Showing 1-10 of ).{1,10}(?=</span>)", html)[0]
s = s.replace(",", "")
return int(s)
def extract_records(self, html):
"""record extractor from result page
silently handle exception
"""
soup = BS4(html, "html.parser")
for resultsLists in soup.find_all("div", id="resultsLists"):
for resultBox in resultsLists.find_all("div", class_ = "resultBox"):
resultRows = resultBox.find_all("div", class_ = "resultRow")
resultRows.pop()
record = dict()
for resultRow in resultRows:
field = resultRow.find("div", class_ = "field").text
fieldValue = resultRow.find("div", class_ = "fieldValue").text
record[field] = fieldValue
yield record
break # only need the first resultsLists div block
htmlparser = HTMLParser()
if __name__ == "__main__":
import requests
from urlencoder import urlencoder
from metadata import lastname_dict, lastname_reverse_dict
def read(fname):
with open(fname, "rb") as f:
return f.read().decode("utf-8")
def get_test_data():
"""get some test data for test
"""
ses = requests.Session()
ses.post("http://www.archives.com/member/",
data={"__uid":"efdevices@theeagleforce.net","__pwd":"MYpasswd"})
url = urlencoder.url_birth_record("smith", 2000, 10, 1)
with open(r"test_data\birth.html", "wb") as f:
f.write(ses.get(url).content)
url = urlencoder.url_death_record("smith", 2000, 10, 1)
with open(r"test_data\death.html", "wb") as f:
f.write(ses.get(url).content)
url = urlencoder.url_marriage_record("smith", 2000, 10, 1)
with open(r"test_data\marriage.html", "wb") as f:
f.write(ses.get(url).content)
url = urlencoder.url_divorce_record("smith", 2000, 10, 1)
with open(r"test_data\divorce.html", "wb") as f:
f.write(ses.get(url).content)
# get_test_data()
def simulation():
"""
http://www.archives.com/member/Default.aspx?_act=VitalSearchResult&LastName=Smith&DivorceYear=2000&Country=US&State=&Location=US&ShowSummaryLink=1&RecordType=4&activityID=32d47e7f-1b40-44af-b6a1-93501b7c2a59&pagesize=1000&pageNumber=6&pagesizeAP=1000&pageNumberAP=6
http://www.archives.com/member/Default.aspx?_act=VitalSearchResult&LastName=Parra&DivorceYear=2000&Country=US&State=&Location=US&ShowSummaryLink=1&RecordType=4&activityID=32d47e7f-1b40-44af-b6a1-93501b7c2a59&pagesize=1000&pageNumber=2&pagesizeAP=1000&pageNumberAP=2
http://www.archives.com/member/Default.aspx?_act=VitalSearchResult&LastName=Johnson&DivorceYear=2000&Country=US&State=&Location=US&ShowSummaryLink=1&RecordType=4&activityID=32d47e7f-1b40-44af-b6a1-93501b7c2a59&pagesize=1000&pageNumber=5&pagesizeAP=1000&pageNumberAP=5
http://www.archives.com/member/Default.aspx?_act=VitalSearchResult&LastName=Mcdowell&DivorceYear=2000&Country=US&State=&Location=US&ShowSummaryLink=1&RecordType=4&activityID=32d47e7f-1b40-44af-b6a1-93501b7c2a59&pagesize=1000&pageNumber=2&pagesizeAP=1000&pageNumberAP=2
"""
ses = requests.Session()
ses.post("http://www.archives.com/member/",
data={"__uid":"efdevices@theeagleforce.net","__pwd":"MYpasswd"})
url = urlencoder.url_divorce_record("Smith", 2000, 10, 1)
html = ses.get(url).content.decode("utf-8")
print(htmlparser.get_total_number_of_records(html))
simulation()
def test_htmlparser():
# html = read(r"test_data\birth.html")
# html = read(r"test_data\death.html")
# html = read(r"test_data\marriage.html")
# html = read(r"test_data\divorce.html")
print(htmlparser.get_total_number_of_records(html))
for record in htmlparser.extract_records(html):
print(record)
# test_htmlparser()
|
"""Modules used in the README example"""
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LinearRegression
import numpy as np
from flexp import flexp
class LoadData:
"""Load queries and targets from a tsv file"""
provides = ["queries", "targets"]
requires = []
def __init__(self, file_name="example_queries.tsv"):
"""
:param file_name: Path to the dataset to load
:type file_name: str
"""
self.file_name = file_name
def process(self, data):
"""
:param data: Data modified by the module
:type data: dict|object
"""
# Read the file and split the lines into query and target value
with open(self.file_name, 'r') as f:
lines = f.readlines()
lines = [line.strip().rsplit("\t", 1) for line in lines]
queries, targets = zip(*lines)
# Store queries and targets in data
data['queries'] = queries
data['targets'] = [float(t) for t in targets]
def close(self):
pass
class Lowercase:
"""Lowercase queries"""
requires = ["queries"]
provides = ["lowercased"]
def process(self, data):
"""
:param data: Data modified by the module
:type data: dict|object
"""
data["lowercased"] = [q.lower() for q in data["queries"]]
class TfIdf:
"""Compute TF-IDF features for queries"""
requires = ["lowercased"]
provides = ["features"]
def process(self, data):
"""
:param data: Data modified by the module
:type data: dict|object
"""
tfidf = TfidfVectorizer()
data["features"] = tfidf.fit_transform(data["lowercased"])
class TrainTestSplit:
"""Split data to training and test set"""
requires = ["features", "targets"]
provides = ["train", "test"]
def process(self, data):
"""
:param data: Data modified by the module
:type data: dict|object
"""
x_train, x_test, y_train, y_test = train_test_split(
data["features"], data["targets"], random_state=42)
data["train"] = (x_train, y_train)
data["test"] = (x_test, y_test)
class Train:
"""Train a model and save its predictions on the test set"""
requires = ["train", "test"]
def __init__(self):
self.regressor = LinearRegression()
def process(self, data):
"""
:param data: Data modified by the module
:type data: dict|object
"""
self.regressor.fit(data["train"][0], data["train"][1])
data['predictions'] = self.regressor.predict(data['test'][0])
# Store predictions in the experiment folder
with open(flexp.get_file_path("predictions.csv"), "w") as fout:
fout.write("\n".join(str(row) for row in data['predictions']))
def rmse(a, b):
"""Root mean square error"""
return np.sqrt(((a - b) ** 2).mean())
class Eval:
"""Evaluate the model"""
requires = ["predictions"]
def process(self, data):
"""
:param data: Data modified by the module
:type data: dict|object
"""
error = rmse(np.array(data['test'][1]), np.array(data['predictions']))
with open(flexp.get_file_path("results.csv"), "w") as fout:
print("RMSE: {}".format(error), file=fout)
|
import struct
class Packer(object):
def __init__(self, buf):
self._buffer = buf
self._offset = 0
self._typemethods = {'b': self.pack_int8, 'B': self.pack_uint8,
'h': self.pack_int16, 'H': self.pack_uint16,
'i': self.pack_int32, 'I': self.pack_uint32,
'q': self.pack_int64, 'Q': self.pack_uint64,
'f': self.pack_float, 'd': self.pack_double,
's': self.pack_string, 'm': self.pack_message,
}
@property
def offset(self):
return self._offset
def pack_integer(self, fmt, value):
struct.pack_into(fmt, self._buffer, self._offset, value)
self._offset = self._offset + struct.calcsize(fmt)
def pack_int8(self, value):
self.pack_integer('<b', value)
def pack_int16(self, value):
self.pack_integer('<h', value)
def pack_int32(self, value):
self.pack_integer('<l', value)
def pack_int64(self, value):
self.pack_integer('<q', value)
def pack_uint8(self, value):
self.pack_integer('<B', value)
def pack_uint16(self, value):
self.pack_integer('<H', value)
def pack_uint32(self, value):
self.pack_integer('<I', value)
def pack_uint64(self, value):
self.pack_integer('<Q', value)
def pack_float(self, value):
self.pack_string(str(value))
def pack_double(self, value):
self.pack_string(str(value))
def pack_string(self, value):
l = len(value)
self.pack_uint16(l)
struct.pack_into('%ds' % (l,), self._buffer, self._offset,
value.encode('utf-8'))
self._offset = self._offset + l
def pack_binary(self, value):
l = len(value)
self.pack_uint32(l)
struct.pack_into('%ds' % (l,), self._buffer, self._offset,
value.encode('utf-8'))
self._offset = self._offset + l
def pack_list(self, l):
self.pack_uint32(len(l))
for value in l:
self._typemethods[l.typecode](value)
def pack_set(self, s):
self.pack_uint32(len(s))
for value in s:
self._typemethods[s.typecode](value)
def pack_dict(self, d):
self.pack_uint32(len(d))
for k, v in d.items():
self._typemethods[d.key_typecode](k)
self._typemethods[d.value_typecode](v)
def pack_message(self, msg):
msg.pack(self)
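# A small usage sketch, not part of the original module: it assumes the caller
# supplies a pre-allocated writable buffer (e.g. a bytearray) large enough for
# the packed data.
if __name__ == "__main__":
    buf = bytearray(64)
    packer = Packer(buf)
    packer.pack_uint16(2020)      # 2 bytes, little-endian
    packer.pack_string("hello")   # uint16 length prefix + UTF-8 bytes
    packer.pack_double(3.14)      # doubles are packed via their string form
    print(packer.offset, bytes(buf[:packer.offset]))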
|
from molecularfunctionsOOP import particleClass
from molecularPhysicalQuantities import PlotPQs
import numpy as np
from JosPlotPy import AnimatedScatter
import molecularPhysicalQuantities as PQ
#matplotlib.pyplot.close("all") #closing all the figures
#set global constants
Np=108
deltat=.004
mass = 1
dens = .85
temp = .10
amountoftimesteps=1000
inittau=0 #tau is the correlation time
endtau=100
amountoftau=20
particles = particleClass(Np, dens, temp,mass)
plots=PlotPQs(particles,amountoftimesteps,deltat)
plots.PlotThings(particles,deltat)
#PQ.plotcorr(particles,inittau,endtau,amountoftau,amountoftimesteps,deltat)
Animation=AnimatedScatter(particles,deltat)
Animation.show()
|
from bs4 import BeautifulSoup
import requests
import os
import spotipy
import spotipy.util as util
import datetime
'''Scrapes hotnewhiphop top 100 songs then creates and returns list of songs by artists I enjoy
parameters:
fav_artists - artists I want songs from '''
def getTopSongs(fav_artists):
artists = []
song_titles = []
songs = []
url = 'https://www.hotnewhiphop.com/top100/'
source = requests.get(url).text
soup = BeautifulSoup(source, 'html.parser')
for song in soup.find_all('div', class_='chartItem-body-artist'):
song_title = song.a.text.strip()
for artist in song.find('strong', class_='chartItem-artist-artistName'):
if artist in fav_artists:
songs.append([song_title, artist])
return songs
'''Normalizes song name. Removes producer name, and other aspects that will affect the song being found on Spotify
parameters:
songs - list of songs to normalize'''
def normalizeSongs(songs):
for song in songs:
song_title = song[0]
if '(' in song_title:
index = song_title.find('(')
song_title = song_title[0: index-1]
song[0] = song_title
print("Songs that I want from HotNewHipHop's top 100: ")
print('--------------------')
for song in songs:
print('{} - {}'.format(song[0], song[1]))
print('--------------------\n')
return songs
'''Gathers and returns access token to my personal Spotify account
parameters:
user - username of account'''
def getToken(user):
desired_scope = 'playlist-modify-private, playlist-read-private'
id = os.environ.get('SPOT_CLIENT')
secret = os.environ.get('SPOT_SECRET')
uri = 'http://google.com/'
access_token = util.prompt_for_user_token(username=user, scope=desired_scope, client_id=id, client_secret=secret,
redirect_uri=uri)
if access_token:
return access_token
else:
print('ERROR obtaining token.')
return
'''Decides playlist name to add songs to based on month and year. Returns string in year-month format (ex: 18Nov)'''
def determinePlaylist():
date = datetime.datetime.now()
month = date.strftime('%b')
year = date.strftime('%y')
playlist_name = year + month
return playlist_name
'''Searches to see if playlist exists in my Spotify library. Returns True if it does and False otherwise
parameters:
sp - spotify session
playlist_name - playlist that is being searched for'''
def playlistExists(sp, playlist_name):
my_playlists = sp.current_user_playlists()
for playlist in my_playlists['items']:
if playlist_name == playlist['name']:
return True
return False
'''Creates Spotify playlist for current month
parameters:
sp - spotify session
user = username of account
playlist_name - name of playlist to be created'''
def createPlaylist(sp, user, playlist_name):
sp.user_playlist_create(user, playlist_name, public= False)
print('New playlist, {}, created'.format(playlist_name))
return
'''Obtains and returns desired playlist's id.
parameters:
sp - spotify session
user - username of account
playlist_name - name of playlist to get id for'''
def getPlaylistID(sp, user, playlist_name):
playlists = sp.user_playlists(user)
for playlist in playlists['items']:
if playlist['name'] == playlist_name:
id = playlist['id']
return id
'''Searches Spotify for song. If the song is found, the function returns the song id. If the song is not found it returns False
parameters:
sp - spotify session
song - song list containing name and artist. Ex: ["Gooey", "Glass Animals"]'''
def spotifySearch(sp, song):
tracks = []
title, wanted_artist = song[0], song[1]
query = '{} - {}'.format(title, wanted_artist)
search_query = sp.search(query, type='track')
for result in search_query['tracks']['items']:
tracks.append(result['external_urls'])
if not tracks:
print('{} - {}'.format(song[0], song[1]))
return
track = tracks[0]
wanted_track = track['spotify']
link, song_id = wanted_track.split('https://open.spotify.com/track/')
return song_id
'''Gathers and returns list of ids in master_ids
parameters:
path - path of file being read'''
def readFile(path):
contents = ''
with open(path) as file:
ids = file.read()
file.close()
contents = ids.split('\n')
return contents
'''Writes ids to master_ids file
parameters:
path - path of file to write to
id - song id to add'''
def writeToFile(path, id):
with open(path, 'a+') as file:
file.write(id)
file.write('\n')
return
'''Adds song to the corresponding month's playlist. Returns nothing
parameters:
sp - spotify session
user - username of account
song - list of song id and name
playlist - monthly playlist name that song will be added to'''
def addSong(sp, user, song, playListID):
track_uri = [song[0]]
song_added = song[1]
print(f'{song_added[0]} - {song_added[1]}')
sp.user_playlist_add_tracks(user, playListID, track_uri)
return
#main script
def main():
desired_artists = ['Drake', 'Nav', 'Machine Gun Kelly', 'A$AP Rocky', 'NF', 'Post Malone', 'Chance The Rapper', 'J. Cole', 'Juice WRLD',
'Kanye West', 'Kid Cudi', 'Kendrick Lamar', 'Lil Uzi Vert', 'Russ', 'B.o.B', 'Lil Dicky', 'Chris Webby', 'Eminem',
'Travis Scott', 'Flatbush Zombies', 'Logic', 'Trippie Redd', 'Vic Mensa', 'Young Thug', 'Mac Miller', 'Khalid', 'Tyler, The Creator',
'Wiz Khalifa']
#data pertaining to me personally
spotify_username = 'ccmatt19'
file_path = '/Users/mattcole/Desktop/Spotify_Playlist_Creator/master_ids.txt'
desired_songs = getTopSongs(desired_artists)
songs = normalizeSongs(desired_songs)
song_ids = []
missing_ids = []
token = getToken(spotify_username)
session = spotipy.Spotify(auth=token)
desired_playlist = determinePlaylist()
if not playlistExists(session, desired_playlist):
createPlaylist(session, spotify_username, desired_playlist)
print()
playlistID = getPlaylistID(session, spotify_username, desired_playlist)
print('Songs not found on Spotify: ')
print('---------------------------')
for song in desired_songs:
song_id = spotifySearch(session, song)
if song_id:
song_ids.append([song_id, song])
print('---------------------------\n')
master_file_contents = readFile(file_path)
print('Songs added: ')
for song in song_ids:
if song[0] not in master_file_contents:
addSong(session, spotify_username, song, playlistID)
writeToFile(file_path, song[0])
print('\nPROGRAM COMPLETE! ')
if __name__ == '__main__':
main()
|
#! /usr/bin/env python
"""
Dumps crops from a databag to disk.
Author: Martin Humphreys
"""
import cv2
from argparse import ArgumentParser
from math import floor, ceil
import os
import uuid
from DataBag import DataBag
from Query import Query
import numpy as np
def build_parser():
parser = ArgumentParser()
parser.add_argument('bag', help='The databag file with stored detection or tracking results')
parser.add_argument('output_dir', help='Place to dump crops to')
parser.add_argument("-b", "--background", help="Normalize with given background")
parser.add_argument('-v', "--verbose", help='print verbose statements while executing', action = 'store_true')
return parser
def main(opts):
if not os.path.isfile(opts.bag):
parser.error("DataBag file %s does not exist." % opts.bag)
if not os.path.exists(opts.output_dir):
os.makedirs(opts.output_dir)
bag = DataBag.fromArg(opts.bag)
query = Query(bag)
for f in query.frame_list():
if opts.verbose:
print "Extracting crops from frame", f.frame
for p in query.particles_in_frame(f.frame):
if f.frame % 10 == 5: # only one particle crop per burst please.
crop = bag.getCrop(p.frame, p.id)[0]
path = os.path.join(opts.output_dir, str(p.category))
file = "{}.png".format(uuid.uuid4())
if not os.path.exists(path):
os.makedirs(path)
cv2.imwrite(os.path.join(path, file), crop)
if __name__ == '__main__':
main(build_parser().parse_args())
|
import cv2 as cv
import numpy as np
def detect(image):
classifier = cv.CascadeClassifier()
classifier.load(cv.samples.findFile('./data/haarcascade_frontalface.xml'))
faces = classifier.detectMultiScale(image)
heights = [face[-1] for face in faces]
f = [0,0,0,0]
if(heights):
m = max(heights)
f = 0
for i in range(len(faces)):
if(faces[i][-1] == m):
f = faces[i]
if(f[2]==0 and f[3]==0):
return -1,image
else:
return 1,image[f[1]:f[1]+f[3],f[0]:f[0]+f[2]]
if __name__ =='__main__':
cap = cv.VideoCapture(0)
while True:
ret, frame = cap.read()
print(detect(frame))
|
def recurPower(base, exp):
if exp == 1:
return base
elif exp > 0 and exp % 2 == 0:
return recurPower(base ** 2, exp / 2)
elif exp > 0 and exp % 2 != 0:
return recurPower(base, exp - 1) * base
print recurPower(2, 3)
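# Worked trace of the squaring trick above (the exponent halves on every even
# step, so only O(log exp) multiplications are needed):
#   recurPower(2, 10) -> recurPower(4, 5)        # even exp: square the base
#                     -> recurPower(4, 4) * 4    # odd exp: peel off one factor
#                     -> recurPower(16, 2) * 4
#                     -> recurPower(256, 1) * 4
#                     -> 256 * 4 = 1024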
|
import logging
import sys
import warnings
import os
import six
from tqdm import tqdm
log_formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
def loglevel_from_string(level):
"""
>>> loglevel_from_string('debug')
10
>>> loglevel_from_string(logging.INFO)
20
"""
if isinstance(level, str):
level = getattr(logging, level.upper())
assert isinstance(level, six.integer_types)
return level
def get_loglevel_from_env(default_level):
"""
>>> os.environ['FLEXP_LOGLEVEL'] = 'info'
>>> get_loglevel_from_env(logging.DEBUG)
20
>>> del os.environ['FLEXP_LOGLEVEL']
>>> get_loglevel_from_env(logging.DEBUG)
10
"""
flexp_loglevel = os.environ.get('FLEXP_LOGLEVEL')
if flexp_loglevel is not None:
loglevel = flexp_loglevel
else:
loglevel = default_level
return loglevel_from_string(loglevel)
class TqdmLoggingHandler(logging.Handler):
"""
credit: https://stackoverflow.com/questions/38543506/change-logging-print-function-to-tqdm-write-so-logging-doesnt-interfere-wit
"""
def __init__(self, level=logging.NOTSET):
super().__init__(level)
def emit(self, record):
try:
msg = self.format(record)
tqdm.write(msg, file=sys.stderr)
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
def _setup_logging(level=logging.DEBUG, filename='log.txt', disable_stderr=False):
_close_file_handlers()
level = loglevel_from_string(level)
root_logger = logging.getLogger()
root_logger.setLevel(level)
if filename is not None:
file_handler = logging.FileHandler(filename)
file_handler.setFormatter(log_formatter)
root_logger.addHandler(file_handler)
if not disable_stderr:
tqdm_handler = TqdmLoggingHandler(level)
tqdm_handler.setFormatter(log_formatter)
root_logger.addHandler(tqdm_handler)
warnings.simplefilter("once")
def _close_file_handlers():
root_logger = logging.getLogger()
for file_handler in root_logger.handlers:
file_handler.close()
root_logger.handlers = []
|
from app import app
from flask import render_template, request, redirect, jsonify, make_response
from datetime import datetime
import os
from werkzeug.utils import secure_filename
from flask import send_file, send_from_directory, safe_join, abort, session, url_for
from flask import flash
@app.template_filter("clean_date")
def clean_date(dt):
return dt.strftime("%d %b %Y %H:%M:%S %A")
@app.route("/")
def index():
# print(app.config)
return render_template("public/index.html")
@app.route("/about")
def about():
return """
<h1 style='color: red;'>I'm a red H1 heading!</h1>
<p>This is a lovely little paragraph</p>
<code>Flask is <em>awesome</em></code>
"""
@app.route("/sign_up", methods=["GET", "POST"])
def sign_up():
if request.method == "POST":
req = request.form
print (req)
missing = list()
for k, v in req.items():
if v =="":
missing.append(k)
if missing:
feedback = f"Missing fields for {', '.join(missing)}"
return render_template("public/sign_up.html", feedback=feedback)
return redirect (request.url)
return render_template("public/sign_up.html")
@app.route("/jinja")
def jinja():
# Strings
my_name = "Djonny"
# Integers
my_age = 45
# Lists
langs = ["Python", "JavaScript", "Bash", "Ruby", "C", "Rust"]
# Dictionaries
friends = {
"Tony": 43,
"Cody": 28,
"Amy": 26,
"Clarissa": 23,
"Wendell": 39
}
# Tuples
colors = ("Red", "Blue")
# Booleans
cool = True
# Classes
class GitRemote:
def __init__(self, name, description, domain):
self.name = name
self.description = description
self.domain = domain
def pull(self):
return f'Pulling repo {self.name}'
def clone(self):
return f"Cloning into {self.domain}"
def fff(self):
return self.description
my_remote = GitRemote(
name="Learning Flask",
description="Learn the Flask web framework for Python",
domain="https://github.com/Julian-Nash/learning-flask.git"
)
# Functions
def repeat(x, qty=1):
return x * qty
date = datetime.utcnow()
my_html = "<h1>This is some HTML</h1>"
suspicious = "<script>alert('NEVER TRUST USER INPUT!')</script>"
return render_template(
"public/jinja.html", my_name=my_name, my_age=my_age, langs=langs,
friends=friends, colors=colors, cool=cool, GitRemote=GitRemote,
my_remote=my_remote, repeat=repeat, date=date, my_html=my_html,
suspicious=suspicious
)
users = {
"mitsuhiko": {
"name": "Armin Ronacher",
"bio": "Creatof of the Flask framework",
"twitter_handle": "@mitsuhiko"
},
"gvanrossum": {
"name": "Guido Van Rossum",
"bio": "Creator of the Python programming language",
"twitter_handle": "@gvanrossum"
},
"elonmusk": {
"name": "Elon Musk",
"bio": "technology entrepreneur, investor, and engineer",
"twitter_handle": "@elonmusk"
}
}
@app.route("/profile/<username>")
def profile(username):
user = None
if username in users:
user = users[username]
return render_template("public/profile.html", username=username, user=user)
@app.route("/multiple/<foo>/<bar>/<baz>")
def multiple(foo, bar, baz):
print(f"foo is {foo}")
print(f"bar is {bar}")
print(f"baz is {baz}")
return f"foo is {foo}, bar is {bar}, baz is {baz}"
'''
@app.route("/json", method=["POST"])
def json_example():
# Validate the request body contains JSON
if request.get_json:
# Parse the JSON into a Python dictionary
req = request.get_json()
# Print the dictionary
print(req)
# Return a string along with an HTTP status code
return "JSON received!", 200
else:
# The request body wasn't JSON so return a 400 HTTP status code
return "Request was not JSON", 400 return 'Thanks!', 200
'''
@app.route("/guestbook")
def guestbook():
return render_template("public/guestbook.html")
@app.route("/guestbook/create-entry", methods=["POST"])
def create_entry():
req = request.get_json()
print(req)
# res = make_response(jsonify({"message": "OK"}), 200)
res = make_response(jsonify(req), 200)
return res
@app.route("/query")
def query():
if request.args:
# We have our query string nicely serialized as a Python dictionary
args = request.args
# We'll create a string to display the parameters & values
serialized = ", ".join(f"{k}: {v}" for k, v in request.args.items())
# Display the query string to the client in a different format
return f"(Query) {serialized}", 200
else:
return "No query string received", 200
'''
@app.route("/query")
def query():
args = request.args
print(args)
for k, v in args.items():
print(f"{k}: {v}")
return "No query string received", 200
'''
#app.config["IMAGE_UPLOADS"] = "/home/tech-3/Рабочий стол/test/app/static/img/uploads"
app.config["IMAGE_UPLOADS"] = "/home/lem/PROJECTS/test/app/static/img/uploads"
app.config["ALLOWED_IMAGE_EXTENSIONS"] = ["JPEG", "JPG", "PNG", "GIF"]
app.config["MAX_IMAGE_FILESIZE"] = 4 * 1024 * 1024
def allowed_image(filename):
if not "." in filename:
return False
ext = filename.rsplit(".", 1)[1]
if ext.upper() in app.config["ALLOWED_IMAGE_EXTENSIONS"]:
return True
else:
return False
def allowed_image_filesize(filesize):
if int(filesize) <= app.config["MAX_IMAGE_FILESIZE"]:
return True
else:
return False
@app.route("/upload-image", methods=["GET", "POST"])
def upload_image():
if request.method == "POST":
if request.files:
if "filesize" in request.cookies:
if not allowed_image_filesize(request.cookies["filesize"]):
print("Filesize exceeded maximum limit")
return redirect(request.url)
image = request.files["image"]
if image.filename == "":
print("No filename")
return redirect(request.url)
if allowed_image(image.filename):
filename = secure_filename(image.filename)
image.save(os.path.join(app.config["IMAGE_UPLOADS"], filename))
print("Image saved")
return redirect(request.url)
else:
print("That file extension is not allowed")
return redirect(request.url)
return render_template("public/upload_image.html")
#Learning Flask Ep. 14
# The absolute path of the directory containing images for users to download
app.config["CLIENT_IMAGES"] = "/home/lem/PROJECTS/test/app/static/client/img"
# The absolute path of the directory containing CSV files for users to download
app.config["CLIENT_CSV"] = "/home/lem/PROJECTS/test/app/static/client/csv"
# The absolute path of the directory containing PDF files for users to download
app.config["CLIENT_PDF"] = "/home/lem/PROJECTS/test/app/static/client/pdf"
@app.route("/get-image/<image_name>")
def get_image(image_name):
try:
return send_from_directory(app.config["CLIENT_IMAGES"], filename=image_name, as_attachment=True)
except FileNotFoundError:
abort(404)
#============================================================
#Flask cookies | Learning Flask Ep. 15
@app.route("/cookies")
def cookies():
resp = make_response("Cookies")
cookies = request.cookies
print(request.cookies)
resp.set_cookie("flavor", "chocolate_chip")
resp.set_cookie("sex", value="money", max_age=10, path=request.path)
return resp
#============================================================
#The Flask session object | Learning Flask Ep. 16
app.config["SECRET_KEY"] = "OB3Ux3QBsUxCdK0ROCQd_w"
nusers = {
"julian": {
"username": "julian",
"email": "julian@gmail.com",
"password": "example",
"bio": "Some guy from the internet"
},
"clarissa": {
"username": "clarissa",
"email": "clarissa@icloud.com",
"password": "sweetpotato22",
"bio": "Sweet potato is life"
}
}
@app.route("/sign_in", methods=['POST', 'GET'])
def sign_in():
if request.method == 'POST':
req = request.form
print (req)
username = req.get('username')
password = req.get('password')
if not username in nusers:
print ("Username not found")
return redirect(request.url)
else:
user = nusers[username]
if not password == user['password']:
print ('Incorrect password')
return redirect(request.url)
else:
session['USERNAME'] = user['username']
session['PASSWORD'] = user['password']
print('Session username set')
print(session)
# return redirect(request.url)
return redirect(url_for("user_profile"))
return render_template('public/sign_in.html')
@app.route("/user_profile")
def user_profile():
    if session.get("USERNAME") is not None:
#
username = session.get("USERNAME")
user = nusers[username]
return render_template("public/user_profile.html", user=user)
else:
print("No username found is session")
return redirect(url_for("sign_in"))
# return render_template("public/user_profile.html", user=user)
@app.route("/sign_out")
def sign_out():
session.pop("USERNAME", None)
return redirect(url_for("sign_in"))
# return '<h2>end</h2>'
#============================================================================
#Flask message flashing | Learning Flask Ep. 17
#============================================================================
@app.route("/signup", methods=['GET', 'POST'])
def signup():
if request.method == 'POST':
req=request.form
username = req.get('username')
email = req.get('email')
password = req.get('password')
        if len(password) < 10:
            flash('Password must be at least 10 characters long', 'warning')
            print('Password must be at least 10 characters long')
            return redirect(request.url)
        flash('Account created', 'success')
        print('Account created')
return redirect(request.url)
return render_template('public/signup.html')
|
# -*- coding: utf-8 -*-
import os
import numpy as np
import glob
import shutil
import matplotlib.pyplot as plt
# %matplotlib inline
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
"""## Downloading the dataset"""
_URL = "https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz"
zip_file = tf.keras.utils.get_file(origin=_URL,
fname="flower_photos.tgz",
extract=True)
base_dir = os.path.join(os.path.dirname(zip_file), 'flower_photos')
"""## Exploring our dataset"""
classes = ['roses', 'daisy', 'dandelion', 'sunflowers', 'tulips']
for cl in classes:
img_path = os.path.join(base_dir, cl)
images = glob.glob(img_path + '/*.jpg')
print("{}: {} Images".format(cl, len(images)))
num_train = int(round(len(images)*0.8))
train, val = images[:num_train], images[num_train:]
for t in train:
if not os.path.exists(os.path.join(base_dir, 'train', cl)):
os.makedirs(os.path.join(base_dir, 'train', cl))
shutil.move(t, os.path.join(base_dir, 'train', cl))
for v in val:
if not os.path.exists(os.path.join(base_dir, 'val', cl)):
os.makedirs(os.path.join(base_dir, 'val', cl))
shutil.move(v, os.path.join(base_dir, 'val', cl))
train_dir = os.path.join(base_dir, 'train')
val_dir = os.path.join(base_dir, 'val')
"""# Data Augmentation
Overfitting generally occurs when we have a small number of training examples. One way to fix this problem is to augment our dataset so that it has a sufficient number of training examples. Data augmentation generates more training data from existing samples by applying a number of random transformations that yield believable-looking images. The goal is that at training time the model never sees the exact same picture twice, which exposes it to more aspects of the data and helps it generalize better.
"""
batch_size = 200
IMG_SHAPE = 256
image_gen_train = ImageDataGenerator(
rescale=1./255,
rotation_range=45,
width_shift_range=.25,
height_shift_range=.25,
horizontal_flip=True,
vertical_flip=True,
zoom_range=0.3,
)
train_data_gen = image_gen_train.flow_from_directory(
batch_size=batch_size,
directory=train_dir,
shuffle=True,
target_size=(IMG_SHAPE,IMG_SHAPE),
class_mode='sparse'
)
"""Let's preview the changes that we made in a random picture from our dataset."""
# This function plots images in a grid with 1 row and 10 columns, one image per column.
def plotImages(images_arr):
fig, axes = plt.subplots(1, 10, figsize=(50,50))
axes = axes.flatten()
for img, ax in zip( images_arr, axes):
ax.imshow(img)
plt.tight_layout()
plt.show()
augmented_images = [train_data_gen[0][0][0] for i in range(10)]
plotImages(augmented_images)
"""Now let's create our validation data set. Note that in the validation data set we will not use data augmentantion as we want to test the accuracy of our model in real data."""
image_gen_val = ImageDataGenerator(rescale=1./255)
val_data_gen = image_gen_val.flow_from_directory(batch_size=batch_size,
directory=val_dir,
target_size=(IMG_SHAPE, IMG_SHAPE),
class_mode='sparse')
"""## Setting up the network's architecture"""
model = Sequential()
model.add(Conv2D(16, 3, padding='same', activation='relu', input_shape=(IMG_SHAPE,IMG_SHAPE, 3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, 3, padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, 3, padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dropout(0.2))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(5, activation='softmax'))
"""And compiling our model."""
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
"""## Training the CNN.
We will train the model for 80 epochs. Comparing the training and validation loss curves afterwards will make it clear whether more or fewer epochs are needed.
"""
epochs = 80
history = model.fit_generator(
train_data_gen,
steps_per_epoch=int(np.ceil(train_data_gen.n / float(batch_size))),
epochs=epochs,
validation_data=val_data_gen,
validation_steps=int(np.ceil(val_data_gen.n / float(batch_size))),
verbose=1
)
# tf.keras records the metric under 'accuracy' / 'val_accuracy'
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(epochs)
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
|
"""
MAP@K word level and character level are explained in detail in this paper:
dpUGC: Learn Differentially Private Representation for User Generated Contents
Xuan-Son Vu, Son N. Tran, Lili Jiang
In: Proceedings of the 20th International Conference on Computational Linguistics and
Intelligent Text Processing, April, 2019, (to appear)
Please cite the above paper if you use codes in this file.
"""
def apk(actual, predicted, k=10):
"""
Computes the average precision at k.
This function computes the average precision at k between two lists of
items.
Parameters
----------
actual : list
A list of elements that are to be predicted (order doesn't matter)
predicted : list
A list of predicted elements (order does matter)
k : int, optional
The maximum number of predicted elements
Returns
-------
score : double
The average precision at k over the input lists
"""
if len(predicted) > k:
predicted = predicted[:k]
score = 0.0
num_hits = 0.0
for i, p in enumerate(predicted):
if p in actual and p not in predicted[:i]:
num_hits += 1.0
score += num_hits / (i + 1.0)
if not actual:
return 0.0
return score / min(len(actual), k)
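# A quick worked example (illustrative values, not from the paper) of the calculation above:
# apk([1, 2, 3], [1, 4, 2], k=3)
#   rank 1: hit  -> precision 1/1 = 1.0
#   rank 2: miss
#   rank 3: hit  -> precision 2/3 ~= 0.667
#   AP@3 = (1.0 + 0.667) / min(len(actual), 3) = 1.667 / 3 ~= 0.556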
def mapk(actual, predicted, k=10, word_level=True):
"""
Computes the mean average precision at k.
This function computes the mean average precision at k between two lists
of lists of items.
Parameters
----------
actual : list
A list of lists of elements that are to be predicted
(order doesn't matter in the lists)
predicted : list
A list of lists of predicted elements
(order matters in the lists)
k : int, optional
The maximum number of predicted elements
Returns
-------
score : double
The mean average precision at k over the input lists
"""
# print("Sending arr = ", arr)
if word_level:
return calc_map(actual, predicted, topK=k)
else:
# arr = [apk(a, p, k) for a, p in zip(actual, predicted)]
# return np.mean(arr)
return calc_map_character_level(actual, predicted, topK=k)
def calc_map(actual, predicted, topK=10):
"""
:param actual:
:param predicted:
:param topK:
:return:
"""
# print("Input: actual %s, predicted %s"%(actual, predicted))
if len(predicted) > topK:
predicted = predicted[:topK]
idx = 1
hit = 0
map_arr = []
for answer in predicted:
if answer in actual[:topK]:
hit += 1
val = (hit * 1.0) / (idx * 1.0)
# print("hit = %s, idx = %s"%(hit, idx))
map_arr.append(val)
# print("hit: %s, map_arr = %s"%(answer, map_arr))
idx += 1
# print("map_arr = %s done", map_arr)
if len(map_arr) > 0:
return np.mean(map_arr)
else:
return 0.0
def calc_map_character_level(actual, predicted, topK=10):
"""
:param actual:
:param predicted:
:param topK:
:return:
"""
# print("Input: actual %s, predicted %s" % (actual, predicted))
if len(predicted) > topK:
predicted = predicted[:topK]
if len(actual) > topK:
actual = actual[:topK]
rank = 1
hit = 0
actual_seq = ''.join([word for word in actual])
predicted_seq = ''.join([word for word in predicted])
map_arr = []
for char in predicted_seq:
if char in actual_seq[:rank]:
hit += 1
val = (hit * 1.0) / (rank * 1.0)
# print("hit = %s, idx = %s" % (hit, rank))
map_arr.append(val)
# print("hit: %s, map_arr = %s" % (char, map_arr))
rank += 1
# print("map_arr = %s done", map_arr)
return np.mean(map_arr)
import unittest
import numpy as np
class TestMapK(unittest.TestCase):
    # These expectations follow the reference ml_metrics implementation of apk/mapk.
    def test_apk(self):
        self.assertAlmostEqual(apk(range(1, 6), [6, 4, 7, 1, 2], 2), 0.25)
        self.assertAlmostEqual(apk(range(1, 6), [1, 1, 1, 1, 1], 5), 0.2)
        predicted = list(range(1, 21))  # range objects have no extend(), so build a list first
        predicted.extend(range(200, 600))
        self.assertAlmostEqual(apk(range(1, 100), predicted, 20), 1.0)

    def test_mapk(self):
        self.assertAlmostEqual(mapk([range(1, 5)], [range(1, 5)], 3), 1.0)
        self.assertAlmostEqual(mapk([[1, 3, 4], [1, 2, 4], [1, 3]],
                                    [range(1, 6), range(1, 6), range(1, 6)], 3), 0.685185185185185)
        self.assertAlmostEqual(mapk([range(1, 6), range(1, 6)],
                                    [[6, 4, 7, 1, 2], [1, 1, 1, 1, 1]], 5), 0.26)
        self.assertAlmostEqual(mapk([[1, 3], [1, 2, 3], [1, 2, 3]],
                                    [range(1, 6), [1, 1, 1], [1, 2, 1]], 3), 11.0 / 18)
if __name__ == '__main__':
a1 = ["1", '2', '3', '4']
b1 = ['1', '5', '2', '8']
print(mapk(a1, b1, 4))
a1 = ["15"]
b1 = ["1", "2", "3", "4", "5","6","7","8","9","10"]
print("MapK:", mapk(a1, b1, 4))
# unittest.main()
|
class Visit:
# ENROLLHD visit
#def __init__(self, seq, age, days, vtype, tfc, motor, function, cognitive, pbas):
# self.seq = seq # visit sequence nr
# self.age = age # Age at time of visit
# self.days = days # Days since Baseline visit
# self.vtype = vtype # Visit type
# self.motor = motor # UHDRS Motor form
# self.tfc = tfc # UHDRS TFC form
# self.function = function # UHDRS Function form
# self.cognitive = cognitive # UHDRS Cognitive form
# self.pbas = pbas # PBA-s form
#def __init__(self, seq, age, visit, hdcat, motscore, miscore, tfcscore, fascore, fiscore, sdmt1, sdmt2, sit1, sit2,
# sit3, depscore, irascore, psyscore, aptscore, exfscore):
#
# self.seq = seq
# self.age = age
# self.visit = visit
# self.hdcat = hdcat
# self.motscore = motscore
# self.miscore = miscore
# self.tfcscore = tfcscore
# self.fascore = fascore
# self.fiscore = fiscore
# self.sdmt1 = sdmt1
# self.sdmt2 = sdmt2
# self.sit1 = sit1
# self.sit2 = sit2
# self.sit3 = sit3
# self.depscore = depscore
# self.irascore = irascore
# self.psyscore = psyscore
# self.aptscore = aptscore
# self.exfscore = exfscore
def __init__(self):
self.seq = -1
self.age = -1
self.visit = -1
self.hdcat = -1
self.motscore = -1
self.miscore = -1
self.tfcscore = -1
self.fascore = -1
self.fiscore = -1
self.sdmt1 = -1
self.sdmt2 = -1
self.sit1 = -1
self.sit2 = -1
self.sit3 = -1
self.depscore = -1
self.irascore = -1
self.psyscore = -1
self.aptscore = -1
self.exfscore = -1
class Motor:
# UHDRS Motor Diagnostic Confidence (Motor)
def __init__(self,motscore, miscore, ocularh, ocularv, sacinith, sacinitv, sacvelh, sacvelv, dysarth, tongue, fingtapr, fingtapl, prosupr, prosupl,
luria, rigarmr, rigarml, brady, dysttrnk, dystrue, dystlue, dystrle, dystlle, chorface, chorbol, chortrnk, chorrue, chorlue, chorrle,
chorlle, gait, tandem, retropls, diagconf):
# General Scores
self.motscore = motscore # UHDRS motor score
self.miscore = miscore # UHDRS motor score (incomplete)
# Group Ocular Pursuit
self.ocularh = ocularh # Horizontal
self.ocularv = ocularv # Vertical
# Group Saccade initiation
self.sacinith = sacinith # Horizontal
self.sacinitv = sacinitv # Vertical
# Group Saccade velocity
self.sacvelh = sacvelh # Horizontal
self.sacvelv = sacvelv # Vertical
self.dysarth = dysarth # Dysarthria
self.tongue = tongue # Tongue protrusion
# Group Finger taps
self.fingtapr = fingtapr # Right
self.fingtapl = fingtapl # Left
# Group Pronate supinate‐hand
self.prosupr = prosupr # Right
self.prosupl = prosupl # Left
self.luria = luria # Luria
# Group Rigidity‐arms
self.rigarmr = rigarmr # Right
self.rigarml = rigarml # Left
self.brady = brady # Bradykinesia-Body
# Group Maximal dystonia
self.dysttrnk = dysttrnk # Trunk
self.dystrue = dystrue # RUE -
self.dystlue = dystlue # LUE
self.dystrle = dystrle # RLE
self.dystlle = dystlle # LLE
# Group Maximal chorea
self.chorface = chorface # Face
self.chorbol = chorbol # BOL
self.chortrnk = chortrnk # Trunk
self.chorrue = chorrue # RUE
self.chorlue = chorlue # LUE
self.chorrle = chorrle # RLE
self.chorlle = chorlle # LLE
self.gait = gait # Gait
self.tandem = tandem # Tandem walking
self.retropls = retropls # Retropulsion pull test
# Diagnostic Confidence
self.diagconf = diagconf
class TFC:
# UHDRS Total Functional Capacity (TFC)
def __init__(self, tfcscore, occupatn, finances, chores, adl, carelvl):
        self.tfcscore = tfcscore # Functional Score
self.occupatn = occupatn # Occupation
self.finances = finances # Finances
self.chores = chores # Domestic chores
self.adl = adl # ADL
self.carelvl = carelvl # Care level
class Function:
# UHDRS Functional Assessment Independence Scale (Function)
def __init__(self, fascore, fiscore, emplusl, emplany, volunt, fafinan, grocery, cash, supchild, drive, housewrk,
laundry, prepmeal, telephon, ownmeds, feedself, dress, bathe, pubtrans, walknbr, walkfall, walkhelp,
comb, trnchair, bed, toilet, carehome, indepscl):
self.fascore = fascore
self.fiscore = fiscore
self.emplusl = emplusl
self.emplany = emplany
self.volunt = volunt
self.fafinan = fafinan
self.grocery = grocery
self.cash = cash
self.supchild = supchild
self.drive = drive
self.housewrk = housewrk
self.laundry = laundry
self.prepmeal = prepmeal
self.telephon = telephon
self.ownmeds = ownmeds
self.feedself = feedself
self.dress = dress
self.bathe = bathe
self.pubtrans = pubtrans
self.walknbr = walknbr
self.walkfall = walkfall
self.walkhelp = walkhelp
self.comb = comb
self.trnchair = trnchair
self.bed = bed
self.toilet = toilet
self.carehome = carehome
self.indepscl = indepscl
class Cognitive:
# Cognitive Assessments (Cognitive)
def __init__(self, gen1, gen2, gen3, gen4, gen5, gen6):
# Section Specifics
self.gen1 = gen1
self.gen2 = gen2
self.gen3 = gen3
self.gen4 = gen4
self.gen5 = gen5
self.gen6 = gen6
|
num1=int(input("Enter a number"))
num2=int(input("Enter the divisor"))
def is_div(num1,num2):
if num1%num2 ==0:
return True
return False
print(is_div(num1,num2))
|
#************************************ (C) COPYRIGHT 2019 ANO ***********************************#
import sensor, image, time, math, struct
import json
from pyb import LED,Timer
from struct import pack, unpack
import Message,LineFollowing,DotFollowing,ColorRecognition,QRcode,Photography
#Initialize the camera
sensor.reset()
sensor.set_pixformat(sensor.RGB565)  # set the camera module pixel format
sensor.set_framesize(sensor.QQVGA)  # set the camera resolution to 160*120
sensor.skip_frames(time=3000)  # skip frames while the sensor settles
sensor.set_auto_whitebal(False)  # disable auto white balance when tracking colors
clock = time.clock()  # initialize the clock
#Main loop
LastWorkMode = 0  # assumed initial value so the first loop iteration has a previous mode to fall back to
while(True):
    clock.tick()  # start timing this frame
    #Receive UART data
    Message.UartReadBuffer()
    if Message.Ctr.WorkMode==1:  # dot detection
        DotFollowing.DotCheck()
    elif (Message.Ctr.WorkMode==2):  # line detection
        LineFollowing.LineCheck()
    elif Message.Ctr.WorkMode==3:  # color recognition
        ColorRecognition.ColorRecognition()
    elif Message.Ctr.WorkMode==4:  # QR code recognition
        QRcode.ScanQRcode()
    elif Message.Ctr.WorkMode==5:  # take a photo
        Photography.Photography('IMG.jpg',10)
        Message.Ctr.WorkMode = LastWorkMode  # restore the previous work mode after the photo
    LastWorkMode = Message.Ctr.WorkMode
    #Send user data
    #Message.UartSendData(Message.UserDataPack(127,127,32767,32767,65536,65536,65536,65536,65536,65536))
    #Calculate the program run frequency
if Message.Ctr.IsDebug == 1:
fps=int(clock.fps())
Message.Ctr.T_ms = (int)(1000/fps)
print('fps',fps,'T_ms',Message.Ctr.T_ms)
#************************************ (C) COPYRIGHT 2019 ANO ***********************************#
|
from itertools import combinations
input = list(map(int, open('data/09.txt').read().split('\n')))
for i, num in enumerate(input):
if i >= 25 and num not in list(map(lambda x: x[0]+x[1], list(combinations(input[i-25:i], 2)))):
print(num)
|
song="JINGLE Bells jingle Bells Jingle All The Way"
song.upper()
song_words=song.split()
count=0
print(song_words)
for word in song_words:
if(word.startswith("jingle")):
count=count+1
print(count)
name = "pavan"
|
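# LeetCode normally supplies the ListNode class at judge time; this minimal
# stand-in (an assumption, not part of the original submission) lets the file
# run on its own.
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next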
#Iterative Approach
class Solution:
def reverseList(self, head: ListNode) -> ListNode:
if not head:
return
current = head
nxt = head.next
prev = None
while nxt:
current.next = prev
prev = current
current = nxt
nxt = nxt.next
current.next = prev
head = current
return head
#Recursive Approach
class Solution:
def reverseList(self, head: ListNode) -> ListNode:
def rev(prv, cur):
if not cur:
return prv
if cur:
nxt = cur.next
cur.next = prv
return rev(cur, nxt)
return rev(None, head)
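# Tiny smoke test for either Solution above (illustrative values only):
if __name__ == '__main__':
    head = ListNode(1, ListNode(2, ListNode(3)))  # build 1 -> 2 -> 3
    node = Solution().reverseList(head)
    while node:
        print(node.val)  # prints 3, 2, 1
        node = node.next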
|
from spack import *
import sys,os
sys.path.append(os.path.join(os.path.dirname(__file__), '../../common'))
from scrampackage import write_scram_toolfile
class Cython(Package):
url = "http://cern.ch/service-spi/external/MCGenerators/distribution/cython/cython-0.22-src.tgz"
version('0.22', 'f7653aaae762593e13a66f94dadf1835')
depends_on('python', type=('build', 'run'))
extends('python')
def install(self, spec, prefix):
with working_dir(str(spec.version)):
python = which('python')
python('setup.py', 'build')
python('setup.py', 'install', '--prefix=%s' % prefix)
|
"""Construct aggregate text blob from multiple manuals."""
import os
def build_aggregate(lower_bound, upper_bound, start_time, time_elapsed):
"""Construct aggregate text blob from multiple manuals."""
year_list = ['/Users/alextruesdale/Documents/moodys_code/WIP/text_dictionaries/text_output/industrials19{}.txt'.format(year)
for year in range(lower_bound, upper_bound)]
if os.path.exists('working_directory/decade_aggregate.txt'):
os.remove('working_directory/decade_aggregate.txt')
    for append_path in year_list:
        time_elapsed = RunTimeData.interim_print_statement(append_path, start_time, time_elapsed)
        with open(append_path, 'r') as append_file:
            with open('working_directory/decade_aggregate.txt', 'a') as decade_aggregate:
                decade_aggregate.write(append_file.read())
with open('working_directory/decade_aggregate.txt', 'r') as decade_aggregate:
working_file = decade_aggregate.read()
return working_file
|
#!/usr/bin/env python
class Header(object):
"""DNS message header class that has header properties."""
__message_id = 1
__flags = 1
__question_count = 0
__answer_count = 0
__authority_count = 0
__additional_count= 0
@property
def message_id(self):
"""Message id property of dns message header."""
return self.__message_id
@message_id.setter
def message_id(self, value):
"""Setter function of message id property.
:param value: Value of message id.
:type value: Unsigned 16 bit integer.
"""
self.__message_id = value
@property
def flags(self):
"""Flags property of dns message header."""
return self.__flags
@flags.setter
def flags(self, value):
"""Setter function of flags property.
:param value: Value of flags.
:type value: Unsigned 16 bit integer.
"""
self.__flags = value
@property
def question_count(self):
"""Question count property of dns message header."""
return self.__question_count
@question_count.setter
def question_count(self, value):
"""Setter function of question count property.
:param value: Value of question count.
:type value: Unsigned 16 bit integer.
"""
self.__question_count = value
@property
def answer_count(self):
"""Answer count property of dns message header."""
return self.__answer_count
@answer_count.setter
def answer_count(self, value):
"""Setter function of answer count property.
:param value: Value of answer count.
:type value: Unsigned 16 bit integer.
"""
self.__answer_count = value
@property
def authority_count(self):
"""Authority count property of dns message header."""
return self.__authority_count
@authority_count.setter
def authority_count(self, value):
"""Setter function of authority count property.
:param value: Value of authority count.
:type value: Unsigned 16 bit integer.
"""
self.__authority_count = value
@property
def additional_count(self):
"""Additional count property of dns message header."""
return self.__additional_count
@additional_count.setter
def additional_count(self, value):
"""Setter function of additional count property.
:param value: Value of additional count.
:type value: Unsigned 16 bit integer.
"""
self.__additional_count = value
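# A small sketch (an assumption, not part of the original class) of how these six
# 16-bit fields could be serialized into the 12-byte wire-format header using the
# standard struct module with network byte order:
if __name__ == '__main__':
    import struct

    header = Header()
    header.message_id = 0x1234
    header.flags = 0x0100  # recursion desired
    header.question_count = 1
    packed = struct.pack("!6H",
                         header.message_id,
                         header.flags,
                         header.question_count,
                         header.answer_count,
                         header.authority_count,
                         header.additional_count)
    print(packed.hex())  # 12 bytes: 123401000001000000000000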
|
# Generated by Django 3.1.3 on 2021-05-23 15:39
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('project', '0005_plasma'),
]
operations = [
migrations.AddField(
model_name='oxygen',
name='user',
field=models.OneToOneField(default=1, on_delete=django.db.models.deletion.CASCADE, to='auth.user'),
preserve_default=False,
),
migrations.AlterField(
model_name='oxygen',
name='p_alt_number',
field=models.CharField(max_length=30),
),
migrations.AlterField(
model_name='oxygen',
name='p_number',
field=models.CharField(max_length=30),
),
migrations.AlterField(
model_name='patient',
name='p_alt_number',
field=models.CharField(max_length=30),
),
migrations.AlterField(
model_name='patient',
name='p_number',
field=models.CharField(max_length=30),
),
migrations.AlterField(
model_name='plasma',
name='p_age',
field=models.IntegerField(),
),
migrations.AlterField(
model_name='plasma',
name='p_alt_number',
field=models.CharField(max_length=30),
),
migrations.AlterField(
model_name='plasma',
name='p_number',
field=models.CharField(max_length=30),
),
]
|
from rest_service.config import Config
from rest_service.service import RestService
import unittest
import time
import os
import requests
BASIC_CONFIG = '''
[rest-service]
host = '0.0.0.0'
port = 9448
use_ssl = false
cert_pem = '/path/rest/server.pem'
key_pem = '/path/rest/server.key'
use_jwt = false
use_wsgi = false
views = ['rest_service.resources.ExampleView',
'rest_service.resources.ExampleAdminView',
'rest_service.resources.ExampleUserView' ]
using_postgres = false
postgres_host = '1.80.67.2'
postgres_port = 5432
postgres_user = 'postres_test'
postgres_pass = '9C4q&7$ES9X1a1M^gA4369p9C4q&7$ES9X1a1M^gA4369p9C4q&7$ES9X1a1M^gA4369p'
postgres_use_ssl = false
postgres_db = 'rest-service'
using_mongo = false
mongo_host = '1.80.67.2'
mongo_port = 5432
mongo_user = 'postres_test'
mongo_pass = '9C4q&7$ES9X1a1M^gA4369p9C4q&7$ES9X1a1M^gA4369p9C4q&7$ES9X1a1M^gA4369p'
mongo_use_ssl = false
mongo_db = 'rest-service'
'''
class TestConfigLoad(unittest.TestCase):
TMP_FILE = None
TMP_FILENAME = None
def setUp(self):
Config.parse_string(BASIC_CONFIG)
def test_basicLoad(self):
rs = RestService.from_config()
self.assertTrue(len(rs.views) == 3)
def test_basicStartRequest(self):
rs = RestService.from_config()
rs.run()
rsp = requests.get("http://127.0.0.1:9448/example")
self.assertTrue(rsp.content == b'{"result": "example works"}')
rs.stop()
if __name__ == '__main__':
unittest.main()
|
from flask import jsonify , request
def product_delete(collection_name,name):
productlist = collection_name
try :
query = productlist.delete_one({'name':name})
output = {'Status code': 200 ,'Message': 'Product successfully deleted'}
return jsonify({'result':output})
except :
return not_found()
def not_found():
    output = {'Status code': 404, 'Message': 'Not found error occurred: ' + request.url}
return jsonify({'Error':output})
|
# https://github.com/Rapptz/discord.py/blob/async/examples/reply.py
import discord
import requests #allows for the api
import xml.etree.cElementTree as ET
from discord.ext.commands import Bot
from discord.ext.commands import MemberConverter
from TOKEN import TOKEN #gets the token from token.py
from TOKEN import BUNGIEAPIKEY
bot = Bot(command_prefix="?")
@bot.command(pass_context = True)
async def createTeam(ctx, teamName: str):
#teamName should be in the tag
#the author should be inserted as a member of that team
userName = ctx.message.author
tree = ET.parse('bot.xml')
root = tree.getroot()
team = ET.SubElement(root, 'team')
team.set('teamName', teamName)
team.set('leader', str(userName))
for x in range(6):
member = ET.Element("member")
team.append(member)
# Add leader as the first member
team[0].text = str(userName)
tree.write('bot.xml')
await bot.say("Created team with name: " + teamName)
@bot.command(pass_context = True)
async def addTo(ctx, teamName: str, member1: discord.Member):
userName = ctx.message.author
tree = ET.parse('bot.xml')
root = tree.getroot()
memberAdded = False
duplicateMember = False
FailMessage = (str(member1) + ' was not added to the team')
SuccessMessage = (str(member1) + ' was added successfully')
for team in root.iter('team'):
if teamName == team.attrib['teamName']:
for member in team.iter('member'):
"""
Stop on each member
Check if the name equals the input member
if it does match, then we can set a local bool
Our if statement can include that local bool and make sure it equal false
"""
if member.text == str(member1):
duplicateMember = True
memberAdded = False
if member.text == None and duplicateMember == False:
member.text = str(member1)
print(member.text)
memberAdded = True
duplicateMember = True
message = ''
if memberAdded == False:
message = FailMessage
else:
message = SuccessMessage
tree.write('bot.xml')
await bot.say(message)
@bot.command(pass_context = True)
async def removeFrom(ctx, teamName: str, member1: discord.Member):
tree = ET.parse('bot.xml')
root = tree.getroot()
memberRemoved = False
    FailMessage = (str(member1) + ' was not removed from the team')
SuccessMessage = (str(member1) + ' was removed successfully')
for team in root.iter('team'):
if teamName == team.attrib['teamName']:
for member in team.iter('member'):
if member.text == str(member1):
member.text = None
memberRemoved = True
break
message = ''
if memberRemoved == False:
message = FailMessage
else:
message = SuccessMessage
tree.write('bot.xml')
await bot.say(message)
@bot.command(pass_context = True)
async def showTeam(ctx, teamName: str):
tree = ET.parse('bot.xml')
root = tree.getroot()
default_message = 'Team currently constists of: '
member_list = ''
for team in root.iter('team'):
if teamName == team.attrib['teamName']:
for member in team.iter('member'):
                if member.text is not None:  # skip empty member slots
                    member_list += member.text + ' '
await bot.say(default_message + member_list)
@bot.command()
async def xml(type: str, category: str, message: str):
if type == "write":
root = ET.Element('root')
ET.SubElement(root, category).text = message
tree = ET.ElementTree(root)
tree.write('bot.xml')
elif type == "get":
root = tree.getroot()
output = ''
for child in root:
if child.text == message:
output = child.text
if output == '':
await bot.say('Message not found')
else:
await bot.say('Message: ' + output)
else:
await bot.say('Command not recognized')
#does the whole bitcoin thing
@bot.command()
async def invest():
url = "https://api.coindesk.com/v1/bpi/currentprice/BTC.json"
response = requests.get(url)
value = response.json()['bpi']['USD']['rate']
value = value.replace(",", "") #doesn't work if there are commas
if float(value) > 8000:
await bot.say("INVEST! Bitcoin is amazing.")
else:
await bot.say("Are you seriously asking me about bitcoin? Stop it already")
await bot.say("Bitcoin price is: " + value)
@bot.command()
async def jeebs():
userName = "sr_jeebs/"
getUserUrl = "https://www.bungie.net/Platform//Destiny2/SearchDestinyPlayer/2/" + userName
response = requests.get(getUserUrl, headers={"X-API-Key": BUNGIEAPIKEY})
value = response.json()
membershipId = value["Response"][0]["membershipId"] #gets a bungie user membershipId
getStatsUrl = "https://www.bungie.net/Platform/Destiny2/2/Account/" + membershipId + "/Stats/"
response5 = requests.get(getStatsUrl, headers={"X-API-Key": BUNGIEAPIKEY})
value5 = response5.json()
deathsPvp = value5["Response"]["mergedAllCharacters"]["results"]["allPvP"]["allTime"]["deaths"]["basic"]["value"]
deathsPve = value5["Response"]["mergedAllCharacters"]["results"]["allPvE"]["allTime"]["deaths"]["basic"]["value"]
await bot.say("Sean has died a total of " + str(int(deathsPve + deathsPvp)) + " times")
#NOTE: figure out a way to get this activating daily?
#see discord python tutorial part 2
@bot.command()
async def motivate():
url = "http://quotes.rest/qod.json"
response = requests.get(url)
value = response.json()
qod = value['contents']['quotes'][0]['quote']
await bot.say(qod)
@bot.command()
async def weather(zip : str):
apiKey = '13c40a7680f1a39e960be305fa7e46f2'
metric = "&units=metric"
url = "http://api.openweathermap.org/data/2.5/weather?zip=" + zip + "&APPID=" + apiKey + metric
try:
response = requests.get(url)
value = response.json()
temp = (float(value['main']['temp']) * 1.8) + 32
intTemp = int(temp)
await bot.say("The temperature at zip code " + zip + " is " + str(intTemp))
except requests.exceptions.RequestException as e: # This is the correct syntax
await bot.say("Uh, we failed cap'n")
except KeyError:
await bot.say("Nope, didn't find that zip")
#gets a bunch of arguments, instead of just one at a time
@bot.command()
async def test(*args):
await bot.say('{} arguments: {}'.format(len(args), ', '.join(args)))
@bot.command()
async def image():
await bot.say('https://ichef.bbci.co.uk/news/660/cpsprodpb/71E1/production/_99735192_gettyimages-459467912.jpg')
@bot.command()
async def getKd(userName : str):
getUserUrl = "https://www.bungie.net/Platform//Destiny2/SearchDestinyPlayer/2/" + userName
response = requests.get(getUserUrl, headers={"X-API-Key": BUNGIEAPIKEY})
value = response.json()
membershipId = value["Response"][0]["membershipId"] #gets a bungie user membershipId
getStatsUrl = "https://www.bungie.net/Platform/Destiny2/2/Account/" + membershipId + "/Stats/"
response2 = requests.get(getStatsUrl, headers={"X-API-Key": BUNGIEAPIKEY})
value2 = response2.json()
kills = value2["Response"]["mergedAllCharacters"]["results"]["allPvP"]["allTime"]["kills"]["basic"]["value"]
deaths = value2["Response"]["mergedAllCharacters"]["results"]["allPvP"]["allTime"]["deaths"]["basic"]["value"]
killDeath = round((kills/deaths), 2)
await bot.say("PVP kill/death: " + str(killDeath))
@bot.command()
async def efficiency(userName : str):
getUserUrl = "https://www.bungie.net/Platform//Destiny2/SearchDestinyPlayer/2/" + userName
response = requests.get(getUserUrl, headers={"X-API-Key": BUNGIEAPIKEY})
value = response.json()
membershipId = value["Response"][0]["membershipId"] #gets a bungie user membershipId
getStatsUrl = "https://www.bungie.net/Platform/Destiny2/2/Account/" + membershipId + "/Stats/"
response2 = requests.get(getStatsUrl, headers={"X-API-Key": BUNGIEAPIKEY})
value2 = response2.json()
kills = value2["Response"]["mergedAllCharacters"]["results"]["allPvP"]["allTime"]["kills"]["basic"]["value"]
deaths = value2["Response"]["mergedAllCharacters"]["results"]["allPvP"]["allTime"]["deaths"]["basic"]["value"]
assists = value2["Response"]["mergedAllCharacters"]["results"]["allPvP"]["allTime"]["assists"]["basic"]["value"]
efficiency = round(((kills + assists)/deaths), 2)
await bot.say("PVP efficiency: " + str(efficiency))
#this is how you get the context, which is from the first parameter
@bot.command(pass_context = True)
async def test2(ctx):
#gets the author's tag
member = ctx.message.author
#member.mention will tag the author in with it
#Documentation for the member object:
#https://discordpy.readthedocs.io/en/rewrite/api.html#discord.Member
await bot.say("Greetings " + str(member.mention))
@bot.command()
async def helloWorld():
await bot.say("!play Never gonna give you up")
bot.run(TOKEN)
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 9 16:43:49 2020
@author: logun
"""
import matplotlib.pyplot as plt
import cv2
img = cv2.imread('binary_small.png', cv2.IMREAD_GRAYSCALE)
plt.figure(dpi=700)
dims = img.shape
M_0_0 = 0
M_1_0 = 0
M_0_1 = 0
for row in range (dims[0]):
for col in range (dims[1]):
M_0_0 += img[row][col]
M_1_0 += row * img[row][col]
M_0_1 += col * img[row][col]
x_strich = M_1_0 / M_0_0
y_strich = M_0_1 / M_0_0
#return central moment pq
def cm(p,q):
mü_p_q = 0
for row in range (dims[0]):
for col in range (dims[1]):
mü_p_q += ((row - x_strich) ** p) * ((col - y_strich)**q) * img[row][col]
return mü_p_q
#return pq as string for labeling the plot
def pq_str(p,q):
return(str(p)+","+str(q))
#calcs scale invariants
def scale_inv(mü_p_q, mü_0_0, p, q):
return(mü_p_q/mü_0_0**(1+(p+q)/2))
steps=[0,1,2,5]
müs = []
mü_strich =[]
müs_divided =[]
labels = []
for p in steps:
for q in steps:
cur = cm(p,q)
müs.append(cur)
labels.append(pq_str(p,q))
if(p+q>=2):
mü_strich.append(scale_inv(cur,müs[0],p,q))
elif(p==0 and q==0):
mü_strich.append(1)
else:
mü_strich.append(0)
mü_strich_2_0 = scale_inv(cm(2,0),müs[0],2,0)
mü_strich_0_2 = scale_inv(cm(0,2),müs[0],0,2)
mü_strich_1_1 = scale_inv(cm(1,1),müs[0],1,1)
quadr_2_0_0_2 = (mü_strich_2_0-mü_strich_0_2)**2
ellipse_exz = (quadr_2_0_0_2 - 4*mü_strich_1_1)/quadr_2_0_0_2
print(ellipse_exz)
#plt.plot(['0,0', '0,1', '0,2'], [1, 2, 3])
#plt.plot(labels, müs_divided)
|
import dash_bootstrap_components as dbc
from dash import Dash, html
def test_mdcap001_components_as_props(dash_duo):
app = Dash(__name__)
app.layout = html.Div(
[
dbc.Checklist(
[
{"label": html.H2("H2 label"), "value": "h2"},
{
"label": html.A("Link in checklist", href="#"),
"value": "a",
},
],
id="checklist",
),
dbc.RadioItems(
[
{"label": html.H3("on"), "value": "on"},
{"label": html.P("off"), "value": "off"},
],
id="radio-items",
),
dbc.Checkbox(label=html.H4("h4"), value="h4", id="checkbox"),
dbc.RadioButton(label=html.H6("h6"), value="h6", id="radiobutton"),
dbc.Switch(label=html.H5("h5"), value="h5", id="switch"),
]
)
dash_duo.start_server(app)
dash_duo.wait_for_text_to_equal("#checklist h2", "H2 label")
dash_duo.wait_for_text_to_equal("#checklist a", "Link in checklist")
dash_duo.wait_for_text_to_equal("#radio-items h3", "on")
dash_duo.wait_for_text_to_equal("#radio-items p", "off")
dash_duo.wait_for_text_to_equal("#checkbox+label", "h4")
dash_duo.wait_for_text_to_equal("#radiobutton+label", "h6")
dash_duo.wait_for_text_to_equal("#switch+label", "h5")
|
# change gui font size in linux: xrandr --output HDMI-0 --dpi 55
# https://further-reading.net/2018/08/quick-tutorial-pyqt-5-browser/
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWebEngineWidgets import *
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QVBoxLayout, QHBoxLayout, QLabel
import numpy as np
import collections
import pandas as pd
import pathlib
# %%
output_path="/dev/shm"
_code_git_version="b6664c00a1c4eb67090f6149fbe1b39cfba9783d"
_code_repository="https://github.com/plops/cl-py-generator/tree/master/example/19_qt_webengine/source/run_00_browser.py"
_code_generation_time="09:58:10 of Saturday, 2020-05-23 (GMT+1)"
# %% open gui windows
app=QApplication([""])
web=QWebEngineView()
web.load(QUrl("https://youtube.com"))
web.show()
|
# class 2
# 7/19/16
# Playing with conditionals
input_value = input("Enter a number: ")
x = int(input_value)
if x == 2:
print('Your number is equal to 2')
elif x > 100:
print('Your number is greater than 100')
elif x % 5 == 0:
print('Your number is a multiple of five and less than 100')
else:
print('Your number is not equal to 2, is less than 100, and is not a multiple of 5')
print('Done evaluating number.')
# Iterations
input_value = input("Enter a number: ")
y = int(input_value)
while y > 0:
print(y)
y -= 1
print('Done')
# For Loop
numbers = [1,2,3,4]
for num in numbers:
print("The current number is: " + str(num))
# While loop
input_value = False
while input_value != "yes":
input_value = input("Do you like pita chips? ")
print('Good!')
# Function calls
a = 3
print(type(a))
# Define functions
def print_plus_5(x):
print(x + 5)
def plus_5(y):
return y + 5
print(plus_5(4))
def area_of_rectangle(width, height):
return width * height
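# Example call for the function above (arbitrary values):
print(area_of_rectangle(3, 4))  # prints 12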
def rectangle_area():
width = int(input("Enter a width: "))
height = int(input("Enter a height: "))
return width * height
print(rectangle_area())
|
# from re import compile, search
#
# VALID = compile('\([0-9]{3}\) [0-9]{3}-[0-9]{4}')
#
#
# def validPhoneNumber(number):
# """ valid_phone_number == PEP8, forced camelCase by CodeWars """
# check = search(VALID, number)
# return number == check.group(0) if check else False
from re import compile, match
VALID = compile(r'^\(\d{3}\) \d{3}-\d{4}$')
def validPhoneNumber(number):
""" valid_phone_number == PEP8 (forced by CodeWars)
Thanks to 'g964 & werneckpaiva' from CodeWars """
return bool(match(VALID, number))
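# A couple of sample checks (illustrative values only):
print(validPhoneNumber("(123) 456-7890"))  # True
print(validPhoneNumber("(123)456-7890"))   # False, missing the space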
|
#!/usr/bin/env python3
import yaml
from aws_cdk import core
from three_tier_web.three_tier_web_stack import ThreeTierWebStack
# Demonstrates how to externalize configurations on your stack
config = yaml.load(open('./config.yaml'), Loader=yaml.FullLoader)
app = core.App()
# Create a different stack depending on the environment
# Useful if you are just deploying on a single AWS account
# Otherwise use core.Environments
stack_name = "three-tier-web-" + config["environment"]
ThreeTierWebStack(app, stack_name, config)
app.synth()
|
{
'targets': [
{
'target_name': 'NodeCoreAudio',
'sources': [
'NodeCoreAudio/AudioEngine.cpp',
'NodeCoreAudio/NodeCoreAudio.cpp',
],
'include_dirs': [
'<(module_root_dir)/NodeCoreAudio/',
'<(module_root_dir)/portaudio/'
],
"conditions" : [
[
'OS!="win"', {
"libraries" : [
'-lportaudio'
],
'cflags!': [ '-fno-exceptions' ],
'cflags_cc!': [ '-fno-exceptions' ],
'cflags_cc': [ '-std=c++0x' ]
}
],
[
'OS=="win"', {
"include_dirs" : [ "gyp/include" ],
"libraries" : [
'<(module_root_dir)/gyp/lib/portaudio_x86.lib'
],'copies': [
{
'destination': '<(module_root_dir)/build/Debug/',
'files': [
'<(module_root_dir)/gyp/lib/portaudio_x86.dll',
'<(module_root_dir)/gyp/lib/portaudio_x86.lib',
]
}
]
}
]
],
}
]
}
|
from outbreaksim import Simulation
import numpy as np
import matplotlib.pyplot as plt
# Monte Carlo Simulation with varying percentage of the population being stationary
def main():
svals = [0.00, 0.25, 0.5, 0.75, 1]
numRuns = 10000
# Simulation Options
options = {
'N': 100, # Grid Size (N x N)
'M': 1000, # Initial Population Size
'X': 0.01, # % Of initial population that is infected
'Pm': 0.80, # Mobility
'Pd': 0.08, # Death Rate of Virus
'K': 7 # Mean Infection Duration
}
plot_death_rates = []
plot_infection_rates = []
plot_max_infection_rates = []
plot_max_infection_times = []
plot_t_stops = []
for S in svals:
print('Current Value of S: {}'.format(S))
options['S'] = S
death_rates = []
infection_rates = []
max_infection_rates = []
max_infection_times = []
t_stops = []
for run in range(numRuns):
Tmax = 500
mySim = Simulation(options)
mySim.initialize_population()
Tstop = None
for i in range(Tmax):
new_infections = mySim.step()
if not new_infections and mySim.all_healthy():
Tstop = i
break
if Tstop is None:
Tstop = Tmax
death_rates.append(mySim.total_deaths / mySim.M)
infection_rates.append(mySim.total_infected / mySim.M)
max_infection_rates.append(mySim.max_infection_rate)
max_infection_times.append(mySim.max_infection_time)
t_stops.append(Tstop)
plt.hist(100 * np.array(death_rates), bins=60)
plt.title('Death Rate For S = {}'.format(S))
plt.ylabel('Frequency')
plt.xlabel('Total Death Rate (%)')
plt.savefig('tdr_hist_{}.svg'.format(int(S * 100)))
plt.show()
plt.hist(100 * np.array(infection_rates), bins=60)
plt.title('Total Infection Rate For S = {}'.format(S))
plt.ylabel('Frequency')
plt.xlabel('Total Infection Rate (%)')
plt.savefig('tir_hist_{}.svg'.format(int(S * 100)))
plt.show()
plt.hist(100 * np.array(max_infection_rates), bins=60)
plt.title('Max Infection Rate For S = {}'.format(S))
plt.ylabel('Frequency')
plt.xlabel('Max Infection Rate (%)')
plt.savefig('mir_hist_{}.svg'.format(int(S * 100)))
plt.show()
average_total_infection = np.mean(infection_rates) * 100
average_total_death = np.mean(death_rates) * 100
average_max_infection = np.mean(max_infection_rates) * 100
average_max_infection_time = np.mean(max_infection_times)
average_t_stop = np.mean(t_stops)
plot_infection_rates.append(average_total_infection)
plot_death_rates.append(average_total_death)
plot_max_infection_rates.append(average_max_infection)
plot_max_infection_times.append(average_max_infection_time)
plot_t_stops.append(average_t_stop)
print('-----')
print('Average Total Infection Rate: {:.4f} %'.format(average_total_infection))
print('Average Total Death Rate: {:.4f} %'.format(average_total_death))
print('Average Max Infection Rate: {:.4f} %'.format(average_max_infection))
print('Average Max Infection Time: {:.2f}'.format(average_max_infection_time))
print('Average T(Stop): {:.2f}'.format(average_t_stop))
print('-----')
plt.plot(svals, plot_death_rates)
plt.xlabel('S')
plt.ylabel('Total Death Rate (%)')
plt.savefig('total_death_rate.svg')
plt.show()
plt.plot(svals, plot_infection_rates)
plt.xlabel('S')
plt.ylabel('Total Infection Rate (%)')
plt.savefig('total_infection_rate.svg')
plt.show()
plt.plot(svals, plot_max_infection_rates)
plt.xlabel('S')
plt.ylabel('Max Infection Rate (%)')
plt.savefig('max_infection_rate.svg')
plt.show()
plt.plot(svals, plot_t_stops)
plt.xlabel('S')
plt.ylabel('T (Stop)')
plt.savefig('t_stop.svg')
plt.show()
plt.plot(svals, plot_max_infection_times)
plt.xlabel('S')
plt.ylabel('Max Infection Time ')
plt.savefig('max_infection_time.svg')
plt.show()
if __name__ == '__main__':
main()
|
"""
Station master information for registering into MongoDB/AWS DocumentDB
"""
from __future__ import annotations
import copy
import dataclasses
import inspect
import urllib.parse
from datetime import datetime, timezone
from logging import Logger, getLogger, NullHandler
from pathlib import Path
from typing import Optional, Any, Sequence, Mapping, MutableMapping, Union
from pymongo import MongoClient
from pymongo.errors import OperationFailure, ServerSelectionTimeoutError
from Fundamental import InsertError, DBError, JST, DataReadError
logger: Logger = getLogger(__name__)
logger.addHandler(NullHandler())
@dataclasses.dataclass(frozen=True)
class MongoDBConfig:
"""
    MongoDB connection settings
"""
    host: str  # host
    user_name: str  # user name
    password: str  # password
    database: str  # database name
    collection: str  # collection name
    ca_file: Optional[Path] = None  # CA file path
    replica_set: Optional[str] = None  # replica set
    read_preference: Optional[str] = None  # read preference (load-balancing) option
    port: int = 27017  # port
def __post_init__(self):
if self.ca_file is not None:
if not self.ca_file.is_file():
raise DataReadError(f"The specified SSL CA file {str(self.ca_file)} is not found."
f" Please bring it there."
f" ({inspect.currentframe().f_code.co_name} in module {__name__}).")
@property
def uri(self) -> str:
"""
MongoDB URI
Returns:
URI(str)
"""
username: str = urllib.parse.quote_plus(self.user_name)
password: str = urllib.parse.quote_plus(self.password)
return f'mongodb://{username}:{password}@{self.host}:{self.port}/{self.database}'
@property
def pymongo_option_dict(self) -> Mapping[str, Any]:
"""
        Option dictionary for pymongo.
        Returns:
            option dictionary (Mapping[str, Any])
"""
option_dict: MutableMapping[str, Any] = dict()
if self.ca_file is not None:
option_dict["ssl"] = True
option_dict["ssl_ca_certs"] = str(self.ca_file.resolve())
if self.replica_set is not None:
option_dict["replicaset"] = self.replica_set
if self.read_preference is not None:
option_dict["read_preference"] = self.read_preference
return option_dict
def make_mongodb_config(
database_name: str,
collection_name: str
) -> MongoDBConfig:
"""
    MongoDB/DocumentDB configuration.
    Args:
        database_name(str): database name
        collection_name(str): collection name
    Returns:
        DB configuration (MongoDBConfig)
"""
try:
return MongoDBConfig(
host="localhost",
user_name="test",
password="Apollo13",
database=database_name,
collection=collection_name)
except KeyError as e:
raise DataReadError(f"The database setting does not have enough items."
f" Please check the 'database' dictionary in a setting file."
f" ({inspect.currentframe().f_code.co_name} in module {__name__},"
f" message: {e.args}).")
class MongoDB:
"""
    Wrapper class for MongoDB/DocumentDB access.
    Attributes:
        __client (pymongo.MongoClient): MongoDB client
        __collection (pymongo.Collection): collection within the MongoDB database
"""
def __init__(self, config: MongoDBConfig):
"""
        Read the DB settings from the configuration data (usually config.json) and prepare the connection to the DB.
        Args:
            config: configuration with the fields
                "ca_file", "host", "port", "user_name", "password", "database", "collection".
"""
        try:
            # A `with` block would close the client as soon as __init__ returns, so keep it open instead.
            self.__client = MongoClient(config.uri, **config.pymongo_option_dict)
            self.__collection = self.__client[config.database].get_collection(config.collection)
except ServerSelectionTimeoutError as e:
raise DBError(e.args)
@property
def all_documents(self) -> Sequence[Mapping[str, Any]]:
"""
        Return a list of all documents in the collection.
        Returns:
            list of documents
"""
try:
return list(self.__collection.find())
except OperationFailure as e:
raise DBError(e.args)
@property
def first_document(self) -> Optional[Any]:
"""
        Getter for the first document in the collection.
        Returns:
            the first document in the collection (Optional[Mapping[str, Any]])
"""
        try:
            # find_one() already returns a single document (or None), so it can be returned directly.
            return self.__collection.find_one()
except OperationFailure as e:
raise DBError(e.args)
def insert(self, document: Mapping[str, Any]) -> None:
"""
        Insert a new document into the collection.
        Args:
            document(Mapping[str, Any]): document to write
"""
try:
doc_id = identity(document)
if len(self.select(doc_id)) == 0:
result = self.__collection.insert_one(document)
else:
result = self.__collection.replace_one(doc_id, document)
if not result.acknowledged:
raise InsertError(f"write failed for {result.inserted_id}")
except OperationFailure as e:
raise DBError(e.args)
def insert_all(self, documents: Sequence[Mapping[str, Any]]) -> None:
"""
        Insert all of the given documents into the collection.
        Args:
            documents(Sequence[Mapping[str, Any]]): documents to write
"""
try:
documents_with_create_time: Sequence[MutableMapping[str, Any]] = [
dict(copy.deepcopy(document)) for document in documents]
for document in documents_with_create_time:
document["createTime"] = datetime.now(tz=JST)
self.__collection.insert_many(documents_with_create_time)
except OperationFailure as e:
raise DBError(e.args)
def upsert_stations(self, documents: Sequence[Mapping[str, Any]]) -> None:
"""
        Upsert the per-station data documents into the collection.
        Args:
            documents(Sequence[Mapping[str, Any]]): documents to write
"""
try:
for document in documents:
for station_id, station_data in document["data"].items():
self.__collection.update_one(
{r"_id": document["_id"]},
{r"$set": {f"data.{station_id}": station_data},
r"$setOnInsert": {"createTime": datetime.now(timezone.utc)}},
upsert=True)
except OperationFailure as e:
raise DBError(e.args)
def upsert_all(self, documents: Sequence[Mapping[str, Any]]) -> None:
"""
        Upsert all of the given documents.
        Args:
            documents(Sequence[Mapping[str, Any]]): documents to write
"""
try:
for document in documents:
self.__collection.update_one(
{r"_id": document["_id"]},
{r"$set": document,
r"$setOnInsert": {"createTime": datetime.now(timezone.utc)}},
upsert=True)
except OperationFailure as e:
raise DBError(e.args)
def upsert_each(self, documents: Sequence[Mapping[str, Any]]) -> None:
"""
        Upsert the per-station data documents into the collection (replacing each document's "data" field).
        Args:
            documents(Sequence[Mapping[str, Any]]): documents to write
"""
try:
for document in documents:
self.__collection.update_one(
{r"_id": document["_id"]},
{r"$set": {f"data": document["data"]},
r"$setOnInsert": {"createTime": datetime.now(timezone.utc)}},
upsert=True)
except OperationFailure as e:
raise DBError(e.args)
def remove_all(self):
"""
        Delete all documents in the collection.
"""
try:
            self.__collection.delete_many({})  # Collection.remove() no longer exists in recent PyMongo
except OperationFailure as e:
raise DBError(e.args)
def replace_all(self, documents: Sequence[Mapping[str, Any]]) -> None:
"""
        Replace all documents in the collection (delete everything first, then insert everything).
        Args:
            documents(Sequence[Mapping[str, Any]]): documents to write
"""
self.remove_all()
self.insert_all(documents)
def update(self, update_field: Mapping[str, Any]) -> None:
"""
        Replace a field in the collection.
        Args:
            update_field(Mapping[str, Any]): the field to replace (must contain "_id")
"""
try:
            self.__collection.update_one({"_id": update_field["_id"]}, {'$set': update_field})  # the filter must be a query document
except OperationFailure as e:
raise DBError(e.args)
def delete(self, field_key: Mapping[str, Any]) -> None:
"""
コレクションの、指定された"_id"キーを持つフィールドを削除する。
Args:
field_key(Mapping[str, Any]): 削除すべきフィールドの"_id"キー
"""
try:
self.__collection.delete_one(field_key)
except OperationFailure as e:
raise DBError(e.args)
def select(self, field: Mapping[str, Any]) -> Sequence[Any]:
"""
        Return a list of the documents in the collection that match the given field key.
        Args:
            field(Mapping[str, Any]): field key to look up
        Returns:
            list of the matching documents
"""
try:
return list(self.__collection.find(field))
except OperationFailure as e:
raise DBError(e.args)
def identity(input_document: Mapping[str, Any]) -> Union[Mapping[str, Any], Any]:
"""
ドキュメントの"_id"フィールドの辞書を返す
Args:
input_document(Mapping[str, Any]): ドキュメントの辞書データ
Returns:
"_id"フィールドの辞書(Mapping[str, Any])
"""
return {"_id": input_document["_id"]}
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import json
import numpy as np
import pandas as pd
import scipy.sparse as sparse
import data_utils as data
import datasets
import upper_bounds
import defenses
from upper_bounds import hinge_loss, hinge_grad, logistic_grad
attack_label = 'alfa'
percentile = 90
use_train = False
epsilons = [0.005, 0.01, 0.015, 0.02, 0.03]
for dataset_name in ['enron', 'mnist_17']:
if dataset_name == 'enron':
weight_decay = 0.09
elif dataset_name == 'mnist_17':
weight_decay = 0.01
X_train, Y_train, X_test, Y_test = datasets.load_dataset(dataset_name)
if use_train:
raise NotImplementedError
else:
feasible_flipped_mask = defenses.find_feasible_label_flips_in_sphere(X_test, Y_test, percentile)
X_augmented = data.vstack(X_train, X_test[feasible_flipped_mask, :])
Y_augmented = data.vstack(Y_train, -Y_test[feasible_flipped_mask])
print('X_train size: ', X_train.shape)
print('X_augmented size: ', X_augmented.shape)
n = X_train.shape[0]
m = X_augmented.shape[0] - n
X_flipped = X_augmented[n:, :]
Y_flipped = Y_augmented[n:]
gurobi_svm = upper_bounds.GurobiSVM(weight_decay=weight_decay)
gurobi_svm.fit(X_train, Y_train, verbose=True)
orig_losses = gurobi_svm.get_indiv_hinge_losses(X_flipped, Y_flipped)
for epsilon in epsilons:
print('>> epsilon %s' % epsilon)
num_points_to_add = int(np.round(epsilon * n))
q_finder = upper_bounds.QFinder(m=m, q_budget=num_points_to_add)
q = np.ones(m) * (num_points_to_add / m)
for iter_idx in range(100):
old_q = q
sample_weights = np.concatenate((
np.ones(n),
q))
gurobi_svm.fit(X_augmented, Y_augmented, sample_weights=sample_weights, verbose=True)
poisoned_losses = gurobi_svm.get_indiv_hinge_losses(X_flipped, Y_flipped)
loss_diffs = poisoned_losses - orig_losses
q = q_finder.solve(loss_diffs, verbose=True)
print("At iteration %s, q is:" % iter_idx)
print(q)
if np.all(old_q == q):
print('Done, terminating')
break
q_idx = np.where(q)[0][0]
assert q[q_idx] == num_points_to_add
if sparse.issparse(X_flipped):
x = X_flipped[q_idx, :].toarray()
else:
x = X_flipped[q_idx, :]
X_modified, Y_modified = data.add_points(
x,
Y_flipped[q_idx],
X_train,
Y_train,
num_copies=num_points_to_add
)
attack_save_path = datasets.get_target_attack_npz_path(
dataset_name,
epsilon,
weight_decay,
percentile,
attack_label)
if sparse.issparse(X_modified):
X_poison = X_modified[n:, :].asfptype()
else:
X_poison = X_modified[n:, :]
np.savez(
attack_save_path,
X_poison=X_poison,
Y_poison=Y_modified[n:]
)
|
import tweepy
import json
import pymongo
print("hello_tweepy.py Loaded")
from pymongo import MongoClient
client = MongoClient('mongodb://localhost:27017/')
db = client.twit_ids
if db is None:
print ('kaaskoek')
# sirColgrevance
auth = tweepy.OAuthHandler("npb4vI5OhwkXyxY8ixvZ2qAHx","SR10qi0e2nLcl4a2cXDZ8ZNeM3MyaCdm31fVmD0Nm1MYhs8nB0")
auth.set_access_token("971671773933645824-B4U9gTzabJFqB7SjiMWKtjcisqPIvpL","0iDxzvlar7OIDH0RiJ4JL30muWBdzcc6OVudTuxA9MUC9")
#SirConstantine
# auth = tweepy.OAuthHandler("zqievuaw5A4ByVVtrVvcjGd5z","UZJ4X75Na20t80kxlEdfiVcvCrMVv9iFC7LURuU80TWo4yvBzs")
# auth.set_access_token("971671773933645824-ieJQ3Gifvwdtnl13EAgyQ9QFYrd865m","EQccniXzaQ48zKqvOo4jwOlXi50rj5zAF1yHrzFO55P0a")
ids = open("ids.json",'w')
nodelist = open("nodelist.json", 'a')
edgelist = open("edgelist.json",'w')
api = tweepy.API(auth)
source_id = ('209564656')
user = api.get_user(source_id)
ids_list = set()
node_list = set()
edgelist_dict = {'Source': [], 'Target': []}
print(user.id)
print(user.followers_count)
for friend in tweepy.Cursor(api.followers_ids, user_id='209564656').items():
print(friend)
ids_list.add(friend)
node_list.add(friend)
edgelist_dict["Source"].append(str(source_id))
edgelist_dict["Target"].append(str(friend))
json.dump(edgelist_dict,edgelist)
json.dump(list(ids_list),ids)
json.dump(list(node_list),nodelist)
ids.close()
edgelist.close()
nodelist.close()
|
# from selenium import webdriver
# import time
# from selenium.webdriver.remote.webelement import WebElement
# from selenium.common.exceptions import StaleElementReferenceException
#
# def wait(driver):
# elem = driver.find_element_by_tag_name("html")
# count = 0
# while True:
# count += 1
# if count >20 :
# print('timing out after 10 second and returning')
# return
# time.sleep(5)
# try:
# elem == driver.find_element_by_tag_name('html')
# except StaleElementReferenceException:
# return
# driver = webdriver.PhantomJS(executable_path=r'D:\Program Files\phantomjs-2.1.1-windows\bin\phantomjs')
# driver.get('http://pythonscraping.com/pages/javascript/redirectDemo1.html')
# wait(driver)
# print(driver.page_source)
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec
from selenium.common.exceptions import TimeoutException
# Create a headless browser object by calling webdriver's PhantomJS method with the path to the PhantomJS executable
divers = webdriver.PhantomJS(executable_path=r'D:\Program Files\phantomjs-2.1.1-windows\bin\phantomjs')
divers.get('http://pythonscraping.com/pages/javascript/redirectDemo1.html')
try:
body = WebDriverWait(divers,15).until(ec.presence_of_element_located((By.XPATH,"//body[contains(text(),'This is the page you are looking for!')]")))
print(body.text)
except TimeoutException:
print('time out')
|
import re
a = input()
pattern = re.compile(r'[^\w]')  # strip everything except word characters
s = re.sub(pattern, '', a).lower()
if s == s[::-1]:
    print("Palindrome")
else:
    print("Not a palindrome")
|
# https://wikidocs.net/1015
s1 = set([1, 2, 3])
print(s1)
s2 = set('Hello')
print(s2)
s3 = set()
print(s3)
l1 = list(s1)
print(l1)
s1 = set([1, 2, 3, 4, 5, 6])
s2 = set([4, 5, 6, 7, 8, 9])
print(s1 & s2)
print(s1.intersection(s2))
print(s1 | s2)
print(s1.union(s2))
print(s1 - s2)
print(s1.difference(s2))
print(s1)
s1.add(7)
print(s1)
s1.update([8, 9, 10])
print(s1)
s1.remove(10)
print(s1)
|
from .src.graph_pb2 import GraphDef
from .src.node_def_pb2 import NodeDef
from .src.versions_pb2 import VersionDef
from .src.attr_value_pb2 import AttrValue
from .src.tensor_shape_pb2 import TensorShapeProto
from .src import types_pb2 as dt
from collections import defaultdict
import numpy as np
import chainer.variable
import chainer.computational_graph as c
def convert_dtype(dtype):
if dtype == np.float32:
return dt.DT_FLOAT
elif dtype == np.float64:
return dt.DT_DOUBLE
elif dtype == np.int32:
return dt.DT_INT32
elif dtype == np.uint8:
return dt.DT_UINT8
elif dtype == np.int16:
return dt.DT_INT16
elif dtype == np.int8:
return dt.DT_INT8
elif dtype == np.dtype('S1'):
return dt.DT_STRING
else:
raise ValueError('Unsupported type.')
class NodeName:
"""Class that creates the node's name from the list of nodes on the network.
Give unique names to unique nodes on the network.
Attributes:
name_to_id :A dictionary in which the key is the object name and the value
is list of the object IDs.
"""
def __init__(self, nodes):
self.name_to_id = defaultdict(list)
for n in nodes:
name = NodeName.base_name(n)
if not id(n) in self.name_to_id[name]:
self.name_to_id[name].append(id(n))
@staticmethod
def base_name(obj):
name_scope = (obj.name_scope + '/') if hasattr(obj, 'name_scope') else ''
if hasattr(obj, '_variable') and obj._variable is not None:
if isinstance(obj._variable(), chainer.Parameter):
return name_scope + (('Parameter_' + obj.name) if obj.name is not None else 'Parameter')
if isinstance(obj, chainer.variable.VariableNode):
return name_scope + 'Variable_' + obj.label
return name_scope + obj.label
def name(self, obj):
"""Return the name of the object.
Args:
obj :A object on the network
"""
bn = NodeName.base_name(obj)
if len(self.name_to_id[bn]) == 1:
return bn
else:
return bn + '_' + str(self.name_to_id[bn].index(id(obj)))
def make_list_of_nodes(fn):
list_of_nodes = []
g = c.build_computational_graph(fn)
node_name = NodeName(g.nodes)
for n in g.nodes:
inputs = []
for e1, e2 in g.edges:
if e2 == n:
inputs.append(node_name.name(e1))
attr_shape = []
if hasattr(n, 'shape'):
attr_shape = list(n.shape)
dtype = dt.DT_INVALID
if hasattr(n, 'dtype'):
dtype = convert_dtype(n.dtype)
list_of_nodes.append({'name': node_name.name(n),
'op': n.__class__.__name__,
'inputs': inputs,
'attr.shape': attr_shape,
'attr.dtype': dtype})
return list_of_nodes
def make_attr(shape, dtype):
dim_list = [TensorShapeProto.Dim(size=s) for s in shape]
if len(dim_list) == 0:
return None
return {'shape': AttrValue(shape=TensorShapeProto(dim=dim_list)),
'dtype': AttrValue(type=dtype)}
def graph(lastVar):
nodes = []
list_of_nodes = make_list_of_nodes(lastVar)
for node in list_of_nodes:
nodes.append(NodeDef(name=node['name'], op=node['op'],
input=node['inputs'],
attr=make_attr(node['attr.shape'], node['attr.dtype'])))
return GraphDef(node=nodes, versions=VersionDef(producer=22))
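# Hypothetical usage sketch (assumes chainer and the .src protobuf stubs are importable):
#
#   import chainer
#   import chainer.functions as F
#   import numpy as np
#
#   x = chainer.Variable(np.random.rand(1, 3).astype(np.float32))
#   y = F.relu(x)
#   graph_def = graph(y)              # build a GraphDef from the output Variable
#   with open("graph.pbtxt", "w") as f:
#       f.write(str(graph_def))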
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.core.util_rules.external_tool import TemplatedExternalTool
class GrpcPythonPlugin(TemplatedExternalTool):
options_scope = "grpc-python-plugin"
help = "The gRPC Protobuf plugin for Python."
default_version = "1.32.0"
default_known_versions = [
"1.32.0|macos_arm64 |b2db586656463841aa2fd4aab34fb6bd3ef887b522d80e4f2f292146c357f533|6215304",
"1.32.0|macos_x86_64|b2db586656463841aa2fd4aab34fb6bd3ef887b522d80e4f2f292146c357f533|6215304",
"1.32.0|linux_arm64 |9365e728c603d64735963074340994245d324712344f63557ef3630864dd9f52|5233664",
"1.32.0|linux_x86_64|1af99df9bf733c17a75cbe379f3f9d9ff1627d8a8035ea057c3c78575afe1687|4965728",
]
default_url_template = (
"https://binaries.pantsbuild.org/bin/grpc_python_plugin/{version}/"
"{platform}/grpc_python_plugin"
)
default_url_platform_mapping = {
"macos_arm64": "macos/x86_64", # TODO: Build for arm64.
"macos_x86_64": "macos/x86_64",
"linux_arm64": "linux/arm64",
"linux_x86_64": "linux/x86_64",
}
|
# Generated by Django 2.1.3 on 2019-02-08 18:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blogapp', '0002_com'),
]
operations = [
migrations.CreateModel(
name='tegory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
],
),
migrations.RemoveField(
model_name='com',
name='post',
),
migrations.DeleteModel(
name='com',
),
]
|
from selenium import webdriver
import pytest
from selenium.webdriver.common.by import By
@pytest.fixture()
def driver():
_driver = webdriver.Chrome()
yield _driver
_driver.quit()
def login(driver, username, password):
driver.get("http://localhost/litecart/admin/")
driver.find_element_by_name("username").send_keys(username)
driver.find_element_by_name("password").send_keys(password)
driver.find_element_by_name("login").click()
def test_home_task_14(driver):
login(driver, username="admin", password="admin")
driver.find_element(By.XPATH, "//span[@class='name' and contains(., 'Catalog')]").click()
driver.find_element(By.XPATH, "//table[@class='dataTable']//a[contains(., 'Rubber Ducks')]").click()
ducks = driver.find_elements(By.XPATH, "//table[@class='dataTable']//a[@href[contains(., 'product_id')] and "
"contains(., 'Duck')]")
products_list = []
for duck in ducks:
products_list.append(duck.get_attribute("innerText"))
for product in products_list:
driver.find_element(By.XPATH, "//table[@class='dataTable']//a[text()='%s']" % product).click()
message = driver.get_log("browser")
print(message)
driver.find_element(By.XPATH, "//button[@type='button' and @name='cancel']").click()
|
import re
import string
import pandas as pd
data = pd.read_excel('sampledata.xlsx')
data.columns = ['Text']
print(data.head())
print(data.shape)
'''
Clean data
'''
from eunjeon import Mecab
from konlpy.tag import Okt
def preprocword(text):
def clean_text(text):
text = text.replace(".", " ").strip()
text = text.replace("·", " ").strip()
pattern = '[a-zA-Z0-9]'
text = re.sub(pattern=pattern, repl='', string=text)
pattern = '[-=+,#/\:$.@*\"※&%ㆍ』\\‘|\(\)\[\]\<\>`\'…》▲▶△“’_♥■]'
text = re.sub(pattern=pattern, repl='', string=text)
return text
def delete(keyword):
keyword = deleteW(keyword, "!")
keyword = deleteW(keyword, "?")
keyword = deleteW(keyword, "!?")
keyword = deleteW(keyword, "?!")
keyword = deleteW(keyword, ";")
keyword = deleteW(keyword, "~")
keyword = dltdot(keyword)
keyword = clean_text(keyword)
return keyword
    def deleteW(keyword, delword):
        # collapse repeated occurrences of delword (e.g. "!!" -> "!")
        while 1:
            if delword + delword in keyword:
                # print("before: " + keyword)
                keyword = keyword.replace(delword + delword, delword)
                # print("after: " + keyword)
            else:
                break
        return keyword
    def dltdot(keyword):
        # normalise ellipsis characters and long runs of dots to ".."
        while 1:
            if "…" in keyword:
                # print("before: " + keyword)
                keyword = keyword.replace("…", "..")
                # print("after: " + keyword)
            else:
                break
        while 1:
            if "..." in keyword:
                # print("before: " + keyword)
                keyword = keyword.replace("...", "..")
                # print("after: " + keyword)
            else:
                break
        return keyword
    keyword = text  # <- original text goes here
    mecab = Okt()  # Okt tokenizer (variable name kept from an earlier Mecab version)
keyword = delete(str(keyword))
return mecab.morphs(keyword)
data['tokens'] = data['Text'].apply(lambda x: preprocword(x))
data['Text_Final'] = [' '.join(sen) for sen in data['tokens']]
data = data[['Text_Final', 'tokens']]
print(data.head(10))
#
# def remove_punct(text):
# text_nopunct = ''
# text_nopunct = re.sub('['+string.punctuation+']', '', text)
# return text_nopunct
#
# data['Text_Clean'] = data['Text'].apply(lambda x: remove_punct(x))
#from nltk import word_tokenize, WordNetLemmatizer
'''
split data into test and train
'''
import numpy as np
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import load_model
all_test_words = [word for tokens in data["tokens"] for word in tokens]
test_sentence_lengths = [len(tokens) for tokens in data["tokens"]]
TEST_VOCAB = sorted(list(set(all_test_words)))
print(len(TEST_VOCAB))
print("%s words total, with a vocabulary size of %s" % (len(all_test_words), len(TEST_VOCAB)))
print("Max sentence length is %s" % max(test_sentence_lengths))
MAX_SEQUENCE_LENGTH = max(test_sentence_lengths)
tokenizer = Tokenizer(num_words=len(TEST_VOCAB))
tokenizer.fit_on_texts(data["Text_Final"].tolist())
test_sequences = tokenizer.texts_to_sequences(data["Text_Final"].tolist())
test_cnn_data = pad_sequences(test_sequences, maxlen=MAX_SEQUENCE_LENGTH)
for i in test_cnn_data:
print(i)
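# Minimal illustration of the Tokenizer / pad_sequences step used above, on a toy
# English corpus (hypothetical data, independent of the real Korean input):
toy_texts = ["the cat sat", "the cat sat on the mat"]
toy_tokenizer = Tokenizer(num_words=50)
toy_tokenizer.fit_on_texts(toy_texts)
toy_sequences = toy_tokenizer.texts_to_sequences(toy_texts)
print(pad_sequences(toy_sequences, maxlen=6))  # the shorter sentence is left-padded with zeros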
'''
Test CNN
'''
model = load_model("best_model10.h5")
predictions = model.predict(test_cnn_data)
labels = [1, 0]
prediction_labels = []
for p in predictions:
    if np.argmax(p) == 1:
        print("positive")
    else:
        print("negative")
# print(sum(data.Label == prediction_labels)/len(prediction_labels))
#
# print(data.Label.value_counts())
|
#!/usr/bin/env python3
import rospy
import numpy as np
from sensor_msgs.msg import Image,CameraInfo
from cv_bridge import CvBridge, CvBridgeError
import tf
from std_msgs.msg import String,Int64MultiArray
from geometry_msgs.msg import *
from vision_msgs.msg import Detection2DArray
def same_object(labels,Object):
count = 0
for i in labels:
if abs(Object[0].pose.position.x-i[0].pose.position.x)<=0.05 and abs(Object[0].pose.position.y-i[0].pose.position.y)<=0.05:
# print("Object ", Object[2]," is very close to Object ", id)
# final_labels[count] = Object
return False
count+=1
return True
def objects_callback(data):
global boundingboxes
boundingboxes = data
def image_info_callback(data):
global K
K = 1/data.K[4]
def get_depth(img,x,y,cx,cy,fx,fy):
center_x , center_y = cx,cy
#print(type(img))
unit_scaling = 1
constant_x = unit_scaling/fx
constant_y = unit_scaling/fy
depth = img[int(y)][int(x)]
#print(depth)
return [ (x - center_x)*depth*constant_x ,(y - center_y)*depth*constant_y , depth*unit_scaling]
def depth_image_callback(data):
    global K, boundingboxes, labels, check, L, count, final_labels, detect_time, Lock  # Lock is read by id_callback
if(check=="Detect"):
print("If of camera callback")
Lock = True
bridge = CvBridge()
try:
img = bridge.imgmsg_to_cv2(data, "32FC1")
except CvBridgeError as e:
print(e)
if(len(boundingboxes.detections)):
for i in range(len(boundingboxes.detections)):
id = i
totalSampledX,totalSampledY,totalSampledCenter=0,0,0
box_size_x,box_size_y = boundingboxes.detections[i].bbox.size_x,boundingboxes.detections[i].bbox.size_y
center = Point()
xAxis = Point()
yAxis = Point()
center.x = boundingboxes.detections[i].bbox.center.x
center.y = boundingboxes.detections[i].bbox.center.y
center3D = get_depth(img,center.x +0.5 , center.y + 0.5, img.shape[1]/2 - 0.5, img.shape[0]/2 -0.5, 1/K,1/K)
for j in range(int(box_size_x)):
for k in range(4):
xAxis.x, xAxis.y = center.x + j ,center.y + k
axisSampledX = get_depth(img,xAxis.x , xAxis.y , img.shape[1]/2 - 0.5, img.shape[0]/2 -0.5, 1/K,1/K)
yAxis.x, yAxis.y = center.x+ k, center.y - j
axisSampledY = get_depth(img,yAxis.x , yAxis.y , img.shape[1]/2 - 0.5, img.shape[0]/2 -0.5, 1/K,1/K)
axisMeanX =[0,0,0]
axisMeanX[0] += axisSampledX[0]
axisMeanX[1] += axisSampledX[1]
axisMeanX[2] += axisSampledX[2]
totalSampledX+=1
axisMeanY =[0,0,0]
axisMeanY[0] += axisSampledY[0]
axisMeanY[1] += axisSampledY[1]
axisMeanY[2] += axisSampledY[2]
totalSampledY+=1
for i in range(len(axisMeanX)):
axisMeanX[i] = axisMeanX[i]/totalSampledX
axisMeanY[i] = axisMeanY[i]/totalSampledY
n_xAxis = Vector3(axisMeanX[0] - center3D[0], axisMeanX[1] - center3D[1], axisMeanX[2] - center3D[2])
n_yAxis = Vector3(axisMeanY[0] - center3D[0], axisMeanY[1] - center3D[1], axisMeanY[2] - center3D[2])
n_zAxis = np.cross([axisMeanX[0] - center3D[0], axisMeanX[1] - center3D[1],
axisMeanX[2] - center3D[2]],[axisMeanY[0] - center3D[0], axisMeanY[1] - center3D[1], axisMeanY[2] - center3D[2]])
n_zAxis =Vector3(n_zAxis[0],n_zAxis[1],n_zAxis[2])
M =[ [n_xAxis.x, n_yAxis.x,n_zAxis.x,0],
[n_xAxis.y, n_yAxis.y, n_zAxis.y,0],
[n_xAxis.z, n_yAxis.z, n_zAxis.z,0],
[ 0, 0, 0, 1 ]]
for a in range(3):
for b in range(3):
M[a][b] = M[a][b]/(max(M[0][a],M[1][a],M[2][a])- min(M[0][a],M[1][a],M[2][a]))
M = np.asarray(M)
q= tf.transformations.quaternion_from_matrix(M)
q*=tf.transformations.quaternion_from_euler(0, -3.14/2,-3.14/2)
q = q/(sum(q**2)**0.5 )
pose_0 = PoseStamped()
pose_0.header.frame_id= "camera_depth_frame"
pose_0.pose.position.x = center3D[0]
pose_0.pose.position.y = center3D[1]
pose_0.pose.position.z = center3D[2]
pose_0.pose.orientation.x = q[0]
pose_0.pose.orientation.y = q[1]
pose_0.pose.orientation.z = q[2]
pose_0.pose.orientation.w = q[3]
tes = tf.TransformListener()
while True:
try :
p_wrt_odom = tes.transformPose("/map", pose_0)
break
except:
continue
print(p_wrt_odom)
# br = tf.TransformBroadcaster()
# br.sendTransform([p_wrt_odom.pose.position.x,p_wrt_odom.pose.position.y,p_wrt_odom.pose.position.z],
# [p_wrt_odom.pose.orientation.x,p_wrt_odom.pose.orientation.y,p_wrt_odom.pose.orientation.z,p_wrt_odom.pose.orientation.w],
# rospy.Time.now(),
# "object_"+str(id+count),
# "odom")
if "object_"+str(id+count) not in labels:
L.append([p_wrt_odom,id +count])
labels.append("object_"+str(id+count))
flag =1
print("Detection:",detect_time)
if detect_time == 5:
pub = rospy.Publisher("/check", String, queue_size=1)
flag_off = String()
flag_off.data = "stop"
pub.publish(flag_off)
flag=0
#detect_time = 0
detect_time +=flag
else:
print("Else of Depth Image Callback",count)
count = len(labels)
pub = rospy.Publisher('id_array', Int64MultiArray, queue_size=1)
ar = Int64MultiArray()
print(labels)
for i in L:
if same_object(final_labels,i):
final_labels.append(i)
print([str(round(i[0].pose.position.x,2))+"_"+str(round(i[0].pose.position.y,2)) for i in final_labels])
for i in final_labels:
br = tf.TransformBroadcaster()
br.sendTransform([i[0].pose.position.x,i[0].pose.position.y,i[0].pose.position.z],
[i[0].pose.orientation.x,i[0].pose.orientation.y,i[0].pose.orientation.z,i[0].pose.orientation.w],
rospy.Time.now(),
"object_"+str(i[1]),
"map")
ar.data.append(i[1])
pub.publish(ar)
def callback(data):
global check,L
check = data.data
def id_callback(data):
print("In ID Callback")
global trans,L2,Lock
if Lock == True:
ids = data.data
listener = tf.TransformListener()
for i in ids:
while True:
try:
t,r =listener.lookupTransform("/map","object_"+str(i), rospy.Time(0))
break
except:
continue
L2.append([t,r,i])
Lock = False
pub = rospy.Publisher('id_array', Int64MultiArray, queue_size=1)
ar = Int64MultiArray()
for i in L2:
br = tf.TransformBroadcaster()
br.sendTransform(i[0],
i[1],
rospy.Time.now(),
"object_"+str(i[2]),
"odom")
ar.data.append(i[2])
rospy.sleep(0.002)
pub.publish(ar)
def image_callback5(data):
pass
#print(data)
# print("Henlo")
def listener():
rospy.init_node('listener', anonymous=True)
rospy.Subscriber("/objects", Detection2DArray, objects_callback)
rospy.Subscriber("/check", String, callback)
rospy.Subscriber("/camera/depth/image_rect_raw", Image,depth_image_callback )
rospy.Subscriber("/camera/depth/camera_info",CameraInfo, image_info_callback)
rospy.spin()
if __name__ == '__main__':
global labels,L,count,final_labels, detect_time
detect_time = 0
final_labels =[]
L,count,labels = [], 0 ,[]
listener()
|
'''
@Description:
@Date: 2020-05-06 23:47:09
@Author: Wong Symbol
@LastEditors: Wong Symbol
@LastEditTime: 2020-05-29 20:24:23
'''
'''
Interval intersection problem
Problem: quickly find the intersections of two sorted lists of intervals
'''
def intervalIntersection(A, B):
    i, j = 0, 0  # pointers into A and B
    res = []  # collected intersections
    while i < len(A) and j < len(B):
        a1, a2 = A[i][0], A[i][1]
        b1, b2 = B[j][0], B[j][1]
        # the two intervals overlap
        if b2 >= a1 and a2 >= b1:
            # compute the intersection and save it
            start = max(a1, b1)
            end = min(a2, b2)
            res.append([start, end])
        # advance the pointer of the interval that ends first
if b2 < a2:
j += 1
else:
i += 1
return res
A = [[0,2],[5,10],[13,23],[24,25]]
B = [[1,5],[8,12],[15,24],[25,26]]
print(intervalIntersection(A, B))
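# Expected output for the sample A and B above:
# [[1, 2], [5, 5], [8, 10], [15, 23], [24, 24], [25, 25]]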
|
# auth : wdt0818@naver.com, bluehdh0926@gmail.com
#
import os
class PathSearcher():
def __init__(self):
        self.setPath(os.environ['HOMEDRIVE'] + os.environ['HOMEPATH'] + r"\AppData\Local\Packages")
self.is_find = False
self.findPath = ""
self.debug = False
def setPath(self, path):
self.default_path = path
def getPath(self):
return self.default_path
def getFindPath(self):
return self.searchFileList
def getFindDir(self):
self.search(self.default_path)
return self.findDir
def run(self, file, extension='', drive="c", detailPath = ''):
return self.fileSearch(file=file, extension= extension, drive=drive, detailPath=detailPath)
def search(self, path):
try:
for _dir in os.listdir(path):
fullPath = os.path.join(os.path.abspath(path), _dir)
if os.path.isdir(fullPath) and fullPath.find('MicrosoftStickyNotes_') != -1: self.detailSearch(fullPath)
except Exception as e:
print("{0} search, check this {0}".format(__file__, e))
pass
if not self.is_find: self.reSearch()
def detailSearch(self, rootPath):
for (path, dirname, files) in os.walk(rootPath):
for f in files:
fullPath = path + '/' + f
if fullPath.find("plum.sqlite") != -1:
self.findDir = path
self.findPath = fullPath
self.is_find = True
def reSearch(self):
self.is_find = False
print("can't reSearch")
pass
def fileSearch(self, file, extension='', drive="c", detailPath = ''):
self.file = file
self.extension = extension
self.searchFileList = []
if detailPath == '':
self.detailPath = drive + ":/"
else:
self.detailPath = detailPath
        if self.extension == '':
for (path, dirname, files) in os.walk(self.detailPath):
for filename in files:
self.filename = os.path.splitext(filename)[0]
if self.filename == self.file:
if self.debug: print("%s\%s" % (path, filename))
self.searchFileList.append(path + "\\" + filename)
else:
for (path, dirname, files) in os.walk(drive + ":/"):
for filename in files:
self.pullfilename = self.file + "." + self.extension
if filename == self.pullfilename:
if self.debug: print("%s\%s" % (path, filename))
self.searchFileList.append(path + "\\" + filename)
return self.searchFileList
def dirSearch(self, dir, drive="c"):
self.dir = dir.upper()
self.searchDirList = []
for (path, dirnames, files) in os.walk(drive + ":/"):
for dirname in dirnames:
if dirname.upper() == self.dir:
if self.debug: print("%s\%s" % (path, dirname))
self.searchDirList.append(path + "\\" + dirname)
return self.searchDirList
if __name__ == '__main__':
proc = PathSearcher()
# proc.dirSearch(dir="syncn")
    print(proc.run(file="plum", detailPath=r"c:/Users\jis\AppData\Local\Packages"))
# target = proc.run()
# print(target)
|
#!/usr/bin/env /data/mta/Script/Python3.8/envs/ska3-shiny/bin/python
#################################################################################################
# #
# extract_gyro_data.py: find gyro drift during the grating movements #
# #
# author: t. isobe (tisobe@cfa.harvard.edu) #
# #
# last update: Mar 09, 2021 #
# #
#################################################################################################
import os
import sys
import re
import string
import math
import numpy
import unittest
import time
from datetime import datetime
from time import gmtime, strftime, localtime
import Chandra.Time
import Ska.engarchive.fetch as fetch
import scipy
from scipy.optimize import curve_fit
#
#--- plotting routine
#
import matplotlib as mpl
if __name__ == '__main__':
mpl.use('Agg')
from pylab import *
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
import matplotlib.lines as lines
#
#--- reading directory list
#
path = '/data/mta/Script/Gyro/Scripts/house_keeping/dir_list'
with open(path, 'r') as f:
data = [line.strip() for line in f.readlines()]
for ent in data:
atemp = re.split(':', ent)
var = atemp[1].strip()
line = atemp[0].strip()
exec("%s = %s" %(var, line))
#
#--- append paths to private folders to the python path
#
sys.path.append(bin_dir)
sys.path.append(mta_dir)
#
#--- import several functions
#
import mta_common_functions as mcf #---- contains other functions commonly used in MTA scripts
#
#--- temp writing file name
#
rtail = int(time.time())
zspace = '/tmp/zspace' + str(rtail)
#
#--- some data
#
catg_list = ['roll', 'pitch', 'yaw']
grating_data = "/data/mta_www/mta_otg/OTG_filtered.rdb"
#---------------------------------------------------------------------------------------
#-- extract_gyro_data: find gyro drift during the grating movements --
#---------------------------------------------------------------------------------------
def extract_gyro_data():
"""
find gyro drift during the grating movements
input: none
output: plots, table, and html
"""
#
#--- find the last entry date
#
l_time = find_last_entry()
#
#--- find unprocessed data
#
gout = find_grating_insr_retr(l_time)
#
#--- go through each data
#
for k in range(0, len(gout[0])):
#for k in range(0, 5):
action = gout[0][k]
grating = gout[1][k]
start = gout[2][k]
stop = gout[3][k]
print(action + ' : ' + grating + ' : ' + str(start) + ' : ' + str(stop))
#
#--- extract data; fout contains data of roll, pitch, and yaw (time and value)
#
fout = extract_each_drift_data(start, stop)
if len(fout[0][0]) == 0:
continue
for k in range(0, 3) :
#
#--- fit a polynomial to the drift data
#
dout = fit_polinom_to_center(fout[k], start, stop, action, grating, catg_list[k])
if dout == False:
break
[estimates, diff_data, f_time, sec1, sec2, sec3] = dout
#
#--- update data table
#
update_table(sec1, sec2, sec3, action, grating, catg_list[k], start, stop)
#
#--- create drift data plot
#
plot_drift(fout[k], f_time, estimates, start, stop, action, grating, catg_list[k])
#
#--- create deviation data plot
#
plot_dev(f_time, diff_data, action, grating, catg_list[k], start, stop)
#---------------------------------------------------------------------------------------
#-- find_grating_insr_retr: find time when the grating motion happened --
#---------------------------------------------------------------------------------------
def find_grating_insr_retr(l_time):
"""
find time when the grating motion happened
input: none, but read from /data/mta_www/mta_otg/OTG_filtered.rdb
output: action --- a list of movement
    grating --- a list of gratings
tstart --- a list of starting time in seconds from 1998.1.1
tstop --- a list of stopping time in seconds from 1998.1.1
"""
#
#--- read grating motion
#
gdata = read_data_file(grating_data)
gdata = gdata[2:]
action = []
grating = []
tstart = []
tstop = []
for ent in gdata:
atemp = re.split('\s+', ent)
#
#--- only new observations are kept
#
start = convert_to_stime(atemp[2])
if start < l_time:
continue
#
#--- only entries that show a movement are used
#
if atemp[0] in ('INSR', 'RETR'):
action.append(atemp[0])
grating.append(atemp[1])
tstart.append(start)
tstop.append(convert_to_stime(atemp[4]))
return [action, grating, tstart, tstop]
#---------------------------------------------------------------------------------------
#-- extract_each_drift_data: extract roll, pitch, yaw data around movement --
#---------------------------------------------------------------------------------------
def extract_each_drift_data(start, stop):
"""
extract roll, pitch, yaw data around movement
input: start --- starting time in seconds from 1998.1.1
stop --- ending time in seconds from 1998.1.1
output: roll --- a list of arrays of time and data; roll
pitch --- a list of arrays of time and data; pitch
yaw --- a list of arrays of time and data; yaw
"""
#
#--- find a mid point and the range
#
mid = 0.5 * (stop + start)
diff = stop - start
dstart = start - diff
dstop = stop + diff
#
#--- extract data from ska database
#
roll = get_data_from_ska('AOGBIAS1', dstart, dstop)
pitch = get_data_from_ska('AOGBIAS2', dstart, dstop)
yaw = get_data_from_ska('AOGBIAS3', dstart, dstop)
roll = [roll[0] - mid, roll[1] * 1.0e8]
pitch = [pitch[0] - mid, pitch[1] * 1.0e8]
yaw = [yaw[0] - mid, yaw[1] * 1.0e8]
return [roll, pitch, yaw]
#---------------------------------------------------------------------------------------
#-- fit_polinom_to_center: fit a 4th-degree polynomial to the data and find differences
#---------------------------------------------------------------------------------------
def fit_polinom_to_center(fdata, start, stop, action, grating, catg):
"""
    fit a 4th-degree (five-parameter) polynomial to the data and find differences
input: fdata --- a list of arrays of time and data
start --- starting time in seconds from 1998.1.1
stop --- stopping time in seconds from 1998.1.1
action --- movement
grating --- grating
    catg --- category (roll/pitch/yaw)
output: estimates --- a list of model fitted data
    diff_data --- a list of differences between the model and the data
f_time --- a list of time for the selected data period
sec1,sec2,sec3 --- list of [avg, std] of three sections
"""
#
#--- set fitting range
#
tdiff = stop - start
dstart = -1.5 * tdiff
dstop = 1.5 * tdiff
#
#--- limit data to the fitting range
#
t_array = fdata[0]
v_array = fdata[1]
index = (t_array > dstart) & (t_array < dstop)
f_time = t_array[index]
f_data = v_array[index]
#
#--- fit the data
#
paraminitial = [0.0 for i in range(0, 5)]
try:
popt, pcov = curve_fit(p_model, f_time, f_data, p0=paraminitial)
except:
print("Something wrong with curve fitting")
return False
#
#--- create lists of data of estimated fitting and deviations
#
[estimates, diff_data] = compute_difference(f_time, f_data, popt)
#
#--- find avg and std of three sections
#
[sec1, sec2, sec3] = compute_avg_of_three_sections(f_time, diff_data, start, stop)
#
#--- write out the polynomial fitting result
#
write_poli_fitting_result(popt, start, stop, action, grating, catg)
return [estimates, diff_data, f_time, sec1, sec2, sec3]
#---------------------------------------------------------------------------------------
#-- compute_difference: create model fitting results and a list of difference from the data
#---------------------------------------------------------------------------------------
def compute_difference(f_time, f_data, popt):
"""
create model fitting results and a list of difference from the data
input: f_time --- an array of time data
f_data --- an array of data
popt --- a list of parameters
output: estimates --- a list of model fitted data
    diff_data --- a list of differences between the model and the data
"""
[a0, a1, a2, a3, a4] = popt
estimates = []
diff_data = []
for k in range(0, len(f_time)):
est = p_model(f_time[k], a0, a1, a2, a3, a4)
diff = f_data[k] - est
estimates.append(est)
diff_data.append(diff * 1.0e2)
return [estimates, diff_data]
#---------------------------------------------------------------------------------------
#-- compute_avg_of_three_sections: compute avg and std of three sections              --
#---------------------------------------------------------------------------------------
def compute_avg_of_three_sections(f_time, diff_data, start, stop):
"""
    compute avg and std of three sections
input: f_time --- an array of time data
diff_data --- an array of data
start --- starting time in seconds from 1998.1.1
stop --- stopping time in seconds from 1998.1.1
output: [[<avg>, <std>], ...] --- three section avg and std
"""
hdiff = 0.5 * (stop - start)
dstart = -hdiff
dstop = hdiff
index = f_time < dstart
out1 = select_and_avg(index, diff_data)
index = (f_time >= dstart) & (f_time < dstop)
out2 = select_and_avg(index, diff_data)
index = f_time >= dstop
out3 = select_and_avg(index, diff_data)
return [out1, out2, out3]
#---------------------------------------------------------------------------------------
#-- select_and_avg: compute avg and std --
#---------------------------------------------------------------------------------------
def select_and_avg(index, idata):
"""
compute avg and std
input: index --- a list of indices to select data
idata --- a array of data
output: avg --- avg
std --- std
"""
idata = numpy.array(idata)
selected = idata[index]
avg = numpy.mean(selected)
std = numpy.std(selected)
return [avg, std]
#---------------------------------------------------------------------------------------
#---------------------------------------------------------------------------------------
#---------------------------------------------------------------------------------------
def p_model(x, a0, a1, a2, a3, a4):
out = a0 + a1 * x + a2 * x**2 + a3 * x**3 + a4 * x**4
return out
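#
#--- note: p_model is a 4th-degree (five-parameter) polynomial; for example,
#--- p_model(2.0, 1, 1, 1, 1, 1) = 1 + 2 + 4 + 8 + 16 = 31
#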
#---------------------------------------------------------------------------------------
#-- write_poli_fitting_result: print out the polynomial fit parameters                --
#---------------------------------------------------------------------------------------
def write_poli_fitting_result(params, start, stop, action, grating, catg):
"""
    print out the polynomial fit parameters
    input: params --- polynomial fitting parameters
start --- starting time in seconds from 1998.1.1
stop --- stopping time in seconds from 1998.1.1
action --- movement
grating --- grating
catg --- category
output: <data_dir>/Polinomial_results/<GRATING>_<ACTION>/<time>_pol_fit_result
"""
line = str(int(0.5 * (start+ stop)))
for ent in params:
line = line + "\t%2.4e" % ent
line = line + '\n'
#
#--- create output directory if it does not exist
#
out = data_dir + 'Polinomial_results/'
cmd = 'mkdir -p ' + out
os.system(cmd)
#
#--- print the result
#
out = out + 'poli_fit_' + catg.lower() + '_' + grating.lower() + '_' + action.lower()
with open(out, 'a') as fo:
fo.write(line)
#---------------------------------------------------------------------------------------
#-- convert_to_stime: convert fractional year to Chandra time                         --
#---------------------------------------------------------------------------------------
def convert_to_stime(tent):
"""
    convert fractional year to Chandra time
    input: tent --- time in fractional year (e.g. 2000160.04125300 = year 2000, day 160, plus a fraction of a day)
output: out --- time in seconds from 1998.1.1
"""
atemp = re.split('\.', str(tent))
tv = atemp[0]
year = tv[0] + tv[1] + tv[2] + tv[3]
fday = tv[4] + tv[5] + tv[6]
stime = str(year) + ':' + str(fday) + ':00:00:00'
sv = float('0.' + atemp[1]) * 86400.0
stime = Chandra.Time.DateTime(stime).secs + sv
return stime
#---------------------------------------------------------------------------------------
#-- update_table: update data table --
#---------------------------------------------------------------------------------------
def update_table(sec1, sec2, sec3, action, grating, catg, start, stop):
"""
update data table
input: sec1 --- [avg, std] of before the movement
sec2 --- [avg, std] of during the movement
sec3 --- [avg, std] of after the movement
action --- movement
grating --- grating
catg --- category
start --- start time in seconds from 1998.1.1
stop --- stop time in seconds from 1998.1.1
output: updated <data_dir>/gyro_drift_<catg>
"""
line = str(int(0.5 * (start + stop))) + '\t'
line = line + '%2.3f+/-%2.3f\t' % (sec1[0], sec1[1])
line = line + '%2.3f+/-%2.3f\t' % (sec2[0], sec2[1])
line = line + '%2.3f+/-%2.3f\t' % (sec3[0], sec3[1])
line = line + '%2.3f\t' % comp_ratio(sec1[1], sec2[1])
line = line + '%2.3f\t' % comp_ratio(sec3[1], sec2[1])
line = line + '%2.3f\t' % comp_ratio(sec1[1], sec3[1])
#line = line + grating + '\t' + action + '\t'
line = line + '%3.1f' % (stop - start)
line = line + '\n'
outfile = data_dir + 'gyro_drift_' + catg + '_' +grating.lower() + '_' + action.lower()
if os.path.isfile(outfile):
with open(outfile, 'a') as fo:
fo.write(line)
else:
with open(outfile, 'w') as fo:
fo.write("#time before during after ")
fo.write("b/d a/d b/a duration\n")
fo.write(line)
#---------------------------------------------------------------------------------------
#---------------------------------------------------------------------------------------
#---------------------------------------------------------------------------------------
def comp_ratio(a, b):
if b == 0:
return 0.0
else:
return a / b
#---------------------------------------------------------------------------------------
#-- plot_drift: plotting gyro drift data --
#---------------------------------------------------------------------------------------
def plot_drift(data_set, f_time, estimates, start, stop, action, grating, catg):
"""
plotting gyro drift data
input: ftime --- a list of time
vdata --- y data
action --- movement
grating --- grating
catg --- category
start --- start time in seconds from 1998.1.1
stop --- stop time in seconds from 1998.1.1
output: <web_dir>/Individual_plots/<GRATING>_<ACTION>/<time>/gyro_drift_<catg>.png
"""
#
#--- center the time axis on the middle of the movement
#
ctime = 0.5 * (start + stop)
diff = stop - start
dstart = start - ctime
dstop = stop - ctime
tarray = data_set[0]
darray = data_set[1]
#
#--- set sizes
#
fsize = 18
color = 'blue'
color2 = 'green'
color3 = 'red'
marker = '.'
psize = 8
lw = 3
width = 10.0
height = 7.0
resolution = 200
[xmin, xmax, ymin, ymax] = set_range(dstart, dstop, data_set[1])
#
#--- output file name
#
outdir = web_dir + 'Individual_plots/' + grating.upper()
outdir = outdir + '_' + action.upper() + '/' + str(int(ctime)) + '/'
cmd = 'mkdir -p ' + outdir
os.system(cmd)
outname = outdir + 'gyro_drift_' + catg + '.png'
#
#--- close everything opened before
#
plt.close('all')
#
#--- set font size
#
mpl.rcParams['font.size'] = fsize
props = font_manager.FontProperties(size=fsize)
#
#--- set plotting range
#
ax = plt.subplot(111)
ax.set_autoscale_on(False)
ax.set_xbound(xmin,xmax)
ax.set_xlim(xmin=xmin, xmax=xmax, auto=False)
ax.set_ylim(ymin=ymin, ymax=ymax, auto=False)
#
#--- plot data
#
plt.plot(tarray, darray, color=color, marker=marker, markersize=psize, lw=0)
#
#--- plot fitted data
#
plt.plot(f_time, estimates, color=color2, marker=marker, markersize=0, lw=5, alpha=0.6)
#
#--- add start and stop lines
#
plt.plot([0, 0],[ymin, ymax], color='black', marker=marker, markersize=0, linestyle='--', lw=2)
plt.plot([xmin, xmax],[0, 0], color='black', marker=marker, markersize=0, linestyle='--', lw=2)
plt.plot([dstart, dstart],[ymin, ymax], color=color3, marker=marker, markersize=0, lw=4, alpha=0.3)
plt.plot([dstop, dstop], [ymin, ymax], color=color3, marker=marker, markersize=0, lw=4, alpha=0.3)
#
#--- add label
#
plt.xlabel('Time (sec)')
plt.ylabel(catg.capitalize())
#
#--- save the plot in png format
#
fig = matplotlib.pyplot.gcf()
fig.set_size_inches(width, height)
plt.tight_layout()
plt.savefig(outname, format='png', dpi=resolution)
plt.close('all')
#---------------------------------------------------------------------------------------
#-- plot_dev: plotting deviation data --
#---------------------------------------------------------------------------------------
def plot_dev(ftime, vdata, action, grating, catg, start, stop):
"""
plotting deviation data
input: ftime --- a list of time
vdata --- y data
action --- movement
grating --- grating
catg --- category
start --- start time in seconds from 1998.1.1
stop --- stop time in seconds from 1998.1.1
output: <web_dir>/Individual_plots/<GRATING>_<ACTION>/<time>/deviation_<catg>.png
"""
#
#--- center the time axis on the middle of the movement
#
ctime = 0.5 * (start + stop)
dstart = start - ctime
dstop = stop - ctime
#
#--- set sizes
#
fsize = 18
color = 'blue'
color2 = 'green'
color3 = 'red'
marker = '.'
psize = 8
lw = 3
width = 10.0
height = 7.0
resolution = 200
[xmin, xmax, ymin, ymax] = set_range(dstart, dstop, vdata, chk =1)
if ymax > abs(ymin):
ymin = -1.0 * ymax
else:
ymax = abs(ymin)
ymin = - ymax
#
#--- output file name
#
outdir = web_dir + 'Individual_plots/' + grating.upper() + '_'
outdir = outdir + action.upper() + '/' + str(int(ctime)) + '/'
cmd = 'mkdir -p ' + outdir
os.system(cmd)
outname = outdir + 'deviation_' + catg + '.png'
#
#--- close everything opened before
#
plt.close('all')
#
#--- set font size
#
mpl.rcParams['font.size'] = fsize
props = font_manager.FontProperties(size=fsize)
#
#--- set plotting range
#
ax = plt.subplot(111)
ax.set_autoscale_on(False)
ax.set_xbound(xmin,xmax)
ax.set_xlim(xmin=xmin, xmax=xmax, auto=False)
ax.set_ylim(ymin=ymin, ymax=ymax, auto=False)
#
#--- plot data
#
plt.plot(ftime, vdata, color=color, marker=marker, markersize=psize, lw=0)
#
#--- add start and stop lines
#
plt.plot([0,0 ],[ymin, ymax], color='black', marker=marker, markersize=0, linestyle='--', lw=2)
plt.plot([xmin, xmax],[0, 0], color='black', marker=marker, markersize=0, linestyle='--', lw=2)
plt.plot([dstart, dstart],[ymin, ymax], color=color3, marker=marker, markersize=0, lw=4, alpha=0.3)
plt.plot([dstop, dstop], [ymin, ymax], color=color3, marker=marker, markersize=0, lw=4, alpha=0.3)
#
#--- add label
#
plt.xlabel('Time (sec)')
plt.ylabel(catg.capitalize())
#
#--- save the plot in png format
#
fig = matplotlib.pyplot.gcf()
fig.set_size_inches(width, height)
plt.tight_layout()
plt.savefig(outname, format='png', dpi=resolution)
plt.close('all')
#---------------------------------------------------------------------------------------
#-- set_range: set plotting range ---
#---------------------------------------------------------------------------------------
def set_range(start, stop, ydata, chk=0):
"""
set plotting range
input: start --- start time in seconds from 1998.1.1
stop --- stop time in seconds from 1998.1.1
ydata --- ydata
chk --- if chk > 0; take the range to 3 times of the
stop - start range
output: [xmin, xmax, ymin, ymax]
"""
tdiff = stop - start
cent = 0.5 * (start + stop)
xmin = start - 1.2 * tdiff
xmax = stop + 1.2 * tdiff
ymin = min(ydata)
ymax = max(ydata)
diff = ymax - ymin
if diff == 0:
yavg = numpy.mean(ydata)
ymin = yavg - 0.1
ymax = yavg + 0.1
else:
ymin -= 0.1 * diff
ymax += 0.1 * diff
return [xmin, xmax, ymin, ymax]
#---------------------------------------------------------------------------------------
#-- create_drift_html_page: create a web page for a given plot group --
#---------------------------------------------------------------------------------------
def create_drift_html_page(start, stop, grating, action, savef):
"""
create a web page for a given plot group
input: start --- movement starting time in seconds from 1998.1.1
stop --- movement stopping time in seconds from 1998.1.1
grating --- grating
    action --- movement direction (INSR or RETR)
savef --- a list of lists of fitting results of three sections of three categories
output: <web_dir>/Individual_plots/<GRATING>_<ACTION>/<time in seconds>/<gating>_<action>_<catg>_<time>.html
"""
#
#--- find mid time
#
stime = int(0.5 * (start+ stop))
ltime = Chandra.Time.DateTime(stime).date
#
#--- set the directory which keeps plots
#
outdir = web_dir + 'Individual_plots/' + grating.upper() + '_' + action.upper() + '/'
#
#--- create table of plots
#
line = '<table border=0>\n'
for k in range(0, 3):
#
#--- create stat results table for the html page
#
results = create_result_table(savef[k])
#
#--- category of this data
#
catg = catg_list[k]
#
#--- names of two plots for this category
#
        outname1 = outdir + str(stime) + '/' + 'gyro_drift_' + grating + '_' + action + '_' + catg + '_'\
            + str(int(0.5 * (start + stop))) + '.png'
outname2 = outdir + str(stime) + '/' + 'deviation_' + grating + '_' + action + '_' + catg + '_'\
+ str(int(0.5 * (start + stop))) + '.png'
line = line + '<tr>\n'
line = line + '<th><img src="' + outname1 + '" width=40%"></th>\n'
line = line + '<th><img src="' + outname2 + '" width=40%"></th>\n'
line = line + '<td>' + results + '</td>\n'
line = line + '</tr>\n'
line = line + '</table>'
#
#--- read the template
#
tname = house_keeping + 'drift_plot_template'
    f = open(tname, 'r')
template = f.read()
f.close()
#
#--- insert the data
#
template = template.replace('#GRAT#', grating)
template = template.replace('#ACT#', action)
    template = template.replace('#STIME#', str(stime))
template = template.replace('#LTIME#', ltime)
template = template.replace('#TABLE#', line)
#
#--- output file name
#
outdir = web_dir + 'Individual_plots/' + grating.upper() + '_' + action.upper() + '/'
cmd = 'mkdir -p ' + outdir
os.system(cmd)
outname = outdir + grating + '_' + action + '_' + catg + '_' + str(int(0.5 * (start + stop))) + '.html'
#
#--- print out the result
#
fo = open(outname, 'w')
    fo.write(template)
fo.close()
#---------------------------------------------------------------------------------------
#-- create_result_table: create the result table --
#---------------------------------------------------------------------------------------
def create_result_table(data):
"""
create the result table
input: data --- a list of list of fitting results
[[<before avg>, <before std>], [<during avg>, <during std>], [..., ...]]
output: line --- a html element
"""
#
#--- before the movement
#
bavg = data[0][0]
bstd = data[0][1]
#
#--- during the movement
#
mavg = data[1][0]
mstd = data[1][1]
#
#--- after the movement
#
aavg = data[2][0]
astd = data[2][1]
#
#--- create data table
#
line = '<ul>\n'
line = line + '<li>Before: %2.3f+/-%2.3f' % (bavg, bstd) + '</li>\n'
line = line + '<li>During: %2.3f+/-%2.3f' % (mavg, mstd) + '</li>\n'
line = line + '<li>After: %2.3f+/-%2.3f' % (aavg, astd) + '</li>\n'
line = line + '<li>Before/During: %2.3f' % comp_ratio(bstd, mstd) + '</li>\n'
line = line + '<li>After/During: %2.3f' % comp_ratio(astd, mstd) + '</li>\n'
    line = line + '<li>Before/After: %2.3f' % comp_ratio(bstd, astd) + '</li>\n'
line = line + '</ul>\n'
return line
#---------------------------------------------------------------------------------------
#-- find_last_entry: find the last entry date --
#---------------------------------------------------------------------------------------
def find_last_entry():
"""
find the last entry date
input: none but read from <data_dir>/gyro_drift_yaw
    output: ltime --- time in seconds from 1998.1.1
"""
save = []
for tail in ['hetg_insr', 'hetg_retr', 'letg_insr', 'letg_retr']:
try:
ifile = data_dir + 'gyro_drift_yaw_' + tail
data = read_data_file(ifile)
atemp = re.split('\s+', data[-1])
ltime = float(atemp[0])
except:
ltime = 0
save.append(ltime)
ltime = max(save)
return ltime
#---------------------------------------------------------------------------------------
#-- get_data_from_ska: extract data from ska database --
#---------------------------------------------------------------------------------------
def get_data_from_ska(msid, tstart, tstop):
"""
extract data from ska database
input: msid --- msid
tstart --- starting time in seconds from 1998.1.1
tstop --- stopping time in seconds from 1998.1.1
output: time --- a list of time
data --- a list of data
"""
out = fetch.MSID(msid, tstart, tstop)
time = out.times
data = out.vals
return [time, data]
#---------------------------------------------------------------------------------------
#-- read_data_file: read a data file --
#---------------------------------------------------------------------------------------
def read_data_file(ifile, spliter = '', remove=0, skip=''):
"""
read a data file
input: infile --- input file name
    spliter --- to get a list of column lists, provide a splitter regex, e.g. '\t+'
    remove --- whether to delete the file after reading it. default=0: no
    skip --- skip lines whose first character matches this marker. default: '' --- don't skip
output: data --- either a list of data lines or a list of lists
"""
try:
f = open(ifile, 'r')
data = [line.strip() for line in f.readlines()]
f.close()
except:
return []
if remove > 0:
mcf.rm_files(ifile)
if spliter != '':
atemp = re.split(spliter, data[0])
alen = len(atemp)
save = []
for k in range(0, alen):
save.append([])
for ent in data:
if skip != '':
if ent[0] == skip:
continue
atemp = re.split(spliter, ent)
for k in range(0, alen):
try:
val = float(atemp[k])
except:
val = atemp[k].strip()
save[k].append(val)
return save
else:
return data
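#
#--- hypothetical usage (illustrative only; the file name below is made up):
#---   cols = read_data_file('/tmp/example_table.dat', spliter='\s+', skip='#')
#---   cols[0] would then hold the first column, cols[1] the second, and so on
#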
#---------------------------------------------------------------------------------------
#-- TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST --
#---------------------------------------------------------------------------------------
class TestFunctions(unittest.TestCase):
"""
testing functions
"""
#---------------------------------------------------------------------------------------
def test_find_grating_insr_retr(self):
[action, grating, tstart, tstop] = find_grating_insr_retr(0)
k = 100
print(str(action[k]) + '<-->' + str(grating[k]) + '<-->' + str(tstart[k]) + '<-->' + str(tstop[k]))
#---------------------------------------------------------------------------------------
def test_extract_each_drift_data(self):
begin = 646978280
end = 646978300
out = extract_each_drift_data(begin, end)
print(str(out[0][0][:10]))
#---------------------------------------------------------------------------------------
def test_fit_polinom_to_center(self):
begin = 646978280
end = 646978300
out = extract_each_drift_data(begin, end)
start = 646978291
stop = 646978314
        #--- action/grating/category values below are placeholders for the test
        [estimates, diff_data, f_time, sec1, sec2, sec3] = fit_polinom_to_center(out[0], start, stop, 'INSR', 'HETG', 'roll')
print(str(estimates[:10]))
print(str(diff_data[:10]))
print(str(sec1))
print(str(sec2))
print(str(sec3))
#---------------------------------------------------------------------------------------
def test_convert_to_stime(self):
ttime = "2000160.04125300"
stime = convert_to_stime(ttime)
print("TIME FORMAT CONVERSION: " + str(ttime) + '<--->' + str(stime))
#---------------------------------------------------------------------------------------
if __name__ == "__main__":
if len(sys.argv) >= 2:
if sys.argv[1].lower() == 'test':
#
#--TEST TEST TEST TEST TEST TEST ----------------------------
#
sys.argv = [sys.argv[0]]
unittest.main()
#
#-- REGULAR RUN ----------------------------
#
else:
extract_gyro_data()
else:
extract_gyro_data()
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# According to:
# http://liangjiabin.com/blog/2015/04/leetcode-best-time-to-buy-and-sell-stock.html
class Solution(object):
def maxProfit(self, prices):
"""
:type prices: List[int]
:rtype: int
"""
if not prices:
return 0
days_count = len(prices)
pre_profit = [0] * days_count
post_profit = [0] * days_count
# Get max profit when buy and sell Stock only once in pre ith day.
min_buy = prices[0]
for i in range(1, days_count):
min_buy = min(min_buy, prices[i])
pre_profit[i] = max(pre_profit[i-1], prices[i]-min_buy)
# Get max profit when buy and sell Stock only once in post (n-i) days.
max_sell = prices[-1]
for j in range(days_count-2, -1, -1):
max_sell = max(max_sell, prices[j])
post_profit[j] = max(post_profit[j+1], max_sell-prices[j])
# Find the max profit when buy and sell Stock only twice.
# First in the pre kth day, and second in the post (n-k) days
max_profit = 0
for i in range(days_count):
max_profit = max(max_profit, pre_profit[i] + post_profit[i])
return max_profit
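# Example run (not part of the original snippet); expected values worked out by hand:
# [2,8,3,9] -> buy 2 / sell 8 (+6), then buy 3 / sell 9 (+6) = 12
if __name__ == '__main__':
    s = Solution()
    print(s.maxProfit([2, 8, 3, 9]))        # 12
    print(s.maxProfit([6, 5, 4, 3, 2, 1]))  # 0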
"""
[]
[1,2]
[1,3,5]
[2,8,3,9]
[2,8,3,9,1,2]
[2,8,3,9,1,9]
[6,5,4,3,2,1]
"""
|
from django.test import TestCase
from . import models
class AdsTestCase(TestCase):
def test_ads(self):
for i in range(20):
o = models.Ad(
title="Puppy #{}".format(i + 1),
description="lovely puppy, brown and happy",
posted_by="Udi",
contact_email="udi@10x.org.il",
price=10 + i * 5,
status=models.Ad.Status.NEW if i % 2 else models.Ad.Status.APPROVED
)
o.full_clean()
o.save()
        self.assertEqual(models.Ad.objects.count(), 20)
|
import collections
def chain_map():
a = {'a': 'a', 'c': 'c', 'num': 0}
b = {'b': 'b', 'c': 'cc'}
c = {'b': 'bbb', 'c': 'ccc'}
print(a)
a.update(b)
print(a)
a.update(c)
print(a)
print('---- chain map ----')
m = collections.ChainMap(a, b, c)
print(m)
print('--- m.maps --')
print(m.maps)
m.maps.reverse()
print('--- m.maps.reverse()[0] ---')
print(m.maps[0])
m.maps.insert(0, {'c': 'cccc'})
print('--- 0th insert ---')
print(m.maps)
del m.maps[0]
print(m.maps)
print('--- final m ---')
print(m)
print("m['c']: ", m['c'])
def default_dict():
l = ['a', 'a', 'a', 'b', 'b', 'c']
"""
d = {}
for word in l:
d.setdefault(word, 0) # if word doesn't exist, set 0
d[word] += 1
print(d)
"""
d = collections.defaultdict(int)
for word in l:
d[word] += 1
print(d)
d_set = collections.defaultdict(set)
s = [('red', 1), ('blue', 2), ('red', 3), ('blue', 4), ('red', 1), ('blue', 4)]
for k, v in s:
d_set[k].add(v)
print(d_set)
if __name__ == '__main__':
# chain_map()
default_dict()
|
#Variables
matriz = []
# Functions
def mat(n):
for i in range (n):
matriz.append([])
for j in range (n):
matriz[i].append(0)
return matriz
def llenar(n):
matriz = mat(n)
for x in range (n):
for y in range (n):
            matriz[x][y] = float(input('Value of [' + str(x) + '][' + str(y) + '] = '))
def gauss(n):
for z in range (n-1):
for x in range(1, n-z):
if (matriz[z][z] != 0 ):
p = matriz[x+z][z] / matriz[z][z]
for y in range (n):
matriz[x+z][y] = matriz[x+z][y] - (matriz[z][y]*p)
def det(n):
deter=1
for x in range (n):
deter=matriz[x][x]*deter
    print('\nThe determinant of the matrix is = ', deter)
def im(n):
    print("\nResulting matrix:")
for i in range (n):
print (" ",matriz[i][:])
# Main program
n = int(input('Size of the matrix: '))
llenar(n)
im(n)
gauss(n)
det(n)
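# Worked check (hypothetical, non-interactive): for the 2x2 matrix
#   [[2, 1],
#    [4, 3]]
# elimination gives [[2, 1], [0, 1]], so the determinant is 2 * 1 = 2.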
|
from db_patient import Patient
import sendgrid
import os
import datetime
from sendgrid.helpers.mail import *
def existing_beats(patient_id):
"""
checks whether there are existing heart beats for a patient
:param patient_id: integer ID of patient to check if there is beat data
:return: True if patient has recorded heart rates, False if not
"""
for user in Patient.objects.raw({"_id": patient_id}):
patient = user
hr_list = patient.heart_rate
if hr_list == []:
return False
else:
return True
def validate_patient(patient_id):
"""
check whether patient exists in database
:param patient_id: integer ID to look for in database
:return: True if patient exists in database, False if not
"""
for user in Patient.objects.raw({"_id": patient_id}):
patient = user
try:
patient
return True
except NameError:
return False
def email_alert(patient_id):
"""
Send an email to the attending physician of tachycardic patient
:param patient_id: integer ID of tachycardic patient
:return: None
"""
for user in Patient.objects.raw({"_id": patient_id}):
patient = user
attendant_email = patient.attending_email
sg = sendgrid.SendGridAPIClient(apikey=os.environ.get('SENDGRID_API_KEY'))
from_email = Email("tachycardic_alert@gmail.com")
to_email = Email(attendant_email)
subject = "Patient Tachycardic Alert"
content = Content("text/plain", "Patient {0}".format(patient_id))
mail = Mail(from_email, subject, to_email, content)
response = sg.client.mail.send.post(request_body=mail.get())
return True
def is_tachycardic(patient_id):
"""
check whether a patient is tachycardic
:param patient_id: integer ID of patient to check tachycardia status
:return: True if patient is tachycardic, False if patient is not
"""
for user in Patient.objects.raw({"_id": patient_id}):
patient = user
age = patient.user_age
heart_rate_list = patient.heart_rate
heart_rate = heart_rate_list[-1]
tachycardic = False
if age < 1:
return False
elif age < 3:
if heart_rate > 151:
tachycardic = True
elif age < 5:
if heart_rate > 137:
tachycardic = True
elif age < 8:
if heart_rate > 133:
tachycardic = True
elif age < 12:
if heart_rate > 130:
tachycardic = True
elif age < 15:
if heart_rate > 119:
tachycardic = True
else:
if heart_rate > 100:
tachycardic = True
if tachycardic is True:
return True
return False
def hr_avg_since(pat_id, start_time):
"""
find the heart rate for a given patient since a certain time
:param pat_id: integer ID of patient to find average heart rate of
:param start_time: date time string to find heart rate since
:return: average heart rate since time given by datetime string
"""
for user in Patient.objects.raw({"_id": pat_id}):
patient = user
heart_rate_list = patient.heart_rate
hr_times_list = patient.h_r_times
hr_to_average = []
parse_date = datetime.datetime.strptime(start_time, "%Y-%m-%d %H:%M:%S.%f")
index = 0
for date in hr_times_list:
if date > parse_date:
hr_to_average.append(heart_rate_list[index])
index = index + 1
if len(hr_to_average) == 0:
return 0
avg_hr = sum(hr_to_average)/len(hr_to_average)
return avg_hr
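# Minimal illustration of the timestamp format expected by hr_avg_since
# (hypothetical value; no database access involved):
#
#   datetime.datetime.strptime("2018-11-01 10:15:30.000000", "%Y-%m-%d %H:%M:%S.%f")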
|
from django.conf.urls import url
from rest_framework_jwt.views import obtain_jwt_token, verify_jwt_token
from api import views
urlpatterns = [
url(r'^auth/token/?$', obtain_jwt_token,
name="obtain-token"),
url(r'^auth/token-verify/?$', verify_jwt_token,
name="verify-token"),
url(r'^categories/?$', views.CategoryListView.as_view(),
name='apicategory'),
url(r'^home/?$',
views.DashBoardView.as_view(),
name='dashboard'),
url(r'^join/?$', views.JoinSocialClubView.as_view(),
name='join'),
url(r'^unjoin/?$', views.UnjoinSocialClubView.as_view(),
name='unjoin'),
url(r'^joined/?$', views.JoinedClubsView.as_view(),
name='joined'),
url(r'^categories/?$', views.CategoryListView.as_view(),
name='apicategory'),
url(r'^category/(?P<pk>[0-9]+)/events/?$', views.SocialClubDetail.as_view(),
name="user-detail"),
url(r'^attend/?$', views.AttendSocialEventView.as_view(),
name='attend'),
url(r'^unsubscribe/?$', views.UnsubscribeEventView.as_view(),
name='unsubscribe'),
url(r'^subscribed/?$', views.SubscribedEventsView.as_view(),
name='subscribed'),
url(r'^create/event/?$', views.CreateEventView.as_view(),
name='create_event'),
url(r'^event/(?P<pk>[0-9]+)/?$', views.EventDetail.as_view(),
name='event_detail'),
url(r'^oauthcallback/?$', views.OauthCallback.as_view(),
name='oauth_callback'),
url(r'^slack/actions/?$', views.SlackActionsCallback.as_view(),
name='slack_action_callback'),
url(r'^slack/authorize/?$', views.LaunchSlackAuthorization.as_view(),
name="authorizeslack"),
url(r'^slack/code/?$', views.SlackTokenCallback.as_view(),
name='add_slack_token')
]
|
import uuid
import adal
from settings import settings
from office365.directory.group_profile import GroupProfile
from office365.graph_client import GraphClient
def acquire_token():
authority_url = 'https://login.microsoftonline.com/{0}'.format(settings['tenant'])
auth_ctx = adal.AuthenticationContext(authority_url)
token = auth_ctx.acquire_token_with_username_password(
'https://graph.microsoft.com',
settings['user_credentials']['username'],
settings['user_credentials']['password'],
settings['client_credentials']['client_id'])
return token
def create_group_for_team(groups, name):
grp_properties = GroupProfile(name)
grp_properties.securityEnabled = False
grp_properties.mailEnabled = True
grp_properties.groupTypes = ["Unified"]
target_group = groups.add(grp_properties)
return target_group
def print_failure(retry_number):
print(f"{retry_number}: trying to create a team...")
client = GraphClient(acquire_token)
group_name = "Team_" + uuid.uuid4().hex
result = client.teams.create(group_name)
client.execute_query_retry(max_retry=5, failure_callback=print_failure)
print("Team has been provisioned")
|
import graphene
from ingredients.serializers import IngredientSerializers, CategorySerializers
class CategoryInput(graphene.InputObjectType):
name = graphene.String()
class IngredientInput(graphene.InputObjectType):
id = graphene.ID()
name = graphene.String()
category = graphene.ObjectType
|
# coding=utf8
from lib.models_others import CorpModel
from lib.corp import Corp
import re, urllib.parse, json, time
class ZJRCCorp(Corp):
def __init__(self):
config = {
'info_from': '浙江人才网',
'corplist_url': 'http://www.zjrc.com/Services/Jobs/GetSearch.ashx',
'corp_url': 'http://www.zjrc.com/Jobs/Comp/{corp_code}',
'corplist_post_data': None,
'corp_post_data': None,
'corplist_reg': re.compile(r"/Jobs/Comp/(?P<corp_code>[^ ']+)' target='_blank'>(?P<name>[^<]+)<", re.S),
'corp_regs': (
re.compile(r'<div class="div_net_comper">电子信箱:<a[^>]+>[^<]*</a><br>单位地址:(?P<addr>[^<]+)<br>联系电话:(?P<contact_tel_no>[^联]+)联系人:(?P<contact_person>[^传<]+)', re.S),
),
'commit_each_times': 30,
'has_cookie': True,
'charset': 'utf8',
'model': CorpModel,
}
super().__init__(**config)
self.opener.headers = {
'X-Requested-With': 'XMLHttpRequest',
'Referer': 'http://www.zjrc.com/Jobs/Search?js_keytype=1&js_key=%E7%BB%8F%E7%90%86&js_area=000000',
}
self.corplist_post_data_format = 'hs_keytype=1&hs_key=%E7%BB%8F%E7%90%86&hs_keyin=&hs_area=000000&hs_areaname=%E8%AF%B7%E9%80%89%E6%8B%A9%E5%9C%B0%E5%8C%BA&hs_sit=00&hs_sitname=%E8%AF%B7%E9%80%89%E6%8B%A9%E5%B2%97%E4%BD%8D%E7%B1%BB%E5%88%AB&hs_industry=00&hs_industryname=%E8%AF%B7%E9%80%89%E6%8B%A9%E8%A1%8C%E4%B8%9A&hs_expires=0&hs_expiresname=%E4%B8%8D%E9%99%90&hs_edu=00&hs_eduname=%E5%85%A8%E9%83%A8&hs_ctype=00&hs_ctypename=%E5%85%A8%E9%83%A8&hs_sex=A&hs_sexname=%E5%85%A8%E9%83%A8&hs_salary=0&hs_salaryname=%E5%85%A8%E9%83%A8&hs_wtype=0&hs_wtypename=%E5%85%A8%E9%83%A8&hs_st=0&hs_stname=%E5%85%A8%E9%83%A8&hs_page={page_no}&hs_list=0&hs_sorttype=&hs_record=30'
self.pages = 100
def get_next_page_url(self):
for page in range(0, self.pages):
self.corplist_post_data = self.corplist_post_data_format.format(page_no=page)
yield self.corplist_url
def fetch_corp(self, corp_info=None):
time.sleep(3)
return super().fetch_corp(corp_info)
def report(self):
fields = (
            ('Name', 'name'),
            ('Address', 'addr'),
            ('Contact person', 'contact_person'),
            ('Phone number', 'contact_tel_no'),
            ('Company type', 'nature'),
            ('Industry', 'industry'),
            ('Company size', 'scale'),
            ('Information source', 'info_from'),
            ('Updated date', 'insert_date'),
            ('Link', self.corp_url),
)
super().report(fields)
|
#!/usr/bin/env python
#------------------------------------------------------------------------------
# Copyright 2008-2011 Istituto Nazionale di Fisica Nucleare (INFN)
#
# Licensed under the EUPL, Version 1.1 only (the "Licence").
# You may not use this work except in compliance with the Licence.
# You may obtain a copy of the Licence at:
#
# http://www.osor.eu/eupl/european-union-public-licence-eupl-v.1.1
#
# Unless required by applicable law or agreed to in
# writing, software distributed under the Licence is
# distributed on an "AS IS" basis,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied.
# See the Licence for the specific language governing
# permissions and limitations under the Licence.
#------------------------------------------------------------------------------
"""
WNoDeS process running on the bait.
"""
import ConfigParser
import SocketServer
import commands
import fnmatch
import logging
import optparse
import os
import pickle
import socket
import sys
import threading
import time
#import MySQLdb
#import re
#import random
#import subprocess
#import zipfile
#import logging.handlers
__short_name__ = os.path.basename(os.path.splitext(sys.argv[0])[0])
__dir_name__ = os.path.dirname(sys.argv[0])
try:
from wnodes.utils import utils
from wnodes.utils.utils import synchronized
try:
from wnodes.utils import wsocket
except ImportError:
sys.exit("%s: python module 'wsocket' not found." % __short_name__)
except ImportError:
sys.exit("%s: python module 'utils' not found." % __short_name__)
class WnodBait(wsocket.ClientRequestHandler):
""" Define the behavior of the WNoDeS bait. """
# lock used to rotate log-files
LOCK_LOG = threading.Lock()
# Lock used in order to access to the shared data structure batchBaitStatus
LOCK_BBS = threading.Lock()
# Lock used in order to serialize job operation
LOCK_RESOURCES = threading.Lock()
# Lock used in order to access to the shared data structure virtualMachines
LOCK_VM = threading.Lock()
# Lock used in order to access to the shared data structure vmImageAvailabe
LOCK_IMAGE = threading.Lock()
# Lock used in order to update status file wnodes_current_vm.log
LOCK_CURRENT_STATUS = threading.Lock()
# lock used to rotate log-files
LOCK_LOGFILE = threading.Lock()
def __init__(self, NS_HOST, NS_PORT):
wsocket.ClientRequestHandler.__init__(self, NS_HOST, NS_PORT)
self.NS_HOST = NS_HOST
self.NS_PORT = NS_PORT
# Gets configuration from WNoDeS Name Server
# and puts inside self.config
ret_code = self._retrieveConfig(self.NS_HOST, self.NS_PORT)
if ret_code[0] == 1:
sys.exit(ret_code[1])
try:
# import batch commands
if self.config['BATCH_SYSTEM_TYPE'].lower() == 'lsf':
try:
#from wnodes.utils.batch import lsfCmds
from wnodes.utils.batch_lsf import LsfCommands
except ImportError:
sys.exit("%s: python module 'batch' not found."
% __short_name__)
#self.batchCmd = lsfCmds('/etc/profile.d/lsf.sh')
self.batchCmd = LsfCommands(self.config['LSF_PROFILE'])
            elif self.config['BATCH_SYSTEM_TYPE'].lower() in ('pbs', 'torque'):
try:
#from wnodes.utils.batch import pbsCmds
from wnodes.utils.batch_pbs import PbsCommands
except:
sys.exit("%s: python module 'batch' not found."
% __short_name__)
#self.batchCmd = pbsCmds()
self.batchCmd = PbsCommands()
else:
sys.exit('Batch system not supported')
except KeyError:
sys.exit('batchSystemType variable is not defined ' +
'in the configuration array')
# Define host-name
self.localhost = socket.gethostname().split('.')[0]
self.localdomain = '.'.join(socket.gethostname().split('.')[1:])
self.DEFAULT_LOGGING_INFO = logging.INFO
# set up logging
self.wnodes_logger = logging.getLogger('WnodLogger')
self.wnodes_logger.setLevel(self.DEFAULT_LOGGING_INFO)
formatter = logging.Formatter("%(asctime)s - " +
"%(levelname)s - " +
"%(message)s")
handler = utils.CompressedRotatingFileHandler(
os.path.join(
__dir_name__,
"/var/log/wnodes/bait",
self.config['LOG_FILE_NAME']),
maxBytes=self.config['MAX_LOG_FILE_SIZE'],
backupCount=self.config['MAX_COUNT_LOG_FILE'])
handler.setFormatter(formatter)
self.wnodes_logger.addHandler(handler)
# verify config
        self._verifyMandatoryConfigParams()
# Define HV host-name
if self.config['ENABLE_MIXED_MODE'].upper() == 'NO':
msg = {'WhoIsTheHV': [self.localhost]}
OUTPUT = self.sendRequest(self.NS_HOST, self.NS_PORT, msg)
if OUTPUT[0] == 0:
OUTPUT = OUTPUT[1]
if OUTPUT[0] == 0:
self.HV_HOST = OUTPUT[1]
else:
sys.exit('I cannot retrieve HV HOSTNAME')
else:
sys.exit('I cannot retrieve HV HOSTNAME')
else:
self.HV_HOST = self.localhost
# Define HV port
self.HV_PORT = self.config['HV_PORT']
# Define vmMandatoryParameters
# MEM, CPU, BANDWIDTH
# STORAGE parameter mandatory only with LVM enabled
if self.config['USE_LVM'].upper() == 'YES':
self.vmMandatoryParameters = ['MEM', 'CPU', 'BANDWIDTH', 'STORAGE']
else:
self.vmMandatoryParameters = ['MEM', 'CPU', 'BANDWIDTH']
# Initialize local data structures
self.batchJobs = {}
self.virtualMachines = {}
self.vmImagesAvailable = {}
self.batchBaitStatus = ['START', '', 0, 0, 0, {}]
self.localDNS = {}
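        # Layout of the shared structures (inferred from how they are indexed in
        # this file; positions not listed appear to be unused placeholders):
        #   batchJobs[jobid]        -> [0] status, [1] VM id, [2] VM hostname,
        #                              [3] vmParameters, [4] last status timestamp,
        #                              [5] action (NEW/RECYCLE/REGENERATE),
        #                              [7] total time spent, [9] pre-exec status,
        #                              [10] pre-exec details, [11] {status: count},
        #                              [12] reservation id
        #   virtualMachines[vm_id]  -> [0] batch job id, [1] VM hostname,
        #                              [2] [power, availability], [3] vmParameters
        #   batchBaitStatus         -> [0] status, [1] comment, [2] timestamp,
        #                              [5] {resource: available amount}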
# Evaluate the wnodes_preexec return status
try:
self.PX_FAILED_RETURN_STATUS = int(self.config[
'PX_FAILED_RETURN_STATUS'])
except KeyError:
sys.exit('Cannot find PX_FAILED_RETURN_STATUS option, ' +
'in the bait configuration file. ' +
'Please add it in the name-server folder')
except:
sys.exit('Cannot change type to PX_FAILED_RETURN_STATUS variable' +
'(%s)' % str(self.config['PX_FAILED_RETURN_STATUS']) +
'. Please use a number')
def _changeBaitStatus(self, *arguments):
self.updateLog(' Function: %s . Variables: %s' % (utils.whoami(),
str(arguments)))
self.batchBaitStatus[0] = arguments[0]
self.batchBaitStatus[1] = arguments[1]
self.batchBaitStatus[2] = time.time()
def _checkResourcesAvailability(self, *arguments):
# Add all mandatory parameters, if the list provided is not complete
# Evaluate the availability of each parameter and cast it as an integer
# Initialize self.batchJobs in order to reserve some resources (?)
self.updateLog(' Function: %s . Variables: %s' % (utils.whoami(),
str(arguments)))
vmParameters = arguments[0]
batchJobId = vmParameters['BATCH_JOBID']
EXCEED = False
for param in self.vmMandatoryParameters:
# Check if every mandatory parameter has been specified
if not param in vmParameters:
self.updateLog('The following mandatory parameter: %s ' % param
+ 'has not been specified. Use default value',
'info', batchJobId)
vmParameters[param] = int(self.config['DEFAULT_VM_%s' % param])
# Force a cast to integer for each parameter value
vmParameters[param] = int(vmParameters[param])
# Check if every parameter does not exceed
if vmParameters[param] > int(self.config['MAX_VM_%s' % param]):
EXCEED = True
self.updateLog('%s' % str(vmParameters), 'info')
if not EXCEED:
self.updateLog('VM requested parameters do not exceed MAX values.',
'info')
self.updateLog('Check whether there are still available resources',
'info')
RESOURCE_AVAILABLE = self.accessResourceAvailable('GET',
vmParameters)
if RESOURCE_AVAILABLE[0] == 0:
                self.updateLog('There are still resources available. ' +
'I can RESERVE them to the VM %s' % batchJobId,
'info', batchJobId)
self.batchJobs[batchJobId] = ['RESERVED', '', '', '',
time.time(), '', '', 0, 0, 2, '',
{'RESERVED': 1}, '']
return [0, vmParameters]
else:
# One or more parameter exceeded the available resources
return [1, 'There are not enough resources ' +
'to execute the virtual resources']
else:
# One or more parameter exceeded the max values
self.updateLog('VM request parameters exceed one (or more) ' +
                           'of the MAX values. See the details below.', 'warning')
for k in self.vmMandatoryParameters:
if vmParameters[k] > int(self.config['MAX_VM_%s' % k]):
self.updateLog('Parameters WNoDeS_VM_%s:%s > MAX_VM_%s:%s'
% (k, str(vmParameters[k]),
k, str(self.config['MAX_VM_%s' % k])),
'error', batchJobId)
return [1, 'requested vm parameters exceeded the max values']
def _get_id_from_hostname(self, hostname):
"""
returns the virtual machine id from its hostname
"""
self.updateLog(' Function: %s .' % utils.whoami() +
' Variables: %s' % hostname, 'debug')
for key in self.virtualMachines:
if self.virtualMachines[key][1].split('.')[0] \
== hostname.split('.')[0]:
return key
def _get_jobId_from_hostname(self, hostname):
"""
returns the job_id related to the hostname
of a running VM.
"""
self.updateLog(' Function: %s .' % utils.whoami() +
' Variables: %s' % hostname, 'debug')
for key in self.virtualMachines:
if self.virtualMachines[key][1].split('.')[0] \
== hostname.split('.')[0]:
return self.virtualMachines[key][0]
@synchronized(LOCK_VM)
def _lookingForVirtualMachine(self, *arguments):
"""
return:
[self.vm_action_regenerate: string
FIRST_VM_ID: string(?)
REQUEST_NEW_ADDRESS: bool)
RESERVED_VM: list]
"""
self.updateLog(' Function: %s .' % utils.whoami() +
' Variables: %s' % str(arguments))
vmParameters = arguments[0]
batchJobId = vmParameters['BATCH_JOBID']
REGENERATE = 'NO'
mandatoryParam = self.vmMandatoryParameters[:]
if not vmParameters['TYPE'].upper() == 'BATCH_REAL':
mandatoryParam.append('IMG')
mandatoryParam.append('NETWORK_TYPE')
vm = [ID for ID in self.virtualMachines.keys()
if self.virtualMachines[ID][2][0] == 'OFF'
and self.virtualMachines[ID][2][1] == 'AVAILABLE']
self.updateLog('VM = %s' % str(vm))
if len(vm) > 0:
for ID in vm:
for param in mandatoryParam:
if (self.virtualMachines[ID][3][param] ==
vmParameters[param]):
REGENERATE = 'NO'
else:
REGENERATE = 'YES'
break
if REGENERATE == 'NO':
                    self.updateLog('Found a VirtualMachine:%s '
% self.virtualMachines[ID][0] +
                                   'with the same parameters. I can RECYCLE it',
'info', batchJobId)
self.virtualMachines[ID][2] = ['OFF', 'RESERVED']
self.batchJobs[batchJobId][5] = 'RECYCLE'
return ['RECYCLE', ID, False, []]
else:
pass
if REGENERATE == 'YES':
            self.updateLog('Did not find a VirtualMachine ' +
                           'with the same parameters. ' +
                           'I have to REGENERATE one or more available VMs',
'info',
batchJobId)
self.batchJobs[batchJobId][5] = 'REGENERATE'
if not vmParameters['TYPE'].upper() == 'BATCH_REAL':
mandatoryParam.remove('IMG')
mandatoryParam.remove('NETWORK_TYPE')
if len(vm) == 1:
VM_ID_TO_REGENERATE = vm[0]
FOUND_ONE_VM = True
elif len(vm) > 1:
FOUND = True
for ID in vm:
VM_ID_TO_REGENERATE = ID
for param in mandatoryParam:
if (self.virtualMachines[ID][3][param] >=
vmParameters[param]):
if not FOUND:
FOUND = True
else:
FOUND = False
break
if FOUND:
FOUND_ONE_VM = True
break
else:
FOUND_ONE_VM = False
if FOUND_ONE_VM:
self.updateLog('Found a VirtualMachine:%s '
% self.virtualMachines[
VM_ID_TO_REGENERATE][1] +
'which matches requested parameters.' +
'I Can REGENERATE it',
'info', batchJobId)
self.virtualMachines[VM_ID_TO_REGENERATE][2] = ['OFF',
'RESERVED']
if not vmParameters['TYPE'].upper() == 'BATCH_REAL':
if (self.virtualMachines[VM_ID_TO_REGENERATE][3]
['NETWORK_TYPE'] ==
vmParameters['NETWORK_TYPE']):
return ['REGENERATE', VM_ID_TO_REGENERATE, False, []]
else:
return ['REGENERATE', VM_ID_TO_REGENERATE, True, []]
else:
return ['REGENERATE', VM_ID_TO_REGENERATE, False, []]
else:
self.updateLog('In order to satisfy this request I have ' +
'to destroy more than one VirtualMachine',
'info', batchJobId)
RESERVED_RESOURCES = {}
RESERVED_VM = []
REQUEST_NEW_ADDRESS = False
for param in mandatoryParam:
RESERVED_RESOURCES[param] = 0
for ID in vm:
for param in mandatoryParam:
RESERVED_RESOURCES[param] = \
(RESERVED_RESOURCES[param] +
self.virtualMachines[ID][3][param])
RESERVED_VM.append(ID)
self.updateLog('This VM %s ' % str(ID) +
                                       'will be destroyed ' +
'in order to free resources',
'info', batchJobId)
for param in mandatoryParam:
if (RESERVED_RESOURCES[param] >=
vmParameters[param]):
ENOUGH_RESOURCES = True
else:
ENOUGH_RESOURCES = False
break
if ENOUGH_RESOURCES:
break
else:
pass
for ID in RESERVED_VM:
if not vmParameters['TYPE'].upper() == 'BATCH_REAL':
if (self.virtualMachines[ID][3]['NETWORK_TYPE'] ==
vmParameters['NETWORK_TYPE']):
FIRST_VM_ID = ID
RESERVED_VM.remove(ID)
REQUEST_NEW_ADDRESS = False
break
else:
REQUEST_NEW_ADDRESS = True
else:
REQUEST_NEW_ADDRESS = True
if REQUEST_NEW_ADDRESS:
FIRST_VM_ID = RESERVED_VM.pop(0)
if not vmParameters['TYPE'].upper() == 'BATCH_REAL':
                    self.updateLog('These VMs %s will be destroyed '
% str(RESERVED_VM), 'info', batchJobId)
                    self.updateLog('This VM %s will be regenerated'
% str(FIRST_VM_ID), 'info', batchJobId)
self.virtualMachines[FIRST_VM_ID][2] = ['OFF', 'RESERVED']
for ID in RESERVED_VM:
self.virtualMachines[ID][2] = ['OFF', 'DESTROYING']
return ['REGENERATE', FIRST_VM_ID,
REQUEST_NEW_ADDRESS, RESERVED_VM]
else:
pass
else:
self.updateLog('There are no VirtualMachines available, ' +
' so I create a new one', 'info', batchJobId)
self.batchJobs[batchJobId][5] = 'NEW'
return ['CREATE', '', 'YES', []]
def _retrieveConfig(self, ns_host, ns_port):
"""
Retrieve an updated BAIT configuration
sending a request to the NameServer
"""
msg = {'get_config_from_file': ['BAIT']}
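        # The exchange with the name server is line-based (one line each way),
        # so newlines inside the pickled payload are escaped with the '1%2'
        # marker here and restored when the reply is unpickled below.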
msg = str(pickle.dumps(msg).replace('\n', '1%2'))
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((ns_host, int(ns_port)))
sock.sendall(msg + '\n')
socketfile = sock.makefile('r', 0)
response = socketfile.readline().strip()
sock.shutdown(2)
sock.close()
try:
self.config = pickle.loads(response.replace('1%2', '\n'))[1]
return [0, 'Configuration updated']
# return [0, 'Configuration of host: % updated'
# % self.localhost]
except pickle.UnpicklingError:
msg = 'Data received cannot be loaded'
return [1, msg]
except Exception:
                msg = ('Data received cannot be loaded for this reason:' +
' %s, %s, %s' % ((sys.exc_info()[:])))
return [1, msg]
except socket.error:
msg = ('Server HOST %s:%s is unreachable. '
% (ns_host, str(ns_port)) +
'I cannot retrieve BAIT configuration')
return [1, msg]
    def _verifyMandatoryConfigParams(self):
if type(self.config) == type({}):
self.mP = []
self.mandatoryParams = [
'BATCH_SYSTEM_TYPE',
'HV_PORT',
'BAIT_PORT',
'LOG_FILE_NAME',
'MAX_LOG_FILE_SIZE',
'MAX_COUNT_LOG_FILE',
'VM_UNREACH_TIMEOUT',
'STATUS_RETRY_COUNT',
'SCHEDULING_INTERVAL',
'RESERVATION_LENGTH',
'USE_LVM',
'TYPE',
'PX_FAILED_RETURN_STATUS',
'ENABLE_MIXED_MODE',
'MAX_VM_MEM',
'MAX_VM_BANDWIDTH',
'MAX_VM_CPU',
'MAX_VM_STORAGE',
'MIN_VM_MEM',
'MIN_VM_BANDWIDTH',
'MIN_VM_CPU',
'MIN_VM_STORAGE',
]
try:
                if self.config['BATCH_SYSTEM_TYPE'].lower() == 'lsf':
self.mandatoryParams.append('LSF_PROFILE')
else:
pass
except KeyError:
pass
            self.mP = [p for p in self.mandatoryParams if p not in self.config]
if len(self.mP) > 0:
sys.exit('found missing mandatory config parameters %s' % str(self.mP))
else:
self.updateLog('configuration is reliable')
else:
sys.exit('config is not formatted as expected %s' % str(self.config))
@synchronized(LOCK_RESOURCES)
def accessResourceAvailable(self, *arguments):
ACTION = arguments[0]
vmParameters = arguments[1]
self.updateLog(' Function: %s . Variables: %s' % (utils.whoami(),
str(arguments)))
# Initialized local data structures
if self.config['USE_LVM'].upper() == 'YES':
self.vmMandatoryParameters = ['MEM', 'CPU', 'BANDWIDTH', 'STORAGE']
else:
self.vmMandatoryParameters = ['MEM', 'CPU', 'BANDWIDTH']
if ACTION == 'PUT':
# Released the resources defined in vmParameters
batchJobId = vmParameters['BATCH_JOBID']
self.updateLog('Releasing resources')
OPEN = 'YES'
self.updateLog('Put1:Resources Available: %s'
% str(self.batchBaitStatus[5]),
'info', batchJobId)
for i in self.vmMandatoryParameters:
self.batchBaitStatus[5][i] = (self.batchBaitStatus[5][i] +
vmParameters[i])
self.updateLog('The following parameter: %s=%s '
% (i, vmParameters[i]) +
'is being released', 'info', batchJobId)
if (self.batchBaitStatus[5][i] <
int(self.config['MIN_VM_%s' % i])):
OPEN = 'NO'
if OPEN == 'YES':
self.updateBaitBatchStatus(
['OPEN', 'Resources are greater than the MIN values'])
self.updateLog('Put2:Resources Available: %s'
% str(self.batchBaitStatus[5]),
'info', batchJobId)
return [0, 'Resources released']
elif ACTION == 'GET':
# Remove the resources defined in vmParameters
batchJobId = vmParameters['BATCH_JOBID']
self.updateLog('Acquiring resources')
RESOURCE_AVAILABLE = True
self.updateLog('Get: Resources Available: %s'
% str(self.batchBaitStatus[5]),
'info', batchJobId)
#Update available memory
msg = {'getResourceAvailable': [None]}
RESOURCES = self.sendRequest(self.HV_HOST, self.HV_PORT, msg)
if RESOURCES[0] == 0:
RESOURCES = RESOURCES[1]
self.batchBaitStatus[5]['MEM'] = RESOURCES[1]['MEM']
else:
self.batchBaitStatus[5]['MEM'] = 0
RESOURCE_AVAILABLE = False
for i in self.vmMandatoryParameters:
if self.batchBaitStatus[5][i] >= vmParameters[i]:
self.updateLog('The following resource parameter: %s=%s'
% (i, vmParameters[i]) +
' is available', 'info', batchJobId)
if not RESOURCE_AVAILABLE == False:
RESOURCE_AVAILABLE = True
else:
if i == 'MEM':
vm = [ID for ID in self.virtualMachines.keys()
if self.virtualMachines[ID][2][0] == 'OFF'
and self.virtualMachines[ID][2][1] == 'AVAILABLE']
if len(vm) > 0:
TOT_MEM_ALLOCATED_TO_VM = 0
for k in vm:
TOT_MEM_ALLOCATED_TO_VM = \
TOT_MEM_ALLOCATED_TO_VM + \
self.virtualMachines[k][3]['MEM']
if self.batchBaitStatus[5]['MEM'] + TOT_MEM_ALLOCATED_TO_VM >= vmParameters['MEM']:
self.updateLog('The following resource parameter: %s=%s'
% (i, vmParameters[i]) +
' is available', 'info', batchJobId)
RESOURCE_AVAILABLE = True
else:
self.updateLog('The following resource parameter: %s=%s'
% (i, vmParameters[i]) +
' is not available', 'info', batchJobId)
RESOURCE_AVAILABLE = False
else:
RESOURCE_AVAILABLE = False
else:
self.updateLog('The following resource parameter: %s=%s'
% (i, vmParameters[i]) +
' is not available', 'info', batchJobId)
RESOURCE_AVAILABLE = False
if RESOURCE_AVAILABLE:
            # TODO: review whether the bait should always be reopened here
CLOSE = False
RESOURCES_UNDER_MIN_VALUE = []
for i in self.vmMandatoryParameters:
self.batchBaitStatus[5][i] = \
int(self.batchBaitStatus[5][i]) - vmParameters[i]
if (self.batchBaitStatus[5][i] <
int(self.config['MIN_VM_%s' % i])):
RESOURCES_UNDER_MIN_VALUE.append(i)
CLOSE = True
self.updateLog('Get2:Resources Available: %s'
% str(self.batchBaitStatus[5]),
'info', batchJobId)
if CLOSE:
self.updateBaitBatchStatus(
['CLOSED_FULL',
'Resource %s is lower than the MIN value'
% str(RESOURCES_UNDER_MIN_VALUE)])
self.updateLog('Resources acquired')
for i in self.vmMandatoryParameters:
self.updateLog('%s %s is available'
% (i, self.batchBaitStatus[5][i]),
'info', batchJobId)
return [0, 'Resources acquired']
else:
self.updateLog('VM requested parameters exceeded one ' +
'of the available value. ' +
'See below the details ',
'warning', batchJobId)
for i in self.vmMandatoryParameters:
self.updateLog('Parameters WNoDeS_VM_%s:%s '
% (i, vmParameters[i]) +
'-- %s_AVAILABLE_VALUE:%s'
% (i, self.batchBaitStatus[5][i]))
return [1, 'Not enough resources to satisfy the request']
elif ACTION == 'SET':
# Set the resources defined in vmParameters
for i in self.vmMandatoryParameters:
self.batchBaitStatus[5][i] = vmParameters[i]
def checkSSH(self, host):
serverSocket = socket.socket()
serverSocket.settimeout(0.25)
try:
serverSocket.connect((host, 22))
return 0
except socket.error:
return 1
def destroyVMInstance(self, *arguments):
"""
This method will destroy selected VM instances
Input:
- VM list ID
Output:
- [0,1]
"""
try:
self.updateLog('destroyVMInstance arguments %s'
% arguments[0], "debug")
hostname = arguments[0][0]
vm_id = self._get_id_from_hostname(hostname)
job_id = self._get_jobId_from_hostname(hostname)
self.updateLog('Destroying VM' +
'named: %s, ' % hostname +
'ID: %s, ' % vm_id +
'jobId %s' % job_id,
"debug")
if self.batchCmd.bkill(' %s' % job_id):
msg = ('Virtual Machine ' +
'named: %s, ' % hostname +
'ID: %s, ' % vm_id +
                       'jobId %s ' % job_id +
                       'successfully destroyed')
return [0, msg]
else:
msg = ('Cannot find any VM ' +
'named: %s, ' % hostname +
'id: %s, ' % vm_id +
                       'jobId %s ' % job_id +
                       'which can be destroyed')
return [1, msg]
except:
msg = ('Error in function destroyVMInstance: %s, %s, %s'
% sys.exc_info()[:])
return [1, msg]
def getStatus(self, *arguments):
#Update available memory
msg = {'getResourceAvailable': [None]}
RESOURCES = self.sendRequest(self.HV_HOST, self.HV_PORT, msg)
if RESOURCES[0] == 0:
RESOURCES = RESOURCES[1]
self.batchBaitStatus[5]['MEM'] = RESOURCES[1]['MEM']
else:
self.batchBaitStatus[5]['MEM'] = 0
return [0, [self.batchBaitStatus,
self.batchJobs,
self.virtualMachines]]
def getConfig(self, *arguments):
"""
Return Current BAIT configuration
"""
try:
return [0, [self.config]]
except:
msg = "configuration doesn't exist"
self.updateLog(msg, 'error')
return [1, msg]
def reloadConfig(self, *arguments):
"""
Alert the bait in order to reload its configuration
"""
# Get configuration from WNoDeS Name Server
return self._retrieveConfig(self.NS_HOST, self.NS_PORT)
def ping(self, *arguments):
"""
Return bait version. Used to check bait process availability
"""
self.updateLog(' Function: %s . Variables: %s'
% (utils.whoami(), str(arguments)))
try:
from wnodes import bait
a = sys.version_info
return [0, {'Python': '%s.%s.%s' % (a[0], a[1], a[2]),
'bait': bait.get_version()}]
except:
self.updateLog('Ping: failed to get the version', 'info')
return [1, False]
def reportPreExecutionScript(self, *arguments):
self.updateLog(' Function: %s . Variables: %s'
% (utils.whoami(), str(arguments)))
PRE_EXEC_REPORT = arguments[0]
sourceAddress = arguments[1]
batchJobId = PRE_EXEC_REPORT[0]
PRE_EXEC_STATUS = PRE_EXEC_REPORT[1]
PRE_EXEC_DETAILS = PRE_EXEC_REPORT[2]
if (batchJobId in self.batchJobs and
sourceAddress in self.batchJobs[batchJobId][2]):
self.updateLog('Report from %s for job %s can be accepted'
% (sourceAddress, batchJobId))
self.batchJobs[batchJobId][9] = PRE_EXEC_STATUS
self.batchJobs[batchJobId][10] = PRE_EXEC_DETAILS
if PRE_EXEC_STATUS == 0:
return [0, '']
else:
self.updateStatusAndTimeStamp(batchJobId,
'READYtoRUN',
self.batchJobs[batchJobId][4])
return [self.PX_FAILED_RETURN_STATUS, '']
else:
self.updateLog('Report from %s for job %s cannot be accepted'
% (sourceAddress, batchJobId), 'warning')
if self.config['BATCH_SYSTEM_TYPE'].upper() == 'LSF':
return [self.PX_FAILED_RETURN_STATUS, '']
elif self.config['BATCH_SYSTEM_TYPE'].upper() == 'PBS':
self.updateLog('BATCH_SYSTEM_TYPE is %s'
% (self.config['BATCH_SYSTEM_TYPE']))
return [self.PX_FAILED_RETURN_STATUS, '']
def reportPostExecutionScript(self, *arguments):
self.updateLog(' Function: %s . Variables: %s'
% (utils.whoami(), str(arguments)))
batchJobId = arguments[0][0]
if batchJobId in self.batchJobs:
self.updateStatusAndTimeStamp(batchJobId,
'FINISHED',
self.batchJobs[batchJobId][4])
return [0, 'Job finished']
def requestVMInstance(self, *arguments):
"""
This method handles all the requests to create a VM.
        Only the pre-exec script on the BAIT sends this request.
        Input:
            - VM parameters: a dictionary of parameter:value pairs
Output:
- [0,1]
"""
self.updateLog(' Function: %s . Variables: %s'
% (utils.whoami(), str(arguments)))
parameters = arguments[0]
sourceAddr = arguments[1]
batchJobId = parameters[0]
vmParameters = parameters[1]
# Check whether the job is already handled
if batchJobId in self.batchJobs:
return [self.PX_FAILED_RETURN_STATUS, '']
# Check if the type of the request is supported
SUPPORTED_JOB_TYPE = []
try:
for TYPE in self.config['TYPE'].split(';'):
SUPPORTED_JOB_TYPE.append(TYPE.upper())
if not vmParameters['TYPE'].upper() in SUPPORTED_JOB_TYPE:
self.updateLog('Job type is not supported by this BAIT %s - %s'
% (str(vmParameters['TYPE'].upper()),
str(SUPPORTED_JOB_TYPE)))
return [self.PX_FAILED_RETURN_STATUS, '']
except:
return [self.PX_FAILED_RETURN_STATUS, '']
# check if the job is still active in the cluster
bjobsOutput = self.batchCmd.bjobs(jobid=batchJobId)
if not bjobsOutput[0] == 0:
self.updateLog('%s: %s: batch job %s '
% (batchJobId, utils.whoami(), batchJobId) +
'does not belong to batch cluster. ' +
'Request cannot be accepted ',
"error", batchJobId)
return [self.PX_FAILED_RETURN_STATUS,
'Batch job does not belong to batch cluster']
else:
#check source address
if ((not (sourceAddr in self.localhost))
and (not (self.localhost in sourceAddr))):
self.updateLog('%s: %s: request rejected '
% (batchJobId, utils.whoami()) +
'because received from a host: %s '
% sourceAddr +
'different from the BAIT',
"error", batchJobId)
return [self.PX_FAILED_RETURN_STATUS,
'Request received from a non-enable host']
else:
# support for mixed mode. Run a batch job on the HV
if vmParameters['TYPE'].upper() == 'BATCH_REAL' and \
self.config['ENABLE_MIXED_MODE'].upper() == 'NO':
self.updateLog('Mixed mode is not enabled', 'info',
batchJobId)
return [self.PX_FAILED_RETURN_STATUS,
'Mixed mode is not enabled']
elif vmParameters['TYPE'].upper() == 'BATCH_REAL' and \
self.config['ENABLE_MIXED_MODE'].upper() == 'YES':
self.updateLog('New request to run a batch job on the HV',
'info', batchJobId)
#check resources
ACK = self._checkResourcesAvailability(vmParameters)
if ACK[0] == 0:
self.updateLog('There are resources available ' +
'to execute the job.',
'info', batchJobId)
vmParameters = ACK[1]
else:
                        self.updateLog('There are no resources available ' +
'to execute the job.',
'info', batchJobId)
return [self.PX_FAILED_RETURN_STATUS, ACK[1]]
# try to remove virtual resources
requestToHyperVisor = \
self._lookingForVirtualMachine(vmParameters)
ACTION = requestToHyperVisor[0]
VM_ID = requestToHyperVisor[1]
REQUEST_NEW_ADDRESS = requestToHyperVisor[2]
VM_TO_BE_DESTROYED = requestToHyperVisor[3]
if ACTION == 'REGENERATE' or ACTION == 'RECYCLE':
VM_TO_BE_DESTROYED.append(VM_ID)
MSG = {'do_destroy': [VM_TO_BE_DESTROYED, True]}
output = self.sendRequest(self.HV_HOST, self.HV_PORT, MSG)
# UPDATE self.batchJobs
self.batchJobs[batchJobId][1] = 'NoId'
self.batchJobs[batchJobId][2] = self.localhost
self.batchJobs[batchJobId][3] = vmParameters
jCK = GuardianAngel(self, batchJobId)
jCK.start()
if self.HV_HOST == self.localhost:
# the bait is a service running on the HV
self.updateStatusAndTimeStamp(
batchJobId, 'RUN', self.batchJobs[batchJobId][4])
return [0, 'VM successfully created']
else:
# The bait is running on a dedicated VM.
# So I need to migrate the job execution on the HV
vmParameters['RESERVATION_ID'] = \
('%s-%s' % (vmParameters['USER_DETAILS'][0],
batchJobId))
if self.config['BATCH_SYSTEM_TYPE'].upper() == 'LSF':
RES_START = time.strftime(
"%m:%d:%H:%M",
time.localtime(time.time() + 70))
RES_END = time.strftime(
"%m:%d:%H:%M",
time.localtime(
time.time() +
int(self.config['RESERVATION_LENGTH'])))
# breserveOption = (
# '-m "%s" '
# % vmParameters['HOSTNAME']
# + '-n 1 -o -b %s'
# % RES_START
# + ' -e %s'
# % RES_END
# + ' -N %s'
# % vmParameters['RESERVATION_ID']
# + ' -u %s'
# % vmParameters['USER_DETAILS'][0])
breserveOption = ('-n 1 -o -b %s -e %s'
% (RES_START, RES_END))
brsvaddoutput = self.batchCmd.breserve('add',
vmParameters['RESERVATION_ID'],
vmParameters['HOSTNAME'],
breserveOption,
vmParameters['USER_DETAILS'][0])
self.updateLog('%s' % str(brsvaddoutput),
'info', batchJobId)
elif self.config['BATCH_SYSTEM_TYPE'].upper() == 'PBS':
brsvaddoutput = [0, '']
if brsvaddoutput[0] == 0:
self.batchJobs[batchJobId][12] = \
vmParameters['RESERVATION_ID']
self.updateStatusAndTimeStamp(
batchJobId,
'READYtoMODIFY',
self.batchJobs[batchJobId][4])
self.batchJobs[batchJobId][3] = vmParameters
return [self.PX_FAILED_RETURN_STATUS,
'VM successfully created']
else:
self.updateStatusAndTimeStamp(
batchJobId,
'FINISHED',
self.batchJobs[batchJobId][4])
return [self.PX_FAILED_RETURN_STATUS,
'Failed to create a reservation']
self.updateLog('New request to instantiate a VM',
'info',
batchJobId)
#Create a Virtual Machine
else:
# set, if not, which vm image must be used
if not 'IMG' in vmParameters:
msg = {'selectVmType': [vmParameters['USER_DETAILS'],
self.localhost]}
output = self.sendRequest(self.NS_HOST,
self.NS_PORT,
msg)
if output[0] == 0:
output = output[1]
if output[0] == 0:
for key in output[1].keys():
vmParameters[key] = output[1][key]
else:
self.updateLog('Method error: %s' % output[1],
'error', batchJobId)
return [self.PX_FAILED_RETURN_STATUS,
'Method error: %s' % output[1]]
else:
self.updateLog('Communication error: %s'
% output[1], 'error', batchJobId)
return [self.PX_FAILED_RETURN_STATUS,
'Communication error: %s' % output[1]]
#check if the TAG has a vm image file associated
msg = {'get_image_info': [vmParameters['IMG']]}
BAIT_VM_FULL_PATH = self.sendRequest(self.NS_HOST,
self.NS_PORT,
msg)
if BAIT_VM_FULL_PATH[0] == 0:
BAIT_VM_FULL_PATH = BAIT_VM_FULL_PATH[1]
if BAIT_VM_FULL_PATH[0] == 0:
self.updateLog('%s: ' % batchJobId +
'Successfully got VM_TAG file name'
+ ' %s' % str(BAIT_VM_FULL_PATH),
'info', batchJobId)
else:
self.updateLog('%s' % BAIT_VM_FULL_PATH[1],
'error', batchJobId)
return [self.PX_FAILED_RETURN_STATUS,
'Method error: %s' % BAIT_VM_FULL_PATH[1]]
else:
self.updateLog('Communication error: %s'
% BAIT_VM_FULL_PATH[1],
'error', batchJobId)
return [self.PX_FAILED_RETURN_STATUS,
'Communication error: %s'
% BAIT_VM_FULL_PATH[1]]
#check resources
ACK = self._checkResourcesAvailability(vmParameters)
if ACK[0] == 0:
self.updateLog('%s: There are resources available '
% batchJobId +
'to execute the virtual resource. ' +
'Try to create it',
'info', batchJobId)
vmParameters = ACK[1]
else:
                        self.updateLog('%s: There are no resources available '
% batchJobId +
'to execute the virtual resource.',
'info', batchJobId)
return [self.PX_FAILED_RETURN_STATUS, ACK[1]]
# try to create the virtual resource
requestToHyperVisor = \
self._lookingForVirtualMachine(vmParameters)
ACTION = requestToHyperVisor[0]
VM_ID = requestToHyperVisor[1]
REQUEST_NEW_ADDRESS = requestToHyperVisor[2]
VM_TO_BE_DESTROYED = requestToHyperVisor[3]
if REQUEST_NEW_ADDRESS:
# Check hostname availability
if 'ENABLED_VLAN' in self.config:
                            self.updateLog('This HyperVisor supports only ' +
'the following VLANs: %s'
% str(self.config['ENABLED_VLAN']),
'info', batchJobId)
# Find the ENABLED_VLAN for this bait
PIECE_MATCHED = False
HOST_GROUPS = []
for KEY in self.config.keys():
if KEY.startswith('HOST_GROUP'):
HOST_GROUPS.append(KEY)
for HOST_GROUP in HOST_GROUPS:
for HOST_PARSING_RULE in (self.config
[HOST_GROUP].split(';')):
HOSTNAME = self.localhost
if fnmatch.fnmatch(HOSTNAME,
HOST_PARSING_RULE):
PIECE_MATCHED = True
break
if PIECE_MATCHED:
HOST_GROUP = HOST_GROUP.split('_')
ENABLED_VLAN = ('ENABLED_VLAN_%s_%s'
% (HOST_GROUP[1],
HOST_GROUP[2]))
break
if PIECE_MATCHED == False:
return [self.PX_FAILED_RETURN_STATUS,
                                    'I could not find a rule for my hostname '
+ '%s' % self.localhost]
elif not ENABLED_VLAN in self.config:
return [self.PX_FAILED_RETURN_STATUS,
                                    'There is no ENABLED_VLAN ' +
'for host group %s' % '_'.join(HOST_GROUP)]
msg = {'acquire_Node':
['VM_HOST',
self.HV_HOST,
vmParameters['NETWORK_TYPE'].upper(),
self.config[ENABLED_VLAN]]}
output = self.sendRequest(self.NS_HOST,
self.NS_PORT,
msg)
self.updateLog('Allocated host: %s'
% str(output), 'info', batchJobId)
if output[0] == 0:
vmNetParameters = output[1]
if vmNetParameters[0] == 1:
self.accessResourceAvailable('PUT',
vmParameters)
self.batchJobs.pop(batchJobId)
if ACTION == 'REGENERATE':
self.virtualMachines[VM_ID][2] = \
['OFF', 'AVAILABLE']
for ID in VM_TO_BE_DESTROYED:
self.virtualMachines[ID][2] = \
['OFF', 'AVAILABLE']
return [self.PX_FAILED_RETURN_STATUS,
vmNetParameters[1]]
else:
vmNetParameters = vmNetParameters[1]
vmParameters['VLAN'] = vmNetParameters[1]
vmParameters['MAC'] = vmNetParameters[2]
vmParameters['HOSTNAME'] = vmNetParameters[0]
vmParameters['VM_TYPE'] = 'VM_HOST'
else:
self.accessResourceAvailable('PUT', vmParameters)
self.batchJobs.pop(batchJobId)
if ACTION == 'REGENERATE':
self.virtualMachines[VM_ID][2] = \
['OFF', 'AVAILABLE']
for ID in VM_TO_BE_DESTROYED:
self.virtualMachines[ID][2] = \
['OFF', 'AVAILABLE']
return [self.PX_FAILED_RETURN_STATUS, output[1]]
if ACTION == 'CREATE':
MSG = {'do_%s' % ACTION.lower(): [vmParameters]}
else:
for PARAM in vmParameters.keys():
self.virtualMachines[VM_ID][3][PARAM] = \
vmParameters[PARAM]
MSG = {'do_%s' % ACTION.lower():
[VM_ID,
self.virtualMachines[VM_ID][3],
VM_TO_BE_DESTROYED,
REQUEST_NEW_ADDRESS]}
else:
for PARAM in vmParameters.keys():
self.virtualMachines[VM_ID][3][PARAM] = \
vmParameters[PARAM]
if ACTION == 'REGENERATE':
MSG = {'do_%s' % ACTION.lower():
[VM_ID,
self.virtualMachines[VM_ID][3],
VM_TO_BE_DESTROYED,
REQUEST_NEW_ADDRESS]}
else:
MSG = {'do_%s' % ACTION.lower():
[VM_ID,
self.virtualMachines[VM_ID][3],
VM_TO_BE_DESTROYED]}
# Send the request to the HV to instantiates the VM
output = self.sendRequest(self.HV_HOST, self.HV_PORT, MSG)
if output[0] == 0:
output = output[1]
if output[0] == 0:
self.updateLog(
'Virtual resource successfully created. %s'
% str(output[1]),
'info',
batchJobId)
vmId = output[1][0]
self.updateLog('5 %s'
% str(self.virtualMachines.keys()))
self.virtualMachines[vmId] = output[1][1]
self.updateLog('5 %s'
% str(self.virtualMachines.keys()))
if ACTION == 'REGENERATE':
for ID in output[2]:
self.updateLog(
'4 %s'
% str(self.virtualMachines.keys()))
self.virtualMachines.pop(ID)
self.updateLog(
'4 %s'
% str(self.virtualMachines.keys()))
vmParameters = self.virtualMachines[vmId][3]
# UPDATE self.batchJobs
self.batchJobs[batchJobId][1] = vmParameters['ID']
self.batchJobs[batchJobId][2] = \
vmParameters['HOSTNAME']
jCK = GuardianAngel(self, batchJobId)
jCK.start()
if vmParameters['TYPE'] == 'BATCH':
# vmParameters['RESERVATION_ID'] = \
# ('%s'
# % (str(random.randint(1, 10000000))))
vmParameters['RESERVATION_ID'] = \
('%s-%s'
% (vmParameters['USER_DETAILS'][0],
batchJobId))
if (self.config['BATCH_SYSTEM_TYPE'].upper() ==
'LSF'):
# vmParameters['RESERVATION_ID'] = \
# ('%s-%s'
# % (vmParameters['USER_DETAILS'][0],
# batchJobId))
RES_START = time.strftime(
"%m:%d:%H:%M",
time.localtime(time.time()
+ 70))
RES_END = time.strftime(
"%m:%d:%H:%M",
time.localtime(time.time() + int(
self.config['RESERVATION_LENGTH'])))
# breserveOption = (
# '-m "%s" -n 1 -o -b %s'
# % (vmParameters['HOSTNAME'],
# RES_START) +
# ' -e %s -N %s -u %s'
# % (RES_END,
# vmParameters['RESERVATION_ID'],
# vmParameters['USER_DETAILS'][0]))
breserveOption = ('-n 1 -o -b %s -e %s'
% (RES_START, RES_END))
# brsvaddoutput = commands.getstatusoutput(
# 'source /etc/profile.d/lsf.sh; '
# + 'brsvadd -m "%s" '
# % vmParameters['HOSTNAME'] +
# '-n 1 -o -b %s -e %s -N %s -u %s'
# % (RES_START, RES_END,
# vmParameters['RESERVATION_ID'],
# vmParameters['USER_DETAILS'][0]))
brsvaddoutput = self.batchCmd.breserve(
'add',
vmParameters['RESERVATION_ID'],
vmParameters['HOSTNAME'],
breserveOption,
vmParameters['USER_DETAILS'][0])
self.updateLog('%s'
% str(brsvaddoutput),
'info',
batchJobId)
elif (self.config['BATCH_SYSTEM_TYPE'].upper()
== 'PBS'):
brsvaddoutput = [0, '']
if brsvaddoutput[0] == 0:
self.batchJobs[batchJobId][12] = \
vmParameters['RESERVATION_ID']
self.updateStatusAndTimeStamp(
batchJobId,
'READYtoMODIFY',
self.batchJobs[batchJobId][4])
self.virtualMachines[\
vmParameters['ID']][3] = vmParameters
return [self.PX_FAILED_RETURN_STATUS,
'VM successfully created']
else:
self.updateStatusAndTimeStamp(
batchJobId,
'FINISHED',
self.batchJobs[batchJobId][4])
return [
self.PX_FAILED_RETURN_STATUS,
'Failed creating a reservation']
elif vmParameters['TYPE'] == 'CLOUD':
self.updateStatusAndTimeStamp(
batchJobId,
'READYforSETUP',
self.batchJobs[batchJobId][4])
self.virtualMachines[vmParameters['ID']][3] = \
vmParameters
return [0, 'VM successfully created']
else:
self.updateLog('Method error: %s'
% output[1],
'error', batchJobId)
if REQUEST_NEW_ADDRESS:
msg = {'release_Node':
[vmParameters['HOSTNAME'],
self.HV_HOST,
vmParameters['VM_TYPE']]}
output = self.sendRequest(self.NS_HOST,
self.NS_PORT,
msg)
self.updateLog('Release output %s'
% str(output))
self.accessResourceAvailable('PUT', vmParameters)
self.batchJobs.pop(batchJobId)
return [self.PX_FAILED_RETURN_STATUS,
'Method error: %s' % output[1]]
else:
self.updateLog('Communication error: %s'
% output[1],
'error', batchJobId)
if REQUEST_NEW_ADDRESS:
msg = {'release_Node':
[vmParameters['HOSTNAME'],
self.HV_HOST,
vmParameters['VM_TYPE']]}
output = self.sendRequest(self.NS_HOST,
self.NS_PORT, msg)
self.updateLog('Release output %s'
% str(output))
                            self.accessResourceAvailable('PUT', vmParameters)
self.batchJobs.pop(batchJobId)
return [self.PX_FAILED_RETURN_STATUS,
'Communication error: %s' % output[1]]
def setConfig(self, ns_host, ns_port):
"""
        Retrieve the BAIT configuration from the WNoDeS Name Server
"""
config = {}
msg = {'get_config_from_file': ['BAIT']}
msg = str(pickle.dumps(msg).replace('\n', '1%2'))
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((ns_host, int(ns_port)))
sock.sendall(msg + '\n')
socketfile = sock.makefile('r', 0)
response = socketfile.readline().strip()
sock.shutdown(2)
sock.close()
try:
config = pickle.loads(response.replace('1%2', '\n'))
except pickle.UnpicklingError:
sys.exit('Data received cannot be loaded')
except Exception:
                sys.exit('Data received cannot be loaded for this reason:'
+ ' %s, %s, %s' % (sys.exc_info()[:]))
except socket.error:
sys.exit('Server HOST %s:%s is unreachable. '
% (ns_host, ns_port) +
'Cannot retrieve BAIT configuration')
return config
@synchronized(LOCK_LOG)
def updateLog(self, msg, level=None, jobid=None):
"""
Emit a string to the WNoDeS log file.
Thread safe.
"""
if level is None:
level = self.DEFAULT_LOGGING_INFO
else:
level = utils.LOG_LEVELS.get(level, logging.NOTSET)
msg = '%s - %s' % (jobid, msg)
self.wnodes_logger.log(level, msg)
def updateStatusAndTimeStamp(self, *arguments):
""" Update time stamp in batchJobs dictionary """
self.updateLog(' Function: %s . Variables: %s'
% (utils.whoami(), str(arguments)))
batchJobId = arguments[0]
newStatus = arguments[1]
lastTS = arguments[2]
self.batchJobs[batchJobId][0] = newStatus
self.batchJobs[batchJobId][4] = time.time()
timeSpentLastStatus = self.batchJobs[batchJobId][4] - lastTS
self.batchJobs[batchJobId][7] = (self.batchJobs[batchJobId][7] +
timeSpentLastStatus)
if newStatus in self.batchJobs[batchJobId][11]:
self.batchJobs[batchJobId][11][newStatus] = \
self.batchJobs[batchJobId][11][newStatus] + 1
else:
self.batchJobs[batchJobId][11][newStatus] = 1
@synchronized(LOCK_BBS)
def updateBaitBatchStatus(self, *arguments):
self.updateLog(' Function: %s . Variables: %s'
% (utils.whoami(), str(arguments)))
NEW_STATUS = arguments[0][0]
COMMENT = arguments[0][1]
# NEW_STATUS can be: CLOSED_FULL; CLOSED_ADMIN, START, OPEN, OPEN_ADMIN
# ACTION can be: OPEN, CLOSE
PREVIOUS_STATUS = self.batchBaitStatus[0]
if NEW_STATUS == 'CLOSED_FULL':
if PREVIOUS_STATUS == 'OPEN_ADMIN' or PREVIOUS_STATUS == 'OPEN':
self._changeBaitStatus(NEW_STATUS, COMMENT)
# badmin = self.batchCmd.badmin('hclose',
# '%s.%s'
# % (self.localhost,
# self.localdomain))
valid_host = self.localhost
if self.localdomain not in valid_host:
valid_host += '.' + self.localdomain
badmin = self.batchCmd.badmin('hclose', '%s' % valid_host)
# commands.getstatusoutput(\
# 'source /etc/profile.d/lsf.sh; badmin.org hclose %s'
# % self.localhost)
return [0, 'Status changed in %s' % NEW_STATUS]
else:
self.updateLog('The current status: %s '
% PREVIOUS_STATUS +
'cannot be changed to the new status: %s'
% NEW_STATUS)
return [0, 'host already in this status']
elif NEW_STATUS == 'CLOSED_ADMIN':
if (PREVIOUS_STATUS == 'OPEN_ADMIN'
or PREVIOUS_STATUS == 'OPEN'
or PREVIOUS_STATUS == 'START'):
self._changeBaitStatus(NEW_STATUS, COMMENT)
# badmin = self.batchCmd.badmin('hclose',
# '%s.%s'
# % (self.localhost,
# self.localdomain))
valid_host = self.localhost
if self.localdomain not in valid_host:
valid_host += '.' + self.localdomain
badmin = self.batchCmd.badmin('hclose',
'%s'
% valid_host)
# commands.getstatusoutput(\
# 'source /etc/profile.d/lsf.sh; badmin.org hclose %s'
# % self.localhost)
return [0, 'Status changed in %s' % NEW_STATUS]
elif PREVIOUS_STATUS == 'CLOSED_FULL':
self._changeBaitStatus(NEW_STATUS, COMMENT)
return [0, 'Status changed in %s' % NEW_STATUS]
else:
self.updateLog('The current status: %s '
% PREVIOUS_STATUS +
'cannot be changed to the new status: %s'
% NEW_STATUS)
return [0, 'host already in this status']
elif NEW_STATUS == 'OPEN':
if PREVIOUS_STATUS == 'START' or PREVIOUS_STATUS == 'CLOSED_FULL':
self._changeBaitStatus(NEW_STATUS, COMMENT)
# badmin = self.batchCmd.badmin(\
# 'hopen', '%s.%s'
# % (self.localhost, self.localdomain))
valid_host = self.localhost
if self.localdomain not in valid_host:
valid_host += '.' + self.localdomain
badmin = self.batchCmd.badmin('hopen', '%s' % valid_host)
# commands.getstatusoutput(\
# 'source /etc/profile.d/lsf.sh; badmin.org hopen %s'
# % self.localhost)
return [0, 'Status changed in %s' % NEW_STATUS]
elif PREVIOUS_STATUS == 'OPEN_ADMIN':
self._changeBaitStatus(NEW_STATUS, COMMENT)
return [0, 'Status changed in %s' % NEW_STATUS]
else:
self.updateLog('The current status: %s '
% PREVIOUS_STATUS +
'cannot be changed to the new status: %s'
% NEW_STATUS)
return [0, 'host already in this status']
elif NEW_STATUS == 'OPEN_ADMIN':
if PREVIOUS_STATUS == 'START' or PREVIOUS_STATUS == 'CLOSED_ADMIN':
self._changeBaitStatus(NEW_STATUS, COMMENT)
# badmin = self.batchCmd.badmin(
# 'hopen',
# '%s.%s'
# % (self.localhost, self.localdomain))
valid_host = self.localhost
if self.localdomain not in valid_host:
valid_host += '.' + self.localdomain
badmin = self.batchCmd.badmin('hopen', '%s' % valid_host)
# commands.getstatusoutput(
# 'source /etc/profile.d/lsf.sh; ' +
# 'badmin.org hopen %s'
# % self.localhost)
return [0, 'Status changed in %s' % NEW_STATUS]
else:
self.updateLog('The current status: %s '
% PREVIOUS_STATUS +
'cannot be changed to the new status: %s'
% NEW_STATUS)
return [0, 'host already in this status']
else:
return [0, 'Nothing to do']
@synchronized(LOCK_VM)
def updateVMStatus(self, *arguments):
"""
Update in a synchronized way self.virtualMachines local data structure
"""
self.updateLog(' Function: %s . Variables: %s' % (utils.whoami(),
str(arguments)))
UPDATED_VirtualMachines = arguments[0]
self.updateLog('3 %s' % str(self.virtualMachines.keys()))
if type(UPDATED_VirtualMachines) == type({}):
self.virtualMachines = UPDATED_VirtualMachines
self.updateLog('3 %s' % str(self.virtualMachines.keys()))
def updateConfig(self, *arguments):
"""
        Update one or more keys of the config dictionary.
        arguments[0] MUST be a dictionary
"""
configParameters = arguments[0]
try:
if len(configParameters.keys()) > 0:
self.updateLog('New configuration parameters available')
for param in configParameters.keys():
try:
self.updateLog('NEW Param: %s; OLD Param: %s'
% (configParameters[param],
self.config[param]))
self.config[param] = configParameters[param]
except:
self.updateLog('NEW Param: %s is not supported'
% (configParameters[param]))
return 0
else:
self.updateLog('Received dictionary is EMPTY')
return 1
except AttributeError:
            self.updateLog('Received object is not formatted as expected. ' +
'It is not a Dictionary. "%s"'
% str(configParameters))
return 1
class GuardianAngel(threading.Thread):
""" A thread will be instantiated for each VM.
        This thread handles all those statuses
        which are not managed by a WNoDeS component.
"""
def __init__(self, bait_instance, batchJobId):
threading.Thread.__init__(self)
self.batchJobId = batchJobId
self.wb = bait_instance
def run(self):
self.wb.updateLog('Thread started to handle status change for job %s'
% self.batchJobId)
LAST_JOB_STATUS = ''
while True:
if self.batchJobId in self.wb.batchJobs:
VM_ID = self.wb.batchJobs[self.batchJobId][1]
if VM_ID in self.wb.virtualMachines:
vmParameters = self.wb.virtualMachines[VM_ID][3]
else:
vmParameters = self.wb.batchJobs[self.batchJobId][3]
bjobsOutput = self.wb.batchCmd.bjobs(jobid=self.batchJobId)
batchJobStatus = bjobsOutput[1].split()[2]
if batchJobStatus == LAST_JOB_STATUS:
time.sleep(15)
else:
LAST_JOB_STATUS = batchJobStatus
self.wb.updateLog('Job status: %s ' % str(batchJobStatus),
'info',
self.batchJobId)
if batchJobStatus in ['DONE', 'EXIT', 'UNKWN', 'ZOMBIE']:
self.wb.updateLog('Job already finished %s'
% str(self.batchJobId),
'info',
self.batchJobId)
self.wb.updateStatusAndTimeStamp(
self.batchJobId,
'FINISHED',
self.wb.batchJobs[self.batchJobId][4])
else:
pass
if self.wb.batchJobs[self.batchJobId][0] == 'READYtoMODIFY':
time.sleep(int(
self.wb.config['SCHEDULING_INTERVAL']) / 5)
if self.wb.config['BATCH_SYSTEM_TYPE'].upper() == 'LSF':
# bmodoutput = self.wb.batchCmd.bmod(
# '-U %s %s'
# % (vmParameters['RESERVATION_ID'],
# self.batchJobId))
bmodoutput = self.wb.batchCmd.bmod(
'-U %s'
% vmParameters['RESERVATION_ID'],
self.batchJobId)
elif self.wb.config['BATCH_SYSTEM_TYPE'].upper() == 'PBS':
while True:
bjobsOutput = self.wb.batchCmd.bjobs(
jobid=self.batchJobId)
batchJobStatus = bjobsOutput[1].split()[2]
if batchJobStatus == 'PEND':
# bmodoutput = self.wb.batchCmd.breserve('add',
# self.batchJobId, vmParameters['HOSTNAME'],
# vmParameters['RESERVATION_ID'])
bmodoutput = self.wb.batchCmd.breserve('add',
self.batchJobId, vmParameters['HOSTNAME'])
break
else:
time.sleep(5)
self.wb.updateLog('%s' % str(bmodoutput),
'info', self.batchJobId)
self.wb.updateStatusAndTimeStamp(
self.batchJobId,
'READYforSETUP',
self.wb.batchJobs[self.batchJobId][4])
elif self.wb.batchJobs[self.batchJobId][0] == 'READYforSETUP':
self.wb.updateLog('Host %s is ready for setup'
% vmParameters['HOSTNAME'],
'info',
self.batchJobId)
"""
CHANGE STATUS FROM READYforSETUP
to SETUPDONE or HOSTunREACH or JOBREQUEUE
"""
while True:
PING_HOST = commands.getstatusoutput(
'ping -c2 %s' % vmParameters['HOSTNAME'])
SSH_HOST = self.wb.checkSSH(vmParameters['HOSTNAME'])
if PING_HOST[0] == 0 and SSH_HOST == 0:
self.wb.updateLog('Host %s is UP'
% vmParameters['HOSTNAME'],
'info',
self.batchJobId)
# Does the VM require a specific setup ?
VM_SETUP = False
for VM_PARAM_NAME in vmParameters.keys():
if 'VM_CONFIG_' in VM_PARAM_NAME:
VM_SETUP = True
break
else:
VM_SETUP = False
if VM_SETUP:
self.wb.updateLog(
'There is at least one VM setup action. ' +
'Send a request to HV ' +
                                'in order to set up the VM properly',
'info',
self.batchJobId)
msg = {'vmSetup':
[self.wb.batchJobs
[self.batchJobId][1], '']}
output = self.wb.sendRequest(self.wb.HV_HOST,
self.wb.HV_PORT,
msg)
if output[0] == 0:
output = output[1]
if output[0] == 0:
self.wb.updateLog('Virtual resource ' +
'setup successfully completed. %s'
% str(output[1]),
'info',
self.batchJobId)
self.wb.updateStatusAndTimeStamp(
self.batchJobId,
'SETUPDONE',
self.wb.batchJobs
[self.batchJobId][4])
else:
self.wb.updateLog('Remote method ' +
'error: %s' % output[1],
'error',
self.batchJobId)
self.wb.updateStatusAndTimeStamp(
self.batchJobId,
'JOBREQUEUE',
self.wb.batchJobs
[self.batchJobId][4])
else:
self.wb.updateLog('Communication error: %s'
% output[1],
'error',
self.batchJobId)
self.wb.updateStatusAndTimeStamp(
self.batchJobId,
'JOBREQUEUE',
self.wb.batchJobs[self.batchJobId][4])
break
else:
self.wb.updateLog('There is no setup action' +
' for this VM. Proceed with the next step',
'info',
self.batchJobId)
self.wb.updateStatusAndTimeStamp(
self.batchJobId,
'SETUPDONE',
self.wb.batchJobs[self.batchJobId][4])
break
else:
                            self.wb.updateLog('Host %s is not UP yet'
% vmParameters['HOSTNAME'],
'info',
self.batchJobId)
try:
TIMEOUT = int(vmParameters['UNREACH_TIMEOUT'])
except KeyError:
TIMEOUT = int(
self.wb.config['VM_UNREACH_TIMEOUT'])
if ((time.time() -
self.wb.batchJobs[self.batchJobId][4])
> TIMEOUT):
                                self.wb.updateLog('Host %s is still not UP. '
% vmParameters['HOSTNAME']
+ 'TIMEOUT',
'info',
self.batchJobId)
self.wb.updateStatusAndTimeStamp(
self.batchJobId,
'HOSTunREACH',
self.wb.batchJobs[self.batchJobId][4])
break
else:
time.sleep(1)
elif self.wb.batchJobs[self.batchJobId][0] == 'SETUPDONE':
self.wb.updateLog('Virtual resource is now available',
'info',
self.batchJobId)
if vmParameters['TYPE'] == 'CLOUD':
self.wb.updateStatusAndTimeStamp(
self.batchJobId,
'HOST_AVAILABLE',
self.wb.batchJobs[self.batchJobId][4])
if vmParameters['TYPE'] == 'BATCH':
self.wb.updateStatusAndTimeStamp(
self.batchJobId,
'READYtoRUN',
self.wb.batchJobs[self.batchJobId][4])
# self.wb.batchCmd.badmin('hopen', '%s.%s' % (
# self.wb.batchJobs[self.batchJobId][2],
# self.wb.localdomain))
valid_host = self.wb.batchJobs[self.batchJobId][2]
if self.wb.localdomain not in valid_host:
valid_host += '.' + self.wb.localdomain
self.wb.batchCmd.badmin('hopen', '%s' % valid_host)
elif self.wb.batchJobs[self.batchJobId][0] == 'HOSTunREACH':
self.wb.updateLog('Virtual resource is unreachable',
'info', self.batchJobId)
self.wb.updateLog('%s' % str(vmParameters),
'info', self.batchJobId)
if not (self.wb.batchJobs
[self.batchJobId][11]['HOSTunREACH']
> int(self.wb.config['STATUS_RETRY_COUNT'])):
msg = {'do_regenerate': [vmParameters['ID'],
vmParameters,
[],
False]}
output = self.wb.sendRequest(self.wb.HV_HOST,
self.wb.HV_PORT, msg)
if output[0] == 0:
output = output[1]
if output[0] == 0:
self.wb.updateLog('Virtual Resource ' +
'successfully regenerated. %s'
% str(output[1]),
'info',
self.batchJobId)
vmId = output[1][0]
self.wb.updateLog(
'2 %s' %
str(self.wb.virtualMachines.keys()))
self.wb.virtualMachines[vmId] = output[1][1]
self.wb.updateLog(
'2 %s'
% str(self.wb.virtualMachines.keys()))
vmParameters = self.wb.virtualMachines[vmId][3]
self.wb.batchJobs[self.batchJobId][1] = \
vmParameters['ID']
self.wb.batchJobs[self.batchJobId][2] = \
vmParameters['HOSTNAME']
self.wb.updateLog('Proceed with the next step',
'info', self.batchJobId)
self.wb.updateStatusAndTimeStamp(
self.batchJobId,
'READYforSETUP',
self.wb.batchJobs[self.batchJobId][4])
else:
self.wb.updateLog('Remote method error: %s'
% output[1], 'error',
self.batchJobId)
self.wb.updateStatusAndTimeStamp(
self.batchJobId,
'JOBREQUEUE',
self.wb.batchJobs[self.batchJobId][4])
else:
self.wb.updateLog('Communication error: %s'
% output[1],
'error',
self.batchJobId)
self.wb.updateStatusAndTimeStamp(
self.batchJobId,
'JOBREQUEUE',
self.wb.batchJobs[self.batchJobId][4])
else:
self.wb.updateLog('The job %s has hit the RETRY LIMIT '
% self.wb.batchJobs[self.batchJobId][2]
                            + 'COUNT in the following state: HOSTunREACH. '
+ 'I will requeue it',
'info', self.batchJobId)
self.wb.updateStatusAndTimeStamp(
self.batchJobId,
'JOBREQUEUE',
self.wb.batchJobs[self.batchJobId][4])
elif self.wb.batchJobs[self.batchJobId][0] == 'JOBREQUEUE':
""" At this point we have to:
1. Release RESOURCE
2. Destroy the VM and Release the hostname
3. Remove the job
"""
self.wb.accessResourceAvailable('PUT', vmParameters)
msg = {'do_destroy': [[vmParameters['ID']], True]}
output = self.wb.sendRequest(self.wb.HV_HOST,
self.wb.HV_PORT,
msg)
self.wb.updateLog('1 %s'
% str(self.wb.virtualMachines.keys()))
if output[1][0] == 0:
self.wb.virtualMachines.pop(vmParameters['ID'])
self.wb.updateLog('1 %s'
% str(self.wb.virtualMachines.keys()))
if vmParameters['TYPE'] == 'BATCH':
# bmod = self.wb.batchCmd.bmod('-Un %s'
# % self.batchJobId)
bmod = self.wb.batchCmd.bmod('-Un', self.batchJobId)
self.wb.updateLog('%s'
% str(bmod),
'info', self.batchJobId)
elif vmParameters['TYPE'] == 'CLOUD':
bkill = self.wb.batchCmd.bkill(' %s' % self.batchJobId)
# bmod = commands.getstatusoutput\
# ('source /etc/profile.d/lsf.sh; bmod -Un %s'
# % self.batchJobId)
if self.wb.config['BATCH_SYSTEM_TYPE'].upper() == 'LSF':
rm_reservation = self.wb.batchCmd.breserve(
'del', '%s'
% vmParameters['RESERVATION_ID'])
elif self.wb.config['BATCH_SYSTEM_TYPE'].upper() == 'PBS':
rm_reservation = self.wb.batchCmd.breserve(
'del',
self.batchJobId)
# rm_reservation = commands.getstatusoutput(
# 'source /etc/profile.d/lsf.sh; \
# brsvdel %s'
# % vmParameters['RESERVATION_ID'])
self.wb.batchJobs.pop(self.batchJobId)
# commands.getstatusoutput('source /etc/profile.d/lsf.sh; '
# + 'brequeue %s'
# % self.batchJobId)
# commands.getstatusoutput('source /etc/profile.d/lsf.sh; '
# + 'btop %s'
# % self.batchJobId)
self.wb.updateLog('%s' % str(rm_reservation),
'info', self.batchJobId)
elif self.wb.batchJobs[self.batchJobId][0] == 'READYtoRUN':
if self.wb.batchJobs[self.batchJobId][9] == 2:
if (time.time() - self.wb.batchJobs[self.batchJobId][4]
> int(self.wb.config['VM_UNREACH_TIMEOUT'])):
if not int(
self.wb.batchJobs
[self.batchJobId][11]['READYtoRUN']) >\
int(self.wb.config['STATUS_RETRY_COUNT']):
self.wb.updateLog('Preexec script for job %s '
% self.wb.batchJobs[self.batchJobId][2]
                                + 'has not yet been executed. '
+ 'Force job execution',
'info',
self.batchJobId)
# brunOutput = self.wb.batchCmd.brun(
# self.wb.batchJobs[self.batchJobId][2],
# self.wb.localdomain, self.batchJobId)
valid_host = \
self.wb.batchJobs[self.batchJobId][2]
if self.wb.localdomain not in valid_host:
valid_host += '.' + self.wb.localdomain
self.wb.updateLog('valid host %s'
% valid_host,
'info',
self.batchJobId)
brunOutput = self.wb.batchCmd.brun(
valid_host,
self.batchJobId)
self.wb.updateStatusAndTimeStamp(
self.batchJobId,
'READYtoRUN',
self.wb.batchJobs[self.batchJobId][4])
else:
self.wb.updateLog(
'The job %s has hit '
% self.wb.batchJobs[self.batchJobId][2]
                                    + 'the RETRY LIMIT COUNT in the '
+ 'following state:'
+ 'PENDING FORCE JOB EXECUTION',
'info',
self.batchJobId)
self.wb.updateStatusAndTimeStamp(
self.batchJobId,
'JOBREQUEUE',
self.wb.batchJobs[self.batchJobId][4])
else:
time.sleep(1)
elif self.wb.batchJobs[self.batchJobId][9] == 1:
if int(self.wb.batchJobs
[self.batchJobId][11]['READYtoRUN']) >\
int(self.wb.config['STATUS_RETRY_COUNT']):
self.wb.updateLog(
'The job %s has hit the RETRY LIMIT COUNT '
% self.wb.batchJobs[self.batchJobId][2]
                            + 'in the following state: '
                            + 'PENDING PREEXEC EXECUTION FAILED',
'info',
self.batchJobId)
self.wb.updateStatusAndTimeStamp(
self.batchJobId,
'JOBREQUEUE',
self.wb.batchJobs[self.batchJobId][4])
else:
pass
elif self.wb.batchJobs[self.batchJobId][9] == 0:
self.wb.updateStatusAndTimeStamp(
self.batchJobId,
'RUN',
self.wb.batchJobs[self.batchJobId][4])
elif self.wb.batchJobs[self.batchJobId][0] == 'RUN':
self.wb.updateLog('Let me try whether the host %s '
% self.wb.batchJobs[self.batchJobId][2]
                                  + 'is still reachable or not',
'info', self.batchJobId)
PING_HOST = commands.getstatusoutput('ping -c2 %s'
% self.wb.batchJobs[self.batchJobId][2])
SSH_HOST = self.wb.checkSSH(
self.wb.batchJobs[self.batchJobId][2])
if PING_HOST[0] == 0 and SSH_HOST == 0:
self.wb.updateLog('Host %s is really reachable'
% self.wb.batchJobs[self.batchJobId][2],
'info', self.batchJobId)
bjobsOutput = self.wb.batchCmd.bjobs(
jobid=self.batchJobId)
# bjobsOutput = commands.getstatusoutput(
# 'source /etc/profile.d/lsf.sh; ' +
# 'bjobs -w %s | grep %s'
# % (self.batchJobId,
# self.batchJobId))
batchJobStatus = bjobsOutput[1].split()[2]
if batchJobStatus == 'RUN':
self.wb.updateLog('Batch job %s is really RUNNING'
% self.batchJobId, 'info',
self.batchJobId)
time.sleep(60)
elif batchJobStatus == 'PEND':
self.wb.updateLog('Batch job %s is still PENDING'
% self.batchJobId, 'info',
self.batchJobId)
if (self.wb.config['BATCH_SYSTEM_TYPE'].upper()
== 'PBS'):
time.sleep(60)
self.wb.updateLog('Using PBS as batch system '
                                    + 'we are experiencing a strange behaviour.'
                                    + ' Pre-exec report has been sent '
+ 'but the job is not really running, '
+ 'thus I have to force it again ',
'info', self.batchJobId)
# brunOutput = self.wb.batchCmd.brun(
# self.wb.batchJobs[self.batchJobId][2],
# self.wb.localdomain,
# self.batchJobId)
valid_host = (self.wb.batchJobs
[self.batchJobId][2])
if self.wb.localdomain not in valid_host:
valid_host += '.' + self.wb.localdomain
self.wb.updateLog('valid host %s'
% valid_host,
'info',
self.batchJobId)
brunOutput = self.wb.batchCmd.brun(
valid_host, self.batchJobId)
elif (batchJobStatus == 'DONE'
or batchJobStatus == 'EXIT'):
self.wb.updateLog('Batch job %s is TERMINATED'
% self.batchJobId,
'info',
self.batchJobId)
self.wb.updateStatusAndTimeStamp(
self.batchJobId,
'FINISHED',
self.wb.batchJobs[self.batchJobId][4])
elif (batchJobStatus == 'UNKWN'
or batchJobStatus == 'ZOMBIE'):
self.wb.updateLog('Batch job %s '
% self.batchJobId
+ 'is in a bad STATE %s'
% batchJobStatus,
'info',
self.batchJobId)
else:
self.wb.updateLog('Unknown STATUS %s:'
% batchJobStatus,
'info', self.batchJobId)
pass
else:
self.wb.updateLog('While the job %s '
% self.batchJobId
+ 'is running the VM %s '
% vmParameters['HOSTNAME']
                                          + 'became UNREACHABLE',
'error',
self.batchJobId)
elif self.wb.batchJobs[self.batchJobId][0] == 'FINISHED':
self.wb.accessResourceAvailable('PUT', vmParameters)
if vmParameters['TYPE'] == 'BATCH':
if (int(self.wb.config['RESERVATION_LENGTH']) \
> self.wb.batchJobs[self.batchJobId][7]):
if (self.wb.config['BATCH_SYSTEM_TYPE'].upper() \
== 'LSF'):
rm_reservation = self.wb.batchCmd.breserve(
'del', '%s'
% vmParameters['RESERVATION_ID'])
elif (self.wb.config['BATCH_SYSTEM_TYPE'].upper() \
== 'PBS'):
rm_reservation = self.wb.batchCmd.breserve(
'del', self.batchJobId)
self.wb.updateLog('%s'
% str(rm_reservation),
'info',
self.batchJobId)
try:
self.wb.config['ALWAYS_DESTROY']
except:
self.wb.config['ALWAYS_DESTROY'] = 'NO'
if self.wb.config['ALWAYS_DESTROY'].upper() == 'NO':
valid_host = self.wb.batchJobs[self.batchJobId][2]
if self.wb.localdomain not in valid_host:
valid_host += '.' + self.wb.localdomain
self.wb.batchCmd.badmin('hclose', '%s' % valid_host)
self.wb.virtualMachines[vmParameters['ID']][2] = \
['OFF', 'AVAILABLE']
self.wb.batchJobs.pop(self.batchJobId)
msg = {'vmUpdateStatus':
[vmParameters['ID'],
self.wb.virtualMachines[vmParameters['ID']]]}
output = self.wb.sendRequest(
self.wb.HV_HOST,
self.wb.HV_PORT,
msg
)
elif self.wb.config['ALWAYS_DESTROY'].upper() == 'YES':
msg = {'do_destroy': [[vmParameters['ID']], True]}
output = self.wb.sendRequest(
self.wb.HV_HOST,
self.wb.HV_PORT,
msg
)
self.wb.batchJobs.pop(self.batchJobId)
else:
pass
elif vmParameters['TYPE'] == 'CLOUD':
msg = {'do_destroy':
[[vmParameters['ID']], True]}
output = self.wb.sendRequest(self.wb.HV_HOST,
self.wb.HV_PORT, msg)
self.wb.batchJobs.pop(self.batchJobId)
elif vmParameters['TYPE'] == 'BATCH_REAL':
self.wb.batchJobs.pop(self.batchJobId)
else:
time.sleep(2)
else:
self.wb.updateLog('Batch job %s is not managed any more. '
% self.batchJobId
+ 'Thread can be killed',
'info',
self.batchJobId)
break
def main():
"""
Entry point for the WNoDeS process running on the bait.
"""
# parse options from command line
p = optparse.OptionParser()
p.add_option('-d', '--daemonize', action='store_true', dest='daemon',
help='daemonize process')
p.add_option('-c', '--configFile',
dest='configFile',
default=os.path.join(__dir_name__,
'/etc/wnodes/bait',
'wnodes.ini'),
help='set configuration file [default: %default]',
metavar='FILE')
(options, args) = p.parse_args() # @UnusedVariable
# check the presence of a previous wnodes_bait process pid file
pid = "/var/run/%s.pid" % __short_name__
if os.path.isfile(pid):
try:
stale_pid = int(open(pid).readline().strip("\n"))
except ValueError:
sys.exit("%s: cannot read pid file."
% __short_name__)
try:
os.kill(stale_pid, 0)
sys.exit("%s: wnodes_bait already running: Killed"
% __short_name__)
except OSError:
sys.stderr.write("%s: stale pid found. Cannot be killed\n"
% __short_name__)
# if the previous wnodes_bait process cannot be killed,
# shouldn't we exit here?
# sys.exit("%s: stale pid found. Cannot be killed\n"
# % __short_name__)
# load Nameserver host & port
# reading the configuration file
try:
if os.path.isfile(options.configFile):
conf = ConfigParser.RawConfigParser()
try:
conf.read(options.configFile)
except ConfigParser.MissingSectionHeaderError:
sys.stdout.write('There is no SECTION header [NAMESERVER] ' +
'in the configuration file\n')
sys.exit(1)
except:
sys.stdout.write('Error reading configuration: %s\n'
% str(sys.exc_info()[0]))
sys.exit(1)
try:
NS_HOST = conf.get('NAMESERVER', 'NS_HOST')
NS_PORT = conf.get('NAMESERVER', 'NS_PORT')
except ConfigParser.NoSectionError:
sys.stdout.write('There is no SECTION ' +
'for WNoDeS Name Server host\n')
sys.exit(1)
except:
sys.stdout.write('Error reading configuration: %s\n'
% str(sys.exc_info()[0]))
sys.exit(1)
else:
sys.stdout.write('Configuration file is not present: %s\n'
% options.configFile)
sys.exit(1)
except IOError:
sys.stdout.write('IOError\n')
sys.exit(1)
# initialize wb object from class WnodeBait
# (inherited from wsocket.ClientRequestHandler)
wb = WnodBait(NS_HOST, NS_PORT)
if options.daemon is True:
# daemonize the current process
utils.daemonize(stdout="/tmp/%s.stdout"
% __short_name__,
stderr="/tmp/%s.stderr"
% __short_name__)
# write the process pid inside the pid.file
of = open(pid, "w")
of.write("%i\n" % os.getpid())
of.close()
# ask the HV for the available resources
msg = {'getResourceAvailable': [None]}
RESOURCES = wb.sendRequest(wb.HV_HOST, wb.HV_PORT, msg)
while True:
if RESOURCES[0] == 0:
RESOURCES = RESOURCES[1]
# get slot number available on this host
if wb.config['BATCH_SYSTEM_TYPE'].lower() == 'lsf':
try:
bhost = wb.batchCmd.bhost(wb.localhost)[1].split()
slots = bhost[3]
slots = int(slots)
except:
slots = 0
RESOURCES[1]['CPU'] = slots
elif (wb.config['BATCH_SYSTEM_TYPE'].lower() == 'pbs' or
wb.config['BATCH_SYSTEM_TYPE'].lower() == 'torque'):
try:
from xml.etree import ElementTree
except ImportError:
try:
from elementtree import ElementTree
except ImportError:
slots = 0
try:
bhost = wb.batchCmd.bhost('%s.%s' %
(wb.localhost,
wb.localdomain))[1]
bhost_info = ElementTree.fromstring(bhost)
slots = 0
for i in bhost_info.getiterator():
if i.tag == 'np':
slots = int(i.text)
break
except:
slots = 0
RESOURCES[1]['CPU'] = slots
wb.updateLog(str(RESOURCES))
break
else:
wb.updateLog('Connection error requesting available resources',
'error')
time.sleep(5)
msg = {'getResourceAvailable': [None]}
RESOURCES = wb.sendRequest(wb.HV_HOST, wb.HV_PORT, msg)
msg = {'getVMAvailable': [None]}
VMS = wb.sendRequest(wb.HV_HOST, wb.HV_PORT, msg)
msg = {'getImageAvailable': [None]}
IMAGES = wb.sendRequest(wb.HV_HOST, wb.HV_PORT, msg)
if VMS[0] == 0:
VMS = VMS[1]
else:
wb.updateLog('Connection error requesting available VM', 'error')
VMS = VMS[1]
if IMAGES[0] == 0:
IMAGES = IMAGES[1]
else:
wb.updateLog('Connection error requesting available IMAGES', 'error')
IMAGES = IMAGES[1]
if RESOURCES[0] == 0 or VMS[0] == 0 or IMAGES[0] == 0:
wb.updateLog('Successfully retrieved the RESOURCES, VMS and IMAGES' +
' available from HV:%s' % wb.HV_HOST)
wb.accessResourceAvailable('SET', RESOURCES[1])
# Is this the right way ???
wb.virtualMachines = VMS[1]
CURRENT_STATUS = 'DATA AVAILABLE UPDATED'
wb.updateBaitBatchStatus(['OPEN',
'Everything is OK, ' +
'the BAIT process can start'])
else:
wb.updateLog('There was a problem retrieving information from the HV:%s'
% wb.HV_HOST)
CURRENT_STATUS = 'DATA NOT AVAILABLE'
wb.updateBaitBatchStatus(['CLOSE_ADMIN',
'There is a problem with DATA. ' +
'BAIT process cannot start'])
wb.updateLog('###### BAIT Server Starting ... ######')
wb.updateLog('######')
wb.updateLog('###### CURRENT HV: %s' % wb.HV_HOST)
wb.updateLog('###### CURRENT DATA STATUS: %s' % CURRENT_STATUS)
wb.updateLog('###### CURRENT BAIT STATUS: %s : %s'
% (wb.batchBaitStatus[0], wb.batchBaitStatus[1]))
wb.updateLog('######')
wb.updateLog('###### CURRENT VM AVAILABLE ON THE HV')
for ID in wb.virtualMachines.keys():
wb.updateLog(('###### -- %s : %s')
% (str(ID), str(wb.virtualMachines[ID])))
wb.updateLog('######')
wb.updateLog('###### CURRENT RESOURCES ON THE HV')
for res in wb.batchBaitStatus[5].keys():
wb.updateLog(('###### -- %s : %s')
% (str(res), str(wb.batchBaitStatus[5][res])))
wb.updateLog('######')
wb.updateLog('###### %s' % str(wb.batchBaitStatus))
wb.updateLog('######################################')
serviceDispatchTable = {
'destroyVMInstance': wb.destroyVMInstance,
'getConfig': wb.getConfig,
'getStatus': wb.getStatus,
'ping': wb.ping,
'reloadConfig': wb.reloadConfig,
'requestVMInstance': wb.requestVMInstance,
'reportPostExecutionScript': wb.reportPostExecutionScript,
'reportPreExecutionScript': wb.reportPreExecutionScript,
'updateBaitBatchStatus': wb.updateBaitBatchStatus,
'updateConfig': wb.updateConfig,
}
wsocket.initServerRequestHandler(wb, serviceDispatchTable)
BaitServerHandler = wsocket.ServerRequestHandler
srv = SocketServer.ThreadingTCPServer(('',
int(wb.config['BAIT_PORT'])),
BaitServerHandler)
srv.daemon_threads = True
srv.allow_reuse_address = True
srv.serve_forever()
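# Usage sketch (illustrative; the script name below is an assumption):
#   python wnodes_bait.py -d -c /etc/wnodes/bait/wnodes.ini
# With -d/--daemonize the process forks into the background via
# utils.daemonize(); without it the bait server runs in the foreground.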
if __name__ == "__main__":
main()
|
__author__ = "Christian Kongsgaard"
__license__ = "MIT"
__version__ = "0.0.1"
# -------------------------------------------------------------------------------------------------------------------- #
# Imports
# Module imports
import cmf
from datetime import datetime
from datetime import timedelta
import numpy as np
import xml.etree.ElementTree as ET
import os
import xmltodict
import pymesh as pm
# Livestock imports
# -------------------------------------------------------------------------------------------------------------------- #
# CMF Functions and Classes
class CMFModel:
def __init__(self, folder):
self.folder = folder
self.mesh_path = None
self.weather_dict = {}
self.trees_dict = {}
self.ground_dict = {}
self.boundary_dict = {}
self.solver_settings = None
self.outputs = None
self.solved = False
self.results = {}
def load_cmf_files(self, delete_after_load=True):
def load_weather(folder, delete):
# look for file
if os.path.isfile(folder + '/weather.xml'):
weather_path = folder + '/weather.xml'
else:
raise FileNotFoundError('Cannot find weather.xml in folder: ' + str(folder))
# Parse file
weather_tree = ET.tostring(ET.parse(weather_path).getroot())
weather = xmltodict.parse(weather_tree)
weather_dict = {}
# convert to correct format
for w_key in weather['weather'].keys():
lst0 = eval(weather['weather'][w_key])
if isinstance(lst0, dict):
lst1 = {}
for dict_key in lst0.keys():
lst1[dict_key] = [float(i) for i in lst0[dict_key]]
else:
lst1 = lst0
weather_dict[w_key] = lst1
# delete file
if delete:
os.remove(weather_path)
return weather_dict
def load_tree(folder, delete):
# look for file
if os.path.isfile(folder + '/trees.xml'):
tree_path = folder + '/trees.xml'
else:
tree_path = None
if not tree_path:
return None
else:
# Parse file
tree_tree = ET.tostring(ET.parse(tree_path).getroot())
trees = xmltodict.parse(tree_tree)
tree_dict = {}
# convert to correct format
for tree_key in trees['tree'].keys():
tree_dict[str(tree_key)] = {}
for t in trees['tree'][str(tree_key)].keys():
tree_dict[str(tree_key)][str(t)] = eval(trees['tree'][str(tree_key)][str(t)])
# delete file
if delete:
os.remove(tree_path)
return tree_dict
def load_ground(folder, delete):
# look for file
if os.path.isfile(folder + '/ground.xml'):
ground_path = folder + '/ground.xml'
else:
raise FileNotFoundError('Cannot find ground.xml in folder: ' + str(folder))
# Parse file
ground_tree = ET.tostring(ET.parse(ground_path).getroot())
grounds = xmltodict.parse(ground_tree)
ground_dict = {}
# convert to correct format
for ground in grounds['ground'].keys():
ground_dict[str(ground)] = {}
for g in grounds['ground'][ground]:
try:
ground_dict[str(ground)][str(g)] = eval(grounds['ground'][ground][g])
except NameError:
ground_dict[str(ground)][str(g)] = grounds['ground'][ground][g]
# delete file
if delete:
os.remove(ground_path)
return ground_dict
def load_mesh(folder):
# look for file
if os.path.isfile(folder + '/mesh.obj'):
mesh_path = folder + '/mesh.obj'
else:
raise FileNotFoundError('Cannot find mesh.obj in folder: ' + str(folder))
return mesh_path
def load_outputs(folder, delete):
# look for file
if os.path.isfile(folder + '/outputs.xml'):
output_path = folder + '/outputs.xml'
else:
raise FileNotFoundError('Cannot find outputs.xml in folder: ' + str(folder))
# Parse file
output_tree = ET.tostring(ET.parse(output_path).getroot())
outputs = xmltodict.parse(output_tree)
output_dict = {}
# convert to correct format
for out in outputs['output'].keys():
output_dict[str(out)] = eval(outputs['output'][out])
# delete file
if delete:
os.remove(output_path)
return output_dict
def load_solver_info(folder, delete):
# look for file
if os.path.isfile(folder + '/solver.xml'):
solver_path = folder + '/solver.xml'
else:
raise FileNotFoundError('Cannot find solver.xml in folder: ' + str(folder))
# Parse file
solver_tree = ET.tostring(ET.parse(solver_path).getroot())
solver = xmltodict.parse(solver_tree)
solver_dict = {}
# convert to correct format
for setting in solver['solver']:
solver_dict[setting] = eval(solver['solver'][setting])
# delete file
if delete:
os.remove(solver_path)
return solver_dict
def load_boundary(folder, delete):
# look for file
if os.path.isfile(folder + '/boundary_condition.xml'):
boundary_path = folder + '/boundary_condition.xml'
else:
boundary_path = None
if not boundary_path:
return None
else:
# Parse file
boundary_tree = ET.tostring(ET.parse(boundary_path).getroot())
boundaries = xmltodict.parse(boundary_tree)
boundary_dict = {}
# convert to correct format
for bc_key in boundaries['boundary_conditions'].keys():
boundary_dict[str(bc_key)] = {}
for bc in boundaries['boundary_conditions'][bc_key]:
if bc == 'flux':
fluxes = list(float(flux)
for flux in boundaries['boundary_conditions'][bc_key][bc].split(','))
boundary_dict[bc_key][bc] = fluxes
else:
boundary_dict[bc_key][bc] = boundaries['boundary_conditions'][bc_key][bc]
# delete file
if delete:
os.remove(boundary_path)
#print('load', boundary_dict)
return boundary_dict
# Load files and assign data to variables
self.weather_dict = load_weather(self.folder, delete_after_load)
self.trees_dict = load_tree(self.folder, delete_after_load)
self.ground_dict = load_ground(self.folder, delete_after_load)
self.mesh_path = load_mesh(self.folder)
self.outputs = load_outputs(self.folder, delete_after_load)
self.solver_settings = load_solver_info(self.folder, delete_after_load)
self.boundary_dict = load_boundary(self.folder, delete_after_load)
return True
def mesh_to_cells(self, cmf_project, mesh_path, delete_after_load=True):
"""
Takes a mesh and converts it into CMF cells
:param cmf_project: CMF project object
:param mesh_path: Path to mesh file
:param delete_after_load: Whether to delete the mesh file after it has been loaded
:return: True
"""
# Load mesh
mesh = pm.load_mesh(mesh_path)
mesh.enable_connectivity()
# Initialize mesh data
mesh.add_attribute('face_centroid')
mesh.add_attribute('face_index')
mesh.add_attribute('face_area')
cen_pts = mesh.get_attribute('face_centroid')
face_index = mesh.get_attribute('face_index')
face_area = mesh.get_attribute('face_area')
faces = mesh.faces
vertices = mesh.vertices
# Helper functions
def face_vertices(face_index):
"""
Returns the vertices of a face
:param face_index: Face index (int)
:return: v0, v1, v2
"""
face = faces[int(face_index)]
v0 = vertices[face[0]]
v1 = vertices[face[1]]
v2 = vertices[face[2]]
return v0, v1, v2
def face_face_edge(face0, face1):
"""
Returns the width of the edge between two faces
:param face0: Face index
:param face1: Face index
:return: float value with the edge width
"""
# Get vertices
v = []
v0 = face_vertices(int(face0))
v1 = face_vertices(int(face1))
# Find out which edge is shared
for vertex in v0:
equal = np.equal(vertex, v1)
if np.sum(equal) > 0:
v.append(vertex)
else:
pass
# Compute the width of the edge
dx = abs(v[0][0] - v[1][0])
dy = abs(v[0][1] - v[1][1])
dz = abs(v[0][2] - v[1][2])
edge_width = np.sqrt(dx**2 + dy**2 + dz**2)
return edge_width
# Construct centroid list
centroids = []
i = 0
while i < len(cen_pts):
for j in range(0, len(face_index)):
centroids.append([face_index[j], np.array([cen_pts[i], cen_pts[i+1], cen_pts[i+2]])])
i += 3
# Create cells
for i in range(0, len(centroids)):
x, y, z = centroids[i][1]
a = float(face_area[i])
cmf_project.NewCell(x=float(x), y=float(y), z=float(z), area=a, with_surfacewater=True)
# Connect cells
for face in face_index:
adjacent_faces = mesh.get_face_adjacent_faces(int(face))
for adj in adjacent_faces:
width = face_face_edge(face, adj)
if width:
cmf_project[face].topology.AddNeighbor(cmf_project[adj], width)
else:
pass
if delete_after_load:
os.remove(mesh_path)
return True
def add_tree(self, cmf_project, cell_index, property_dict):
"""Adds a tree to the model"""
cell = cmf_project.cells[int(cell_index)]
self.set_vegetation_properties(cell, property_dict)
name = 'canopy_'+str(cell_index)
cell.add_storage(name, 'C')
cmf.Rainfall(cell.canopy, cell, False, True)
cmf.Rainfall(cell.surfacewater, cell, True, False)
cmf.RutterInterception(cell.canopy, cell.surfacewater, cell)
cmf.CanopyStorageEvaporation(cell.canopy, cell.evaporation, cell)
return True
def set_vegetation_properties(self, cell_: cmf.Cell, property_dict: dict):
cell_.vegetation.Height = float(property_dict['height'])
cell_.vegetation.LAI = float(property_dict['lai'])
cell_.vegetation.albedo = float(property_dict['albedo'])
cell_.vegetation.CanopyClosure = float(property_dict['canopy_closure'])
cell_.vegetation.CanopyParExtinction = float(property_dict['canopy_par'])
cell_.vegetation.CanopyCapacityPerLAI = float(property_dict['canopy_capacity'])
cell_.vegetation.StomatalResistance = float(property_dict['stomatal_res'])
cell_.vegetation.RootDepth = float(property_dict['root_depth'])
cell_.vegetation.fraction_at_rootdepth = float(property_dict['root_fraction'])
cell_.vegetation.LeafWidth = float(property_dict['leaf_width'])
return True
def configure_cells(self, cmf_project: cmf.project, cell_properties_dict: dict):
"""Configure the cells"""
# Helper functions
def install_connections(cell_, evapotranspiration_method):
# Install connections
cell_.install_connection(cmf.Richards)
cell_.install_connection(cmf.GreenAmptInfiltration)
if evapotranspiration_method == 'penman_monteith':
# Install Penman & Monteith method to calculate evapotranspiration_potential
cell_.install_connection(cmf.PenmanMonteithET)
elif evapotranspiration_method == 'shuttleworth_wallace':
# Install Shuttleworth-Wallace method to calculate evapotranspiration
cell_.install_connection(cmf.ShuttleworthWallace)
return True
def retention_curve(r_curve_: dict):
"""
Converts a dict of retention curve parameters into a CMF van Genuchten-Mualem retention curve.
:param r_curve_: dict
:return: CMF retention curve
"""
curve = cmf.VanGenuchtenMualem(r_curve_['K_sat'], r_curve_['phi'], r_curve_['alpha'], r_curve_['n'],
r_curve_['m'])
curve.l = r_curve_['l']
return curve
# Convert retention curve parameters into CMF retention curve
r_curve = retention_curve(cell_properties_dict['retention_curve'])
for cell_index in cell_properties_dict['face_indices']:
cell = cmf_project.cells[int(float(cell_index))]
# Add layers
for i in range(0, len(cell_properties_dict['layers'])):
cell.add_layer(float(cell_properties_dict['layers'][i]), r_curve)
install_connections(cell, cell_properties_dict['et_method'])
self.set_vegetation_properties(cell, cell_properties_dict['vegetation_properties'])
if cell_properties_dict['manning']:
cell.surfacewater.set_nManning(float(cell_properties_dict['manning']))
if cell_properties_dict['puddle_depth']:
cell.surfacewater.puddledepth = cell_properties_dict['puddle_depth']
# Set initial saturation
cell.saturated_depth = cell_properties_dict['saturated_depth']
# Connect fluxes
cmf.connect_cells_with_flux(cmf_project, cmf.Darcy)
cmf.connect_cells_with_flux(cmf_project, cmf.KinematicSurfaceRunoff)
return True
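# Illustrative sketch (not part of the original source): the shape of
# cell_properties_dict that configure_cells() expects, inferred from the keys
# read above. All numeric values are placeholders, not validated defaults.
# cell_properties = {
#     'face_indices': [0, 1, 2],
#     'layers': [0.1, 0.3, 0.6],                       # layer depths
#     'retention_curve': {'K_sat': 2.0, 'phi': 0.4, 'alpha': 0.3,
#                         'n': 1.6, 'm': 0.4, 'l': 0.5},
#     'vegetation_properties': {'height': 0.3, 'lai': 2.0, 'albedo': 0.23,
#                               'canopy_closure': 1.0, 'canopy_par': 0.6,
#                               'canopy_capacity': 0.2, 'stomatal_res': 100.0,
#                               'root_depth': 0.3, 'root_fraction': 1.0,
#                               'leaf_width': 0.05},
#     'et_method': 'penman_monteith',                  # or 'shuttleworth_wallace'
#     'manning': 0.035,
#     'puddle_depth': 0.002,
#     'saturated_depth': 3.0,
# }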
def create_stream(self, shape, shape_param, outlet):
"""Create a stream"""
# ShapeParam(Tri) = [length, bankSlope, x, y, z, initialWaterDepth]
# ShapeParam(Rec) = [length, width, x, y, z, initialWaterDepth]
reaches = []
# Create stream
if shape == 0:
for i in range(len(shape_param)):
reach_shape = cmf.TriangularReach(shape_param[i][0],shape_param[i][1])
reaches.append(self.project.NewReach(shape_param[i][2], shape_param[i][3], shape_param[i][4], reach_shape, False))
reaches[-1].depth(shape_param[i][5])
# Connect reaches
if not reaches:
pass
elif len(reaches) == len(shape_param):
channel_out = self.project.NewOutlet(outlet[0], outlet[1], outlet[2])
reaches[-1].set_downstream(channel_out)
else:
reaches[-2].set_downstream(reaches[-1])
elif shape == 1:
for i in range(len(shape_param)):
reach_shape = cmf.RectangularReach(shape_param[i][0],shape_param[i][1])
reaches.append(self.project.NewReach(shape_param[i][2], shape_param[i][3], shape_param[i][4], reach_shape, False))
reaches[-1].depth(shape_param[i][5])
# Connect reaches
if not reaches:
pass
elif len(reaches) == len(shape_param):
channel_out = self.project.NewOutlet(outlet[0], outlet[1], outlet[2])
reaches[-1].set_downstream(channel_out)
else:
reaches[-2].set_downstream(reaches[-1])
else:
return None
def create_weather(self, cmf_project):
"""Creates weather for the project"""
# Helper functions
def create_time_series(analysis_length, time_step=1.0):
# Start date is the 1st of January 2017 at 00:00
start = cmf.Time(1, 1, 2017, 0, 0)
step = cmf.h * time_step
# Create time series
return cmf.timeseries(begin=start, step=step, count=analysis_length)
def weather_to_time_series(weather):
# Create time series
t_series = create_time_series(self.solver_settings['analysis_length'])
w_series = create_time_series(self.solver_settings['analysis_length'])
rh_series = create_time_series(self.solver_settings['analysis_length'])
sun_series = create_time_series(self.solver_settings['analysis_length'])
rad_series = create_time_series(self.solver_settings['analysis_length'])
rain_series = create_time_series(self.solver_settings['analysis_length'])
ground_temp_series = create_time_series(self.solver_settings['analysis_length'])
# add data
for i in range(len(weather['temp'])):
t_series[i] = (weather['temp'][i])
w_series[i] = (weather['wind'][i])
rh_series[i] = (weather['rel_hum'][i])
sun_series[i] = (weather['sun'][i])
rad_series[i] = (weather['rad'][i])
rain_series[i] = (weather['rain'][i])
ground_temp_series[i] = (weather['ground_temp'][i])
return {'temp': t_series, 'wind': w_series, 'rel_hum': rh_series, 'sun': sun_series, 'rad': rad_series,
'rain': rain_series, 'ground_temp': ground_temp_series}
def get_weather_for_cell(cell_id, project_weather_dict):
# Initialize
cell_weather_dict_ = {}
location_dict = {}
# Find weather matching cell ID
for weather_type in project_weather_dict.keys():
# Try for weather type having the same weather for all cells
try:
cell_weather_dict_[weather_type] = project_weather_dict[weather_type]['all']
# Accept that some have one for each cell
except KeyError:
cell_weather_dict_[weather_type] = project_weather_dict[weather_type]['cell_' + str(cell_id)]
# Accept latitude, longitude and time zone
except TypeError:
location_dict[weather_type] = project_weather_dict[weather_type]
# Convert to time series
cell_weather_series = weather_to_time_series(cell_weather_dict_)
return cell_weather_series, location_dict
def create_weather_station(cmf_project_, cell_id, weather, location):
# Add cell rainfall station to the project
rain_station = cmf_project_.rainfall_stations.add(Name='cell_' + str(cell_id) + ' rain',
Data=weather['rain'],
Position=(0, 0, 0))
# Add cell meteo station to the project
meteo_station = cmf_project_.meteo_stations.add_station(name='cell_' + str(cell_id) + ' weather',
position=(0, 0, 0),
latitude=location['latitude'],
longitude=location['longitude'],
tz=location['time_zone'])
meteo_station.T = weather['temp']
meteo_station.Tmax = meteo_station.T.reduce_max(meteo_station.T.begin, cmf.day)
meteo_station.Tmin = meteo_station.T.reduce_min(meteo_station.T.begin, cmf.day)
meteo_station.Windspeed = weather['wind']
meteo_station.rHmean = weather['rel_hum']
meteo_station.Sunshine = weather['sun']
meteo_station.Rs = weather['rad']
meteo_station.Tground = weather['ground_temp']
return rain_station, meteo_station
def connect_weather_to_cells(cell_, rain_station, meteo_station):
rain_station.use_for_cell(cell_)
meteo_station.use_for_cell(cell_)
# Run create weather helper functions
for cell_index in range(0, len(cmf_project.cells)):
cell = cmf_project.cells[cell_index]
cell_weather_dict, project_location = get_weather_for_cell(cell_index, self.weather_dict)
cell_rain, cell_meteo = create_weather_station(cmf_project, cell_index, cell_weather_dict, project_location)
connect_weather_to_cells(cell, cell_rain, cell_meteo)
def create_boundary_conditions(self, cmf_project):
# Helper functions
def set_inlet(boundary_condition_, cmf_project_):
# Get the correct cell and layer
if int(boundary_condition_['layer']) == 0:
cell_layer = cmf_project_.cells[int(boundary_condition_['cell'])].surfacewater
else:
cell_layer = cmf_project_.cells[
int(boundary_condition_['cell'])].layers[
int(boundary_condition_['layer'])]
# Create inlet
inlet = cmf.NeumannBoundary.create(cell_layer)
#print('set_inlet', inlet)
# if flux is a list then convert to time series
if len(boundary_condition_['flux']) > 1:
inlet_flux = np.array(list(float(flux)
for flux in boundary_condition_['flux']))
inlet.set_flux(cmf.timeseries.from_array(begin=datetime(2017, 1, 1),
step=timedelta(hours=1),
data=inlet_flux))
else:
inlet.flux = boundary_condition_['flux'][0]
def set_outlet(boundary_condition_, index_, cmf_project_):
x, y, z = boundary_condition_['location'].split(',')
outlet = cmf_project_.NewOutlet('outlet_' + str(index_), float(x), float(y), float(z))
cell = cmf_project_.cells[int(boundary_condition_['cell'])]
if boundary_condition_['layer'] == 'all':
for l in cell.layers:
# create a Darcy connection with the configured flow width between each soil layer and the outlet
cmf.Darcy(l, outlet, FlowWidth=float(boundary_condition_['flow_width']))
cmf.KinematicSurfaceRunoff(cell.surfacewater, outlet, float(boundary_condition_['flow_width']))
elif boundary_condition_['layer'] == 0:
cmf.KinematicSurfaceRunoff(cell.surfacewater, outlet, float(boundary_condition_['flow_width']))
else:
layer = cell.layers[int(boundary_condition_['layer'])]
cmf.Darcy(layer, outlet, FlowWidth=float(boundary_condition_['flow_width']))
def set_boundary_condition(boundary_condition_, bc_index, cmf_project_):
print('set_boundary_condition', boundary_condition_)
if boundary_condition_['type'] == 'inlet':
set_inlet(boundary_condition_, cmf_project_)
elif boundary_condition_['type'] == 'outlet':
set_outlet(boundary_condition_, bc_index, cmf_project_)
else:
raise ValueError('Boundary type should be either inlet or outlet. Given value was: '
+ str(boundary_condition_['type']))
# Loop through the boundary conditions and assign them
for index, boundary_condition in enumerate(self.boundary_dict.keys()):
#print('\nloop')
#print(index)
#print(boundary_condition)
set_boundary_condition(self.boundary_dict[boundary_condition], index, cmf_project)
def config_outputs(self, cmf_project):
"""Function to set up result gathering dictionary"""
out_dict = {}
for cell_index in range(0, len(cmf_project.cells)):
cell_name = 'cell_' + str(cell_index)
out_dict[cell_name] = {}
# Set all cell related outputs
for cell_output in self.outputs['cell']:
out_dict[cell_name][str(cell_output)] = []
for layer_index in range(0, len(cmf_project.cells[cell_index].layers)):
layer_name = 'layer_' + str(layer_index)
out_dict[cell_name][layer_name] = {}
# Set all layer related outputs
for layer_output in self.outputs['layer']:
out_dict[cell_name][layer_name][str(layer_output)] = []
self.results = out_dict
def gather_results(self, cmf_project, time):
for cell_index in range(0, len(cmf_project.cells)):
cell_name = 'cell_' + str(cell_index)
for out_key in self.results[cell_name].keys():
# Collect cell related results
if out_key == 'evaporation':
evap = cmf_project.cells[cell_index].evaporation
flux_at_time = 0
for flux, node in evap.fluxes(time):
flux_at_time += flux
self.results[cell_name][out_key].append(flux_at_time)
# sw = cmf.ShuttleworthWallace(cmf_project.cells[cell_index])
# sw.refresh(time)
# evap_sum = sw.AIR + sw.GER + sw.GIR
# self.results[cell_name][out_key].append(evap_sum)
if out_key == 'transpiration':
transp = cmf_project.cells[cell_index].transpiration
flux_at_time = 0
for flux, node in transp.fluxes(time):
flux_at_time += flux
self.results[cell_name][out_key].append(flux_at_time)
# self.results[cell_name][out_key].append(cmf_project.cells[cell_index].transpiration)
# self.results[cell_name][out_key].append(cmf.ShuttleworthWallace(cmf_project.cells[cell_index]).ATR_sum)
if out_key == 'surface_water_volume':
volume = cmf_project.cells[cell_index].get_surfacewater().volume
self.results[cell_name][out_key].append(volume)
if out_key == 'surface_water_flux':
water = cmf_project.cells[cell_index].get_surfacewater()
flux_and_node = []
for flux, node in water.fluxes(time):
flux_and_node.append((flux, node))
self.results[cell_name][out_key].append(flux_and_node)
if out_key == 'heat_flux':
self.results[cell_name][out_key].append(cmf_project.cells[cell_index].heat_flux(time))
if out_key == 'aerodynamic_resistance':
self.results[cell_name][out_key].append(
cmf_project.cells[cell_index].get_aerodynamic_resistance(time))
for layer_index in range(0, len(cmf_project.cells[cell_index].layers)):
layer_name = 'layer_' + str(layer_index)
for out_key in self.results[cell_name][layer_name].keys():
# Collect layer related results
if out_key == 'potential':
self.results[cell_name][layer_name][out_key].append(
cmf_project.cells[cell_index].layers[layer_index].potential)
if out_key == 'theta':
self.results[cell_name][layer_name][out_key].append(
cmf_project.cells[cell_index].layers[layer_index].theta)
if out_key == 'volumetric_flux':
layer = cmf_project.cells[cell_index].layers[layer_index].get_3d_flux(time)
"""
flux_and_node = []
for flux, node in layer.fluxes(time):
flux_and_node.append((flux, node))
"""
self.results[cell_name][layer_name][out_key].append(layer)
if out_key == 'volume':
self.results[cell_name][layer_name][out_key].append(
cmf_project.cells[cell_index].layers[layer_index].volume)
if out_key == 'wetness':
self.results[cell_name][layer_name][out_key].append(
cmf_project.cells[cell_index].layers[layer_index].wetness)
# else:
# raise ValueError('Unknown result to collect. Result to collect was: ' + str(out_key))
def print_solver_time(self, solver_time, start_time, last_time, step):
if self.solver_settings['verbosity']:
now = datetime.now()
elapsed_time = now - start_time
time_per_step = elapsed_time.total_seconds()/(step+1)
time_left = timedelta(seconds=(time_per_step * (self.solver_settings['analysis_length'] - step)))
# Print statements:
solver_timer_print = 'Solver Time: ' + str(solver_time)
elapsed_time_print = 'Elapsed Time: ' + str(elapsed_time)
current_time_step_print = 'Current Time Step: ' + str(now - last_time)
estimated_time_left_print = 'Estimated Time Left: ' + str(time_left)
print(solver_timer_print, '\t',
elapsed_time_print, '\t',
current_time_step_print, '\t',
estimated_time_left_print)
return now
else:
if step == 0:
print('Simulation started')
elif step == self.solver_settings['analysis_length']:
print('Simulation ended')
def solve(self, cmf_project, tolerance):
"""Solves the model"""
# Create solver, set time and set up results
solver = cmf.CVodeIntegrator(cmf_project, tolerance)
solver.t = cmf.Time(1, 1, 2017)
self.config_outputs(cmf_project)
# Save initial conditions to results
self.gather_results(cmf_project, solver.t)
# Set timer
start_time = datetime.now()
step = 0
last = start_time
# Run solver and save results at each time step
for t in solver.run(solver.t,
solver.t + timedelta(hours=self.solver_settings['analysis_length']),
timedelta(hours=float(self.solver_settings['time_step']))):
self.gather_results(cmf_project, t)
last = self.print_solver_time(t, start_time, last, step)
step += 1
self.solved = True
return True
def save_results(self):
"""Saves the computed results to a xml file"""
if not self.solved:
print('Project not solved!')
return None
else:
result_root = ET.Element('result')
for cell in self.results.keys():
cell_tree = ET.SubElement(result_root, str(cell))
for result_key in self.results[cell].keys():
if result_key.startswith('layer'):
layer_tree = ET.SubElement(cell_tree, str(result_key))
for layer_result_key in self.results[cell][result_key].keys():
data = ET.SubElement(layer_tree, str(layer_result_key))
data.text = str(self.results[cell][result_key][layer_result_key])
else:
data = ET.SubElement(cell_tree, str(result_key))
data.text = str(self.results[cell][result_key])
result_tree = ET.ElementTree(result_root)
result_tree.write(self.folder + '/results.xml', xml_declaration=True)
return True
def run_model(self):
"""Runs the model with everything"""
# Initialize project
project = cmf.project()
self.load_cmf_files()
# Add cells and properties to them
self.mesh_to_cells(project, self.mesh_path)
for key in self.ground_dict.keys():
self.configure_cells(project, self.ground_dict[str(key)])
if self.trees_dict:
for key in self.trees_dict.keys():
self.add_tree(project,
self.trees_dict[str(key)]['face_index'],
self.trees_dict[str(key)]['property'])
# Create the weather
self.create_weather(project)
# Create boundary conditions
if self.boundary_dict:
self.create_boundary_conditions(project)
# Run solver
self.solve(project, self.solver_settings['tolerance'])
# Save the results
self.save_results()
return project
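if __name__ == '__main__':
    # Minimal usage sketch (assumption, not part of the original module): the
    # folder below is a placeholder and must contain weather.xml, ground.xml,
    # solver.xml, outputs.xml and mesh.obj (plus optional trees.xml and
    # boundary_condition.xml) for load_cmf_files()/run_model() to succeed.
    cmf_model = CMFModel('/path/to/case_folder')
    cmf_model.run_model()  # loads the inputs, solves and writes results.xml to the folder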
|
from kivy.app import App
from kivy.factory import Factory
from kivy.uix.filechooser import FileChooser
from kivy.utils import *
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.actionbar import *
from kivy.uix.popup import Popup
from kivy.uix.button import ButtonBehavior
from kivy.uix.image import Image
from kivy.uix.textinput import TextInput
from kivy.uix.spinner import Spinner
from kivy.uix.button import Button
from kivy.uix.scatter import Scatter
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.label import Label
# from core import Steganography
# Window.size=(405,720) # full-hd
# Window.clearcolor = (255,12,34,0.5)
class FileDialog(FloatLayout):
pass
class Root(BoxLayout):
@staticmethod
def hex(s):
return get_color_from_hex(s)
def load(self):
content = FileDialog()
self._popup = Popup(title="Load an Image", content=content, size_hint=(0.9, 0.9))
self._popup.open()
class Header(ActionBar):
pass
class ImageButton(ButtonBehavior, Image):
pass
class LoadImage(FileChooser):
pass
class Main(App):
pass
class Selector(Label):
pass
class Preview(BoxLayout):
pass
class Header(Label):
pass
Factory.register("Root",cls=Root)
Factory.register("Header",cls=Header)
Factory.register("Preview",cls=Preview)
if __name__ == '__main__':
Main().run()
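# Note (Kivy convention, layout file not shown here): with the App subclass
# named Main, Kivy auto-loads the widget tree from a sibling main.kv file,
# which is where Root, Header, Preview, etc. are expected to be laid out.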
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.terraform.goals.tailor import PutativeTerraformTargetsRequest
from pants.backend.terraform.goals.tailor import rules as terraform_tailor_rules
from pants.backend.terraform.target_types import TerraformModuleTarget
from pants.core.goals.tailor import AllOwnedSources, PutativeTarget, PutativeTargets
from pants.core.goals.tailor import rules as core_tailor_rules
from pants.engine.rules import QueryRule
from pants.testutil.rule_runner import RuleRunner
def test_find_putative_targets() -> None:
rule_runner = RuleRunner(
rules=[
*core_tailor_rules(),
*terraform_tailor_rules(),
QueryRule(PutativeTargets, [PutativeTerraformTargetsRequest, AllOwnedSources]),
QueryRule(AllOwnedSources, ()),
],
target_types=[
TerraformModuleTarget,
],
)
rule_runner.write_files(
{
"prod/terraform/owned-module/BUILD": "terraform_module()",
"prod/terraform/owned-module/versions.tf": "",
"prod/terraform/unowned-module/versions.tf": "",
}
)
pts = rule_runner.request(
PutativeTargets,
[
PutativeTerraformTargetsRequest(
("prod/terraform/owned-module", "prod/terraform/unowned-module")
),
AllOwnedSources(["prod/terraform/owned-module/versions.tf"]),
],
)
assert (
PutativeTargets(
[
PutativeTarget.for_target_type(
TerraformModuleTarget,
"prod/terraform/unowned-module",
"unowned-module",
("versions.tf",),
),
]
)
== pts
)
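# Usage note (assumed invocation; the path is a placeholder for wherever this
# test file lives in the repository):
#   ./pants test path/to/tailor_test.py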
|
# Generated by Django 3.1.5 on 2021-01-30 20:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('reporter', '0002_auto_20210130_2017'),
]
operations = [
migrations.AlterField(
model_name='gpxfile',
name='gpx_file',
field=models.FileField(upload_to='gpx_files'),
),
]
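# Usage note (assumption: standard Django workflow): apply this migration with
#   python manage.py migrate reporter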
|