text stringlengths 8 6.05M |
|---|
import math
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import statsmodels.api as sm
from scipy import optimize
def func(t, A, tau, C):
    """Exponential-decay model used for curve fitting: A*exp(-tau*t) + C."""
    return C + A * np.exp(-t * tau)
# ----------------------------------------------------------------------
# Torsion-pendulum decay analysis: fit an exponential decay to the
# amplitude data for each damping current, extract the damping rate
# gamma = 2*tau, fit gamma(I) with a quadratic, and save both plots.
#
# Fixes vs. the original:
#   * Python 2 `print` statements converted to print() calls.
#   * `.as_matrix()` (removed in pandas 1.0) replaced by `.to_numpy()`.
#   * scalar `sigma=0.1` replaced with a per-point array — modern scipy
#     rejects a scalar sigma with "`sigma` has incorrect shape".
#   * the four copy-pasted fit/plot stanzas collapsed into loops.
# ----------------------------------------------------------------------

# Column names
time = 'time'
current_labels = ['I=0.15', 'I=0.25', 'I=0.35', 'I=0.45']
column_names = [time] + current_labels
data = pd.read_csv("data/decay.csv", header=None,
                   names=column_names)

current = np.array([0.15, 0.25, 0.35, 0.45])
current_error = np.array([0.01, 0.02, 0.02, 0.03])
time_error = 0.5 * np.ones(10)

# Fit the decay model to each current column.
guess = [14.0, 0.01, 0.01]
# One amplitude uncertainty (0.1 cm, presumably — TODO confirm units)
# per data point, as required by curve_fit's sigma argument.
amp_sigma = np.full(len(data), 0.1)
gammas, gamma_errors, y_fits = [], [], []
for label in current_labels:
    param_fit, pcov = optimize.curve_fit(func, data[time], data[label],
                                         p0=guess, sigma=amp_sigma)
    perr = np.sqrt(np.diag(pcov))
    gammas.append(2 * param_fit[1])   # damping rate gamma = 2*tau
    gamma_errors.append(perr[1])
    y_fits.append(func(data[time], *param_fit))

print('Optimized gamma parameters for damping currents:')
for I, g, dg in zip(current, gammas, gamma_errors):
    print('I = {}, gamma = {} +/- {}'.format(I, g, dg))

gammas = np.array(gammas)
gamma_errors = np.array(gamma_errors)

# Quadratic fit of gamma as a function of current.
coeff, residual, _, _, _ = np.polyfit(current, gammas,
                                      deg=2, full=True)
y_fit = coeff[0] * current**2 + coeff[1] * current + coeff[2]

# Plot data
decay, ax = plt.subplots()
gamma_plot, ax1 = plt.subplots()
t_values = data[time].to_numpy()
colors = ['blue', 'red', 'green', 'orange']
for label, fit, color, I in zip(current_labels, y_fits, colors, current):
    ax.plot(t_values, data[label], label=r"$\ I = {} A$".format(I),
            linestyle="", marker="o", color=color)
    ax.plot(t_values, fit, label='', linestyle="--", color=color)
ax.set(title="Torsion Pendulum damping current decay",
       xlabel="Time (s)", ylabel="Amplitude (cm)")
ax.legend(loc="lower left")
ax.grid(True)
ax1.errorbar(current, gammas, marker='^', label='Computed', linestyle='',
             color='grey',
             xerr=[current_error, current_error],
             yerr=[gamma_errors, gamma_errors])
ax1.plot(current, y_fit, label='Fit', linestyle='--', color='orange')
ax1.set(title="Damping rate vs. Electromagnet current",
        xlabel="Current (A)", ylabel=r"Damping rate - $\gamma$")
ax1.legend(loc="upper left")
ax1.grid(True)
decay.savefig("plots/decay.png", dpi=300)
gamma_plot.savefig("plots/gamma.png", dpi=300)
|
from Testing import ZopeTestCase as ztc
from collective.cart.core.content.product import ProductAnnotations
from collective.cart.core.interfaces import IAddableToCart
from collective.cart.core.interfaces import IProduct
from collective.cart.core.tests.base import FUNCTIONAL_TESTING
from hexagonit.testing.browser import Browser
from plone.app.testing import TEST_USER_ID
from plone.app.testing import setRoles
from plone.testing import layered
from zope.annotation.interfaces import IAnnotations
from zope.interface import alsoProvides
from zope.testing import renormalizing
import doctest
import manuel.codeblock
import manuel.doctest
import manuel.testing
import re
import transaction
import unittest2 as unittest
# Doctest option flags shared by all functional doctest suites.
FLAGS = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS | doctest.REPORT_NDIFF | doctest.REPORT_ONLY_FIRST_FAILURE
# Output normalizer applied before comparing doctest output.
CHECKER = renormalizing.RENormalizing([
    # Normalize the generated UUID values to always compare equal.
    (re.compile(r'[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}'), '<UUID>'),
])
def setUp(self):
    """Prepare the shared functional-test fixture for the doctests.

    Populates the doctest globals with the portal, its URL and a test
    browser, then creates a CartFolder plus two documents marked as
    cart products: one with limited stock, one with unlimited stock.
    """
    layer = self.globs['layer']
    self.globs.update({
        'portal': layer['portal'],
        'portal_url': layer['portal'].absolute_url(),
        'browser': Browser(layer['app']),
    })
    ztc.utils.setupCoreSessions(layer['app'])
    portal = self.globs['portal']
    browser = self.globs['browser']
    portal_url = self.globs['portal_url']
    browser.setBaseUrl(portal_url)
    browser.handleErrors = True
    # Don't let the error log swallow exceptions; doctests should see them.
    portal.error_log._ignored_exceptions = ()
    setRoles(portal, TEST_USER_ID, ['Manager'])
    portal.invokeFactory(
        'CartFolder',
        'cfolder',
    )
    cfolder = portal.cfolder
    cfolder.reindexObject()
    portal.invokeFactory(
        'Document',
        'document01',
        title='Document01',
    )
    document01 = portal.document01
    document01.reindexObject()
    # Mark the document as purchasable and attach its product annotations.
    alsoProvides(document01, IAddableToCart)
    IAnnotations(document01)['collective.cart.core'] = ProductAnnotations()
    product01 = IProduct(document01)
    product01.price = 10.0
    product01.stock = 20
    product01.unlimited_stock = False
    product01.max_addable_quantity = 30
    portal.invokeFactory(
        'Document',
        'document02',
        title='Document02',
    )
    document02 = portal.document02
    document02.reindexObject()
    alsoProvides(document02, IAddableToCart)
    IAnnotations(document02)['collective.cart.core'] = ProductAnnotations()
    product02 = IProduct(document02)
    product02.price = 5.0
    product02.unlimited_stock = True
    # Persist the fixture so the test browser (separate connection) sees it.
    transaction.commit()
def DocFileSuite(testfile, flags=FLAGS, setUp=setUp, layer=FUNCTIONAL_TESTING):
    """Returns a test suite configured with a test layer.

    :param testfile: Path to a doctest file.
    :type testfile: str

    :param flags: Doctest test flags.
    :type flags: int

    :param setUp: Test set up function.
    :type setUp: callable

    :param layer: Test layer
    :type layer: object

    :rtype: `manuel.testing.TestSuite`
    """
    # Combine doctest parsing with manuel code-block support.
    parser = manuel.doctest.Manuel(optionflags=flags, checker=CHECKER)
    parser += manuel.codeblock.Manuel()
    suite = manuel.testing.TestSuite(
        parser, testfile, setUp=setUp, globs=dict(layer=layer))
    return layered(suite, layer=layer)
def test_suite():
    """Assemble the (currently empty) functional doctest suite."""
    suites = [
        # DocFileSuite('functional/anonymous_functional.txt'),
    ]
    return unittest.TestSuite(suites)
|
#!/usr/bin/python
import pjsua as pj
import threading
import datetime
from keypad import RaspiBoard
import time
from threading import Timer
# pjsua log verbosity (0 = silent, higher = chattier).
LOG_LEVEL_PJSIP = 3
#SIP_SERVER="192.168.137.1"
#SIP_SERVER="192.168.137.139"
# SIP registrar/proxy and credentials for the door-station account.
SIP_SERVER="localhost"
SIP_USER="entrada"
SIP_PASS="kxgs8zn6TwM7"
SIP_REALM="asterisk"
# Local UDP port for the SIP transport.
SIP_LOCAL_PORT=5072
# Extension dialled when the call button is pressed.
SIP_EXT_TO_CALL=100
# Keypad/GPIO board; set in DoorBerry.__init__ and shared module-wide.
keyboard=None
def log(msg):
    # Print a message prefixed with the current timestamp (Python 2 print).
    print "[",datetime.datetime.now(), "] ", msg
def pj_log(level, msg, length):
    # pjsua logging callback: indent continuation lines and tag with [PJ].
    msg = msg.replace("\n","\n\t")
    print "[PJ] " + msg,
class DBCallCallback(pj.CallCallback):
def __init__(self, call=None):
pj.CallCallback.__init__(self, call)
def on_media_state(self):
print "***** ON MEDIA STATE " , self.call.info()
print self.call.info().media_state
if self.call.info().media_state == pj.MediaState.ACTIVE:
# Connect the call to sound device
call_slot = self.call.info().conf_slot
pj.Lib.instance().conf_connect(call_slot, 0)
pj.Lib.instance().conf_connect(0, call_slot)
print "Media is now active"
else:
print "Media is inactive"
def on_state(self):
global keyboard
print "**** ON STATE ", self.call
print self.call.dump_status()
def on_dtmf_digit(self,digits):
global keyboard
print "*** RECEIVED DIGIT %s" %digits
if (digits=="9"):
keyboard.setTimedOutput(2,True,0.2)
if (digits=="8"):
print "entering if for button 8 + call_slot: "
call_slot = self.call.info().conf_slot
player_id = pj.Lib.instance().create_player("./eight.wav")
print "Wav player id is: ", player_id
player_slot = pj.Lib.instance().player_get_slot(player_id)
print player_slot, call_slot
pj.Lib.instance().conf_connect(player_slot, call_slot)
sleep(2)
pj.Lib.instance().player_destroy(player_id)
class DBAccountCallback(pj.AccountCallback):
    """Account-level pjsua callbacks: registration wait and incoming calls."""
    # Semaphore used to block until registration completes (see wait()).
    sem = None

    def __init__(self, account = None):
        pj.AccountCallback.__init__(self, account)

    def wait(self):
        # Block until on_reg_state releases the semaphore.
        # NOTE(review): threading.Semaphore's `verbose` kwarg is Python 2
        # only; it was removed in Python 3 — confirm target interpreter.
        self.sem = threading.Semaphore(0,verbose=True)
        self.sem.acquire()

    def on_reg_state(self):
        # Release the waiter once the registrar answered (2xx or error >= 200).
        if self.sem:
            if self.account.info().reg_status >= 200:
                self.sem.release()

    def on_incoming_call(self, call):
        # Auto-answer every incoming call with 200 OK.
        cb = DBCallCallback(call)
        call.set_callback(cb)
        call.answer(200,'')
class DoorStation:
    """Wraps a pjsua SIP user agent for the door intercom.

    Owns the pjsua library instance, the registered account and the
    current outgoing call (at most one at a time).
    """
    lib = None
    acc = None
    acc_cb = None
    _call = None

    def __init__(self):
        # NOTE(review): `lib` is a local here, not `self.lib`, so the
        # class attribute is never set — confirm whether stop() relying
        # on self.acc only is intentional.
        lib = pj.Lib()
        try:
            ua= pj.UAConfig()
            ua.user_agent = "DoorBerry UA"
            #ua.max_calls = 1
            mc = pj.MediaConfig()
            # mc.no_vad = False
            # 100 ms echo-cancellation tail, 8 kHz clock for telephony audio.
            mc.ec_tail_len = 100
            mc.clock_rate = 8000
            lib.init(ua_cfg = ua, log_cfg = pj.LogConfig(level=LOG_LEVEL_PJSIP, callback=pj_log), media_cfg=mc)
            lib.create_transport(pj.TransportType.UDP, pj.TransportConfig(SIP_LOCAL_PORT))
            lib.start()
            # temporary workaround on RPi
            #pj.Lib.instance().set_snd_dev(1, 0)
            # Register the account against the configured SIP server.
            acc_cfg = pj.AccountConfig()
            acc_cfg.id = "sip:" + SIP_USER + "@" + SIP_SERVER
            acc_cfg.reg_uri = "sip:" + SIP_SERVER
            acc_cfg.auth_cred = [ pj.AuthCred(SIP_REALM, SIP_USER, SIP_PASS) ]
            acc_cfg.allow_contact_rewrite = False
            self.acc = lib.create_account(acc_cfg)
            log("Account created")
        except pj.Error, e:
            log("Exception: " + str(e))

    def print_media_cfg(self,mc):
        # Debug helper: dump every field of a pj.MediaConfig.
        print "no vad",mc.no_vad
        print "audio frame type ", mc.audio_frame_ptime
        print "channel count ",mc.channel_count
        print "clock rate ",mc.clock_rate
        print "ec options ",mc.ec_options
        print "ec tail len ",mc.ec_tail_len
        print "ilbc mode ",mc.ilbc_mode
        print "jb max ",mc.jb_max
        print "jb min ",mc.jb_min
        print "max media ports ",mc.max_media_ports
        print "no vad ",mc.no_vad
        print "ptime ",mc.ptime
        print "quality ",mc.quality
        print "snd clock rate ",mc.snd_clock_rate

    def start(self):
        # Attach the account callback (registration wait is disabled).
        self.acc_cb = DBAccountCallback(self.acc)
        self.acc.set_callback(self.acc_cb)
        #self.acc_cb.wait()

    def call(self):
        # Place a call to the configured extension unless one is active.
        if (self._call != None and self._call.is_valid()):
            print "call in progress -> SKIP"
            return
        self._call = self.acc.make_call("sip:%d@%s" %(SIP_EXT_TO_CALL,SIP_SERVER), DBCallCallback())
        print "make_call completed"

    def hangup(self):
        # Terminate the current call if one is in progress.
        print "hangup Called"
        if (self._call != None and self._call.is_valid()):
            self._call.hangup()
            print "Hanging up call!"

    def stop(self):
        # Unregister and release the account.
        try:
            self.acc.delete()
            self.acc = None
            #self.lib = None
        except pj.Error, e:
            log("Exception: " + str(e))
class DoorBerry:
station = None
# global keyboard
# keyboard = None
# Making keyboard global so that it can be accessed from the other classes
def __init__(self):
global keyboard
self.station = DoorStation()
keyboard = RaspiBoard()
def run(self):
try:
#station = DoorStation()
self.station.start()
#keyboard = RaspiBoard()
global keyboard
log("entering main loop")
while True:
key = keyboard.keyPressed()
if(key == 0):
time.sleep(0.2)
continue
log("Selected extension =" + str(key))
if(key == 1):
try:
log("calling extension 1")
self.station.call()
except pj.Error, ee:
print ee
time.sleep(2)
except Exception, e:
O.IN, pull_up_down=GPIO.PUD_UPkeyboard.setOut(2,False)
print e
|
import numpy as np
from sklearn.datasets import load_boston

# Load the Boston housing dataset (13 features, scalar house-price target).
# NOTE(review): load_boston was removed in scikit-learn 1.2 — confirm the
# pinned sklearn version or switch to an alternative dataset.
dataset = load_boston()
x = dataset.data
y = dataset.target
print(x.shape, y.shape)  # (506, 13) (506, )

from sklearn.model_selection import train_test_split
# Split off 20% for test, then 20% of the remainder for validation.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=45)
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.2, random_state=45)

from sklearn.preprocessing import MinMaxScaler
# Fit the scaler on the training split only, then apply it everywhere.
scaler = MinMaxScaler()
scaler.fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)
x_val = scaler.transform(x_val)

from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Input

# Functional-API MLP regressor: 13 -> 128 -> 64x4 -> 1.
input1 = Input(shape=(13,))
dense1 = Dense(128, activation='relu')(input1)
dense2 = Dense(64, activation='relu')(dense1)
dense3 = Dense(64, activation='relu')(dense2)
dense4 = Dense(64, activation='relu')(dense3)
dense5 = Dense(64, activation='relu')(dense4)
output1 = Dense(1)(dense5)
model = Model(inputs=input1, outputs=output1)
model.compile(loss='mse', optimizer='adam', metrics=['mae'])

from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
# Checkpoint the best model by validation loss; stop after 20 stale epochs.
modelpath = '../data/modelcheckpoint/k46_boston_{epoch:02d}-{val_loss:.4f}.hdf5'
cp = ModelCheckpoint(filepath=modelpath, monitor='val_loss', save_best_only=True, mode='auto')
es = EarlyStopping(monitor='val_loss', patience=20, mode='auto')
model.fit(x_train, y_train, epochs=4000, batch_size=8, validation_data=(x_val, y_val), verbose=2, callbacks=[es,cp])

loss, mae = model.evaluate(x_test, y_test)
print('loss :', loss)
print('MAE :', mae)
y_predict = model.predict(x_test)

from sklearn.metrics import mean_squared_error, r2_score
def rmse(y_test, y_predict):
    """Root-mean-squared error between true and predicted values."""
    diff = np.asarray(y_test) - np.asarray(y_predict)
    return np.sqrt(np.mean(np.square(diff)))
# Report RMSE and R^2 of the trained model on the held-out test split.
print('RMSE :', rmse(y_test, y_predict))
r2 = r2_score(y_test, y_predict)
print('R2 :', r2)
|
import cv2
import numpy
from matplotlib import pyplot
def display_colored_balls():
    """Show the colored-balls image, its mask and six morphological transforms.

    The image is loaded in grayscale and thresholded (inverted binary) so
    the balls become the foreground mask; each morphological operation is
    applied to that mask and shown in a 3x3 grid.
    """
    # Fix: use the documented `cv2.` module path instead of the
    # accidental `cv2.cv2.` double attribute, and spell `kernel` correctly.
    original_image = cv2.imread("colored_balls.jpg", cv2.IMREAD_GRAYSCALE)
    _, mask = cv2.threshold(original_image, 220, 255, cv2.THRESH_BINARY_INV)
    kernel = numpy.ones((2, 2), numpy.uint8)
    dilation = cv2.dilate(mask, kernel, iterations=2)
    erosion = cv2.erode(mask, kernel, iterations=1)
    opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
    closing = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
    morphological_gradient = cv2.morphologyEx(mask, cv2.MORPH_GRADIENT, kernel)
    tophat = cv2.morphologyEx(mask, cv2.MORPH_TOPHAT, kernel)
    titles = ['image', 'mask', 'dilation', 'erosion', 'opening', 'closing',
              'morphologicalGradient', 'tophat']
    images = [original_image, mask, dilation, erosion, opening, closing,
              morphological_gradient, tophat]
    for i, (title, image) in enumerate(zip(titles, images)):
        pyplot.subplot(3, 3, i + 1)
        pyplot.imshow(image, 'gray')
        pyplot.title(title)
        pyplot.xticks([])
        pyplot.yticks([])
    pyplot.show()
def display_writing():
    """Show the text image and six morphological transforms of it in a grid."""
    # Fix: use the documented `cv2.` module path instead of the
    # accidental `cv2.cv2.` double attribute, and spell `kernel` correctly.
    original_image = cv2.imread("text.png", cv2.IMREAD_GRAYSCALE)
    kernel = numpy.ones((2, 2), numpy.uint8)
    dilation = cv2.dilate(original_image, kernel, iterations=2)
    erosion = cv2.erode(original_image, kernel, iterations=1)
    opening = cv2.morphologyEx(original_image, cv2.MORPH_OPEN, kernel)
    closing = cv2.morphologyEx(original_image, cv2.MORPH_CLOSE, kernel)
    morphological_gradient = cv2.morphologyEx(original_image, cv2.MORPH_GRADIENT, kernel)
    tophat = cv2.morphologyEx(original_image, cv2.MORPH_TOPHAT, kernel)
    titles = ['image', 'dilation', 'erosion', 'opening', 'closing',
              'morphologicalGradient', 'tophat']
    images = [original_image, dilation, erosion, opening, closing,
              morphological_gradient, tophat]
    for i, (title, image) in enumerate(zip(titles, images)):
        pyplot.subplot(3, 3, i + 1)
        pyplot.imshow(image, 'gray')
        pyplot.title(title)
        pyplot.xticks([])
        pyplot.yticks([])
    pyplot.show()
# Render both demo figures when the module is executed.
display_colored_balls()
display_writing()
import logging
import os
from functools import partialmethod
from django.conf import settings
from django.db import models
from django.utils.html import mark_safe
from preview_generator.manager import PreviewManager
def _tohtml(obj, previewfield):
    """Render an admin-friendly HTML snippet for a file with a preview.

    Returns an ``<img>`` thumbnail linking to the original when a preview
    exists, a plain icon link when only the original exists, and ``None``
    when either underlying file is missing from storage.
    """
    previewfile = getattr(obj, previewfield.name)
    originalfile = getattr(obj, previewfield.filefieldname)
    try:
        # Touch .file on both fields to force storage access so a missing
        # underlying file surfaces here instead of inside the template.
        previewfile and previewfile.file and originalfile and originalfile.file
    except FileNotFoundError:
        return None
    if previewfile:
        # Bug fix: the src attribute was unquoted (src={url}), which
        # produces broken HTML for URLs containing spaces or quotes.
        return mark_safe(
            f'<a href="{originalfile.url}"><img src="{previewfile.url}" width="{previewfile.width}" height="{previewfile.height}"/></a>'
        )
    if originalfile:
        return mark_safe(
            f'<a href="{originalfile.url}"><i class="material-icons">open_in_browser</i></a>'
        )
    return None
class FilePreviewField(models.ImageField):
    """Non-editable image field that stores an auto-generated JPEG preview
    of another file field on the same model.

    ``filefieldname`` names the sibling field holding the original file;
    the preview is (re)generated on every save via preview_generator.
    """

    def __init__(self, filefieldname, width=200, height=200, **kwargs):
        # The preview is derived data: never user-editable, empty by default.
        kwargs["editable"] = False
        kwargs["default"] = ""
        self.filefieldname = filefieldname
        self.width = width
        self.height = height
        super().__init__(**kwargs)

    def contribute_to_class(self, cls, name, **kwargs):
        """Attach a ``get_<field>_display`` method rendering the preview HTML."""
        super().contribute_to_class(cls, name, **kwargs)
        setattr(
            cls,
            "get_%s_display" % self.name,
            partialmethod(_tohtml, previewfield=self),
        )

    def deconstruct(self):
        """Serialize the extra constructor arguments for migrations."""
        name, path, args, kwargs = super().deconstruct()
        args = [self.filefieldname] + args
        kwargs["width"] = self.width
        kwargs["height"] = self.height
        return name, path, args, kwargs

    def pre_save(self, model_instance, add):
        # Regenerate the preview just before the model is saved.
        setattr(model_instance, self.attname, self._generate_preview(model_instance))
        return super().pre_save(model_instance, add)

    def _generate_preview(self, model_instance):
        """Return the MEDIA_ROOT-relative path of a fresh JPEG preview.

        Returns "" when the original field is empty or preview generation
        fails (the failure is logged, never raised, so saves don't break).
        """
        if not getattr(model_instance, self.filefieldname):
            return ""
        try:
            PREVIEW_MANAGER = PreviewManager(
                os.path.join(settings.MEDIA_ROOT, "filepreviews"), create_folder=True
            )
            # Strip MEDIA_ROOT (plus its trailing separator) to store a
            # storage-relative path in the field.
            return PREVIEW_MANAGER.get_jpeg_preview(
                getattr(model_instance, self.filefieldname).path,
                width=self.width,
                height=self.height,
            )[len(settings.MEDIA_ROOT) + 1 :]
        except Exception:
            logging.getLogger(__name__).exception(
                f"Error while generating file preview of {model_instance} on field {self.filefieldname}"
            )
            return ""
|
from django.conf.urls import url, include
from rest_framework.routers import DefaultRouter
from Auth.views import CompanyViewSet
# Register the company endpoints under /company/ via a DRF router.
# NOTE(review): `base_name` was renamed to `basename` and removed in
# DRF 3.11 — confirm the pinned djangorestframework version.
router = DefaultRouter()
router.register(r'company', CompanyViewSet, base_name='companies')
urlpatterns = [
    url(r'^', include(router.urls))
]
|
# @Title: Pow(x, n) (Pow(x, n))
# @Author: 2464512446@qq.com
# @Date: 2020-11-16 16:52:56
# @Runtime: 40 ms
# @Memory: 13.5 MB
class Solution:
def myPow(self, x: float, n: int) -> float:
if x == 1 or x == 0:
return x
if n < 0:
x,n = 1/x,-n
res = 1
while n:
if n & 1:
res = x * res
x = x *x
n = n >> 1
return res
|
#========================================================================#
# Generate LSWT "completeness monitoring" plots in batch mode
#------------------------------------------------------------------------#
# Creates the following plots:
# 1. Temporal check - barplot for each dekad, showing daily number of
# granules processed, can be run in NRT or FINAL
# mode. See function for details.
# 2. Spatial check - map showing the location (midpoint) of each
# granule processed.
# The plots can be made at any time during L3-PDP, but after all the
# L2P granules have been processed.
#------------------------------------------------------------------------#
# R. Maidment
#========================================================================#
#------------------------------------------------------------------------#
# Import modules
#------------------------------------------------------------------------#
from datetime import datetime as dt
from datetime import timedelta
import os
import os.path
import time
import logging
import config_lswt as config
import lswt_operational as so
import subprocess
#------------------------------------------------------------------------#
# Create log file (created when script is executed)
#------------------------------------------------------------------------#
logfile = so.log_output(config.log_opsdir,'l2p_run_plots_batch')
#------------------------------------------------------------------------#
# Determine date(s) to process
#------------------------------------------------------------------------#
dekad_info = so.get_date(dt.now().replace(microsecond=0), config.lag, config.buffer, config.latency, config.l2p_latency, config.capdates, config.check_startdate)
dekad_info.get_dateinfo()
#------------------------------------------------------------------------#
# Prepare inputs and submit job
#------------------------------------------------------------------------#
logging.info("Batch process, attempting to create plots ...")
# Per-day log directory: <log_opsdir>/YYYY/MM/DD
plotslogpath_day = os.path.join(config.log_opsdir, str(dt.now().year), '%02d' % dt.now().month, '%02d' % dt.now().day)
# LSF bsub job skeleton; per-dekad arguments are appended below.
job = ['bsub',
       '-q', 'short-serial',
       '-W', '24:00',
       '-o', os.path.join(plotslogpath_day,'%J.out'),
       '-e', os.path.join(plotslogpath_day,'%J.err'),
       '-R', 'rusage[tmp=1250]',
       'python2.7',
       os.path.join(config.homedir,'lswt_l2p_monitoring_checks.py')]
# 11th Jul to 31st Oct
# NOTE(review): this first pdekad/_start/_end triple is dead code — it is
# overwritten by the assignments just below. Also, `02`/`01` are Python 2
# octal-style literals and a SyntaxError under Python 3 (the job above
# invokes python2.7, so the script itself presumably targets Python 2 too).
pdekad = ['2019-02-DK1']
pdekad_start = [(2019, 02, 01)]
pdekad_end = [(2019, 02, 10)]
# 1st April to 10th July
pdekad = ['2019-1-DK1','2019-1-DK2','2019-1-DK3',
          '2019-2-DK1','2019-2-DK2','2019-2-DK3',
          '2019-3-DK1','2019-3-DK2','2019-3-DK3',
          '2019-4-DK1']
pdekad_start = [(2019, 1, 1),(2019, 1, 11),(2019, 1, 21),
                (2019, 2, 1),(2019, 2, 11),(2019, 2, 21),
                (2019, 3, 1),(2019, 3, 11),(2019, 3, 21),
                (2019, 4, 1)]
pdekad_end = [(2019, 1, 10),(2019, 1, 20),(2019, 1, 31),
              (2019, 2, 10),(2019, 2, 20),(2019, 2, 28),
              (2019, 3, 10),(2019, 3, 20),(2019, 3, 31),
              (2019, 4, 10)]
# Submit one temporal-completeness (FINAL mode) plot job per dekad.
for i, pdek in enumerate(pdekad):
    #config.l2path = '/group_workspaces/jasmin2/nceo_uor/output/v2.6.1-123-g1d17a3a/l2p/SLSTRA'
    #config.l2path = '/group_workspaces/jasmin2/nceo_uor/output/v2.6.1-123-g1d17a3a_noSLSTRA/l2p/SLSTRA/'
    dekad_info.pdekad = pdek
    dekad_info.pdekad_start = dt.strptime("%s-%s-%s" % pdekad_start[i], '%Y-%m-%d')
    dekad_info.pdekad_end = dt.strptime("%s-%s-%s" % pdekad_end[i], '%Y-%m-%d')
    # Temporal check in FINAL mode
    plottype = "temporal"
    mode = "FINAL"
    plotname = os.path.join(config.plotspath,
                            str(dekad_info.pdekad_start.year),
                            '%02d' % dekad_info.pdekad_start.month,
                            "LSWT_temporal_completeness_monitoring_" + dekad_info.pdekad + "_FINAL.png")
    arguments = ['--plottype ' + plottype,
                 '--plotspath ' + config.plotspath,
                 '--l1path ' + config.l1path,
                 '--l2path ' + config.l2path,
                 '--dekad ' + dekad_info.pdekad,
                 '--dekad_start ' + dt.strftime(dekad_info.pdekad_start,'%Y-%m-%d'),
                 '--dekad_end ' + dt.strftime(dekad_info.pdekad_end,'%Y-%m-%d'),
                 '--cglops_filename ' + config.cglops_filename,
                 '--mode ' + mode]
    command = job + arguments
    logging.info("Submitting the following arguments for temporal (FINAL) checks: %s" % command)
    # so.tidyup_job presumably flattens the list into a shell string —
    # verify, since shell=True with untrusted input would be unsafe.
    result = subprocess.check_output(so.tidyup_job(command), shell=True)
    # Throttle submissions so the scheduler isn't flooded.
    time.sleep(10)
    logging.info("Job_ID: %s" % result)
    logging.info("Job_PATH: <%s>" % plotslogpath_day)
#convert LSWT_temporal_completeness_monitoring_2018-4-DK2_FINAL.png LSWT_temporal_completeness_monitoring_2018-4-DK3_FINAL.png +append LSWT_temporal_completeness_monitoring_2018-4_FINAL.png
#convert LSWT_temporal_completeness_monitoring_2018-5-DK1_FINAL.png LSWT_temporal_completeness_monitoring_2018-5-DK2_FINAL.png LSWT_temporal_completeness_monitoring_2018-5-DK3_FINAL.png +append LSWT_temporal_completeness_monitoring_2018-5_FINAL.png
#convert LSWT_temporal_completeness_monitoring_2018-6-DK1_FINAL.png LSWT_temporal_completeness_monitoring_2018-6-DK2_FINAL.png LSWT_temporal_completeness_monitoring_2018-6-DK3_FINAL.png +append LSWT_temporal_completeness_monitoring_2018-6_FINAL.png
#convert LSWT_temporal_completeness_monitoring_2018-7-DK1_FINAL.png LSWT_temporal_completeness_monitoring_2018-7-DK2_FINAL.png LSWT_temporal_completeness_monitoring_2018-7-DK3_FINAL.png +append LSWT_temporal_completeness_monitoring_2018-7_FINAL.png
#convert LSWT_temporal_completeness_monitoring_2018-8-DK1_FINAL.png LSWT_temporal_completeness_monitoring_2018-8-DK2_FINAL.png LSWT_temporal_completeness_monitoring_2018-8-DK3_FINAL.png +append LSWT_temporal_completeness_monitoring_2018-8_FINAL.png
#convert LSWT_temporal_completeness_monitoring_2018-9-DK1_FINAL.png LSWT_temporal_completeness_monitoring_2018-9-DK2_FINAL.png LSWT_temporal_completeness_monitoring_2018-9-DK3_FINAL.png +append LSWT_temporal_completeness_monitoring_2018-9_FINAL.png
#convert LSWT_temporal_completeness_monitoring_2018-10-DK1_FINAL.png LSWT_temporal_completeness_monitoring_2018-10-DK2_FINAL.png LSWT_temporal_completeness_monitoring_2018-10-DK3_FINAL.png +append LSWT_temporal_completeness_monitoring_2018-10_FINAL.png
|
import json
import scrapy
from scrapy.http import Request
from scrapy.loader import ItemLoader
from scrapy.item import Item, Field
from tripadvisor_review.items import TripadvisorReviewItem
from scrapy.selector import Selector
class ReviewsScraper(scrapy.Spider):
    """Scrape restaurant review summaries from TripAdvisor pages.

    Start URLs are read from a JSON file of {"url": ...} records.
    """
    name = "restaurantreviews"
    allowed_domains = ["tripadvisor.ie"]
    # NOTE(review): this runs at import time, uses an absolute user-specific
    # path, and never closes the file handle — consider moving it into
    # start_requests() with a context manager and a configurable path.
    f = open("/home/fina/Desktop/tripadvisor_review/tripadvisor_review/tripadvisor_url.json")
    data = json.load(f)
    start_urls = [d['url'] for d in data if 'url' in d]

    def parse(self, response):
        """Yield one item per review container found on the page."""
        reviews = Selector(response).css('div.non_hotels_like')
        for review in reviews:
            item = TripadvisorReviewItem()
            # NOTE(review): these selectors query the whole response, not
            # the per-review `review` node, so every yielded item carries
            # page-level lists — confirm whether that is intended.
            item['restaurant'] = response.xpath("//h1[contains(@class, 'heading_title')]/text()").extract()
            item['city'] = response.css('div.blEntry span.locality::text').extract()
            item['rating'] = response.css('div.rating span.overallRating::text').extract()
            item['price'] = response.css('div.rating_and_popularity span.header_tags::text').extract()
            item['title_review'] = response.css('div.quote span.noQuotes::text').extract()
            item['review_desc'] = response.css('div.entry p.partial_entry::text').extract()
            yield item
|
#game_functions.py
import sys
from time import sleep#pause the game for a while
import pygame
from bullet import Bullet
from alien import Alien
def check_keydown_events(event, infrompy_settings, screen, ship, bullets):
    """Respond to keypresses: arrows move the ship, space fires a bullet."""
    if event.key == pygame.K_RIGHT:
        ship.moving_right = True
    elif event.key == pygame.K_LEFT:
        ship.moving_left = True
    elif event.key == pygame.K_SPACE:
        # Create a new bullet and add to bullet group
        new_bullet = Bullet(infrompy_settings, screen, ship)
        bullets.add(new_bullet)
def check_keyup_events(event, ship):
    """Stop ship movement when an arrow key is released."""
    released = event.key
    if released == pygame.K_RIGHT:
        ship.moving_right = False
    elif released == pygame.K_LEFT:
        ship.moving_left = False
def check_events(infrompy_settings, screen, stats, sb, play_button, ship, aliens, bullets):
    """Respond to key presses and mouse events."""
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            sys.exit()
        elif event.type == pygame.KEYDOWN:
            check_keydown_events(event, infrompy_settings, screen, ship, bullets)
        elif event.type == pygame.KEYUP:
            check_keyup_events(event, ship)
        elif event.type == pygame.MOUSEBUTTONDOWN:
            # Translate the click position into a possible Play-button press.
            # (Leftover debug print removed.)
            mouse_x, mouse_y = pygame.mouse.get_pos()
            check_play_button(infrompy_settings, screen, stats, sb, play_button,
                              ship, aliens, bullets, mouse_x, mouse_y)
def check_play_button(infrompy_settings, screen, stats, sb, play_button, ship, aliens, bullets, mouse_x, mouse_y):
    """Start a new game if the Play button was clicked while inactive."""
    button_clicked = play_button.rect.collidepoint(mouse_x, mouse_y)
    if button_clicked and not stats.game_active:
        # Reset the game settings. (Leftover debug print removed.)
        infrompy_settings.initialize_dynamic_settings()
        # Hide the mouse cursor during play.
        pygame.mouse.set_visible(False)
        # Reset the game statistics.
        stats.reset_stats()
        stats.game_active = True
        # Reset the scoreboard images.
        sb.prep_score()
        sb.prep_high_score()
        sb.prep_level()
        sb.prep_ships()
        # Empty the list of aliens and bullets.
        aliens.empty()
        bullets.empty()
        # Create a new fleet and center the ship.
        create_fleet(infrompy_settings, screen, ship, aliens)
        ship.center_ship()
def update_screen(infrompy_settings, screen, stats, sb, ship, aliens, bullets, play_button):
    """Update the images on the screen and flip to the new screen."""
    # Redraw screen during each pass through the loop.
    screen.fill(infrompy_settings.bg_color)
    # Redraw all the bullets behind the ship and aliens.
    for bullet in bullets.sprites():
        bullet.draw_bullet()
    ship.blitme()
    aliens.draw(screen)
    # Draw score info.
    sb.show_score()
    # Draw the play button only while the game is inactive.
    if not stats.game_active:
        play_button.draw_button()
    # Make the most recently drawn screen visible.
    pygame.display.flip()
def update_bullets(infrompy_settings, screen, stats, sb, ship, aliens, bullets):
    """Update bullet positions, drop off-screen bullets, handle collisions."""
    bullets.update()
    # Remove bullets that have left the top of the game screen; iterate a
    # copy because we mutate the group while looping.
    for bullet in bullets.copy():
        if bullet.rect.bottom <= 0:
            bullets.remove(bullet)
    check_bullet_alien_collisions(infrompy_settings, screen, stats, sb, ship, aliens, bullets)
def check_bullet_alien_collisions(infrompy_settings, screen, stats, sb, ship, aliens, bullets):
    """Respond to bullet-alien collisions; start a new level when fleet is gone.

    Bug fix: the original looped ``for aliens in collisions.values()``,
    rebinding the ``aliens`` sprite group to a list of hit aliens, so the
    later ``len(aliens) == 0`` check tested the wrong object and a new
    fleet was never created on a frame where the last alien was shot.
    """
    # Remove any bullets and aliens that have collided.
    collisions = pygame.sprite.groupcollide(bullets, aliens, True, True)
    for hit_aliens in collisions.values():
        stats.score += infrompy_settings.alien_points * len(hit_aliens)
        sb.prep_score()
    check_high_score(stats, sb)
    if len(aliens) == 0:
        # Fleet destroyed: clear bullets, speed up, level up, new fleet.
        bullets.empty()
        infrompy_settings.increase_speed()
        stats.level += 1
        sb.prep_level()
        create_fleet(infrompy_settings, screen, ship, aliens)
def create_fleet(infrompy_settings, screen, ship, aliens):
    """Create a full fleet of aliens sized to the screen."""
    # Create a template alien just to measure row/column capacity.
    alien = Alien(infrompy_settings, screen)
    number_aliens_x = get_number_aliens_x(infrompy_settings, alien.rect.width)
    number_rows = get_number_rows(infrompy_settings, ship.rect.height, alien.rect.height)
    # Create the fleet of aliens, row by row.
    for row_number in range(number_rows):
        for alien_number in range(number_aliens_x):
            create_alien(infrompy_settings, screen, aliens, alien_number, row_number)
def get_number_aliens_x(infrompy_settings, alien_width):
    """Return how many aliens fit in one row, with one-alien margins."""
    usable_width = infrompy_settings.screen_width - 2 * alien_width
    return int(usable_width / (2 * alien_width))
def create_alien(infrompy_settings, screen, aliens, alien_number, row_number):
    """Create one alien and place it at its grid position in the fleet."""
    alien = Alien(infrompy_settings, screen)
    width, height = alien.rect.width, alien.rect.height
    # One-alien margin on the left/top, two-alien pitch between neighbours.
    alien.x = width * (1 + 2 * alien_number)
    alien.rect.x = alien.x
    alien.rect.y = height * (1 + 2 * row_number)
    aliens.add(alien)
def get_number_rows(infrompy_settings, ship_height, alien_height):
    """Return how many alien rows fit above the ship on the screen."""
    usable_height = (infrompy_settings.screen_height
                     - 3 * alien_height - ship_height)
    return int(usable_height / (2 * alien_height))
def check_fleet_edges(infrompy_settings, aliens):
    """Reverse fleet direction when any alien touches a screen edge."""
    for member in aliens.sprites():
        if not member.check_edges():
            continue
        change_fleet_direction(infrompy_settings, aliens)
        break
def change_fleet_direction(infrompy_settings, aliens):
    """Drop the whole fleet one step and reverse its horizontal direction."""
    drop = infrompy_settings.fleet_drop_speed
    for alien in aliens.sprites():
        alien.rect.y += drop
    infrompy_settings.fleet_direction *= -1
def ship_hit(infrompy_settings, stats, sb, screen, ship, aliens, bullets):
    """Respond to the ship being hit: lose a life or end the game."""
    if stats.ships_left > 0:
        # Decrement ships_left.
        stats.ships_left -= 1
        # Update scoreboard ship count.
        sb.prep_ships()
        # Empty the list of aliens and bullets.
        aliens.empty()
        bullets.empty()
        # Create a new fleet and center the ship.
        create_fleet(infrompy_settings, screen, ship, aliens)
        ship.center_ship()
        # Pause briefly so the player notices the hit.
        sleep(0.5)
    else:
        # No ships left: stop the game and restore the mouse cursor.
        stats.game_active = False
        pygame.mouse.set_visible(True)
def check_aliens_bottom(infrompy_settings, screen, stats, sb, ship, aliens, bullets):
    """Check if any alien has reached the bottom of the screen."""
    screen_rect = screen.get_rect()
    for alien in aliens.sprites():
        if alien.rect.bottom >= screen_rect.bottom:
            # Treat this the same as if the ship got hit.
            ship_hit(infrompy_settings, stats, sb, screen, ship, aliens, bullets)
            break
def update_aliens(infrompy_settings, stats, sb, screen, ship, aliens, bullets):
    """Move the fleet (bouncing at edges) and handle ship/bottom contact."""
    check_fleet_edges(infrompy_settings, aliens)
    aliens.update()
    # Look for alien/ship collisions.
    if pygame.sprite.spritecollideany(ship, aliens):
        ship_hit(infrompy_settings, stats, sb, screen, ship, aliens, bullets)
    # Look for aliens hitting the bottom of the screen.
    check_aliens_bottom(infrompy_settings, screen, stats, sb, ship, aliens, bullets)
def check_high_score(stats, sb):
    """Record a new high score and refresh its on-screen display."""
    if stats.score <= stats.high_score:
        return
    stats.high_score = stats.score
    sb.prep_high_score()
# Generated by Django 2.2.5 on 2019-12-07 08:18
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add the optional free-text ``total`` field to ``SelectedTrip``."""
    dependencies = [
        ('trips', '0014_auto_20191207_0736'),
    ]
    operations = [
        migrations.AddField(
            model_name='selectedtrip',
            name='total',
            # blank=True keeps the field optional in forms/admin.
            field=models.CharField(blank=True, max_length=300),
        ),
    ]
|
import PIL as p
import PIL.ImageTk as ptk
class Picture():
    """Load an image file and prepare a half-size Tkinter-ready copy of it."""
    def __init__(self,path):
        # NOTE(review): this relies on `import PIL.ImageTk` having implicitly
        # imported the PIL.Image submodule; `import PIL` alone would not make
        # `p.Image` resolvable. Prefer `from PIL import Image` if refactoring.
        self.image = p.Image.open(path)
        self.width, self.height = self.image.size
        # Halve both dimensions (int() truncates odd sizes).
        self.w=int(self.width/2)
        self.h=int(self.height/2)
        self.new_size=self.image.resize((self.w,self.h))
        # NOTE(review): PhotoImage requires a Tk root window to already exist —
        # confirm callers create one before instantiating Picture.
        self.img=ptk.PhotoImage(self.new_size)
from .GPR_meta_mll import GPRegressionMetaLearned
from .GPR_meta_vi import GPRegressionMetaLearnedVI
from .GPR_meta_svgd import GPRegressionMetaLearnedSVGD
from .GPR_mll import GPRegressionLearned
from .MAML import MAMLRegression
from .NPR_meta import NPRegressionMetaLearned |
import boto.swf.layer2 as swf
from boto.swf.exceptions import SWFWorkflowExecutionAlreadyStartedError
import json
import logging
from logging.handlers import RotatingFileHandler
# Log to a rotating file so the worker's log cannot grow without bound.
file_handler = RotatingFileHandler('/var/log/postmash/redisworker.log')
# Handler accepts INFO and above. NOTE(review): the logger itself is left at
# the default WARNING level, so INFO records are filtered before reaching
# this handler — confirm whether logger.setLevel(logging.INFO) was intended.
file_handler.setLevel(logging.INFO)
logger = logging.getLogger('redisworker')
logger.addHandler(file_handler)
def enqueue_work(jsondata):
    """Start a PostMash SWF workflow execution carrying *jsondata* as input.

    Retries up to 5 times on workflow-ID clashes, then logs and gives up
    without raising (same best-effort contract as before).
    """
    # logger.warn() is a deprecated alias for logger.warning().
    logger.warning('enqueuing work {data}'.format(data=jsondata))
    # Some dumb retries for workflow ID clashes.
    for _ in range(5):
        try:
            swf.WorkflowType(name='PostMashWorkflow', domain='PostMashDomain',
                             version='1.0', task_list='PostMashTasks').start(input=jsondata)
            # Bug fix: success used to be logged unconditionally after the
            # loop, even when all five attempts had clashed.
            logger.warning('work enqueued')
            break
        except SWFWorkflowExecutionAlreadyStartedError:
            logger.warning('had a workflow ID clash')
    else:
        logger.warning('giving up: could not enqueue work after 5 ID clashes')
|
# Write a program to generate the following arithmetic examples.
# Hints:
# (1) Divide-and-conquer: what simpler problem do you need to solve? (2) Consider using strings to build numbers and then convert.
# (3) The range iterator may be helpful.
# read input variables par and score
par = int(input("Enter the par value in the range 3 to 5: "))
while(par < 3 or par > 5):
print("You have entered the invalid par value!!!")
par = int(input("Enter the par value in the range 3 to 5: "))
score = int(input("Enter the user Score: "))
# check whether the par is equal to the score
if par == score:
print("Par")
# Check whether the score is less than the par
elif score < par:
if score == par-3:
print("Albatross")
elif score == par-2:
print("Eagle")
elif score == par-1:
print("Birdie")
else:
print("You are not allowed to score less than 3 under par")
# check whether the score is greater than the par
elif score > par:
if score == par+1:
print("Bogey")
elif score == par+2:
print("Double Bogey")
elif score == par+3:
print("Tryiple Bogey")
elif score > par+3:
print("bad hole")
# end of program
|
import os
from pydub import AudioSegment
# Source and destination directories for the batch conversion.
# (Renamed from `dir`, which shadowed the builtin.)
src_dir = "neg"
out_dir = "neg_new"
count = 1
for filename in os.listdir(src_dir):
    print(filename)
    src = src_dir + "/" + filename
    # Bug fix: `out_dir` was defined but never used — converted files were
    # written to the current working directory. Write sequentially numbered
    # wavs into out_dir instead (the directory must already exist).
    dst = out_dir + "/" + str(count) + ".wav"
    count = count + 1
    # Convert mp3 input to wav output (the old comment had it backwards).
    sound = AudioSegment.from_mp3(src)
    sound.export(dst, format="wav")
print(count)
from PIL import Image
import numpy as np
import json
import os
# Tuple-index aliases: PIL image size is (width, height); pixels are (r, g, b).
W, H = 0, 1
RED, GREEN, BLUE = 0, 1, 2
sqsize = 50 #maybe add formula to auto calculate square size?? (probably optimize for about 1600 squares)
imgfp = 'InputImages/earth.png' #later modify to allow multiple images
# Base name of the input ("earth") locates its tile set and RGB index.
imgname = imgfp.split('.')[0].split('/')[1]
rgbindexfp = 'ImageSets/%s_set/Indexes/rgb_index.json' % (imgname)
img = Image.open(imgfp)
img = img.convert('RGB')
# Crop so both dimensions are exact multiples of the tile size.
newWidth = img.size[W]-(img.size[W] % sqsize)
newHeight = img.size[H]-(img.size[H] % sqsize)
img = img.crop((0, 0, newWidth, newHeight))
outimg = img.copy()
print 'Width: %spx\nHeight: %spx' % (img.size[W],img.size[H])
#implicit calculation of average pixels: Image.BOX averages in same manner, passed directly into array
#no significant performance difference
avgpixels = np.array(img.resize((img.size[W]/sqsize, img.size[H]/sqsize), resample = Image.BOX))
# imgarr: chosen tile filename per mosaic cell; diffarr: its squared RGB error.
imgarr = np.zeros((avgpixels.shape[0], avgpixels.shape[1]), dtype='S14')
diffarr = np.zeros((avgpixels.shape[0], avgpixels.shape[1]))
with open(rgbindexfp, 'r') as f:
    rgbindex = json.load(f)
# For each cell, pick the tile whose indexed mean RGB is nearest in squared distance.
for x in range(len(avgpixels)):
    for y in range(len(avgpixels[x])):
        mindiff = 195076 #max difference possible + 1
        mindiffpic = ''
        pixrgb = [avgpixels[x,y,RED], avgpixels[x,y,GREEN], avgpixels[x,y,BLUE]]
        # NOTE(review): this directory listing is re-read for every cell;
        # hoisting it above the loops would speed this up considerably.
        fps = os.listdir('ImageSets/%s_set' % imgname)
        fps = [i for i in fps if '.' in i]
        for fp in fps:
            picrgb = rgbindex[fp]
            diff = sum([(pixrgb[i]-picrgb[i])**2 for i in range(3)])
            if diff < mindiff:
                mindiff = diff
                mindiffpic = fp
        imgarr[x,y] = mindiffpic
        diffarr[x,y] = mindiff
# Paste each chosen tile into the output mosaic at its cell position.
for x in range(len(imgarr)):
    for y in range(len(imgarr[x])):
        smimg = Image.open('ImageSets/%s_set/' % (imgname) + imgarr[x,y]).resize((sqsize, sqsize))
        box = (y*sqsize, x*sqsize, y*sqsize + sqsize, x*sqsize + sqsize)
        outimg.paste(smimg, box)
# Build a greyscale error map, rescaled to span the full 0-255 range.
diffarr = np.sqrt(diffarr)
diffarr *= 255.0/np.max(diffarr)
diffimg = Image.fromarray(diffarr.astype('uint8'), mode = 'L').resize((img.size[W], img.size[H]))
img.show()
outimg.show()
diffimg.show()
diffimg.save('DifferenceMaps/%s_diff.png' % (imgname))
outimg.save('Photomosaics/%s_photomosaic.png' % (imgname))
|
# -*- coding: utf-8 -*-
from utils.operation_log import logger
from utils.operation_profile import get_web_data
from pages.web.baidu_page.baiduMainPage import baiduMainPage
class TestBaiduSearch:
    """Tests for the Baidu search flow."""
    def setup(self):
        # Load the Baidu start URL from the web test-data profile.
        self.BAIDUURL = get_web_data("BAIDU", "BAIDUURL")
    def test_search_auto(self, web_driver):
        """Search Baidu for 'airtest' and verify the results page."""
        logger.info(self.test_search_auto.__doc__)
        web_driver.get(self.BAIDUURL)
        baidu_main_page = baiduMainPage(web_driver, logger=logger)
        baidu_main_page.baidu_search_input("airtest")
        baidu_main_page.baidu_search_button()
        res = baidu_main_page.baidu_search_results().text
        # The banner text means "Baidu found the following related results for you".
        assert "百度为您找到相关结果" in res
|
def test():
    """Print this function's local-variable count via its code object (4)."""
    x, y, z = 1, 2, 32
    text = 'HelloWorld'
    # co_nlocals counts x, y, z and text — exactly four locals.
    print(test.__code__.co_nlocals)
|
from .pagination import QueryTypePagination
|
N = int(input())
group = list(map(int, input().split()))
# Ascending-order version
answer = 0 # total number of groups formed
count = 0 # members gathered for the current group so far
group.sort()
# Greedy: walking fear levels in ascending order, close a group as soon as
# its size reaches the fear level of its most fearful (last-added) member.
for i in group:
    count += 1
    if count >= i:
        answer += 1
        count = 0
print(answer)
# Descending-order version (alternative implementation, kept for reference)
# group.sort(reverse=True)
# answer = []
# temp = []
# while True:
#     if len(group) == 0:
#         break
#     for i in range(group[0]):
#         temp.append(group.pop(0))
#     answer.append(temp)
# print(len(answer))
# N: number of adventurers
# group: each adventurer's fear level
from django.shortcuts import render,Http404
from django.http import HttpResponse, HttpResponseRedirect
from django.template import loader
from .models import Bitly
from django.views import View
def redirect(request, keys=None, *args, **kwargs):
    """Resolve a short-link key and redirect to its stored website URL."""
    try:
        link = Bitly.objects.get(keys=keys)
    except Bitly.DoesNotExist:
        # Unknown key: surface a standard 404.
        raise Http404("Page not found")
    return HttpResponseRedirect(link.website)
def index(request):
    """Show the shortener form; on POST, create/fetch the short link.

    Bug fix: the POST branch rendered the success template but never
    returned it, so submissions always fell through to the index page.
    Also dropped the redundant second DB fetch — get_or_create already
    returns the row.
    """
    if request.method == 'GET':
        print(request.GET)
    if request.method == 'POST':
        website = request.POST['website']
        obj, created = Bitly.objects.get_or_create(website=website)
        new_context = {
            "object": obj,
            "created": created,
        }
        print(obj.keys)
        print(obj.website)
        return render(request, "shortner/success.html", new_context)
    return render(request, 'shortner/index.html', {})
# Create your views here.
|
# coding=utf-8
import json
import requests
from logbook import Logger
log = Logger(__name__)
dice10k_url = "http://localhost:3000"
def call_counter(func):
    """Decorator: count invocations of *func* on the wrapper's ``calls`` attribute.

    The count is exposed so callers (e.g. fetch_game's logging) can read how
    often the wrapped function has run.
    """
    import functools  # local import keeps the file's top-level imports untouched

    # functools.wraps preserves __name__, __doc__, __module__, etc.
    # (the original copied only __name__ by hand).
    @functools.wraps(func)
    def helper(*args, **kwargs):
        helper.calls += 1
        return func(*args, **kwargs)
    helper.calls = 0
    return helper
def create_game() -> dict:
    """POST a new game to the server and return the decoded JSON response."""
    endpoint = f"{dice10k_url}/games"
    response = json.loads(requests.post(endpoint).content)
    log.debug(f"Create game response: {json.dumps(response, indent=2)}")
    return response
@call_counter
def fetch_game(game_id: str) -> dict:
    """GET the current state of *game_id* and return it decoded."""
    # TODO make this call less.
    log.info(f"fetch_game counter: {fetch_game.calls}")
    endpoint = f"{dice10k_url}/games/{game_id}"
    response = json.loads(requests.get(endpoint).content)
    log.debug(f"Fetch game response: {json.dumps(response, indent=2)}")
    return response
def start_game(game_id: str) -> dict:
    """PUT the start action for *game_id* and return the decoded response."""
    endpoint = f"{dice10k_url}/games/{game_id}/start"
    response = json.loads(requests.put(endpoint).content)
    log.debug(f"Start game response: {json.dumps(response, indent=2)}")
    return response
def add_player(game_id: str, name: str) -> dict:
    """Register *name* as a player in *game_id* and return the server reply."""
    endpoint = f"{dice10k_url}/games/{game_id}/players"
    raw = requests.post(endpoint, json={"name": name}).content
    response = json.loads(raw)
    log.debug(f"Add player response: {response}")
    return response
def roll(game_id: str, user_id: str, steal: bool) -> dict:
    """Roll the dice for *user_id*; *steal* takes over the previous pile."""
    payload = {"steal": True} if steal else {}
    endpoint = f"{dice10k_url}/games/{game_id}/players/{user_id}/roll"
    response = json.loads(requests.post(endpoint, json=payload).content)
    log.debug(f"Roll response: {response}")
    return response
def send_keepers(game_id: str, user_id: str, picks: list) -> dict:
    """Tell the server which rolled dice *user_id* keeps this turn."""
    endpoint = f"{dice10k_url}/games/{game_id}/players/{user_id}/keep"
    raw = requests.post(endpoint, json={"keepers": picks}).content
    response = json.loads(raw)
    log.debug(f"Keep response: {response}")
    return response
def pass_turn(game_id: str, user_id: str) -> dict:
    """End *user_id*'s turn without rolling again."""
    endpoint = f"{dice10k_url}/games/{game_id}/players/{user_id}/pass"
    response = json.loads(requests.post(endpoint).content)
    log.debug(f"Pass response: {response}")
    return response
|
"""
created by Nagaj at 04/05/2021
"""
|
# -*- coding: utf-8 -*-
from src.functions.Functions import Functions as Selenium
import unittest
class Test_012(Selenium, unittest.TestCase):
    """Opens the MercadoLibre registration page, then searches a saved value on Google."""
    def setUp(self):
        # Open the browser on the registration page and load its locator file.
        Selenium.abrir_navegador(self, "https://www.mercadolibre.com.ar/registration")
        Selenium.get_json_file(self, "mercadolibre_ar")
    def test_012(self):
        Selenium.save_variable_scenary(self, "DNI", "DNI") # Empty text
        Selenium.save_variable_scenary(self, "titulo", "titulo")
        # Open Google in a new window and switch to it by name.
        Selenium.new_window(self, "https://www.google.com/")
        Selenium.get_json_file(self, "Google")
        Selenium.switch_to_windows_name(self, "Google")
        # Type the previously saved "titulo" value into the search box.
        texto = Selenium.get_variable_scenary(self, "titulo")
        Selenium.get_elements(self, "txt_busqueda").send_keys(texto)
        Selenium.esperar(3)
    def tearDown(self):
        Selenium.tearDown(self)
if __name__ == '__main__':
    unittest.main()
|
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from .base_handlers import (ArtifactSummaryAJAX, ArtifactAJAX,
ArtifactSummaryHandler)
__all__ = ['ArtifactSummaryAJAX', 'ArtifactAJAX', 'ArtifactSummaryHandler']
|
###This performs the topology profiling on a set of introgressed blocks that are annotated
###with mutation motifs.
###Mutation motifs can be retrieved using vcftools or bcftools by e.g. cutting down VCFs
###to the 4 relevant individuals [Target,Deni,Nean,HumanOutgroup] where HumanOutgroup is
###a human sequence (for our introgression analysis I used a Russian sample,
###LP6005441-DNA_G10); trimming down to the introgressed regions; and counting the
###occurrence of each of the 16 possible mutation motifs.
###The function calculateMotifs_BEDonBCF.py can be used to retrieve input files.
import numpy as np
def topology_analysis(topology_files_in = ['./example_topoMotifCounts_CDNH.txt']):
    """
    This is an extended topology analysis.
    It reads in a set of full-topology-annotated chunks. It converts to mismatches, and calculates pairwise distances between each chunk set.
    It then tries to assign consistency of each chunk with the 15 possible coalescent trees.
    """
    #The format is [name, motifs, mismatch, pairwise, topology support]
    #I assume we are [Chunk,Deni,Nean,Human]
    #(Note that this motif order is different from the published analysis)
    #          0    1    2    3    4    5    6    7    8    9    10   11   12   13   14   15
    #motifs:   0000 0001 0010 0011 0100 0101 0110 0111 1000 1001 1010 1011 1100 1101 1110 1111
    #          0    1    2    3    4    5    6    7    8    9
    #pairwise: CDen CNea CHum CAnc DNea DHum DAnc NHum NAnc HAnc
    #topology: (H,(N,(D,C))), (H,(D,(N,C))), (H,(C,(D,N))),
    #          (N,(H,(D,C))), (N,(D,(H,C))), (N,(C,(D,H))),
    #          (D,(N,(H,C))), (D,(H,(N,C))), (D,(C,(H,N))),
    #          (C,(N,(D,H))), (C,(D,(N,H))), (C,(H,(D,N))),
    #          ((H,N),(D,C)), ((H,D),(N,C)), ((H,C),(D,N))
    topo_order = ['(H,(N,(D,C)))', '(H,(D,(N,C)))', '(H,(C,(D,N)))', '(N,(H,(D,C)))', '(N,(D,(H,C)))', '(N,(C,(D,H)))', '(D,(N,(H,C)))', '(D,(H,(N,C)))', '(D,(C,(H,N)))', '(C,(N,(D,H)))', '(C,(D,(N,H)))', '(C,(H,(D,N)))', '((H,N),(D,C))', '((H,D),(N,C))', '((H,C),(D,N))']
    # Tuning knobs for the four per-topology consistency tests below.
    inconsistency_threshold = 0.10
    branch_length_equality_factor = 1.5 #If 1.5 (or 2/3.), only reject if a pair of equal branches are have a 1:1.5 or 1.5:1 ratio
    min_unmasked = 20000
    topology_focus = [[0], [12]] #Optional assessment of mismatch in specific subset of topologies. Here I check divergence in topologies (H,(N,(D,C))) [0] and ((H,N),(D,C)) [12].
    #topology_focus = []
    set_names = ['example_topos']
    ind_column = 4
    chromosomes_to_exclude = []
    mismatch_keys = ['0000', '0001', '0010', '0011', '0100', '0101', '0110', '0111', '1000', '1001', '1010', '1011', '1100', '1101', '1110', '1111']
    pairwise_keys = [[0,1],[0,2],[0,3],[0,'anc'],[1,2],[1,3],[1,'anc'],[2,3],[2,'anc'],[3,'anc']]
    #Each topology has four consistency tests:
    #1. That the mismatches are in the correct order. Here, the notation is [0,...,9] with [[a],[b],[c]] indicating a < b < c and [[a,b],[c]] indicating \bar{a,b} < x
    #2. The the (average) total branch lengths are too. Here, the notation is [0,...,15] with [[[a]],[[b,d],[c,d]]] indicating a < \bar{b+d,c+d}. NB that the outer list is averaging and the inner list is summation.
    #3. That the number of inconsistent motifs is low. Here, the notation [0,...,15] with the list indicating the motifs (see mismatch_keys) that are inconsistent with the proposed topology. We require that under inconsistency_threshold motifs are inconsistent with a topology to accept that topology.
    #4. That branches that should be of equal length given the topology are approximately equal. The notation is [0,...,15] indicating motifs (see mismatch_keys) that contribute to different branches under the assumption of the proposed topology. An input of [[[a],[b]], [[c,d],[e]]] would have two conditions - that abs(ln(a/b)) < abs(ln(branch_length_equality_factor)) and that abs(ln((c+d)/e)) < abs(ln(branch_length_equality_factor)). I.e. [[[motifs contributing to branch 1],[motifs contributing to branch 2 that should be equal to branch 1]], [[motifs contributing to branch 3], [motifs contributing to branch 4, that should be equal to branch 3]], ...other conditions...]
    #One entry per topology in topo_order; each entry is [test1, test2, test3, test4].
    topo_conditions = [[[[[0],[4],[2]],[[0],[4],[5]],[[0],[4],[7]],[[0],[1],[2]],[[0],[1],[5]],[[0],[1],[7]]], [[[[4],[8]],[[2]]], [[[2],[4,12],[8,12]],[[1]]]], [3,5,7,9,11,13,6,10], [[[4],[8]], [[4,12],[2]], [[8,12],[2]], [[4,12,14],[1]], [[8,12,14],[1]]]],
                       [[[[1],[0],[2]],[[1],[0],[5]],[[1],[0],[7]],[[1],[4],[2]],[[1],[4],[5]],[[1],[4],[7]]], [[[[2],[8]],[[4]]], [[[4],[2,10],[8,10]],[[1]]]], [3,5,7,9,11,13,6,12], [[[2],[8]], [[2,10],[4]], [[8,10],[4]], [[2,10,14],[1]], [[8,10,14],[1]]]],
                       [[[[4],[0],[2]],[[4],[0],[5]],[[4],[0],[7]],[[4],[1],[2]],[[4],[1],[5]],[[4],[1],[7]]], [[[[2],[4]],[[8]]], [[[8],[2,6],[4,6]],[[1]]]], [3,5,7,9,11,13,10,12], [[[2],[4]], [[2,6],[8]], [[4,6],[8]], [[2,6,14],[1]], [[4,6,14],[1]]]],
                       [[[[0],[2],[1]],[[0],[2],[4]],[[0],[2],[7]],[[0],[5],[1]],[[0],[5],[4]],[[0],[5],[7]]], [[[[4],[8]],[[1]]], [[[1],[4,12],[8,12]],[[2]]]], [3,6,7,10,11,14,5,9], [[[4],[8]], [[4,12],[1]], [[8,12],[1]], [[4,12,13],[2]], [[8,12,13],[2]]]],
                       [[[[2],[0],[1]],[[2],[0],[4]],[[2],[0],[7]],[[2],[5],[1]],[[2],[5],[4]],[[2],[5],[7]]], [[[[1],[8]],[[4]]], [[[4],[1,9],[8,9]],[[2]]]], [3,6,7,10,11,14,5,12], [[[1],[8]], [[1,9],[4]], [[8,9],[4]], [[1,9,13],[2]], [[8,9,13],[2]]]],
                       [[[[5],[0],[1]],[[5],[0],[4]],[[5],[0],[7]],[[5],[2],[1]],[[5],[2],[4]],[[5],[2],[7]]], [[[[1],[4]],[[8]]], [[[8],[1,5],[4,5]],[[2]]]], [3,6,7,10,11,14,9,12], [[[1],[4]], [[1,5],[8]], [[4,5],[8]], [[1,5,13],[2]], [[4,5,13],[2]]]],
                       [[[[2],[1],[0]],[[2],[1],[4]],[[2],[1],[5]],[[2],[7],[0]],[[2],[7],[4]],[[2],[7],[5]]], [[[[1],[8]],[[2]]], [[[2],[1,9],[8,9]],[[4]]]], [5,6,7,12,13,14,3,10], [[[1],[8]], [[1,9],[2]], [[8,9],[2]], [[1,9,11],[4]], [[8,9,11],[4]]]],
                       [[[[1],[2],[0]],[[1],[2],[4]],[[1],[2],[5]],[[1],[7],[0]],[[1],[7],[4]],[[1],[7],[5]]], [[[[2],[8]],[[1]]], [[[1],[2,10],[8,10]],[[4]]]], [5,6,7,12,13,14,3,9], [[[2],[8]], [[2,10],[1]], [[8,10],[1]], [[2,10,11],[4]], [[8,10,11],[4]]]],
                       [[[[7],[1],[0]],[[7],[1],[4]],[[7],[1],[5]],[[7],[2],[0]],[[7],[2],[4]],[[7],[2],[5]]], [[[[1],[2]],[[8]]], [[[8],[1,3],[2,3]],[[4]]]], [5,6,7,12,13,14,9,10], [[[1],[2]], [[1,3],[8]], [[2,3],[8]], [[1,3,11],[4]], [[2,3,11],[4]]]],
                       [[[[5],[4],[0]],[[5],[4],[1]],[[5],[4],[2]],[[5],[7],[0]],[[5],[7],[1]],[[5],[7],[2]]], [[[[1],[4]],[[2]]], [[[2],[1,5],[4,5]],[[8]]]], [9,10,11,12,13,14,3,6], [[[1],[4]], [[1,5],[2]], [[4,5],[2]], [[1,5,7],[8]], [[4,5,7],[8]]]],
                       [[[[7],[4],[0]],[[7],[4],[1]],[[7],[4],[2]],[[7],[5],[0]],[[7],[5],[1]],[[7],[5],[2]]], [[[[1],[2]],[[4]]], [[[4],[1,3],[2,3]],[[8]]]], [9,10,11,12,13,14,5,6], [[[1],[2]], [[1,3],[4]], [[2,3],[4]], [[1,3,7],[8]], [[2,3,7],[8]]]],
                       [[[[4],[5],[0]],[[4],[5],[1]],[[4],[5],[2]],[[4],[7],[0]],[[4],[7],[1]],[[4],[7],[2]]], [[[[2],[4]],[[1]]], [[[1],[2,6],[4,6]],[[8]]]], [9,10,11,12,13,14,3,5], [[[2],[4]], [[2,6],[1]], [[4,6],[1]], [[2,6,7],[8]], [[4,6,7],[8]]]],
                       [[[[7],[1]],[[7],[2]],[[7],[4]],[[7],[5]],[[0],[1]],[[0],[2]],[[0],[4]],[[0],[5]]], [[[[1],[2]],[[4,12],[8,12]]], [[[4],[8]],[[1,3],[2,3]]]], [7,11,13,14,5,6,9,10], [[[1],[2]], [[4],[8]], [[1,3],[4,12]], [[1,3],[8,12]], [[2,3],[4,12]], [[2,3],[8,12]]]],
                       [[[[5],[0]],[[5],[2]],[[5],[4]],[[5],[7]],[[1],[0]],[[1],[2]],[[1],[4]],[[1],[7]]], [[[[1],[4]],[[2,10],[8,10]]], [[[2],[8]],[[1,5],[4,5]]]], [7,11,13,14,3,6,9,12], [[[1],[4]], [[2],[8]], [[1,5],[2,10]], [[1,5],[8,10]], [[4,5],[2,10]], [[4,5],[8,10]]]],
                       [[[[2],[0]],[[2],[1]],[[2],[5]],[[2],[7]],[[4],[0]],[[4],[1]],[[4],[5]],[[4],[7]]], [[[[1],[8]],[[2,6],[4,6]]], [[[2],[4]],[[1,9],[8,9]]]], [7,11,13,14,3,5,10,12], [[[1],[8]], [[2],[4]], [[1,9],[2,6]], [[1,9],[4,6]], [[8,9],[2,6]], [[8,9],[4,6]]]]]
    # Read each input file into [name, motifs, mismatch, pairwise, topology-support]
    # records, skipping chunks with too few unmasked bases or excluded chromosomes.
    chunks_list = []
    chunks_list_motifs = []
    for topo_in in topology_files_in:
        chunks_topo = []
        chunks_topo_motifs = []
        with open(topo_in, 'rb') as f_topo_in:
            for line in f_topo_in:
                split_line = line[0:-1].split('\t')
                chunk_motif = np.array(split_line[ind_column + 1:ind_column + 1 + 16], dtype = float)
                if np.sum(chunk_motif) < min_unmasked or split_line[0] in chromosomes_to_exclude:
                    pass
                else:
                    chunks_topo.append(['_'.join(split_line[0:ind_column + 1]), np.array(split_line[ind_column + 1:ind_column + 1 + 16], dtype = float), np.zeros(16, dtype = float), np.zeros(10, dtype = float), np.zeros(15, dtype = bool)])
                    chunks_topo_motifs.append(chunk_motif)
        chunks_topo_motifs = np.array(chunks_topo_motifs)
        chunks_list.append(chunks_topo)
        chunks_list_motifs.append(chunks_topo_motifs)
    #Fill out the mismatches
    # (normalise raw motif counts to per-base proportions)
    for D_idx in range(len(chunks_list)):
        num_chunks = len(chunks_list[D_idx])
        for chunk_idx in range(num_chunks):
            bases = float(np.sum(chunks_list[D_idx][chunk_idx][1]))
            chunks_list[D_idx][chunk_idx][2] = chunks_list[D_idx][chunk_idx][1] / bases
    #Fill out the pairwise differences
    # ('anc' pairs count motifs where the individual differs from the ancestral 0)
    for D_idx in range(len(chunks_list)):
        num_chunks = len(chunks_list[D_idx])
        for chunk_idx in range(num_chunks):
            for pair_idx in range(len(pairwise_keys)):
                pair = pairwise_keys[pair_idx]
                tot_mismatch = 0.0
                if pair[1] == 'anc':
                    for mis_idx in range(16):
                        if mismatch_keys[mis_idx][pair[0]] == '1':
                            tot_mismatch += chunks_list[D_idx][chunk_idx][2][mis_idx]
                    chunks_list[D_idx][chunk_idx][3][pair_idx] = tot_mismatch
                else:
                    for mis_idx in range(16):
                        if mismatch_keys[mis_idx][pair[0]] != mismatch_keys[mis_idx][pair[1]]:
                            tot_mismatch += chunks_list[D_idx][chunk_idx][2][mis_idx]
                    chunks_list[D_idx][chunk_idx][3][pair_idx] = tot_mismatch
    # Run each of the four consistency tests separately and report its results.
    topo_conditions_to_test_list = [['mismatch_order'], ['branch_length_order'], ['inconsistency_threshold'], ['branch_length_equality']]
    for topo_conditions_to_test in topo_conditions_to_test_list:
        #Fill out the topology consistencies!
        print topo_conditions_to_test
        for D_idx in range(len(chunks_list)):
            print '\t'.join(['set'] + topo_order + ["", "total"])
            num_chunks = len(chunks_list[D_idx])
            for chunk_idx in range(num_chunks):
                for topo_idx in range(len(topo_conditions)):
                    topo_condition = topo_conditions[topo_idx]
                    keep_topo = True
                    if 'mismatch_order' in topo_conditions_to_test:
                        # Test 1: pairwise distances must increase in the required order.
                        for condition in topo_condition[0]:
                            vals = []
                            for case in condition:
                                vals.append(np.average([chunks_list[D_idx][chunk_idx][3][i] for i in case]))
                            if np.sum(np.diff(vals) > 0) != len(vals) - 1:
                                keep_topo = False
                                break
                    if 'branch_length_order' in topo_conditions_to_test and keep_topo == True:
                        #Continue checking to confirm the branch lengths are consistent
                        for condition in topo_condition[1]:
                            vals = []
                            for case in condition:
                                #Average these vals
                                to_av = []
                                for to_sum in case:
                                    to_av.append(np.sum([chunks_list[D_idx][chunk_idx][2][i] for i in to_sum]))
                                vals.append(np.average(to_av))
                            if np.sum(np.diff(vals) > 0) != len(vals) - 1:
                                keep_topo = False
                                break
                    if 'inconsistency_threshold' in topo_conditions_to_test and keep_topo == True:
                        # Test 3: too large a share of topology-inconsistent motifs rejects it.
                        inconsistent_motifs = np.array(topo_condition[2])
                        inconsistent = np.sum(chunks_list[D_idx][chunk_idx][2][inconsistent_motifs])
                        consistent = np.sum(chunks_list[D_idx][chunk_idx][2][np.setdiff1d(np.arange(1,15), inconsistent_motifs)])
                        if inconsistent > consistent * inconsistency_threshold:
                            keep_topo = False
                    if 'branch_length_equality' in topo_conditions_to_test and keep_topo == True:
                        # Test 4: branches that should be equal must be within the allowed ratio.
                        for condition in topo_condition[3]:
                            vals = []
                            for to_sum in condition:
                                vals.append(np.sum([chunks_list[D_idx][chunk_idx][2][i] for i in to_sum]))
                            if abs(np.log(vals[0] / float(vals[1]))) > abs(np.log(branch_length_equality_factor)):
                                keep_topo = False
                                break
                    if keep_topo == True:
                        #Topology is valid!
                        chunks_list[D_idx][chunk_idx][4][topo_idx] = True
                    else:
                        chunks_list[D_idx][chunk_idx][4][topo_idx] = False
        print "Topology counts, allowing overlapping cases (%s):" %(','.join(topo_conditions_to_test))
        topo_consistency_list = []
        for i in range(len(chunks_list)):
            topo_consistency = np.array([chunks_list[i][j][4] for j in range(len(chunks_list[i]))])
            topo_consistency_list.append(topo_consistency)
            print '\t'.join([set_names[i]] + ['%.3f' %(j) for j in np.sum(topo_consistency,0) / float(np.sum(np.sum(topo_consistency,1)>0))]) + '\t||\t%d' %(np.sum(np.sum(topo_consistency,1)>0))
        print "Topology counts, not allowing overlapping cases (%s):" %(','.join(topo_conditions_to_test))
        for i in range(len(chunks_list)):
            print '\t'.join([set_names[i]] + ['%.3f' %(j) for j in np.sum(topo_consistency_list[i][np.sum(topo_consistency_list[i],1)==1],0) / float(len(topo_consistency_list[i][np.sum(topo_consistency_list[i],1)==1]))]) + '\t||\t%d' %(len(topo_consistency_list[i][np.sum(topo_consistency_list[i],1)==1]))
        print '\t'.join(["set"] + ["0000","0001","0010","0011","0100","0101","0110","0111","1000","1001","1010","1011","1100","1101","1110","1111","","total"])
        # Optional focused report: median motif proportions for chunks consistent
        # with specific topologies of interest.
        for tops in topology_focus:
            print "Median x1e4 divergence of with evidence of topology %s" %(','.join([topo_order[top] for top in tops]))
            for i in range(len(chunks_list)):
                valid_mask = np.sum([topo_consistency_list[i][::,top] == True for top in tops], axis = 0, dtype = bool)
                motifs = np.array([chunks_list[i][j][2] for j in range(len(chunks_list[i]))])[valid_mask]
                print '\t'.join([set_names[i]] + ['%.2f' %(j * 1e4) for j in np.median(motifs, 0)]) + '\t||\t%.2f|%d' %(np.sum(valid_mask) / float(len(valid_mask)), np.sum(valid_mask))
        #print '\t'.join(['set'] + ["ChuDen","ChuNea","ChuHum","ChuAnc","DenNea","DenHum","DenAnc","NeaHum","NeaAnc","HumAnc","","total"])
        print '\n'
    return chunks_list, topo_consistency_list
|
#https://learnmeabitcoin.com/guide/target
# Maximum (easiest) Bitcoin target: 0xffff * 2**208, the genesis-block target.
CIBLE_MAX = (2**16 - 1) << 208
def cibleToDifficulte(cible):
    """Return the mining difficulty for *cible*: max_target / target."""
    return CIBLE_MAX / cible
#initial target (Block 0)
target0 = "00000000ffff0000000000000000000000000000000000000000000000000000"
#current target (Block 614,308)
target614308 = "000000000000000000130c780000000000000000000000000000000000000000"
#should be 1
# The genesis target equals the maximum target, so its difficulty is 1 by definition.
diff0 = cibleToDifficulte(int(target0, 16))
print(diff0)
#current difficulties
diff614308 = cibleToDifficulte(int(target614308, 16))
print(diff614308)
|
import os
from ctypes import windll
import pygame
import pymunk
class Window:
    """Game window: pygame screen, fonts and HUD plus a walled-in pymunk space.

    Windows-only: queries the true (DPI-aware) monitor resolution through
    user32. Bug fix in update(): the message queue was mutated while being
    iterated, which skipped every other queued message.
    """

    def __init__(self, fullscreen=True):
        """Create the window, sizing it from the real monitor resolution.

        Raises Exception if the monitor is smaller than 1920x1080.
        """
        pygame.init()
        screen_size = pygame.display.Info()
        self.__width = screen_size.current_w
        self.__height = screen_size.current_h
        # Ask Windows for the physical resolution, bypassing DPI scaling.
        windll.user32.SetProcessDPIAware()
        true_res = (windll.user32.GetSystemMetrics(0), windll.user32.GetSystemMetrics(1))
        if screen_size.current_w < 1920 and screen_size.current_h < 1080:
            raise Exception("Kauf dir mal nen besseren Monitor, alles kleiner als 1080p ist eine Zumutung.")
        elif screen_size.current_w != true_res[0] and screen_size.current_h != true_res[1]:
            # pygame reported a DPI-scaled size; trust the user32 numbers.
            self.__width = true_res[0]
            self.__height = true_res[1]
        elif screen_size.current_w > 1920 and screen_size.current_h > 1080:
            # Larger than 1080p: run windowed at a fixed 1920x1080.
            self.__width = 1920
            self.__height = 1080
            fullscreen = False
        if not fullscreen:
            self.__screen = pygame.display.set_mode((self.__width, self.__height))
            pygame.display.set_caption("Self driving car")
            pygame.display.set_icon(pygame.image.load(os.path.join("pictures", "icon.png")))
        else:
            self.__screen = pygame.display.set_mode((self.__width, self.__height), pygame.FULLSCREEN)
            pygame.mouse.set_visible(False)
        self.__center = (self.__width // 2, self.__height // 2)
        self.__clock = pygame.time.Clock()
        self.__space = pymunk.Space()
        self.__space.damping = 0.00001
        # Four static segments just outside the visible area keep bodies on screen.
        static = [
            pymunk.Segment(self.__space.static_body, (-1, -1), (-1, self.__height + 1), 0),
            pymunk.Segment(self.__space.static_body, (-1, self.__height + 1), (self.__width + 1, self.__height + 1), 0),
            pymunk.Segment(self.__space.static_body, (self.__width + 1, self.__height + 1), (self.__width + 1, -1), 0),
            pymunk.Segment(self.__space.static_body, (-1, -1), (self.__width + 1, -1), 0)
        ]
        self.__space.add(static)
        self.__font_fps = pygame.font.SysFont("Consolas", 25)
        self.__font_message = pygame.font.SysFont("Calibri", 50)
        # Messages queued via message(); drawn and flushed on the next update().
        self.__messages = []
        help_font = pygame.font.SysFont("Consolas", 25)
        help_text = ["Arrows to move",
                     "E to edit track",
                     "R to reset",
                     "D to show sensors",
                     "F to toggle player AI",
                     "G to spawn new AI car",
                     "C to toggle collisions",
                     "H to toggle help"]
        help_renders = [help_font.render(line, True, pygame.Color("white")) for line in help_text]
        self.__help = [(line, (1600, y)) for line, y in zip(help_renders, range(25, 35 * len(help_text), 35))]
        self.__display_help = True

    def __del__(self):
        pygame.quit()

    def update(self):
        """Draw the HUD and queued messages, step physics one 1/60 s tick, flip."""
        self.__screen.blit(self.__font_fps.render("{:.0f}".format(self.__clock.get_fps()), True, pygame.Color("white")), (15, 15))
        # Bug fix: the original removed entries from __messages while iterating
        # it, which skipped every other message; draw them all, then clear.
        for rendered, pos in self.__messages:
            self.__screen.blit(rendered, pos)
        self.__messages.clear()
        if self.__display_help:
            self.__screen.blits(self.__help)
        self.__space.step(1/60)
        self.__clock.tick(60)
        pygame.display.flip()

    def message(self, text, pos, color="black", background=None):
        """Queue *text* centred at *pos* for the next update()."""
        message = self.__font_message.render(text, True, pygame.Color(color), pygame.Color(background) if background is not None else None)
        rect = message.get_rect(center=pos)
        self.__messages.append((message, rect))

    def instant_message(self, text, pos, color="black", background=None):
        """Render *text* immediately instead of waiting for the next update()."""
        self.message(text, pos, color, background)
        message, rect = self.__messages.pop()
        self.__screen.blit(message, rect)
        pygame.display.flip()

    def toggle_help(self):
        """Show/hide the key-binding help overlay."""
        self.__display_help = not self.__display_help

    @property
    def screen(self):
        return self.__screen

    @property
    def space(self):
        return self.__space

    @property
    def width(self):
        return self.__width

    @property
    def height(self):
        return self.__height

    @property
    def center(self):
        return self.__center
|
import bisect
import time
from collections import deque
class Scheduler(object):
    """A deadline-ordered task queue (timer companion for an event loop)."""

    @classmethod
    def instance(cls):
        """Return the process-wide singleton, creating it on first use.

        NOTE: not thread-safe; call from a single thread.
        """
        if not hasattr(cls, "_instance"):
            cls._instance = cls()
        return cls._instance

    def __init__(self):
        # Tasks are kept sorted by absolute deadline, soonest first.
        self.tasks = deque()
        self.running = False

    def add_task(self, deadline, callback):
        """Schedule *callback* to fire *deadline* seconds from now, keeping order."""
        bisect.insort(self.tasks, Task(deadline, callback))
        self.running = True

    def cancel_task(self):
        """Cancel scheduler: TODO"""
        pass
class Task(object):
    """A scheduled callback, ordered by its absolute deadline."""

    __slots__ = ["deadline", "callback"]

    def __init__(self, deadline, callback):
        # *deadline* is relative (seconds from now); store the absolute time.
        self.deadline = deadline + time.time()
        self.callback = callback

    # Ordering considers deadlines only; callbacks never take part.
    def __eq__(self, other):
        return self.deadline == other.deadline

    def __lt__(self, other):
        return self.deadline < other.deadline

    def __le__(self, other):
        # a <= b  <=>  not (b < a), valid for totally ordered floats.
        return not other.deadline < self.deadline
|
# _*_coding:utf-8_*_
# Author:Topaz
import scrapy
import hashlib
from scrapy.selector import Selector
from scrapy.http import Request
from scrapy.http.cookies import CookieJar
import json
from selenium import webdriver
import os
class ZhiHuSpider(scrapy.Spider):
    """Log in to zhihu.com with email/password and open a question page.

    Flow: GET the home page to collect cookies -> POST the email login
    (reusing those cookies) -> GET the home page -> GET one question page
    and inspect its paging info.
    """
    name = 'zhihu'
    # Bug fix: scrapy reads `allowed_domains`; the original `allow_domains`
    # attribute was silently ignored, so off-site requests were never filtered.
    allowed_domains = ["zhihu.com"]
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36'}
    cookie_dict = {}

    def start_requests(self):
        """Entry point: fetch the home page first so login can reuse its cookies."""
        start_url = 'https://www.zhihu.com/'
        yield Request(url=start_url,
                      headers=self.headers,
                      callback=self.login,)

    def login(self, response):
        """Harvest session cookies from the landing page, then POST the login."""
        my_cookies = CookieJar()
        my_cookies.extract_cookies(response, response.request)
        # Flatten the CookieJar's nested domain/path/name structure into a
        # plain name -> value dict for the login request.
        for k, v in my_cookies._cookies.items():
            for i, j in v.items():
                for m, n in j.items():
                    self.cookie_dict[m] = n.value
        # SECURITY(review): credentials are hard-coded in the request body;
        # move them into settings or environment variables.
        yield Request(  # https://www.rddoc.com/doc/Scrapy-1.3/topics/request-response/
            url='https://www.zhihu.com/login/email',
            method='POST',
            headers=self.headers,
            body='email=18310703270&password=nihao123@',
            cookies=self.cookie_dict,
            callback=self.home_page,
        )

    def home_page(self, response):
        """After login, jump straight to the target question page."""
        yield Request(
            url='https://www.zhihu.com/question/28853910',
            method='GET',
            headers=self.headers,
            callback=self.get_question,
        )

    def get_question(self, response):
        """Inspect the question page for paging information (exploratory code)."""
        results = Selector(response).xpath('//body')
        print(type(results))
        # NOTE(review): SelectorList.get() takes a *default* value, not a key,
        # so these calls return the passed-in string whenever the selection is
        # empty — this branch looks like leftover API experimentation; verify
        # against the intended JSON paging schema before relying on it.
        if results.get('is_end') == True:
            print("true")
            next_page = results.get('next')
            print(next_page)
        else:
            print("false")
            print(type(results))
            next_page = results.get('next')
            print(type(next_page), json.loads(next_page))
|
import requests
from random import randint
from time import sleep
def get_user_detail(user_id):
    """Return the stored 'detail' record for *user_id* from the REST service."""
    response = requests.get('http://18.219.29.53:5000/users/' + user_id + '/detail')
    return response.json()['detail']
def step1_login(user_id):
login_address = 'http://18.219.29.53:5000/users/' + user_id + '/login'
r = requests.post(login_address)
print "Login response: ", r.text
# From device
def step2_register_session(user_id):
register_address = 'http://18.219.29.53:5000/register_session'
json_body = {'id': user_id}
r = requests.post(register_address, json=json_body)
print "Register Session Response :", r.text
def step3_active_session(user_id):
active_session_address = 'http://18.219.29.53:5000/active_session/' + user_id
r = requests.post(active_session_address)
print "Active session response: " + r.text
def step4_update_user_heat_rate(user_id):
update_heart_rate_address = 'http://18.219.29.53:5000/active_user/' + user_id + '/heart_rate'
user_detail = get_user_detail(user_id)
user_heart_rate = user_detail['max_hr']
floor = user_detail['cool_zone'][0]
ceil = user_detail['exercise_zone'][1]
print 'Mock start' + '.' * 30
for i in range(361):
mock_hr = randint(floor, ceil)
print 'max_hr = ' + str(user_heart_rate) + ' mocked_hr = ' + str(mock_hr)
body = {'id': user_id, 'heart_rate': mock_hr}
r = requests.post(update_heart_rate_address,json=body)
print 'post iter: ' + str(i) + ' ' + '*' * 10 + ' ' + r.json()['message']
sleep(0.2)
print 'Mock finished' + '.' * 30
def deactive_user(user_id):
deactive_address = 'http://18.219.29.53:5000/active_user/' + user_id + '/deactive'
r = requests.post(deactive_address)
print r.json()
def start_mocking_procedure(user_id):
    """Run the full mock workflow: login, register, activate, stream, deactivate."""
    for step in (step1_login, step2_register_session, step3_active_session,
                 step4_update_user_heat_rate, deactive_user):
        step(user_id)
start_mocking_procedure('001')
from typing import NoReturn
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from collections import Counter
from errors import UnknownGraph
from main import ALPHA
def show_frequency(single_freq: dict, show: bool = True) -> NoReturn:
    """Show the graph of single frequency dictionary.

    Args:
        single_freq (dict): Single letter frequencies of text.
        show (bool): Displays graph.

    Returns:
        (NoReturn)
    """
    # x axis: letters in alphabetical order; y axis: their frequencies.
    letters = sorted(single_freq)
    counts = [single_freq[letter] for letter in letters]
    fig, ax = plt.subplots(1, 1)
    ax.scatter(letters, counts)
    ax.set_xlabel("Letter")
    ax.set_ylabel("Frequency")
    if show:
        plt.show()
def show_frequency_words(text: str, show: bool = True) -> NoReturn:
    """Show a scatter plot of the 15 most common words in *text*.

    Args:
        text (str): String text.
        show (bool): Displays graph.

    Returns:
        NoReturn
    """
    text = text.upper()
    word_count = Counter(
        text.split()
    )  # use collections counter to count words and create dictionary
    # most_common keeps each word paired with its own count; the previous
    # implementation sorted keys and values independently, which pairs a word
    # with the wrong count whenever tie-breaking diverges between the sorts.
    top = word_count.most_common(15)
    x = [word for word, _ in top]
    y = [count for _, count in top]
    fig, ax = plt.subplots(1, 1)  # create scatter plot and label axis
    ax.scatter(x, y)
    ax.set_xlabel("Word")
    ax.set_ylabel("Frequency")
    if show:
        plt.show()  # show graph
def show_heat_map(bigram: np.ndarray, show: bool = True) -> NoReturn:
    """Show the heat map of a bigram matrix.

    Args:
        bigram (np.ndarray): Bigram frequencies of text.
        show (bool): Displays graph.

    Returns:
        NoReturn
    """
    # Seaborn heat map with the alphabet labelling both axes.
    ax = sns.heatmap(bigram, xticklabels=ALPHA, yticklabels=ALPHA)
    plt.yticks(rotation=0)  # keep y tick labels horizontal
    ax.invert_yaxis()  # alphabet increases from the bottom
    if show:
        plt.show()
def save_graph(
    file: str, single_freq: dict = None, bigram: np.ndarray = None
) -> NoReturn:
    """Save a frequency or heat map graph.

    Args:
        file (str): Desired file name.
        single_freq (dict): Single letter frequencies of text.
        bigram (np.ndarray): Bigram frequencies of text.

    Returns:
        NoReturn
    """
    # Build the requested plot without displaying it, then write it to disk.
    if single_freq:
        show_frequency(single_freq, False)
    elif np.any(bigram):
        show_heat_map(bigram, False)
    else:
        # Neither input was supplied — nothing to draw.
        raise UnknownGraph
    plt.savefig(f"img/{file}", dpi=300)  # 300 pixels per inch
|
from django.urls import path
from . import views
from django.contrib.auth import views as auth_views
app_name = 'accounts'

# Account-related routes. Every path ends with a trailing slash so Django's
# APPEND_SLASH redirect behaviour is consistent across the app.
urlpatterns = [
    path('', views.home, name='home'),
    path('signup/', views.signup, name='signup'),
    path('login/', auth_views.LoginView.as_view(
        template_name='accounts/signin.html', extra_context={'login_page': "active"}), name='login'),
    path('logout/', auth_views.LogoutView.as_view(template_name='accounts/logout.html'), name='logout'),
    path('account-info/', views.account_info, name='account-info'),
    # Fixed: was 'account-member' with no trailing slash, inconsistent with the
    # other routes (POSTs to the unslashed form are not redirected by APPEND_SLASH).
    path('account-member/', views.confirm_member, name='confirm-member'),
    path('account-spots/', views.account_spots, name='account-spots'),
    path('account-payments/', views.account_payments, name='account-payments'),
]
# coding: utf-8
"""
Lilt REST API
The Lilt REST API enables programmatic access to the full-range of Lilt backend services including: * Training of and translating with interactive, adaptive machine translation * Large-scale translation memory * The Lexicon (a large-scale termbase) * Programmatic control of the Lilt CAT environment * Translation memory synchronization Requests and responses are in JSON format. The REST API only responds to HTTPS / SSL requests. ## Authentication Requests are authenticated via REST API key, which requires the Business plan. Requests are authenticated using [HTTP Basic Auth](https://en.wikipedia.org/wiki/Basic_access_authentication). Add your REST API key as both the `username` and `password`. For development, you may also pass the REST API key via the `key` query parameter. This is less secure than HTTP Basic Auth, and is not recommended for production use. # noqa: E501
The version of the OpenAPI document: v2.0
Contact: support@lilt.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import lilt
from lilt.models.qa_rule_matches import QARuleMatches # noqa: E501
from lilt.rest import ApiException
class TestQARuleMatches(unittest.TestCase):
    """QARuleMatches unit test stubs.

    NOTE: this module was produced by openapi-generator (see file header);
    prefer regenerating over hand-editing.
    """
    def setUp(self):
        # No per-test fixtures are required for these stubs.
        pass
    def tearDown(self):
        pass
    def make_instance(self, include_optional):
        """Build a QARuleMatches instance for testing.

        include_optional is a boolean: when False only required
        params are included, when True both required and
        optional params are included.
        """
        # model = lilt.models.qa_rule_matches.QARuleMatches() # noqa: E501
        if include_optional :
            # Fully-populated example mirroring the API's documented payload.
            return QARuleMatches(
                matches = [
                    lilt.models.qa_rule_matches_matches.QARuleMatches_matches(
                        context = lilt.models.qa_rule_matches_context.QARuleMatches_context(
                            length = 7,
                            offset = 19,
                            text = 'This segment has a speling mistake', ),
                        length = 7,
                        message = 'Possible spelling mistake found',
                        offset = 19,
                        replacements = [],
                        rule = lilt.models.qa_rule_matches_rule.QARuleMatches_rule(
                            category = lilt.models.qa_rule_matches_rule_category.QARuleMatches_rule_category(
                                id = 'TYPOS',
                                name = 'Possible Typo', ),
                            description = 'Possible spelling mistake',
                            id = 'MORFOLOGIK_RULE_EN_US',
                            issue_type = 'misspelling',
                            sub_id = '0',
                            urls = [], ),
                        short_message = 'Spelling mistake', )
                ]
            )
        else :
            return QARuleMatches(
            )
    def testQARuleMatches(self):
        """Test QARuleMatches"""
        # Construction calls are left disabled by the generator; enable once
        # the model classes can be instantiated in this environment.
        # inst_req_only = self.make_instance(include_optional=False)
        # inst_req_and_optional = self.make_instance(include_optional=True)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models
import os
import numpy as np
import data_helper_habitat as dhh
class MapNet(nn.Module):
# Implementation of MapNet and all its core components following the paper:
# Henriques and Vedaldi, MapNet: An Allocentric Spatial Memory for Mapping Environments, CVPR 2018
def __init__(self, par, update_type, input_flags):
    """Construct all MapNet submodules.

    Args:
        par: hyper-parameter object (map/grid sizes, embedding dims, loss type, ...).
        update_type: map-update mechanism, one of 'lstm', 'fc', 'avg'.
            NOTE(review): self.update_type is set from par.update_type below,
            not from this argument — confirm which one is authoritative.
        input_flags: (with_feat, with_sseg, with_dets, use_raw_sseg,
            use_raw_dets, with_depth) booleans selecting input modalities.
    """
    super(MapNet, self).__init__()
    (with_feat, with_sseg, with_dets, use_raw_sseg, use_raw_dets, with_depth) = input_flags
    # Copy geometry / embedding configuration from par.
    self.crop_size = par.crop_size
    self.global_map_dim = par.global_map_dim
    self.observation_dim = par.observation_dim
    self.cell_size = par.cell_size
    self.grid_channels = par.grid_channels
    self.map_embedding = par.map_embedding
    self.sseg_labels = par.sseg_labels
    self.dets_nClasses = par.dets_nClasses
    self.orientations = par.orientations
    self.pad = par.pad
    self.loss_type = par.loss_type
    self.update_type = par.update_type
    if with_feat:
        # RGB branch: truncated ResNet50 + small CNN head producing the img embedding.
        self.resnet_feat_dim = 256 #512
        fnet = models.resnet50(pretrained=True)
        self.ResNet50Truncated = nn.Sequential(*list(fnet.children())[:-5]) # -4 up to layer2 (conv3_x), -5 up to layer1 (conv2_x)
        # Define the small network that outputs the final grid with the embedding
        self.small_cnn_img = nn.Sequential(
            nn.Conv2d(in_channels=self.resnet_feat_dim, out_channels=64, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=64, out_channels=par.img_embedding, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True)
        )
    if with_depth:
        # depth feature extractor (ResNet50 with conv1 adapted to 1 input channel)
        self.resnet_feat_dim = 256 #512
        fnet_depth = models.resnet50(pretrained=True)
        fnet_depth.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3,bias=False)
        self.ResNet50Truncated_depth = nn.Sequential(*list(fnet_depth.children())[:-5])
        self.small_cnn_depth = nn.Sequential(
            nn.Conv2d(in_channels=self.resnet_feat_dim, out_channels=64, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=64, out_channels=par.depth_embedding, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True)
        )
    if with_sseg and not(use_raw_sseg):
        # Define the small network that outputs the semantic label grid
        self.small_cnn_sseg = nn.Sequential( # 40 classes
            nn.Conv2d(in_channels=self.sseg_labels, out_channels=64, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=64, out_channels=par.sseg_embedding, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True)
        )
    '''
    if with_dets and not(use_raw_dets):
        # Define the small network that outputs the detection grid embedding
        self.small_cnn_det = nn.Sequential( # 91 classes
            nn.Conv2d(in_channels=self.dets_nClasses, out_channels=64, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=64, out_channels=par.dets_embedding, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True)
        )
    '''
    # After the grids are concatenated to a single grid, pass it through a couple of convolutions to extract common features between the embeddings
    '''
    self.embedding_net = nn.Sequential(
        nn.Conv2d(in_channels=self.grid_channels, out_channels=64, kernel_size=3, stride=1, padding=1),
        nn.ReLU(inplace=True),
        nn.Conv2d(in_channels=64, out_channels=self.map_embedding, kernel_size=3, stride=1, padding=1),
        nn.ReLU(inplace=True),
        nn.Conv2d(in_channels=self.map_embedding, out_channels=self.map_embedding, kernel_size=3, stride=1, padding=1),
        nn.ReLU(inplace=True)
    )
    '''
    # Choose how to update the map
    if update_type=="lstm":
        # LSTM to update the map with the current observation
        self.lstm = nn.LSTM(input_size=self.map_embedding, hidden_size=self.map_embedding, num_layers=1)
    elif update_type=="fc":
        # Use a fully connected layer to update the embedding at every grid by combining the current
        # embedding and the previous embedding at every grid location
        self.update_fc = nn.Linear(self.map_embedding*2, self.map_embedding)
    else: # case 'avg'
        # Do average pooling over the embeddings
        self.update_avg = nn.AvgPool1d(kernel_size=2)
    # Choose loss
    if self.loss_type=="BCE":
        self.loss_BCE = nn.BCELoss(reduction="sum") # Binary cross entropy loss
    else:
        self.loss_CEL = nn.CrossEntropyLoss(ignore_index=-1)
def build_loss(self, p_pred, p_gt):
    """Localisation loss between predicted and ground-truth position beliefs.

    Both tensors are batch x seq x orientations x h x w. The first frame of
    each sequence is skipped because its pose is fixed by construction.
    Returns a scalar: BCE summed then averaged per example, or cross-entropy
    over the flattened orientations*h*w label space.
    """
    batch_size = p_pred.shape[0]
    seq_len = p_pred.shape[1]
    # Remove the first frame in the sequence, since it is always constant
    p_pred = p_pred[:,1:,:,:,:]
    p_gt = p_gt[:,1:,:,:,:]
    if self.loss_type=="BCE":
        p_pred = p_pred.contiguous().view(batch_size*(seq_len-1), self.orientations, self.global_map_dim[0], self.global_map_dim[1])
        p_gt = p_gt.contiguous().view(batch_size*(seq_len-1), self.orientations, self.global_map_dim[0], self.global_map_dim[1])
        loss = self.loss_BCE(p_pred, p_gt)
        loss /= p_pred.shape[0] # assuming reduction="sum"
    else:
        # Both p_pred and p_gt are b x q x h x w x r
        # Need to convert p_pred to N x C, p_gt to N
        p_pred = p_pred.contiguous().view(batch_size*(seq_len-1), self.orientations*self.global_map_dim[0]*self.global_map_dim[1]) # N x C
        p_gt = p_gt.contiguous().view(batch_size*(seq_len-1), self.orientations*self.global_map_dim[0]*self.global_map_dim[1])
        # For each example, get the index of p_gt for which it is 1. It should be unique.
        # When the gt is outside of the map, the p_gt example is all zeroes
        lbls = torch.zeros(p_gt.shape[0]).long().cuda()
        for i in range(p_gt.shape[0]):
            ind = torch.nonzero(p_gt[i,:], as_tuple=False) # label that signifies the ground-truth position and orienation
            if (ind.nelement()==0): # if ind is empty (p_gt is empty), then ignore this example's index in t
                lbls[i] = -1 # ignored by CrossEntropyLoss(ignore_index=-1)
            else:
                lbls[i] = ind[0][0]
        loss = self.loss_CEL(p_pred, lbls) # this does the log_softmax inside the loss
    return loss
def extract_img_feat(self, img_data):
img_feat = self.ResNet50Truncated(img_data)
# Resize the features to the image/depth resolution
img_feat = F.interpolate(img_feat, size=(self.crop_size[1], self.crop_size[0]), mode='nearest')
return img_feat
def extract_depth_feat(self, depth_data):
depth_feat = self.ResNet50Truncated_depth(depth_data)
depth_feat = F.interpolate(depth_feat, size=(self.crop_size[1], self.crop_size[0]), mode='nearest')
return depth_feat
def init_p(self, batch_size):
    """Initial position belief: all mass at the map centre, orientation 0."""
    centre_row = int(self.global_map_dim[0]/2.0)
    centre_col = int(self.global_map_dim[1]/2.0)
    p0 = torch.zeros(batch_size, self.orientations,
                     self.global_map_dim[0], self.global_map_dim[1],
                     dtype=torch.float32)
    p0[:, 0, centre_row, centre_col] = 1  # e.g. cell (14,14) for a 29x29 map
    return p0.cuda()
def forward_single_step(self, local_info, t, input_flags, map_previous=None, p_given=None, update_type="lstm"):
    # Forward pass of mapNet when the episode is not known before hand (i.e. online training)
    # Runs MapNet for a single step.
    # local_info: (img, pixel coords, local 3D points, semantic seg, depth) for one timestep;
    # t: timestep index (t==0 initialises the map at the centre pose);
    # p_given: externally supplied position belief (skips localisation).
    # Returns (position belief, updated map).
    (img_data, points2D, local3D, sseg, depth_data) = local_info
    batch_size = img_data.shape[0]
    with_feat, with_depth = input_flags[0], input_flags[5]
    if with_feat:
        img_feat = self.extract_img_feat(img_data)
    else:
        # placeholder, not used later
        img_feat = torch.zeros(batch_size, 1, self.crop_size[1], self.crop_size[0])
    if with_depth:
        depth_feat = self.extract_depth_feat(depth_data)
    else:
        depth_feat = torch.zeros(batch_size, 1, self.crop_size[1], self.crop_size[0])
    # Follow the groundProjection() but now do only for batch_size
    grid = torch.zeros((batch_size, self.grid_channels, self.observation_dim[0], self.observation_dim[1]), dtype=torch.float32).cuda()
    for b in range(batch_size):
        points2D_step = points2D[b]
        local3D_step = local3D[b]
        img_feat_step = img_feat[b,:,:,:].unsqueeze(0)
        sseg_step = sseg[b,:,:,:].unsqueeze(0)
        depth_feat_step = depth_feat[b,:,:,:].unsqueeze(0)
        grid_step = self.groundProjectionStep(img_feat=img_feat_step, points2D=points2D_step,
            local3D=local3D_step, sseg=sseg_step, depth_feat=depth_feat_step, input_flags=input_flags)
        grid[b,:,:,:] = grid_step
    #grid = self.embedding_net(grid)
    rotation_stack = self.rotational_sampler(grid=grid)
    if t==0:
        # Case of first step: the registered observation IS the first map.
        p_ = self.init_p(batch_size=batch_size).clone()
        map_next = self.register_observation(rotation_stack=rotation_stack, p=p_, batch_size=batch_size)
    else:
        if p_given is None:
            # Localise the current observation against the previous map.
            p_ = self.position_prediction(rotation_stack=rotation_stack, map_previous=map_previous, batch_size=batch_size)
        else:
            p_ = p_given
        reg_obsv = self.register_observation(rotation_stack=rotation_stack, p=p_, batch_size=batch_size)
        map_next = self.update_map(reg_obsv, map_previous, batch_size=batch_size, update_type=update_type)
    return p_, map_next
def forward(self, local_info, update_type, input_flags, p_gt=None):
    # Runs MapNet for an entire episode.
    # Returns per-step position beliefs (b x seq x r x h x w) and maps
    # (b x seq x emb x h x w). If p_gt is given, the ground-truth pose is
    # used to register observations while p_ is still produced for the loss.
    (img_data, points2D, local3D, sseg, depth_data) = local_info
    batch_size = img_data.shape[0]
    seq_len = img_data.shape[1]
    # Extract the img features
    with_feat, with_depth = input_flags[0], input_flags[5]
    if with_feat:
        img_data = img_data.view(batch_size*seq_len, 3, self.crop_size[1], self.crop_size[0])
        img_feat = self.extract_img_feat(img_data)
        img_feat = img_feat.view(batch_size, seq_len, self.resnet_feat_dim, self.crop_size[1], self.crop_size[0])
    else:
        # placeholder, not used later
        img_feat = torch.zeros(batch_size, seq_len, 1, self.crop_size[1], self.crop_size[0])
    if with_depth:
        # depth features
        depth_data = depth_data.view(batch_size*seq_len, 1, self.crop_size[1], self.crop_size[0])
        depth_feat = self.extract_depth_feat(depth_data)
        depth_feat = depth_feat.view(batch_size, seq_len, self.resnet_feat_dim, self.crop_size[1], self.crop_size[0])
    else:
        depth_feat = torch.zeros(batch_size, seq_len, 1, self.crop_size[1], self.crop_size[0])
    # Project the img features on a ground grid
    grid = self.groundProjection(img_feat_all=img_feat, points2D_all=points2D, local3D_all=local3D,
        sseg_all=sseg, depths_all=depth_feat, batch_size=batch_size, seq_len=seq_len, input_flags=input_flags)
    # Pass the combined grid through a small network to extract common embedding features
    #grid = self.embedding_net(grid.view(batch_size*seq_len, self.grid_channels, self.observation_dim[0], self.observation_dim[1]))
    #grid = grid.view(batch_size, seq_len, self.map_embedding, self.observation_dim[0], self.observation_dim[1])
    # Rotate the grid's feature channels to obtain the rotational stack
    grid_packed = grid.view(batch_size*seq_len, self.map_embedding, self.observation_dim[0], self.observation_dim[1])
    rotation_stack_packed = self.rotational_sampler(grid=grid_packed)
    rotation_stack = rotation_stack_packed.view(batch_size, seq_len, self.map_embedding, self.orientations, self.observation_dim[0], self.observation_dim[1])
    # The next steps need to be carried out in sequence as p_ depends on each step's previous map.
    p_pred = torch.zeros((batch_size, seq_len, self.orientations, self.global_map_dim[0], self.global_map_dim[1]), dtype=torch.float32).cuda()
    #p_pred = torch.tensor(p_pred, dtype=torch.float32).cuda()
    map_pred = torch.zeros((batch_size, seq_len, self.map_embedding, self.global_map_dim[0], self.global_map_dim[1]), dtype=torch.float32).cuda()
    #map_pred = torch.tensor(map_pred, dtype=torch.float32).cuda()
    for q in range(seq_len):
        rotation_stack_step = rotation_stack[:,q,:,:,:,:]
        if q==0:
            # For first observation we assume p_=p0 and we just need to register the observation.
            # In this case, the registered observation is actually the first map so we do not
            # need to update it (so no need for the LSTM).
            p_ = self.init_p(batch_size=batch_size).clone() #self.p0.clone()
            map_next = self.register_observation(rotation_stack=rotation_stack_step, p=p_, batch_size=batch_size)
        else:
            # Here we need to predict p, register the obsv based on p, and update the map
            map_previous = map_next.clone()
            # Do the cross correlation with the existing map and pass through a softmax
            p_ = self.position_prediction(rotation_stack=rotation_stack_step, map_previous=map_previous, batch_size=batch_size)
            # Convolve the localization prediction (Pt) with the rotational stack using a deconvolution
            # in order to register the observations in the map
            if p_gt is not None:
                # if p_gt is given, then register the observation with it.
                # We still use p_ for the loss, but we use the ground-truth to register the map
                reg_obsv = self.register_observation(rotation_stack=rotation_stack_step, p=p_gt[:,q,:,:,:], batch_size=batch_size)
            else:
                reg_obsv = self.register_observation(rotation_stack=rotation_stack_step, p=p_, batch_size=batch_size)
            # Update the map using LSTM - hidden state: map_previous, input: reg_obsv
            # Each spatial location is passed independently in the LSTM
            map_next = self.update_map(reg_obsv, map_previous, batch_size=batch_size, update_type=update_type)
        # Store the p_ predictions and the map for each timestep
        p_pred[:,q,:,:,:] = p_
        map_pred[:,q,:,:,:] = map_next
    return p_pred, map_pred
def groundProjection(self, img_feat_all, points2D_all, local3D_all, sseg_all, depths_all, batch_size, seq_len, input_flags):
    # A wrapper over groundProjectionStep to do the projection for batch_size x seq_len.
    # Returns a b x seq x grid_channels x obs_h x obs_w tensor on the GPU.
    grid = np.zeros((batch_size, seq_len, self.grid_channels, self.observation_dim[0], self.observation_dim[1]), dtype=np.float32)
    grid = torch.tensor(grid, dtype=torch.float32).cuda()
    #map_occ = np.zeros((batch_size, seq_len, 1, self.observation_dim[0], self.observation_dim[1]), dtype=np.float32)
    #map_occ = torch.tensor(map_occ, dtype=torch.float32).cuda()
    for b in range(batch_size):
        points2D_seq = points2D_all[b]
        local3D_seq = local3D_all[b]
        for q in range(seq_len):
            points2D_step = points2D_seq[q] # n_points x 2
            local3D_step = local3D_seq[q] # n_points x 3
            img_feat_step = img_feat_all[b,q,:,:,:].unsqueeze(0) # 1 x resNet_feat_dim x crop_size(1) x crop_size(0)
            sseg_step = sseg_all[b,q,:,:,:].unsqueeze(0) # 1 x 1 x crop_size(1) x crop_size(0)
            depths_step = depths_all[b,q,:,:,:].unsqueeze(0) # 1 x 1 x crop_size(1) x crop_size(0)
            grid_step = self.groundProjectionStep(img_feat=img_feat_step, points2D=points2D_step,
                local3D=local3D_step, sseg=sseg_step, depth_feat=depths_step, input_flags=input_flags)
            grid[b,q,:,:,:] = grid_step.squeeze(0)
            #map_occ[b,q,:,:,:] = map_occ_step.squeeze(0)
    return grid #, map_occ
def bin_pooling(self, img_feat, points2D, map_coords):
    # Bin pooling during ground projection of the features.
    # img_feat: 1 x d x H x W; points2D: n x 2 pixel coords; map_coords: the
    # grid cell (x, y) each pixel projects to (n x 2). Pixels landing in the
    # same cell are max-pooled channel-wise. Returns d x obs_h x obs_w.
    grid = np.zeros((img_feat.shape[1], self.observation_dim[0], self.observation_dim[1]), dtype=np.float32)
    grid = torch.tensor(grid, dtype=torch.float32).cuda()
    pix_x, pix_y = points2D[:,0], points2D[:,1]
    pix_feat = img_feat[0, :, pix_y, pix_x]
    uniq_rows = np.unique(map_coords, axis=0)
    for i in range(uniq_rows.shape[0]):
        ucoord = uniq_rows[i,:]
        ind = np.where( (map_coords==ucoord).all(axis=1) )[0] # indices of where ucoord can be found in map_coords
        # Features indices in the ind array belong to the same bin and have to be max-pooled
        bin_feats = pix_feat[:,ind] # [d x n] n:number of feature vectors projected, d:feat_dim
        bin_feat, _ = torch.max(bin_feats,1) # [d]
        grid[:, ucoord[1], ucoord[0]] = bin_feat
    return grid
def label_pooling(self, sseg, points2D, map_coords):
    # Similar to bin_pooling() but instead features we pool the semantic labels.
    # For each bin get the frequencies of the class labels based on the labels projected.
    # Each grid location will hold a probability distribution over the semantic labels.
    grid = np.zeros((self.sseg_labels, self.observation_dim[0], self.observation_dim[1]), dtype=np.float32)
    pix_x, pix_y = points2D[:,0], points2D[:,1]
    sseg = sseg.cpu() # numpy-based histogramming below needs a CPU tensor
    pix_lbl = sseg[0, 0, pix_y, pix_x]
    uniq_rows = np.unique(map_coords, axis=0)
    for i in range(uniq_rows.shape[0]):
        ucoord = uniq_rows[i,:]
        ind = np.where( (map_coords==ucoord).all(axis=1) )[0] # indices of where ucoord can be found in map_coords
        bin_lbls = pix_lbl[ind]
        # Labels are from 0-39 where 0:wall ... 39:other_prop
        hist, bins = np.histogram(bin_lbls, bins=list(range(self.sseg_labels+1)))
        hist = hist / float(bin_lbls.shape[0]) # normalise counts into a distribution
        grid[:, ucoord[1], ucoord[0]] = hist
    grid = torch.tensor(grid, dtype=torch.float32).cuda()
    return grid
'''
def dets_pooling(self, dets, points2D, map_coords):
# Bin pooling of the detection masks. Detection scores in the same bin are averaged.
grid = np.zeros((self.dets_nClasses, self.observation_dim[0], self.observation_dim[1]), dtype=np.float32)
pix_x, pix_y = points2D[:,0], points2D[:,1]
# pix_dets holds a vector of binary values which indicate the presence of each category
# multiple values can be 1 due to overlapping bounding boxes
# pix_dets that end up in the same grid location are averaged
pix_dets = dets[0,:,pix_y, pix_x]
uniq_rows = np.unique(map_coords, axis=0)
for i in range(uniq_rows.shape[0]):
ucoord = uniq_rows[i,:]
ind = np.where( (map_coords==ucoord).all(axis=1) )[0] # indices of where ucoord can be found in map_coords
bin_dets = pix_dets[:, ind] # all detections in the same bin
bin_det = bin_dets.mean(1)
grid[:, ucoord[1], ucoord[0]] = bin_det
grid = torch.tensor(grid, dtype=torch.float32).cuda()
return grid
'''
# Performs the ground projection for a single image
def groundProjectionStep(self, img_feat, points2D, local3D, sseg, depth_feat, input_flags):
    # Projects per-pixel features/labels onto the egocentric ground grid and
    # concatenates the selected modality grids along the channel dimension.
    (with_feat, with_sseg, with_dets, use_raw_sseg, use_raw_dets, with_depth) = input_flags
    # Create the grid and discretize the set of coordinates into the bins
    # Points2D holds the image pixel coordinates with valid depth values
    # Local3D holds the X,Y,Z coordinates that correspond to the points2D
    # For each local3d find which bin it belongs to
    map_coords, valid = dhh.discretize_coords(x=local3D[:,0], z=local3D[:,2], map_dim=self.observation_dim, cell_size=self.cell_size)
    #map_occ = torch.tensor(map_occ, dtype=torch.float32).cuda()
    points2D = points2D[valid,:]
    map_coords = map_coords[valid,:]
    grids = []
    if with_feat:
        # Max-pool the features for each bin and extract the img embedding to get the img grid
        grid_img = self.bin_pooling(img_feat, points2D, map_coords)
        # Pass the grid to a CNN to get the observation with 32-D embeddings
        grid_img_in = grid_img.unsqueeze(0)
        grids.append(self.small_cnn_img(grid_img_in.cuda())) # 1 x 32 x 21 x 21
    if with_depth:
        # do the depth grid
        grid_depth = self.bin_pooling(depth_feat, points2D, map_coords)
        grid_depth_in = grid_depth.unsqueeze(0)
        grids.append(self.small_cnn_depth(grid_depth_in.cuda()))
    if with_sseg:
        # Get the probabilities of the labels on the grid and extract an embedding
        grid_sseg = self.label_pooling(sseg, points2D, map_coords)
        grid_sseg_in = grid_sseg.unsqueeze(0)
        if use_raw_sseg:
            grids.append(grid_sseg_in)
        else:
            grids.append(self.small_cnn_sseg(grid_sseg_in))
    '''
    if with_dets:
        grid_det = self.dets_pooling(dets, points2D, map_coords)
        grid_det_in = grid_det.unsqueeze(0)
        if use_raw_dets:
            grids.append(grid_det_in)
        else:
            grids.append(self.small_cnn_det(grid_det_in))
    '''
    if len(grids) == 0:
        raise Exception("No input grids!")
    # Stack the grids
    grid_out = torch.cat(grids, 1).cuda()
    return grid_out #, map_occ
def rotational_sampler(self, grid, rot_init=True):
    # The grid (after the groundProjection) is facing up
    # We need to rotate it so as to face to the right (angle 0).
    # Returns N x emb x orientations x s x s: the grid resampled at each of
    # the discrete headings.
    if rot_init:
        grid = self.do_rotation(grid, angle=-np.pi/2.0)
    # Rotate the grid's feature channels to obtain the rotational stack
    rotation_stack = np.zeros((grid.shape[0], self.map_embedding, self.orientations, self.observation_dim[0], self.observation_dim[1]), dtype=np.float32 )
    rotation_stack = torch.tensor(rotation_stack, dtype=torch.float32).cuda()
    for i in range(self.orientations):
        angle = 2*np.pi*(i/self.orientations) # evenly spaced headings in [0, 2*pi)
        rotation_stack[:,:,i,:,:] = self.do_rotation(grid, angle) #grid_trans
    return rotation_stack
def do_rotation(self, grid, angle):
    """Rotate a batch of grids (N x n x s x s) by `angle` radians via an
    affine warp (bilinear grid_sample)."""
    cos_a = np.cos(angle)
    sin_a = np.sin(angle)
    # 2 x 3 affine transform, replicated for every example in the batch.
    theta = torch.tensor([[cos_a, -1.0*sin_a, 0], [sin_a, cos_a, 0]]).cuda()
    theta = theta.unsqueeze(0).repeat(grid.shape[0], 1, 1)
    flow_field = F.affine_grid(theta, grid.size(), align_corners=True)
    return F.grid_sample(grid, flow_field.float(), align_corners=True)
def position_prediction(self, rotation_stack, map_previous, batch_size):
    # Do the cross correlation with the existing map and pass through a softmax.
    # NOTE(review): no softmax is actually applied in this function — raw
    # correlation scores are returned (CrossEntropyLoss applies log_softmax
    # itself); confirm this is also intended for the BCE path.
    corr_map = np.zeros((batch_size, self.orientations, self.global_map_dim[0], self.global_map_dim[1]), dtype=np.float32)
    corr_map = torch.tensor(corr_map, dtype=torch.float32).cuda()
    for b in range(batch_size):
        map_ = map_previous[b,:,:,:].unsqueeze(0) # 1 x n x h x w
        for r in range(self.orientations):
            # convolve each filter with the previous map
            filt = rotation_stack[b,:,r,:,:].unsqueeze(0) # 1 x n x s x s
            corr_tmp = F.conv2d(input=map_, weight=filt, padding=self.pad)
            corr_map[b,r,:,:] = corr_tmp
    p_ = np.zeros((batch_size, self.orientations, self.global_map_dim[0], self.global_map_dim[1]), dtype=np.float32)
    p_ = torch.tensor(p_, dtype=torch.float32).cuda()
    for i in range(batch_size):
        p_tmp = corr_map[i,:,:,:].view(-1) # # if we do the CEL loss then we do not use softmax (it is included in the loss layer)
        p_tmp = p_tmp.view(self.orientations, self.global_map_dim[0], self.global_map_dim[1]) # reshape the tensor back to map
        p_[i,:,:,:] = p_tmp
    return p_
def register_observation(self, rotation_stack, p, batch_size):
    """Place each example's observation into global map coordinates by
    deconvolving its position belief with its rotational stack."""
    reg_obsv = torch.zeros((batch_size, self.map_embedding, self.global_map_dim[0], self.global_map_dim[1]), dtype=torch.float32).cuda()
    for b in range(batch_size):
        # One kernel per orientation: r x n x s x s.
        kernel = rotation_stack[b,:,:,:,:].permute(1, 0, 2, 3)
        belief = p[b,:,:,:].unsqueeze(0)  # 1 x r x h x w
        reg_obsv[b,:,:,:] = F.conv_transpose2d(input=belief, weight=kernel, padding=self.pad)
    return reg_obsv
def update_map(self, reg_obsv, map_previous, batch_size, update_type):
    # Update the map using LSTM - hidden state: map_previous, input: reg_obsv.
    # Each spatial location is passed independently in the LSTM.
    # Fuses the newly registered observation with the previous map using the
    # configured mechanism ('lstm', 'fc' or 'avg'); returns b x emb x h x w.
    if update_type=="lstm":
        map_next = np.zeros((batch_size, self.map_embedding, self.global_map_dim[0], self.global_map_dim[1]), dtype=np.float32)
        map_next = torch.tensor(map_next, dtype=torch.float32).cuda()
        # ** LSTM input requires sequence length dimension, in our case is 1
        for i in range(self.global_map_dim[0]):
            for j in range(self.global_map_dim[1]):
                emb_in = reg_obsv[:,:,i,j] # b x n
                emb_hidden = map_previous[:,:,i,j] # b x n
                emb_in = emb_in.unsqueeze(0) # add the dimension for the sequence length
                emb_hidden = emb_hidden.unsqueeze(0) # add the dimension for the nLstm layers
                hidden = (emb_hidden.contiguous().cuda(), emb_hidden.contiguous().cuda()) # LSTM expects two hidden inputs
                lstm_out, hidden_out = self.lstm(emb_in, hidden)
                map_next[:,:,i,j] = lstm_out
    elif update_type=="fc": # using a fully connected layer
        map2 = torch.cat((map_previous, reg_obsv), 1)
        map_next = self.update_fc(map2.permute(0,2,3,1))
        map_next = torch.tanh(map_next)
        map_next = map_next.permute(0,3,1,2) # b x n x h x w
    else: # case 'avg', using AvgPool1d layer
        map_next = np.zeros((batch_size, self.map_embedding, self.global_map_dim[0], self.global_map_dim[1]), dtype=np.float32)
        map_next = torch.tensor(map_next, dtype=torch.float32).cuda()
        for i in range(self.global_map_dim[0]):
            for j in range(self.global_map_dim[1]):
                vec1 = reg_obsv[:,:, i, j].unsqueeze(2)
                vec2 = map_previous[:,:,i,j].unsqueeze(2)
                vec = torch.cat((vec1, vec2), 2)
                avg_out = self.update_avg(vec).squeeze(2)
                map_next[:,:,i,j] = avg_out
        # NOTE(review): tanh placed in the 'avg' branch only ('fc' already
        # applies it above); source indentation was ambiguous — confirm.
        map_next = torch.tanh(map_next)
    return map_next
'''
Given an array of integers where 1 ≤ a[i] ≤ n (n = size of array), some elements appear twice and others appear once.
Find all the elements of [1, n] inclusive that do not appear in this array.
Could you do it without extra space and in O(n) runtime? You may assume the returned list does not count as extra space.
Example:
Input:
[4,3,2,7,8,2,3,1]
Output:
[5,6]
'''
# This probably doesn't count since I'm using extra memory with the set
# This probably doesn't count since I'm using extra memory with the set
def findDisappearedNumbers(array):
    """Return the numbers from 1..len(array) that never appear in `array`."""
    # Start from the full candidate set and strike out everything present.
    remaining = set(range(1, len(array) + 1))
    remaining.difference_update(array)
    return list(remaining)
# Sanity check on the example from the problem statement: expect [5, 6].
array = [4,3,2,7,8,2,3,1]
print(findDisappearedNumbers(array))
# Another solution that seems faster in practice. Perhaps appending to a list is faster than
# removing from a set
def findDisappearedNumbers2(array):
    """Return the numbers from 1..len(array) absent from `array`.

    Builds a set once for O(1) membership tests, then collects the missing
    candidates in increasing order.
    """
    present = set(array)
    return [n for n in range(1, len(array) + 1) if n not in present]
# Same example through the second implementation: expect [5, 6].
print(findDisappearedNumbers2(array))
|
from flask_restplus import fields
class AdminSchema:
    """flask-restplus field definitions for the admin user endpoints.

    ``schema_user_req`` describes the expected request payload,
    ``schema_user_res`` the serialized response.
    """
    # Incoming user payload; 'role' is the only optional field.
    schema_user_req = {
        'username': fields.String(required=True, description='username'),
        'email': fields.String(required=True, description='email'),
        'role': fields.String(required=False, description='Role user')
    }
    # Outgoing user representation.
    # NOTE(review): 'updated_at' is declared String while 'created_at' is
    # DateTime -- confirm whether that asymmetry is intentional.
    schema_user_res = {
        'id': fields.Integer(required=True, description='user id'),
        'username': fields.String(required=True, description='username'),
        'email': fields.String(required=True, description='email'),
        'phone_number': fields.String(required=True, description='phone number'),
        'created_at': fields.DateTime(required=True, description='created at user'),
        'updated_at': fields.String(required=True, description='updated at user'),
        'role': fields.String(required=True, description='role of user'),
        'active': fields.Boolean(required=True, description='user is active or none')
    }
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from lxml import etree
from openerp.osv import fields, osv
from openerp.osv.orm import setup_modifiers
from openerp.tools.translate import _
class accounting_report(osv.osv_memory):
    """Extends the accounting report wizard with an analytic-account filter choice."""
    _inherit = "accounting.report"
    _columns = {
        # 'filter_cmp': fields.selection([('filter_no', 'No Filters'), ('filter_date', 'Date'), ('filter_period', 'Periods'), ('analytic', 'Cuenta Analitica')], "Filter by", required=True),
        # Analytic accounts selectable for the report (m2m relation table).
        'analytic_account_id': fields.many2many('account.analytic.account', 'account_analytic_account_financial_report', 'report_line_id', 'analytic_account_id', 'Cuentas Analiticas'),
        # Redefines the inherited 'filter' selection to add the 'analytic' option.
        'filter': fields.selection([('filter_no', 'No Filters'), ('filter_date', 'Date'), ('filter_period', 'Periods'), ('analytic', 'Cuenta Analitica')], "Filter by", required=False),
    }
accounting_report()
#class account_common_report(osv.osv_memory):
# _inherit = "account.common.report"
# _columns = {
# 'filter': fields.selection([('filter_no', 'No Filters'), ('filter_date', 'Date'), ('filter_period', 'Periods'), ('analytic', 'Cuenta Analitica')], "Filter by", required=True),
# }
#account_common_report()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: |
# Read one line of whitespace-separated tokens (at least three) and print
# them reordered as: third, first, second.
# Fix: the original shadowed the builtins `list` and `str`, and the initial
# `list = []` binding was dead (immediately overwritten).
words = input().split()
print(words[2], words[0], words[1])
from core.base import Base
try:
from .base import *
except:
from base import *
class LaGou(SpiderBase, Base):
    """Spider for lagou.com company listings (search home, list pages, details).

    Relies on SpiderBase/Base for ``send_request``, ``self.s`` (session),
    ``self.l`` (logger) and proxy state (``self.proxy`` / ``self.proxy_fa``).
    NOTE(review): ``requests`` and ``time`` are used below but not imported in
    this file's visible header -- presumably provided by ``from base import *``;
    confirm.
    """
    name = 'lagou'
    def __init__(self, logger=None,*args):
        # Forward logger and any extra positional args to the base classes.
        super(LaGou, self).__init__(logger, *args)
    def open_search_home(self):
        """GET the company-search home page (also primes session cookies)."""
        headers = {
            'Connection': 'keep-alive',
            'Cache-Control': 'max-age=0',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'zh-CN,zh;q=0.9',
        }
        args = {
            "url": "https://www.lagou.com/gongsi/",
            "headers": headers
        }
        self.send_request("get", **args)
    def query_list_page(self, key, page_to_go):
        """POST the paginated company-list JSON endpoint.

        Returns the raw response text on success, "" after exhausting retries.
        """
        headers = {
            'Origin': 'https://www.lagou.com',
            'X-Anit-Forge-Code': '0',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36',
            'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
            'Accept': 'application/json, text/javascript, */*; q=0.01',
            'Referer': 'https://www.lagou.com/gongsi/',
            'X-Requested-With': 'XMLHttpRequest',
            'Connection': 'keep-alive',
            'X-Anit-Forge-Token': 'None',
        }
        data = {
            'first': 'false',
            'pn': str(page_to_go),
            'sortField': '0',
            'havemark': '0'
        }
        # retry_times = 10
        retry_times = 0
        error_time = 0
        while True:
            # Five consecutive soft failures: reset the counter and drop the
            # proxy so a new one is picked up.
            if error_time >= 5:
                self.l.info("no result with 10 times retry")
                error_time = 0
                self.proxy= {} # re-acquire a proxy
            # Hard cap on total attempts.
            if retry_times >= 20:
                self.l.info("no result with 10 times retry")
                break
            try:
                args = {
                    "url": f'https://www.lagou.com/gongsi/{key}.json',
                    "headers": headers,
                    "data": data
                }
                retry_times += 1
                self.s.cookies = requests.utils.cookiejar_from_dict({}) # clear cookies
                self.open_search_home()
                res = self.send_request("post", **args)
            except Exception as e:
                # Network/proxy error: just try again.
                continue
            if (res.status_code == 200):
                if "result" in res.text:
                    self.l.info("search success !!!")
                    time.sleep(1)
                    return res.text
                elif "操作太频繁" in res.text:
                    self.l.info(f"操作太频繁:{error_time}")
                    # rate-limited by the site: count it and back off
                    error_time += 1
                    time.sleep(2)
                    continue
                else:
                    self.l.info("公司的搜索页面有问题")
                    error_time += 1
                    time.sleep(2)
                    continue
            else:
                self.l.error(f"response status_code is wrong:{res.status_code}")
                # switch to a new proxy immediately
                self.proxy_fa = 10
                self.proxy = {}
                continue
        return ""
    def query_detail_page(self, url):
        """GET a company detail page.

        Returns the page HTML on success, "" on 303 (page gone) or after
        exhausting retries.
        """
        l = self.l
        headers = {
            'Connection': 'keep-alive',
            'Cache-Control': 'max-age=0',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'zh-CN,zh;q=0.9',
        }
        # response = self.session.get(url, headers=headers, timeout=30)
        args = {
            "url": url,
            "headers": headers
        }
        retry_times = 0
        error_time = 0
        while True:
            if error_time >= 5:
                self.l.info("no result with 10 times retry")
                error_time = 0
                self.proxy= {} # re-acquire a proxy
            if retry_times >= 20:
                self.l.info("no result with 10 times retry")
                break
            try:
                retry_times += 1
                self.s.cookies = requests.utils.cookiejar_from_dict({}) # reset cookies
                self.open_search_home()
                response = self.send_request("get", **args)
            except Exception as e:
                continue
            if (response.status_code == 200):
                # "公司主页" in the body marks a valid company page.
                if "公司主页" in response.text:
                    l.info("search success !!!")
                    time.sleep(1)
                    return response.text
                elif ("封禁" in response.text) or ("请按住滑块,拖动到最右边" in response.text) or ("存在异常访问行为" in response.text):
                    # Blocked / captcha page: the IP is banned.
                    l.info(f"ip被封禁了")
                    # switch to a new proxy immediately
                    self.proxy_fa = 10
                    self.proxy = {}
                    continue
                else:
                    l.info("公司的搜索页面有问题")
                    error_time += 1
                    time.sleep(2)
                    continue
            elif (response.status_code == 303):
                # Redirected away: treat the page as unavailable, no retry.
                return ""
            else:
                l.error(f"response status_code is wrong:{response.status_code}")
                # switch to a new proxy immediately
                self.proxy_fa = 10
                self.proxy = {}
                continue
        return ""
if __name__ == '__main__':
# l = DaJie()
# l.run(['112233'])
pass
|
import cv2
import numpy as np
import time
from matplotlib import pyplot as plt
a=['wheat_grain21.jpeg','wheat_grain27.jpeg','wheat_grain32.jpeg','wheat_grain37.jpeg']
data_path='/home/ambuje/Desktop/'
import mpi4py.MPI
rank = mpi4py.MPI.COMM_WORLD.Get_rank()
size = mpi4py.MPI.COMM_WORLD.Get_size()
task_list = range(4)
aa=time.time()
def f(task,i):
    """Count wheat grains (external contours) in image a[i] via Canny edges.

    `task` is accepted for symmetry with the MPI dispatch loop but unused.
    """
    q=data_path+a[i]
    # Flag 0: load as grayscale.
    img = cv2.imread(q,0)
    # def auto_canny(image, sigma=0.33):
    # # compute the median of the single channel pixel intensities
    # v = np.median(image)
    # # apply automatic Canny edge detection using the computed median
    # lower = int(max(0, (1.0 - sigma) * v))
    # upper = int(min(255, (1.0 + sigma) * v))
    # edged = cv2.Canny(image, lower, upper)
    # # return the edged image
    # return edged
    # Small Gaussian blur suppresses noise before edge detection.
    blurred = cv2.GaussianBlur(img, (3, 3), 0)
    # apply Canny edge detection using a wide threshold, tight
    # threshold, and automatically determined threshold
    #wide = cv2.Canny(blurred, 10, 200)
    tight = cv2.Canny(blurred, 225, 250)
    #auto = auto_canny(blurred)
    # show the images
    # cv2.imshow("Original", img)
    # cv2.imshow("Edges", np.hstack([wide, tight, auto]))
    # cv2.waitKey(0)
    # NOTE(review): 2-tuple unpacking matches OpenCV 2.x/4.x; OpenCV 3.x
    # returns 3 values from findContours -- confirm the installed version.
    (cnts, _) = cv2.findContours(tight, cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
    print("I found %i wheat grains" % len(cnts))
# Round-robin MPI dispatch: processor `rank` of `size` handles every task
# whose index satisfies i % size == rank.  E.g. with 4 processors, proc 0
# does tasks 0, 4, 8, ... and proc 1 does tasks 1, 5, 9, ... and so on.
for i,task in enumerate(task_list):
    if i%size!=rank: continue
    #print ("Task number %d (%d) being done by processor %d of %d" % (i, task, rank, size))
    f(task,i)
b=time.time()
# Wall-clock time spent by this rank on its share of the images.
print(b-aa)
|
#!/bin/python
import sys
# Python 2 script: reads a return date (d1 m1 y1) and a due date (d2 m2 y2)
# and prints the fine -- presumably the HackerRank "Library Fine" rules:
# 15/day late within the same month, 500/month late within the same year,
# flat 10000 when returned in a later year; 0 when on time.  TODO confirm.
d1,m1,y1 = raw_input().strip().split(' ')
d1,m1,y1 = [int(d1),int(m1),int(y1)]
d2,m2,y2 = raw_input().strip().split(' ')
d2,m2,y2 = [int(d2),int(m2),int(y2)]
y= y1 - y2
if y == 0:
    # Same year: fine depends on the month difference, then the day difference.
    m = m1 - m2
    if m == 0:
        if d1 <= d2:
            print 0
        else:
            print str(15*(d1-d2))
    elif (m > 0):
        print str(500*(m))
    else:
        print 0
elif (y > 0):
    # Returned in a later year: flat fine regardless of month/day.
    print 10000
else:
    print 0
import mysql.connector as mariadb
import sys
# Fetch the most recent lock position from the `status` table and print it
# so a caller (e.g. a shell script) can consume the value.
mariadb_connection = mariadb.connect(user='root', password='', database='doorlock') ## Connect to db
try:
    cursor = mariadb_connection.cursor()
    try:
        query = "SELECT position FROM status ORDER BY time DESC LIMIT 1;" ## Get the latest position of the lock
        cursor.execute(query)
        response = cursor.fetchone()[0] ## It automatically returns a tuple, we want the 1st string
        print(response) ## Forward the response
    finally:
        cursor.close()
finally:
    # Fix: the original never closed the cursor or the connection.
    mariadb_connection.close()
|
def gcd(a, b):
    """Greatest common divisor via the iterative Euclidean algorithm."""
    # Loop until the remainder vanishes; the last non-zero value is the gcd.
    while b:
        a, b = b, a % b
    return a
A, B, C, D = map( int, input().split())
# lcm(C, D): counts values divisible by both C and D exactly once.
CD = C*D//gcd(C,D)
# Inclusion-exclusion: how many x in [A, B] are divisible by neither C nor D.
print(B-A+1 - (B//C - (A-1)//C) - (B//D-(A-1)//D) + (B//CD - (A-1)//CD))
|
"""
Python module to perform data ingress operations
for the Advanticsys sensors
"""
import pandas as pd
# from crop.db import create_database
from .constants import (
CONST_ADVANTICSYS_COL_LIST,
CONST_ADVANTICSYS_COL_TIMESTAMP,
CONST_ADVANTICSYS_COL_MODBUSID,
CONST_ADVANTICSYS_COL_TEMPERATURE,
CONST_ADVANTICSYS_COL_HUMIDITY,
CONST_ADVANTICSYS_COL_CO2LEVEL,
ERR_IMPORT_ERROR_1,
ERR_IMPORT_ERROR_2,
ERR_IMPORT_ERROR_3,
ERR_IMPORT_ERROR_4,
ERR_IMPORT_ERROR_5,
CONST_ADVANTICSYS_TIMESTAMP_MIN,
CONST_ADVANTICSYS_TIMESTAMP_MAX,
CONST_ADVANTICSYS_MODBUSID_MIN,
CONST_ADVANTICSYS_MODBUSID_MAX,
CONST_ADVANTICSYS_TEMPERATURE_MIN,
CONST_ADVANTICSYS_TEMPERATURE_MAX,
CONST_ADVANTICSYS_HUMIDITY_MIN,
CONST_ADVANTICSYS_HUMIDITY_MAX,
CONST_ADVANTICSYS_CO2LEVEL_MIN,
CONST_ADVANTICSYS_CO2LEVEL_MAX,
CONST_ADVANTICSYS,
)
from .structure import SensorClass, TypeClass, ReadingsAdvanticsysClass
def advanticsys_import(file_path):
    """Read an Advanticsys CSV file and run the full validation pipeline.

    Args:
        file_path - full path to an advanticsys csv file
    Returns:
        success - status
        log - error message
        advanticsys_df - validated pandas dataframe, or None if invalid
    """
    raw_df = advanticsys_read_csv(file_path)
    return advanticsys_df_checks(raw_df)
def advanticsys_df_checks(advanticsys_raw_df):
    """Run structure, conversion and validity checks on a raw Advanticsys frame.

    Args:
        advanticsys_raw_df - raw dataframe as read from file
    Returns:
        success - status
        log - error message
        advanticsys_df - converted dataframe, or None on any failure
    """
    # Must be a non-empty pandas dataframe before anything else runs.
    if not isinstance(advanticsys_raw_df, pd.DataFrame):
        return False, "Not a pandas dataframe", None
    if advanticsys_raw_df.empty:
        return False, "Dataframe empty", None
    # Expected columns present?
    structure_ok, log = advanticsys_check_structure(advanticsys_raw_df)
    if not structure_ok:
        return structure_ok, log, None
    # Select/convert the columns we keep.
    convert_ok, log, advanticsys_df = advanticsys_convert(advanticsys_raw_df)
    if not convert_ok:
        return convert_ok, log, None
    # Value ranges and duplicates.
    validity_ok, log = advanticsys_df_validity(advanticsys_df)
    if not validity_ok:
        return validity_ok, log, None
    return validity_ok, log, advanticsys_df
def advanticsys_read_csv(file_path):
    """Load an Advanticsys CSV file into a pandas DataFrame.

    Args:
        file_path - full path to an advanticsys csv file
    Returns:
        raw_df - pandas dataframe with the raw file contents
    """
    raw_df = pd.read_csv(file_path)
    return raw_df
def advanticsys_check_structure(advanticsys_df):
    """Verify that every required Advanticsys column is present.

    Args:
        advanticsys_df - pandas dataframe representing advanticsys data file
    Returns:
        (True, None) when all columns are present,
        (False, ERR_IMPORT_ERROR_1) otherwise.
    """
    present = set(advanticsys_df.columns)
    for required_column in CONST_ADVANTICSYS_COL_LIST:
        if required_column not in present:
            return False, ERR_IMPORT_ERROR_1
    return True, None
def advanticsys_convert(advanticsys_raw_df):
    """
    Prepares Advanticsys dataframe to be imported to database by selecting only
    the necessary columns and converting them to the correct data types.
    Args:
        advanticsys_raw_df - pandas dataframe representing advanticsys data file
    Returns:
        success - status
        log - error message
        advanticsys_df - converted pandas dataframe, or None on failure
    """
    success = True
    log = None
    try:
        # .copy() so the dtype conversions below write into a fresh frame
        # rather than a view of the caller's data (fixes pandas
        # SettingWithCopy behaviour / accidental mutation of the input).
        advanticsys_df = advanticsys_raw_df[CONST_ADVANTICSYS_COL_LIST].copy()
    except KeyError:
        # Narrowed from a bare `except:`; a missing column raises KeyError.
        success = False
        log = ERR_IMPORT_ERROR_1 + ": " + ",".join(CONST_ADVANTICSYS_COL_LIST)
        advanticsys_df = None
        return success, log, advanticsys_df
    # convert to expected types
    try:
        advanticsys_df[CONST_ADVANTICSYS_COL_TIMESTAMP] = pd.to_datetime(
            advanticsys_df[CONST_ADVANTICSYS_COL_TIMESTAMP],
            format="%Y-%m-%dT%H:%M:%S.%f",
        )
        advanticsys_df[CONST_ADVANTICSYS_COL_MODBUSID] = advanticsys_df[
            CONST_ADVANTICSYS_COL_MODBUSID
        ].astype("int16")
        advanticsys_df[CONST_ADVANTICSYS_COL_TEMPERATURE] = advanticsys_df[
            CONST_ADVANTICSYS_COL_TEMPERATURE
        ].astype("float64")
        advanticsys_df[CONST_ADVANTICSYS_COL_HUMIDITY] = advanticsys_df[
            CONST_ADVANTICSYS_COL_HUMIDITY
        ].astype("float64")
        advanticsys_df[CONST_ADVANTICSYS_COL_CO2LEVEL] = advanticsys_df[
            CONST_ADVANTICSYS_COL_CO2LEVEL
        ].astype("float64")
    except Exception:
        # Narrowed from a bare `except:` (which would also swallow
        # KeyboardInterrupt/SystemExit).
        success = False
        log = ERR_IMPORT_ERROR_2
        advanticsys_df = None
        return success, log, advanticsys_df
    # check for missing values
    if advanticsys_df.isnull().values.any():
        success = False
        log = ERR_IMPORT_ERROR_3
        advanticsys_df = None
        return success, log, advanticsys_df
    return success, log, advanticsys_df
def advanticsys_df_validity(advanticsys_df):
    """
    Checks an Advanticsys dataframe for duplicate rows and out-of-range values.
    Args:
        advanticsys_df - pandas dataframe representing advanticsys data file
    Returns:
        success - True when the data is valid
        log - error message ("" when valid)
    """
    # Duplicate rows are not allowed at all.
    duplicates = advanticsys_df[advanticsys_df.duplicated()]
    if len(duplicates) > 0:
        log = (
            ERR_IMPORT_ERROR_4
            + ". Check the following entries: "
            + str(list(duplicates.index))
        )
        return False, log
    # (column, min, max) triples checked in order; stop at the first failure.
    range_checks = [
        (
            CONST_ADVANTICSYS_COL_TIMESTAMP,
            CONST_ADVANTICSYS_TIMESTAMP_MIN,
            CONST_ADVANTICSYS_TIMESTAMP_MAX,
        ),
        (
            CONST_ADVANTICSYS_COL_MODBUSID,
            CONST_ADVANTICSYS_MODBUSID_MIN,
            CONST_ADVANTICSYS_MODBUSID_MAX,
        ),
        (
            CONST_ADVANTICSYS_COL_TEMPERATURE,
            CONST_ADVANTICSYS_TEMPERATURE_MIN,
            CONST_ADVANTICSYS_TEMPERATURE_MAX,
        ),
        (
            CONST_ADVANTICSYS_COL_HUMIDITY,
            CONST_ADVANTICSYS_HUMIDITY_MIN,
            CONST_ADVANTICSYS_HUMIDITY_MAX,
        ),
        (
            CONST_ADVANTICSYS_COL_CO2LEVEL,
            CONST_ADVANTICSYS_CO2LEVEL_MIN,
            CONST_ADVANTICSYS_CO2LEVEL_MAX,
        ),
    ]
    success, log = True, ""
    for col_name, col_min, col_max in range_checks:
        success, log = advanticsys_df_check_range(
            advanticsys_df, col_name, col_min, col_max
        )
        if not success:
            return success, log
    return success, log
def advanticsys_df_check_range(advanticsys_df, col_name, col_min, col_max):
    """
    Checks that all values of one column fall within [col_min, col_max].
    Args:
        advanticsys_df - pandas dataframe representing advanticsys data file
        col_name - column name
        col_min - minimum value
        col_max - maximum value
    Returns:
        success - False when any value is out of range
        log - error message ("" when all values are in range)
    """
    outside_mask = (advanticsys_df[col_name] < col_min) | (
        advanticsys_df[col_name] > col_max
    )
    offending = advanticsys_df[outside_mask]
    if len(offending) == 0:
        return True, ""
    log = (
        ERR_IMPORT_ERROR_5
        + " <"
        + col_name
        + "> out of range (min = %f, max = %f)" % (col_min, col_max)
        + " Entries: "
        + str(list(offending.index))
    )
    return False, log
def advanticsys_check_warning(advanticsys_df, col_name, thold_max, thold_min):
    """
    Checks if values in a dataframe column are within a warning threshold.
    If not, builds a warning message.
    Args:
        advanticsys_df - pandas dataframe representing advanticsys data file
        col_name - column name
        thold_max - max threshold of value
        thold_min - min threshold of value
    Returns:
        success - False when any value falls outside [thold_min, thold_max]
        log - warning message ("" when all values are within the thresholds)
    """
    success = True
    log = ""
    out_of_range_df = advanticsys_df[
        (advanticsys_df[col_name] < thold_min) | (advanticsys_df[col_name] > thold_max)
    ]
    if len(out_of_range_df) > 0:
        success = False
        # Fix: the original expression began with a stray unary "+" applied
        # to the string literal (`+" <"`), raising
        # "TypeError: bad operand type for unary +: 'str'" whenever a
        # warning should have been produced.
        log = (
            " <"
            + col_name
            + "> out of range (min = %f, max = %f)" % (thold_min, thold_max)
            + " Entries: "
            + str(list(out_of_range_df.index))
        )
    return success, log
def insert_advanticsys_data(session, adv_df):
    """
    The function will take the prepared advanticsys data frame from the ingress module
    and find sensor id with respect to modbusid and sensor type and insert data into the db.
    -session: an open sqlalchemy session
    -adv_df: dataframe containing a checked advanticsys df
    Returns (result, log): result is False on the first lookup/insert failure;
    on success, log summarises new rows vs ignored duplicates.
    Note: rows are added to the session but not committed here.
    """
    result = True
    log = ""
    cnt_dupl = 0  # duplicate readings skipped
    cnt_new = 0   # readings actually added to the session
    # Gets the the assigned int id of the "Advanticsys" type
    try:
        adv_type_id = (
            session.query(TypeClass)
            .filter(TypeClass.sensor_type == CONST_ADVANTICSYS)
            .first()
            .id
        )
    except:
        # e.g. .first() found no row, so .id raises AttributeError
        result = False
        log = "Sensor type {} was not found.".format(CONST_ADVANTICSYS)
        return result, log
    # Gets the sensor_id of the sensor with type=advanticsys and device_id=modbusid
    for _, row in adv_df.iterrows():
        adv_device_id = row[CONST_ADVANTICSYS_COL_MODBUSID]
        adv_timestamp = row[CONST_ADVANTICSYS_COL_TIMESTAMP]
        try:
            adv_sensor_id = (
                session.query(SensorClass)
                .filter(SensorClass.device_id == str(adv_device_id))
                .filter(SensorClass.type_id == adv_type_id)
                .first()
                .id
            )
        except:
            # Unknown device id: abort the whole import.
            adv_sensor_id = -1
            result = False
            log = "{} sensor with {} = {} was not found.".format(
                CONST_ADVANTICSYS, CONST_ADVANTICSYS_COL_MODBUSID, str(adv_device_id)
            )
            break
        # check if data entry already exists
        if adv_sensor_id != -1:
            found = False
            query_result = (
                session.query(ReadingsAdvanticsysClass)
                .filter(ReadingsAdvanticsysClass.sensor_id == adv_sensor_id)
                .filter(ReadingsAdvanticsysClass.timestamp == adv_timestamp)
                .first()
            )
            if query_result is not None:
                found = True
            try:
                if not found:
                    # New (sensor_id, timestamp) pair: stage it for insert.
                    data = ReadingsAdvanticsysClass(
                        sensor_id=adv_sensor_id,
                        timestamp=adv_timestamp,
                        temperature=row[CONST_ADVANTICSYS_COL_TEMPERATURE],
                        humidity=row[CONST_ADVANTICSYS_COL_HUMIDITY],
                        co2=row[CONST_ADVANTICSYS_COL_CO2LEVEL],
                    )
                    session.add(data)
                    cnt_new += 1
                else:
                    cnt_dupl += 1
            except:
                # NOTE(review): failure here sets result/log but the loop
                # continues with the remaining rows -- confirm intended.
                result = False
                log = "Cannot insert new data to database"
    if result:
        log = "New: {} (uploaded); Duplicates: {} (ignored)".format(cnt_new, cnt_dupl)
    return result, log
|
# Broadcast message server
import socket
import select
import sys
print 'Hello World! I am the KitChat server. Your wish is my command!'
PORT = 5999
# CONNECTION_LIST will hold available clients
# We can read iterate through the list and see if there is data available on each socket
# if there is data, we want to relay it to the other clients (and ignore the one who sent it)
# send message from the input socket to the list of connected sockets, sans server socket.
def mesgAll (sock, message, name):
    """Broadcast `message` to every connected client except the server socket
    and the sender.  When `name` is True the sender's username (ID[sock]) is
    prepended.  Dead sockets are closed and dropped from CONNECTION_LIST.
    """
    if name==True: # prepend username to broadcast
        message = '\r' + ID[sock] + ": " + message + '\r'# prepend username
    # Fixes vs. original: the loop variable no longer shadows the `socket`
    # module, and we iterate over a snapshot so removing a dead client from
    # CONNECTION_LIST mid-loop is safe.
    for client in list(CONNECTION_LIST):
        if client != serverSock and client != sock: #disregard server and sender
            try:
                client.send(message)
            except:
                client.close() # send failed
                CONNECTION_LIST.remove(client) # bye bye client
if __name__ == "__main__":
CONNECTION_LIST = [] # list of currently connected clients
ID = {}
RECV_BUFFER = 4096 # how much data we grab on each call to recv; recv talks to OS which buffers data on a lower level
if (len(sys.argv) == 2): # Port is first argument if provided
PORT = int(sys.argv[1])
# this is the socket used to accept connections by the server
serverSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Socket options here:
# default: dissallow the socket/ip_address combo to be used until after a delay time (TIME_WAIT) has passed
# (to make sure all en route packets are dead)
# SO_REUSEADDR: allows a new client to bind to a recently closed prot/IP combo instantly
# socket.SO_SOCKET defines the level of the stack that we're working at - SOCKET level
serverSock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # 3rd argument is to set it True or False
# 0.0.0.0 will serve all network adaptors
# 127.0.0.1 will only server clients on the same machine (i.g. if running serve/client(s) on colossus
serverSock.bind(("0.0.0.0", PORT))
serverSock.listen(20) # backlog is the pending connection buffer size, can be determined emperically under load
CONNECTION_LIST.append(serverSock)
print "KitChat server started! (Port: " + str(PORT) + ")"
while 1: #infinately handle clients!
# Using select because polling is goofy and interrupts (signals?) are the best
# sending select the CONNECTION_LIST with the serverSock in it will allow the OS
# to monitor the serverSock for clients wishing to connect
#
#
# Use select to wait/block until something exciting happens on a socket
# in the CONNECTION_LIST
read_sockets, write_sockets, error_sockets = select.select(CONNECTION_LIST,[],[])
for sock in read_sockets:
if sock == serverSock: # if the excitement is happening on the server socket, we've got a new client!
sockfd, addr = serverSock.accept() # accept the connection on the server socket, and push to unique socket
CONNECTION_LIST.append(sockfd) # add the client to the currently connected list so we can monitor with select
print addr, "has connected"
# clients send username as soon as they connect
ID[sockfd] = sockfd.recv(RECV_BUFFER) # grab the name sent by the client
print ID[sockfd] + " has joined the chat!"
#print "connection list below: "
#print CONNECTION_LIST
# users on the server side.
# mesgAll(sockfd, "\nWelcome %s, %s to KitChat!\n" % addr)
mesgAll(sockfd, "\n---%s has logged in---\n" % ID[sockfd], False)
else: # we're getting information from a client socket
data = sock.recv(RECV_BUFFER) # since using TCP stream, grab the buffer length
if data: # do while(data): here in order to get more than just RECV_BUFFER size
mesgAll(sock, data, True) # True means force client to prepend username before data is sent
else: # connection died
mesgAll( sock, "\n---%s has logged off---\n" % ID[sock], False)
print ID[sock] + " (%s,%s) has logged off." % addr
#TODO Play AOL log-off sound, send special character codes
sock.close()
del ID[sock] # remove them from the dictionary
CONNECTION_LIST.remove(sock)
continue
#except: # the change the select() noticed was actually a socket disconnecting
#mesgAll( sock, "Client %s, %s has logged off." % addr)
#TODO Play AOL log-off sound
#CONNECTION_LIST.remove(sock)
#sock.close()
#continue
server_socket.close() #if somehow we exit the while loop, relent the socket
|
from django.contrib import admin
from .models import *
# Expose the app's models in the Django admin site with default ModelAdmin.
admin.site.register(Gun)
admin.site.register(Solider)
admin.site.register(Platoon)
admin.site.register(Ranks)
admin.site.register(Ammo)
|
/home/miaojian/miniconda3/lib/python3.7/hmac.py |
import logging
from pylons import request, response, session, tmpl_context as c, url
from pylons.controllers.util import abort, redirect, Response
from time import time
import os
import shutil
import plistlib
from pcpbridge.lib.base import BaseController, render
from pcpbridge.lib import PCastDEV as PCast
log = logging.getLogger(__name__)
class BridgeController(BaseController):
    """Pylons controller bridging HTTP clients to a Podcast Producer (PCast)
    server via the PCastDEV wrapper.  Python 2 code (`except Exception, e`).
    """
    def index(self):
        # Return a rendered template
        #return render('/file.mako')
        # or, return a response
        redirect(url(controller='bridge', action='workflows'))
    def workflows(self):
        """GET: show the credentials form; POST: render the workflow list."""
        if request.method == "GET":
            log.debug("%s -- %s" % (request.host, request.url))
            return render('/workflows_form.mako')
        else:
            pcast = PCast.PCast()
            (status, pcast_result, pcast_stdout, pcast_stderr) = pcast.getWorkflows(username = request.POST['user'],
                password = request.POST['password'])
            if(status != 0):
                c.exception = pcast_result
                c.code = status
                log.error("%s -- %s, username = '%s', status = %s" % (request.remote_addr, request.url, request.POST['user'], pcast_result))
                return render('/pcast_error.mako')
            else:
                # Parse the plist printed on stdout by the pcast tool.
                flows = plistlib.readPlistFromString(pcast_stdout)
                c.user = request.POST['user']
                c.workflows = [{'name':x['name'], 'uuid':x['uuid'], 'description':x['description']} for x in flows['workflows']]
                log.debug("%s -- %s, username = '%s', status = %s" % (request.remote_addr, request.url, request.POST['user'], pcast_result))
                return render('/workflows.mako')
    def workflows_xml(self):
        """Same as workflows() but returns the raw plist XML on success."""
        if request.method == "GET":
            log.debug("%s -- %s" % (request.host, request.url))
            return render('/workflows_form.mako')
        else:
            pcast = PCast.PCast()
            (status, pcast_result, pcast_stdout, pcast_stderr) = pcast.getWorkflows(username = request.POST['user'],
                password = request.POST['password'])
            if(status != 0):
                c.exception = pcast_result
                c.code = status
                log.error("%s -- %s, username = '%s', status = %s" % (request.remote_addr, request.url, request.POST['user'], pcast_result))
                return render('/pcast_error.mako')
            else:
                return Response(pcast_stdout, content_type="text/xml")
    def upload(self, id=None):
        """GET: upload form for workflow `id`; POST: save the file plus a
        metadata plist to a scratch dir and submit both to PCast."""
        if request.method == "GET":
            c.uuid=id
            return render('/upload_form.mako')
        else:
            # Scratch directory unique per user + timestamp under /tmp.
            workdir = os.path.join("/tmp","%s%s" % (request.POST['user'],time()))
            os.makedirs(workdir)
            myfile = request.POST['file']
            permanent_file = open(os.path.join(workdir,
                myfile.filename.lstrip(os.sep)),
                'w')
            shutil.copyfileobj(myfile.file, permanent_file)
            myfile.file.close()
            permanent_file.close()
            metadata_file = open(os.path.join(workdir,
                "metadata.plist"),
                'w')
            metadata_file.write("""<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>Description</key>
<string>%s</string>
<key>Title</key>
<string>%s</string>
</dict>
</plist>
""" % (request.POST['description'],request.POST['title']))
            metadata_file.close()
            pcast = PCast.PCast()
            (status, pcast_result, pcast_stdout, pcast_stderr) = pcast.submit(username = request.POST['user'],
                password = request.POST['password'],
                file_path = permanent_file.name,
                metadata = metadata_file.name,
                workflow_uuid = request.POST['uuid'],
                )
            try:
                # First stdout line ends with the upload UUID.
                upload_uuid = pcast_stdout.split("\n")[0].rsplit(' ',1)[1]
            except Exception, e:
                upload_uuid = "N/A"
            c.pcast_upload_uuid = upload_uuid
            c.pcast_result = pcast_result
            c.pcast_stdout = pcast_stdout
            c.pcast_stderr = pcast_stderr
            c.status = status
            log.info("%s -- %s, username = '%s', status = %s" % (request.remote_addr, request.url, request.POST['user'], pcast_result))
            log.info("Upload UUID : %s" % (upload_uuid))
            log.info("Upload File : %s" % (permanent_file.name))
            if status == 0:
                # Clean up only on success; failures keep files for debugging.
                shutil.rmtree(workdir, False, lambda x,y,z: log.error("Couldn't remove %s : %s" % (y,z)))
            return render('/debug_upload.mako')
# Read an integer and report primality: 'Y' when prime, 'N' otherwise.
n = int(input())
if n < 2:
    print('N')  # 0, 1 and negatives are not prime
elif n == 2:
    print('Y')  # the only even prime
else:
    # Trial division over every candidate divisor, as in the original.
    print('N' if any(n % d == 0 for d in range(2, n)) else 'Y')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2006-2013 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Gaia
#
# Gaia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
import gaia2
import sys, os.path, json
import gaia2.fastyaml as yaml
from optparse import OptionParser
def convertJsonToSig(filelist_file, result_filelist_file):
    """Convert each JSON descriptor listed in `filelist_file` (a YAML map of
    trackid -> json path) into a Gaia-compatible .sig YAML file next to the
    original, and write the updated trackid -> sig-path map to
    `result_filelist_file`.  Returns True only when every file converted.
    Python 2 code (iteritems, print statements).
    """
    fl = yaml.load(open(filelist_file, 'r'))
    result_fl = fl
    errors = []
    for trackid, json_file in fl.iteritems():
        try:
            data = json.load(open(json_file))
            # remove descriptors, that will otherwise break gaia_fusion due to incompatibility of layouts
            if 'tags' in data['metadata']:
                del data['metadata']['tags']
            if 'sample_rate' in data['metadata']['audio_properties']:
                del data['metadata']['audio_properties']['sample_rate']
            sig_file = os.path.splitext(json_file)[0] + '.sig'
            yaml.dump(data, open(sig_file, 'w'))
            result_fl[trackid] = sig_file
        except:
            # Any failure (missing file, bad JSON, write error): record and go on.
            errors += [json_file]
    yaml.dump(result_fl, open(result_filelist_file, 'w'))
    print "Failed to convert", len(errors), "files:"
    for e in errors:
        print e
    return len(errors) == 0
if __name__ == '__main__':
    parser = OptionParser(usage = '%prog [options] filelist_file result_filelist_file\n' +
"""
Converts json files found in filelist_file into *.sig yaml files compatible with
Gaia. The result files are written to the same directory where original files were
located.
"""
    )
    options, args = parser.parse_args()
    # Both positional arguments are required; anything less prints usage and exits.
    try:
        filelist_file = args[0]
        result_filelist_file = args[1]
    except:
        parser.print_help()
        sys.exit(1)
    convertJsonToSig(filelist_file, result_filelist_file)
|
class Solution:
    """Binary-tree right side view: the first node visible at each depth
    when the tree is viewed from the right."""

    def rightView(self, root, res, depth):
        """Right-first preorder walk recording the first value met per depth."""
        if root is None:
            return
        # First time we reach this depth => this node is the rightmost one
        # encountered so far on that level.
        if depth == len(res):
            res.append(root.val)
        self.rightView(root.right, res, depth + 1)
        self.rightView(root.left, res, depth + 1)

    def rightSideView(self, root):
        """Return the values visible from the right side, top to bottom."""
        visible = []
        self.rightView(root, visible, 0)
        return visible
|
"""trying with postgres again
Revision ID: d671b27f6ff0
Revises:
Create Date: 2021-05-10 12:53:44.257464
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from sqlalchemy.orm import sessionmaker
Session = sessionmaker()
# revision identifiers, used by Alembic.
revision = 'd671b27f6ff0'
down_revision = None
branch_labels = None
depends_on = None
# , create_type=False
def upgrade():
    """Create the initial schema: images, modules, users, errors, messages,
    subscriptions and answers tables (PostgreSQL dialect for the ENUM)."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Polymorphic image attachment: parent_type says whether parent_id refers
    # to an error or an answer.
    op.create_table('images',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('url', sa.String(), nullable=False),
    sa.Column('parent_id', sa.Integer(), nullable=False),
    sa.Column('parent_type', postgresql.ENUM('error', 'answer', name='parent_types'), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('modules',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=40), nullable=False),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('name')
    )
    op.create_table('users',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('username', sa.String(length=40), nullable=False),
    sa.Column('email', sa.String(length=255), nullable=False),
    sa.Column('hashed_password', sa.String(length=255), nullable=False),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('email'),
    sa.UniqueConstraint('username')
    )
    op.create_table('errors',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('title', sa.String(length=255), nullable=False),
    sa.Column('description', sa.Text(), nullable=False),
    sa.Column('user_id', sa.Integer(), nullable=False),
    sa.Column('module_id', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['module_id'], ['modules.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('description'),
    sa.UniqueConstraint('title')
    )
    op.create_table('messages',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('body', sa.Text(), nullable=False),
    sa.Column('sender_id', sa.Integer(), nullable=False),
    sa.Column('recipient_id', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['recipient_id'], ['users.id'], ),
    sa.ForeignKeyConstraint(['sender_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # Association table user <-> module (composite primary key).
    op.create_table('subscriptions',
    sa.Column('user_id', sa.Integer(), nullable=False),
    sa.Column('module_id', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['module_id'], ['modules.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('user_id', 'module_id')
    )
    op.create_table('answers',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('description', sa.Text(), nullable=False),
    sa.Column('user_id', sa.Integer(), nullable=False),
    sa.Column('error_id', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['error_id'], ['errors.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('description')
    )
    # ### end Alembic commands ###
def downgrade():
    """Revert the upgrade: drop every created table, then the enum type."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Tables are dropped in reverse dependency order so that no foreign
    # key ever points at an already-dropped table.
    op.drop_table('answers')
    op.drop_table('subscriptions')
    op.drop_table('messages')
    op.drop_table('errors')
    op.drop_table('users')
    op.drop_table('modules')
    op.drop_table('images')
    # drop_table does not remove the database-level type, so delete it
    # with raw SQL through a session bound to the migration connection.
    # NOTE(review): 'parent_types' is presumably an enum created by the
    # matching upgrade() -- confirm against that function.
    bind = op.get_bind()
    session = Session(bind=bind)
    session.execute('DROP TYPE parent_types')
    # ### end Alembic commands ###
|
class Solution(object):
    """LeetCode 7: "Reverse Integer"."""

    def reverse(self, x):
        """Return *x* with its decimal digits reversed.

        Returns 0 when the reversed value would overflow a signed 32-bit
        integer.  Uses floor division explicitly (the original relied on
        Python 2's integer ``/``, which loops forever under Python 3
        because ``b / 10`` becomes float division).
        """
        result = 0
        remaining = x if x > 0 else -x
        while remaining:
            remaining, digit = divmod(remaining, 10)
            # Exact pre-check: result * 10 + digit must stay <= 2**31 - 1.
            if result > (2 ** 31 - 1 - digit) // 10:
                return 0
            result = result * 10 + digit
        return result if x > 0 else -result
|
"""change domain config options
Revision ID: 253ae54f5788
Revises: 36c91aa9b3b5
Create Date: 2019-11-16 16:58:11.287152
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "253ae54f5788"
down_revision = "36c91aa9b3b5"
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Add the two nullable change-domain feature toggles to flicket_config.
    for column_name in (
        "change_domain",
        "change_domain_only_admin_or_super_user",
    ):
        op.add_column(
            "flicket_config", sa.Column(column_name, sa.BOOLEAN(), nullable=True)
        )
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Remove the columns in the reverse order of their creation.
    for column_name in (
        "change_domain_only_admin_or_super_user",
        "change_domain",
    ):
        op.drop_column("flicket_config", column_name)
    # ### end Alembic commands ###
|
# http://www.iso.org/iso/country_codes/iso_3166_code_lists.htm
# (code, display name) pairs using ISO 3166-1 alpha-3 codes, suitable for
# use as Django/form "choices".  Kept in display-name order.
COUNTRIES = (
    (u'AFG', u'Afghanistan'),
    (u'ALA', u'Aland Islands'),
    (u'ALB', u'Albania'),
    (u'DZA', u'Algeria'),
    (u'ASM', u'American Samoa'),
    (u'AND', u'Andorra'),
    (u'AGO', u'Angola'),
    (u'AIA', u'Anguilla'),
    (u'ATG', u'Antigua and Barbuda'),
    (u'ARG', u'Argentina'),
    (u'ARM', u'Armenia'),
    (u'ABW', u'Aruba'),
    (u'AUS', u'Australia'),
    (u'AUT', u'Austria'),
    (u'AZE', u'Azerbaijan'),
    (u'BHS', u'Bahamas'),
    (u'BHR', u'Bahrain'),
    (u'BGD', u'Bangladesh'),
    (u'BRB', u'Barbados'),
    (u'BLR', u'Belarus'),
    (u'BEL', u'Belgium'),
    (u'BLZ', u'Belize'),
    (u'BEN', u'Benin'),
    (u'BMU', u'Bermuda'),
    (u'BTN', u'Bhutan'),
    (u'BOL', u'Bolivia'),
    (u'BIH', u'Bosnia and Herzegovina'),
    (u'BWA', u'Botswana'),
    (u'BRA', u'Brazil'),
    (u'VGB', u'British Virgin Islands'),
    (u'BRN', u'Brunei Darussalam'),
    (u'BGR', u'Bulgaria'),
    (u'BFA', u'Burkina Faso'),
    (u'BDI', u'Burundi'),
    (u'KHM', u'Cambodia'),
    (u'CMR', u'Cameroon'),
    (u'CAN', u'Canada'),
    (u'CPV', u'Cape Verde'),
    (u'CYM', u'Cayman Islands'),
    (u'CAF', u'Central African Republic'),
    (u'TCD', u'Chad'),
    # NOTE(review): 'CIL' is not an official ISO 3166-1 alpha-3 code
    # (the Channel Islands have no ISO country code) -- confirm intended.
    (u'CIL', u'Channel Islands'),
    (u'CHL', u'Chile'),
    (u'CHN', u'China'),
    (u'HKG', u'China - Hong Kong'),
    (u'MAC', u'China - Macao'),
    (u'COL', u'Colombia'),
    (u'COM', u'Comoros'),
    (u'COG', u'Congo'),
    (u'COK', u'Cook Islands'),
    (u'CRI', u'Costa Rica'),
    (u'CIV', u'Cote d\'Ivoire'),
    (u'HRV', u'Croatia'),
    (u'CUB', u'Cuba'),
    (u'CYP', u'Cyprus'),
    (u'CZE', u'Czech Republic'),
    (u'PRK', u'Democratic People\'s Republic of Korea'),
    (u'COD', u'Democratic Republic of the Congo'),
    (u'DNK', u'Denmark'),
    (u'DJI', u'Djibouti'),
    (u'DMA', u'Dominica'),
    (u'DOM', u'Dominican Republic'),
    (u'ECU', u'Ecuador'),
    (u'EGY', u'Egypt'),
    (u'SLV', u'El Salvador'),
    (u'GNQ', u'Equatorial Guinea'),
    (u'ERI', u'Eritrea'),
    (u'EST', u'Estonia'),
    (u'ETH', u'Ethiopia'),
    (u'FRO', u'Faeroe Islands'),
    (u'FLK', u'Falkland Islands (Malvinas)'),
    (u'FJI', u'Fiji'),
    (u'FIN', u'Finland'),
    (u'FRA', u'France'),
    (u'GUF', u'French Guiana'),
    (u'PYF', u'French Polynesia'),
    (u'GAB', u'Gabon'),
    (u'GMB', u'Gambia'),
    (u'GEO', u'Georgia'),
    (u'DEU', u'Germany'),
    (u'GHA', u'Ghana'),
    (u'GIB', u'Gibraltar'),
    (u'GRC', u'Greece'),
    (u'GRL', u'Greenland'),
    (u'GRD', u'Grenada'),
    (u'GLP', u'Guadeloupe'),
    (u'GUM', u'Guam'),
    (u'GTM', u'Guatemala'),
    (u'GGY', u'Guernsey'),
    (u'GIN', u'Guinea'),
    (u'GNB', u'Guinea-Bissau'),
    (u'GUY', u'Guyana'),
    (u'HTI', u'Haiti'),
    (u'VAT', u'Holy See (Vatican City)'),
    (u'HND', u'Honduras'),
    (u'HUN', u'Hungary'),
    (u'ISL', u'Iceland'),
    (u'IND', u'India'),
    (u'IDN', u'Indonesia'),
    (u'IRN', u'Iran'),
    (u'IRQ', u'Iraq'),
    (u'IRL', u'Ireland'),
    (u'IMN', u'Isle of Man'),
    (u'ISR', u'Israel'),
    (u'ITA', u'Italy'),
    (u'JAM', u'Jamaica'),
    (u'JPN', u'Japan'),
    (u'JEY', u'Jersey'),
    (u'JOR', u'Jordan'),
    (u'KAZ', u'Kazakhstan'),
    (u'KEN', u'Kenya'),
    (u'KIR', u'Kiribati'),
    (u'KWT', u'Kuwait'),
    (u'KGZ', u'Kyrgyzstan'),
    (u'LAO', u'Lao People\'s Democratic Republic'),
    (u'LVA', u'Latvia'),
    (u'LBN', u'Lebanon'),
    (u'LSO', u'Lesotho'),
    (u'LBR', u'Liberia'),
    (u'LBY', u'Libyan Arab Jamahiriya'),
    (u'LIE', u'Liechtenstein'),
    (u'LTU', u'Lithuania'),
    (u'LUX', u'Luxembourg'),
    (u'MKD', u'Macedonia'),
    (u'MDG', u'Madagascar'),
    (u'MWI', u'Malawi'),
    (u'MYS', u'Malaysia'),
    (u'MDV', u'Maldives'),
    (u'MLI', u'Mali'),
    (u'MLT', u'Malta'),
    (u'MHL', u'Marshall Islands'),
    (u'MTQ', u'Martinique'),
    (u'MRT', u'Mauritania'),
    (u'MUS', u'Mauritius'),
    (u'MYT', u'Mayotte'),
    (u'MEX', u'Mexico'),
    (u'FSM', u'Micronesia, Federated States of'),
    (u'MCO', u'Monaco'),
    (u'MNG', u'Mongolia'),
    (u'MNE', u'Montenegro'),
    (u'MSR', u'Montserrat'),
    (u'MAR', u'Morocco'),
    (u'MOZ', u'Mozambique'),
    (u'MMR', u'Myanmar'),
    (u'NAM', u'Namibia'),
    (u'NRU', u'Nauru'),
    (u'NPL', u'Nepal'),
    (u'NLD', u'Netherlands'),
    (u'ANT', u'Netherlands Antilles'),
    (u'NCL', u'New Caledonia'),
    (u'NZL', u'New Zealand'),
    (u'NIC', u'Nicaragua'),
    (u'NER', u'Niger'),
    (u'NGA', u'Nigeria'),
    (u'NIU', u'Niue'),
    (u'NFK', u'Norfolk Island'),
    (u'MNP', u'Northern Mariana Islands'),
    (u'NOR', u'Norway'),
    (u'PSE', u'Occupied Palestinian Territory'),
    (u'OMN', u'Oman'),
    (u'PAK', u'Pakistan'),
    (u'PLW', u'Palau'),
    (u'PAN', u'Panama'),
    (u'PNG', u'Papua New Guinea'),
    (u'PRY', u'Paraguay'),
    (u'PER', u'Peru'),
    (u'PHL', u'Philippines'),
    (u'PCN', u'Pitcairn'),
    (u'POL', u'Poland'),
    (u'PRT', u'Portugal'),
    (u'PRI', u'Puerto Rico'),
    (u'QAT', u'Qatar'),
    (u'KOR', u'Republic of Korea'),
    (u'MDA', u'Republic of Moldova'),
    (u'REU', u'Reunion'),
    (u'ROU', u'Romania'),
    (u'RUS', u'Russian Federation'),
    (u'RWA', u'Rwanda'),
    (u'BLM', u'Saint-Barthelemy'),
    (u'SHN', u'Saint Helena'),
    (u'KNA', u'Saint Kitts and Nevis'),
    (u'LCA', u'Saint Lucia'),
    (u'MAF', u'Saint-Martin (French part)'),
    (u'SPM', u'Saint Pierre and Miquelon'),
    (u'VCT', u'Saint Vincent and the Grenadines'),
    (u'WSM', u'Samoa'),
    (u'SMR', u'San Marino'),
    (u'STP', u'Sao Tome and Principe'),
    (u'SAU', u'Saudi Arabia'),
    (u'SEN', u'Senegal'),
    (u'SRB', u'Serbia'),
    (u'SYC', u'Seychelles'),
    (u'SLE', u'Sierra Leone'),
    (u'SGP', u'Singapore'),
    (u'SVK', u'Slovakia'),
    (u'SVN', u'Slovenia'),
    (u'SLB', u'Solomon Islands'),
    (u'SOM', u'Somalia'),
    (u'ZAF', u'South Africa'),
    (u'ESP', u'Spain'),
    (u'LKA', u'Sri Lanka'),
    (u'SDN', u'Sudan'),
    (u'SUR', u'Suriname'),
    (u'SJM', u'Svalbard and Jan Mayen Islands'),
    (u'SWZ', u'Swaziland'),
    (u'SWE', u'Sweden'),
    (u'CHE', u'Switzerland'),
    (u'SYR', u'Syrian Arab Republic'),
    (u'TJK', u'Tajikistan'),
    (u'THA', u'Thailand'),
    (u'TLS', u'Timor-Leste'),
    (u'TGO', u'Togo'),
    (u'TKL', u'Tokelau'),
    (u'TON', u'Tonga'),
    (u'TTO', u'Trinidad and Tobago'),
    (u'TUN', u'Tunisia'),
    (u'TUR', u'Turkey'),
    (u'TKM', u'Turkmenistan'),
    (u'TCA', u'Turks and Caicos Islands'),
    (u'TUV', u'Tuvalu'),
    (u'UGA', u'Uganda'),
    (u'UKR', u'Ukraine'),
    (u'ARE', u'United Arab Emirates'),
    (u'GBR', u'United Kingdom'),
    (u'TZA', u'United Republic of Tanzania'),
    (u'USA', u'United States of America'),
    (u'VIR', u'United States Virgin Islands'),
    (u'URY', u'Uruguay'),
    (u'UZB', u'Uzbekistan'),
    (u'VUT', u'Vanuatu'),
    (u'VEN', u'Venezuela (Bolivarian Republic of)'),
    (u'VNM', u'Viet Nam'),
    (u'WLF', u'Wallis and Futuna Islands'),
    (u'ESH', u'Western Sahara'),
    (u'YEM', u'Yemen'),
    (u'ZMB', u'Zambia'),
    (u'ZWE', u'Zimbabwe'),
) |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Class for vehicle data
"""
class Vehicle(object):
    """Snapshot of one vehicle's stop/status taken from a feed entity."""

    def __init__(self, v=None):
        """Start with every field None; copy from *v* when one is given."""
        self.current_stop_number = None
        self.current_stop_id = None
        self.timestamp = None
        self.current_stop_status = None
        if v:
            self.set(v)

    def set(self, v):
        """Copy the relevant attributes off the feed entity *v*."""
        (self.current_stop_number,
         self.current_stop_id,
         self.timestamp,
         self.current_stop_status) = (v.current_stop_sequence,
                                      v.stop_id,
                                      v.timestamp,
                                      v.current_status)
|
class Primitives(object):
    """Base class for objects that install primitives into a holder class."""

    def __init__(self, universe):
        self.universe = universe
        self._holder = None  # class currently receiving the primitives

    def install_primitives_in(self, value):
        """Remember *value* as the holder, then run the installation."""
        self._holder = value
        self.install_primitives()

    def install_primitives(self):
        """Subclasses register their concrete primitives here."""
        raise NotImplementedError()

    def _install_instance_primitive(self, primitive, warn_if_not_existing=False):
        """Register *primitive* directly on the holder class."""
        self._holder.add_primitive(primitive, warn_if_not_existing)

    def _install_class_primitive(self, primitive, warn_if_not_existing=False):
        """Register *primitive* on the class of the holder class."""
        holder_class = self._holder.get_class(self.universe)
        holder_class.add_primitive(primitive, warn_if_not_existing)
|
from django.contrib import admin
from .models import Product
from .models import Review

# Expose the catalogue models in the Django admin with the default
# ModelAdmin options.
admin.site.register(Product)
admin.site.register(Review) |
#!/usr/bin/env python
import ptpy
# Download every non-folder object from the first PTP-capable camera found.
camera = ptpy.PTPy()
with camera.session():
    handles = camera.get_object_handles(
        0,
        all_storage_ids=True,
        all_formats=True,
    )
    for handle in handles:
        info = camera.get_object_info(handle)
        print(info)
        # Download all things that are not groups of other things.
        if info.ObjectFormat != 'Association':
            obj = camera.get_object(handle)
            # Fix: open in binary mode -- obj.Data is raw file bytes, and a
            # text-mode handle rejects bytes on Python 3 (and would mangle
            # newline bytes on Python 2 / Windows).
            with open(info.Filename, mode='wb') as f:
                f.write(obj.Data)
|
#!/usr/bin/env python
from keras.models import Sequential
from keras.layers import Dense, Input, Activation
from keras.layers import Convolution2D, MaxPooling2D, Flatten, normalization,Dropout
from keras.models import model_from_json
from keras.optimizers import Adam
import cv2
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
# Visualizations will be shown in the notebook.
# %matplotlib inline  -- IPython magic; kept commented out because it is a
# syntax error when this file is executed as a plain Python script.
import pandas as pd
import numpy as np
from PIL import Image
from io import BytesIO
import base64
import os
# flips the image, saves new image, inverts the steering angle
def flipImage(img, st):
    """Mirror the image horizontally, save it under ./invImg/, and negate
    the steering angle.  Returns (new_basename, new_angle)."""
    source_path = "." + img
    # A zero angle stays the integer literal 0; anything else is negated.
    if float(st) == 0:
        st = 0
    else:
        st = float(st) * -1
    mirrored = cv2.imread(source_path)
    mirrored = cv2.flip(mirrored, 1)
    # img looks like "/IMG/<name>.<ext>"; pull out base name and extension
    # so the mirrored copy can be written as "<name>_inv.<ext>".
    name_parts = img.split('/')[2].split('.')
    inverted_name = name_parts[0] + "_inv." + name_parts[1]
    cv2.imwrite("./invImg/" + inverted_name, mirrored)
    return (inverted_name, st)
# prepares files for train, test, and validate by splitting driving log
def prepareData(filename, train_size=.8, test_size=.2, val_size=.3):
    """Split the driving log at *filename* into train_2.csv / test_2.csv /
    val_2.csv and return the three split sizes."""
    with open(filename, 'r') as log:
        head = log.readline()  # skip the CSV header row
        data = [line.strip() for line in log.readlines()]
    print(len(data))
    data = np.array(data)
    # First carve off the test portion, then split validation out of it.
    trainx, testx, trainy, testy = train_test_split(
        data, data, test_size=test_size, train_size=train_size)
    testx, valx, testy, valy = train_test_split(
        testx, testy, test_size=val_size, train_size=val_size)
    print("Train size = " + str(len(trainx)))
    print("Test size = " + str(len(testx)))
    print("Val size = " + str(len(valx)))
    for out_name, rows in (("train_2.csv", trainx),
                           ("test_2.csv", testx),
                           ("val_2.csv", valx)):
        with open(out_name, 'w') as out:
            for row in rows:
                out.write(row + "\n")
    return len(trainx), len(testx), len(valx)
# data generator to resolve memory issues
def data_generator(path):
    """Endlessly yield (image_batch, angle_batch) pairs of batch size 1,
    read line-by-line from the CSV at *path* (keeps memory usage flat)."""
    while 1:
        with open(path, 'r') as handle:
            for record in handle:
                image_path, angle = process_line(record)
                frame = np.array(process_image(image_path))
                # Prepend a batch axis of 1 for Keras.
                frame = np.reshape(frame, (1, frame.shape[0], frame.shape[1], frame.shape[2]))
                yield (frame, np.array([angle]))
def normalizeGrey(img):
    """Linearly rescale 8-bit grey values from [0, 255] into [0.0, 1.0]."""
    low, high = 0, 1          # target range
    grey_min, grey_max = 0, 255  # source range
    return low + ((img - grey_min) * (high - low)) / (grey_max - grey_min)
def val_generator(path):
    """Validation twin of data_generator: forever yields one-sample
    batches read from the CSV at *path*."""
    while 1:
        with open(path, 'r') as handle:
            for record in handle:
                image_path, angle = process_line(record)
                frame = process_image(image_path)
                frame = np.reshape(frame, (1, frame.shape[0], frame.shape[1], frame.shape[2]))
                yield (frame, np.array([angle]))
# image preprocessing function used by generator to resize image
def process_image(img):
    """Load the image at "." + *img* and return it halved in each
    dimension as a numpy array."""
    frame = Image.open("." + img)
    half_size = (frame.size[0] // 2, frame.size[1] // 2)
    return np.asarray(frame.resize(half_size))
# processes file line to find image file and steering angle
def process_line(line):
    """Parse one driving-log CSV row; return (image_path, steering_angle).

    Column 0 holds the centre-camera image path, column 3 the angle.
    """
    fields = line.strip().split(",")
    return fields[0], float(fields[3].strip())
def normalize(f):
    """Rescale array *f* to span [0, 255] and floor to whole numbers."""
    lo = float(f.min())
    hi = float(f.max())
    scaled = (f - lo) / (hi - lo) * 255.
    return np.floor(scaled)
# create a directory to store the inverted data
# NOTE(review): os.mkdir raises if ./invImg already exists, so rerunning
# this script fails here unless the folder is removed first.
os.mkdir("./invImg")
# Read the driving log to get image names: column 0 is the centre-camera
# image path, column 3 the steering angle.
f1 = open("driving_log.csv", 'r')
files = []
for l in f1.readlines():
    temp = l.split(",")
    files.append((temp[0].strip(), temp[3].strip()))
f1.close()
# read in each file and generate a flipped and inverted example for each
invertedImg = []
for c in files:
    # flip the image
    flipped = flipImage(c[0], c[1])
    # save the new image name
    # NOTE(review): flipImage writes the file under ./invImg/ but the log
    # entry points at /IMG/ -- verify the flipped files get moved there.
    newImgName = "/IMG/" + flipped[0]
    invertedImg.append((newImgName, flipped[1]))
# add the new images to the driving log (empty fields keep the CSV shape)
f1 = open("driving_log.csv", 'a')
for i in invertedImg:
    f1.write(i[0] + ", , ," + str(i[1]) + ", , ,\n")
f1.close()
# prepare the training, test and validation data
# NOTE(review): this reads "combined_log_inv.csv" although the log written
# above is "driving_log.csv" -- confirm which file is intended.
trainSize, testSize, valSize = prepareData("combined_log_inv.csv")
# dropout prob for dropout layers
dropout_prob = .05
# begin model definition
# parameters for layer sizes: widths of the four fully connected layers
fullDimensions = {
    'full1': 100,
    'full2': 50,
    'full3': 10,
    'full4': 1,
}
# output depth (number of filters) per convolution layer
layerDepth = {
    'convo1': 24,
    'convo2': 36,
    'convo3': 48,
    'convo4': 64,
    'convo5': 64
}
# kernel height per convolution layer
kernalRow = {
    'convo1': 5,
    'convo2': 5,
    'convo3': 5,
    'convo4': 3,
    'convo5': 3
}
# kernel width per convolution layer
kernalColumn = {
    'convo1': 5,
    'convo2': 5,
    'convo3': 5,
    'convo4': 3,
    'convo5': 3
}
# subsample (stride) per convolution layer
stride = {
    'convo1': (2, 2),
    'convo2': (2, 2),
    'convo3': (2, 2),
    'convo4': (1, 1),
    'convo5': (1, 1)
}
# sequential model layers
model = Sequential()
# normalization layer: batch-norm applied directly to the 80x160x3 frames
model.add(normalization.BatchNormalization(mode=2, input_shape=(80, 160, 3)))
# convolution layer with pooling_1 : 5x5
model.add(Convolution2D(layerDepth['convo1'], kernalRow['convo1'], kernalColumn['convo1'],
                        subsample=stride['convo1'], border_mode='valid', dim_ordering='tf', input_shape=(80, 160, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(kernalRow['convo1'], kernalColumn['convo1']),
                       strides=(1, 1), border_mode='same', dim_ordering='tf'))
# convolution layer with pooling_2 : 5x5
model.add(Convolution2D(layerDepth['convo2'], kernalRow['convo2'], kernalColumn['convo2'],
                        subsample=stride['convo2'], border_mode='valid', dim_ordering='tf'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(kernalRow['convo2'], kernalColumn['convo2']),
                       strides=(1, 1), border_mode='same', dim_ordering='tf'))
# convolution layer with pooling_3 : 5x5
model.add(Convolution2D(layerDepth['convo3'], kernalRow['convo3'], kernalColumn['convo3'],
                        subsample=stride['convo3'], border_mode='valid', dim_ordering='tf'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(kernalRow['convo3'], kernalColumn['convo3']),
                       strides=(1, 1), border_mode='same', dim_ordering='tf'))
model.add(Dropout(dropout_prob))
# convolution layer with pooling_4 : 3x3
model.add(Convolution2D(layerDepth['convo4'], kernalRow['convo4'], kernalColumn['convo4'],
                        subsample=stride['convo4'], border_mode='valid', dim_ordering='tf'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(kernalRow['convo4'], kernalColumn['convo4']),
                       strides=(1, 1), border_mode='same', dim_ordering='tf'))
# convolution layer with pooling_5 : 3x3
model.add(Convolution2D(layerDepth['convo5'], kernalRow['convo5'], kernalColumn['convo5'],
                        subsample=stride['convo5'], border_mode='valid', dim_ordering='tf'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(kernalRow['convo5'], kernalColumn['convo5']),
                       strides=(1, 1), border_mode='same', dim_ordering='tf'))
# flatten outputs
model.add(Flatten())
# fully connected layer_1: 100
model.add(Dense(fullDimensions['full1'], activation='relu'))
# (a dropout layer was commented as planned here but never added)
# fully connected layer_2: 50
model.add(Dense(fullDimensions['full2'], activation='relu'))
# dropout
model.add(Dropout(dropout_prob))
# fully connected layer_3: 10
model.add(Dense(fullDimensions['full3'], activation='relu'))
# fully connected layer_4: 1 with linear activation (steering angle output)
model.add(Dense(fullDimensions['full4'], activation='linear'))
print("Training the model ...")
# train model using mean squared error and the adam optimizer
model.compile(loss='mse',
              optimizer=Adam(lr=0.00001),
              metrics=['accuracy', 'mean_squared_error'])
history = model.fit_generator(data_generator('train_2.csv'),
                              samples_per_epoch=trainSize, nb_epoch=6, verbose=1,
                              validation_data=val_generator('test_2.csv'), nb_val_samples=testSize)
# print the per-epoch validation loss to standard output
print("Training Loss: ")
for h in history.history['val_mean_squared_error']:
    print(h)
print("Validating the model ....")
# validate the model
# NOTE(review): prepareData wrote "val_2.csv", but this reads "val.csv";
# one of the two names looks wrong.
eval_history = model.evaluate_generator(data_generator('val.csv'), val_samples=valSize, max_q_size=10, nb_worker=1, pickle_safe=False)
f1 = open('val.csv', 'r')
actual = []
for l in f1.readlines():
    temp = l.split(",")
    val = float(temp[3].strip())
    actual.append(val)
f1.close()
samples = len(actual)
count = 0
totalLoss = 0
# NOTE(review): evaluate_generator returns loss/metric values, not
# per-sample predictions, so comparing its entries against the recorded
# steering angles below looks wrong -- predict_generator is probably
# what was intended here.
for e in eval_history:
    totalLoss += pow(e - actual[count], 2)
    count += 1
avgLoss = float(totalLoss) / float(samples)
print("Validation error: " + str(avgLoss))
print("Saving the model ...")
# serialize model to JSON
model_json = model.to_json()
with open("model.json", "w") as json_file:
    json_file.write(model_json)
# serialize weights to HDF5
model.save_weights("model.h5")
print("Saved model to disk.....")
|
# -*- coding: utf-8 -*-
import subprocess
from airtest.core.api import *
from poco.drivers.android.uiautomation import AndroidUiautomationPoco
from utils.operation_profile import get_android_config
ADDRESS = get_android_config().get("ADDRESS") # 获取config.yml中ADDRESS
def android_get_devices():
    """Return the id of the single currently connected Android device.

    Raises OSError when zero or more than one device is attached.
    """
    # Shell pipeline that prints attached device ids, one per line.
    list_cmd = "adb devices|grep -w 'device'|awk -F ' ' '{print $1}'"
    my_device = subprocess.getoutput(list_cmd)
    count_cmd = "adb devices|grep -w 'device'|awk -F ' ' '{print $1}'| wc -l"
    device_count = int(subprocess.getoutput(count_cmd).strip())
    if device_count > 1:
        raise OSError("存在多个Android设备!")
    elif device_count < 1:
        raise OSError("没有连接的Android!")
    return my_device
def android_connet(my_phone):
    """Connect to the Android device *my_phone* and return a poco instance.

    If the device is already connected through the poco library, a fresh
    poco instance must not be created again -- hence the guarded attempt.
    Returns None (after printing the error) when the connection fails.
    """
    try:
        driver = connect_device(ADDRESS + my_phone)
        return AndroidUiautomationPoco(driver, force_restart=False)
    except Exception as e:
        # Print the error highlighted in red; implicitly returns None.
        print("\033[7;31m%s\033[1;31;40m" % e)
def android_kill():
    """Disconnect the current device by stopping the local adb server."""
    output = subprocess.getoutput("adb kill-server")
    print(output)
def android_start():
    """Start the local adb server so devices can connect."""
    output = subprocess.getoutput("adb start-server")
    print(output)
|
from django.http import HttpResponse
from django.shortcuts import render
# Create your views here.
def index(request):
    """Root view: return a static greeting as plain HTTP response."""
    body = "나는 장고의 가장 기본이 되는 원리를 깨우쳐버렸다..."
    return HttpResponse(body)
|
from Common.DBConnection import cursor
from DataAccess.DataModel import *
from BizModel.Entity import PageEntity
import math
def get_brand(brand_id: int):
    """Fetch one brand row by primary key; None when it does not exist."""
    matched = cursor.execute('SELECT * FROM tb_fqs_brand WHERE id=%s', brand_id)
    if not matched:
        return None
    row = cursor.fetchone()
    return TB_FQS_Brand(row['id'], row['brand_name'])
def get_brand_by_name(brand_name: str):
    """Fetch one brand row by exact name; None when it does not exist."""
    matched = cursor.execute('SELECT * FROM tb_fqs_brand WHERE brand_name=%s', brand_name)
    if not matched:
        return None
    row = cursor.fetchone()
    return TB_FQS_Brand(row['id'], row['brand_name'])
def _paging_clauses(page_entity: PageEntity):
    """Build the ORDER BY / LIMIT SQL fragments for a brand page query."""
    if len(page_entity.order_by.strip()) == 0:
        order_clause = ''
    else:
        # NOTE(review): order_by is spliced into the SQL text verbatim; it
        # must never be fed from untrusted input (SQL-injection risk).
        order_clause = 'ORDER BY ' + page_entity.order_by
    if page_entity.page_size == 0:
        limit_clause = ''
    else:
        offset = (page_entity.page_index - 1) * page_entity.page_size
        limit_clause = 'LIMIT ' + str(offset) + ',' + str(page_entity.page_size)
    return order_clause, limit_clause


def _fetch_brand_rows(row_number: int):
    """Materialize the cursor's pending row_number rows as TB_FQS_Brand."""
    result = []
    for _ in range(row_number):
        row = cursor.fetchone()
        result.append(TB_FQS_Brand(row['id'], row['brand_name']))
    return result


def _fill_page_totals(page_entity: PageEntity):
    """Populate total_row / page_count on *page_entity*.

    NOTE(review): mirrors the original code -- the count covers the whole
    table even when a name filter was applied; confirm that is intended.
    """
    cursor.execute('SELECT COUNT(0) AS number FROM tb_fqs_brand')
    page_entity.total_row = cursor.fetchone()['number']
    if page_entity.page_size != 0:
        page_entity.page_count = math.ceil(page_entity.total_row / page_entity.page_size)


def get_brands(brand_name: str, page_entity: PageEntity):
    """Return one page of brands, optionally filtered by name substring.

    Fills page_entity.total_row / page_count as a side effect; returns
    None when nothing matches.  (Refactored: both branches previously
    duplicated the fetch loop and the page-total bookkeeping.)
    """
    if page_entity is None:
        page_entity = PageEntity()
    order_clause, limit_clause = _paging_clauses(page_entity)
    if brand_name is None or len(brand_name.strip()) == 0:
        sql = 'SELECT * FROM tb_fqs_brand {0} {1}'.format(order_clause, limit_clause)
        row_number = cursor.execute(sql)
    else:
        sql = "SELECT * FROM tb_fqs_brand WHERE brand_name LIKE %s {0} {1}".format(
            order_clause, limit_clause)
        row_number = cursor.execute(sql, '%' + brand_name + '%')
    if row_number == 0:
        return None
    result = _fetch_brand_rows(row_number)
    _fill_page_totals(page_entity)
    return result
def insert_brand(brand_name: str):
    """Insert a new brand row; returns the cursor's affected-row count."""
    return cursor.execute('INSERT INTO tb_fqs_brand(`brand_name`) VALUES(%s)', brand_name)
def update_brand(brand_id: int, brand_name: str):
    """Rename the brand with id *brand_id*; returns the affected-row count."""
    return cursor.execute('UPDATE tb_fqs_brand SET brand_name=%s WHERE id=%s', (brand_name, brand_id))
def delete_brand(brand_id: int):
    """Delete the brand with id *brand_id*; returns the affected-row count."""
    return cursor.execute('DELETE FROM tb_fqs_brand WHERE id=%s', brand_id)
|
import mysql.connector

# Module-level connection shared by all operator methods.
# NOTE(review): the credentials are hard-coded; move them to configuration
# or environment variables before sharing or deploying this script.
mydb = mysql.connector.connect(host="localhost",
                               user="root",
                               password="jyotiadate",
                               database="Society_db")
class operator:
    """Thin data-access helper around the `meeting` table of Society_db."""

    def show(self, mee, mpur, mdes):
        """Insert one meeting row (date, purpose, description)."""
        sql = "insert into meeting(date,purpose,des) values(%s,%s,%s)"
        val = (mee, mpur, mdes)
        cursor = mydb.cursor()
        cursor.execute(sql, val)
        mydb.commit()
        print(cursor.rowcount, "Record inserted successfully into meeting table")
        cursor.close()
        return True

    def close(self):
        # NOTE(review): self.root is never assigned in this class; this only
        # works if a caller/subclass sets a Tk root on the instance first.
        self.root.destroy()

    def selectQuery(self):
        """Print every row of the meeting table."""
        cursor = mydb.cursor()
        cursor.execute("select * from meeting")
        result = cursor.fetchall()
        print(result)

    def selectWhere(self, No):
        """Print the meeting rows whose date equals *No*.

        Fix: the previous query string was syntactically broken SQL
        ("... where ='%s' +("); it is now a parameterized query, which
        also avoids SQL injection.  The filter column is assumed to be
        `date` -- TODO confirm the intended column.
        """
        cursor = mydb.cursor()
        cursor.execute("select * from meeting where date = %s", (No,))
        res = cursor.fetchall()
        print(res)
|
# -*- coding: utf-8 -*-
import config
import telebot
import vk
import time
import random
import db
session = vk.AuthSession(config.my_app_id, config.user_login, config.user_password, scope='wall, messages')
vkapi = vk.API(session, v="5.62")
bot = telebot.TeleBot(config.token)
id_group = config.id_group  # id of the group that receives the posts
offset_start = {}  # per-chat: number of the post the count starts from
offset_end = {}  # per-chat: number of the last post
owner_id_n = config.owner_id_n  # ids of the source groups
owner_id_str = config.owner_id_str
ow = {}  # per-chat index used to pick from the list of group ids
id_pass = []  # ids of authorised users
password_set = ["111", "222", "333"]
stop_set = ["stop", "стоп"]
check_data = {}  # per-chat flag for the date/time-entry workflow
time_dic = {}  # per-chat publish date stored as a string
time_list = {}  # per-chat publish time in unix encoding
str_link_photo1 = {}  # collected photo links
string_photo = {}  # collected photo links joined into one string
lst_link_photo = {}  # photo ids keyed by post url
list_url = {}  # post text keyed by post url
photo = {}  # photo ids being collected
tags = """\n\n#Куплю #Продам #Обменяю #Барахолка #БарахолкаСПб #СПб"""
end_dic = {}  # keys into the main dictionaries (post urls)
@bot.message_handler(commands=["start", "help"])
def start(message):
    """Greet the user on /start or /help."""
    user = message.from_user.id
    bot.send_message(user, "Hi")
def random_id_group(message):
    """Pick a random index into the source-group id lists for this chat."""
    chosen = random.randint(0, 9)
    ow[message.chat.id] = chosen
    return chosen
def state_mes(message):
    """Log the user out when they send a stop word."""
    if message.text.lower() not in stop_set:
        return
    if message.chat.id in id_pass:  # only if the user is authorised
        bot.send_message(message.from_user.id, "You logged off")
        id_pass.remove(message.chat.id)
@bot.message_handler(func=lambda message: message.text in password_set and message.chat.id not in id_pass)  # password login
def login_usr(message):
    """Authorise a user who sent a valid password and seed their post queues."""
    bot.send_message(message.from_user.id, "The correct password!")
    check_data[message.chat.id] = 0
    print("0== ", check_data[message.chat.id])
    n1 = random.randint(2, 10)
    n2 = random.randint(11, 20)
    offset_start[message.chat.id] = [x for x in range(n1, n2)]  # build the queue of post numbers
    offset_end[message.chat.id] = [x for x in range(n1 + 1, n2 + 1)]
    random_id_group(message)  # pick an index into the group-id lists
    if message.chat.id not in id_pass:  # remember the user if not seen yet
        id_pass.append(message.chat.id)
        bot.send_message(message.from_user.id, "Your ID")
        bot.send_message(message.from_user.id, message.chat.id)
    else:
        bot.send_message(message.from_user.id, "You are already in the database")
@bot.message_handler(func=lambda message: message.text not in password_set and message.chat.id not in id_pass)
def access_check(message):
    # An unauthenticated user sent something that is not a valid password.
    return bot.send_message(message.from_user.id, "Password error!")
@bot.message_handler(func=lambda message: message.chat.id in id_pass and message.text.lower() in stop_set)
def access_close(message):
    # Log the user out and drop their queued post offsets.
    id_pass.remove(message.chat.id)
    offset_start[message.chat.id].clear()
    offset_end[message.chat.id].clear()
    return bot.send_message(message.from_user.id, "The session closed")
def api_vk_func(message):  # main VK API access function
    """Fetch the next suitable wall post for this chat.

    Recurses (advancing the offset queues) until a post is found that has
    a signer id and attachments and is not a repost.  Returns the pair
    (api_result, post_url).
    """
    col_elem = offset_end[message.chat.id][0] - offset_start[message.chat.id][0]  # compute the new post number span
    a = vkapi.wall.get(owner_id=owner_id_n[ow[message.chat.id]], offset=offset_start[message.chat.id][0],
                       count=col_elem)
    s = 'https://vk.com/public{0}?w=wall-{0}_{1}'.format(owner_id_str[ow[message.chat.id]], a['items'][0]['id'])
    print(s)
    list_param = a['items'][0]
    # Check the post carries the required fields, i.e. a photo attachment
    # and the id of the user who submitted it, and is not a repost.
    if 'signer_id' in list_param and 'attachments' in list_param and 'copy_history' not in list_param:
        time.sleep(1)
        print(list_param['signer_id'])
        print(list_param['attachments'])
        print("vse OK")
        return a, s
    else:
        time.sleep(1)
        offset_start[message.chat.id].pop(0)
        offset_end[message.chat.id].pop(0)  # drop the head element
        offset_start[message.chat.id].append(max(offset_start[message.chat.id]) + 1)  # slide the window forward
        offset_end[message.chat.id].append(max(offset_end[message.chat.id]) + 1)
        print("Ne OK")
        return api_vk_func(message)  # not OK: recurse to try the next post
#командой стейт получем первый пост а так же омжно проверить какой пост сейчас на очереди
# "state" fetches the first post and shows which post is queued next;
# "0" rejects the current post and advances to the next one.
@bot.message_handler(func=lambda message: message.chat.id in id_pass and message.text == 'state' or message.text == "0")
def state_post(message):
    """Show the pending post, or skip it when the user sends "0"."""
    if message.text == "state":
        s = api_vk_func(message)
        print("state = ", s[1])
        bot.send_message(message.from_user.id, s[1])
    if message.text == "0" and message.text != "state":  # "0": reject this post and move on
        offset_start[message.chat.id].pop(0)
        offset_end[message.chat.id].pop(0)  # remove this post number from the queue
        random_id_group(message)  # pick a new source group
        s = api_vk_func(message)
        print("0 = ", s[1])
        bot.send_message(message.from_user.id, s[1])
        offset_start[message.chat.id].append(max(offset_start[message.chat.id]) + 1)  # keep the queue endless
        offset_end[message.chat.id].append(max(offset_end[message.chat.id]) + 1)
@bot.message_handler(func=lambda message: message.chat.id in id_pass and message.text == "1")  # compose the post
def add_post(message):
    """Collect the pending post's text and photos, then ask for a publish date."""
    s = api_vk_func(message)
    text = s[0]['items'][0]['text']  # text of the source post
    text += """\n\nhttps://vk.com/id{0}""".format(s[0]['items'][0]['signer_id'])  # append the author's id link
    text += tags  # append the hashtag footer
    dbase = db.Basesql('userlink.db', 'links')
    dbase.insert_db(s[0]['items'][0]['signer_id'], s[1])
    list_url[message.chat.id] = {}
    list_url[message.chat.id][s[1]] = text  # post text keyed by post url
    len_photo = s[0]['items'][0]['attachments']  # list of attached files
    lst_link_photo[message.chat.id] = {}
    photo[message.chat.id] = []  # photo ids being collected
    for j in range(len(len_photo)):
        photo[message.chat.id].append(str(s[0]['items'][0]['attachments'][j]['photo']['id']))  # ids of attached photos
    lst_link_photo[message.chat.id][s[1]] = photo[message.chat.id]  # list of photo ids
    str_link_photo1[message.chat.id] = []
    for n in range(len(lst_link_photo[message.chat.id][s[1]])):
        str_link_photo1[message.chat.id].append("photo-{0}_{1}".format(owner_id_str[ow[message.chat.id]],
                                                str(lst_link_photo[message.chat.id][s[1]][n])))  # build the link from group id and photo id
    string_photo[message.chat.id] = {}  # nested dict per chat
    string_photo[message.chat.id][s[1]] = ', '.join(str_link_photo1[message.chat.id])  # list -> comma-separated string
    str_link_photo1[message.chat.id].clear()  # reset the link list
    photo[message.chat.id].clear()  # reset the id list
    end_dic[message.chat.id] = []  # keys into the main dictionaries
    for i in list_url[message.chat.id].keys():  # the post url acts as the key
        end_dic[message.chat.id].append(i)  # store the urls used as send keys
    if message.chat.id in time_dic:  # show the date of the last publication, if any
        bot.send_message(message.from_user.id, "date and time of the last post {0}".format(time_dic[message.chat.id][0]))
    bot.send_message(message.from_user.id, "Enter the data and time")
    time_dic[message.chat.id] = []  # publish date stored here as a string
    time_list[message.chat.id] = []  # publish time in unix encoding
    check_data[message.chat.id] = 1  # flag: now expecting a date/time message
    print("1== ", check_data[message.chat.id])
@bot.message_handler(func=lambda message: message.chat.id in id_pass and check_data[message.chat.id] == 1
                     and message.text != "1" or message.text != "0")  # post to the group
def check_data_add(message):
    """Parse a 12-digit DDMMYYYYHHMM message and schedule the composed post."""
    # build the date for delayed posting
    print("2== ", check_data[message.chat.id])
    if len(message.text) == 12 and int(message.text) and check_data[message.chat.id] == 1:
        time_dic[message.chat.id].append(message.text)
        # slice the digit groups and reassemble them in Y-m-d H:M order
        time_list[message.chat.id].append(int(time.mktime(time.strptime('{0}-{1}-{2} {3}:{4}:00'.format(time_dic[message.chat.id][0][4:8],
                                                                                                        time_dic[message.chat.id][0][2:4],
                                                                                                        time_dic[message.chat.id][0][0:2],
                                                                                                        time_dic[message.chat.id][0][8:10],
                                                                                                        time_dic[message.chat.id][0][10:12]), '%Y-%m-%d %H:%M:%S'))))
        check_data[message.chat.id] = 1
        # Schedule the post on the target group's wall.
        vkapi.wall.post(owner_id=id_group, from_group=1, message=list_url[message.chat.id][end_dic[message.chat.id][0]],
                        attachments=string_photo[message.chat.id][end_dic[message.chat.id][0]], publish_date=time_list[message.chat.id][0])
        dbase = db.Basesql('userlink.db', 'links')
        idmax = dbase.select_maxid()
        link_to_post = link_post_my_group(message, time_list)
        dbase.update_maxid(time_list[message.chat.id][0], link_to_post, idmax)
        end_dic[message.chat.id].clear()  # clean up per-chat state
        list_url[message.chat.id].clear()  # clean up per-chat state
        time_list[message.chat.id].clear()
        bot.send_message(message.from_user.id, "Saved")
        offset_start[message.chat.id].pop(0)
        offset_end[message.chat.id].pop(0)
        random_id_group(message)  # pick a new group to take posts from
        s = api_vk_func(message)
        bot.send_message(message.from_user.id, s[1])
        offset_start[message.chat.id].append(max(offset_start[message.chat.id]) + 1)  # compute and append the next queue element
        offset_end[message.chat.id].append(max(offset_end[message.chat.id]) + 1)
        check_data[message.chat.id] = 0
        print("3== ", check_data[message.chat.id])
    # if len(message.text) != 12 or len(message.text) == 2:  # could be extended so that
    # entering e.g. "30" schedules the post 30 minutes from the current time
    if len(message.text) != 12 and check_data[message.chat.id] == 1:
        bot.send_message(message.from_user.id, "Not the right format of date and time!")
    if check_data[message.chat.id] == 0:
        bot.send_message(message.from_user.id, "Enter the number 1 or 0")
def link_post_my_group(message, time_list):  # build the link to the post in my group, for the database
    """Return the URL of the postponed wall post whose publish date equals
    the most recently entered timestamp for this chat (None if no match)."""
    # Pull up to 100 postponed posts from the group's wall.
    postponed = vkapi.wall.get(owner_id=id_group, filter='postponed', offset=0, count=100)
    wanted_stamp = time_list[message.chat.id][0]
    for item in postponed['items']:
        # Compare each postponed post's date with the last entered timestamp.
        if item['date'] == wanted_stamp:
            group_number = str(id_group)[1:]  # group id without the leading '-'
            return 'https://vk.com/public{0}?w=wall-{0}_{1}'.format(group_number, item['id'])
bot.polling(none_stop=True)
|
import socket
from thread import *
HOST = '0.0.0.0'  # listen on all interfaces
PORT = 2222
# TCP listening socket for the echo server; backlog of 10 pending connections.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOST, PORT))
s.listen(10)
def clientthread(conct):
    """Echo loop for a single client connection.

    Receives up to 1024 bytes at a time and echoes them back until the
    client sends 'close' or the connection is closed by the peer.
    """
    while True:
        data = conct.recv(1024)
        # Bug fix: an empty recv() means the peer closed the connection;
        # the original looped forever echoing empty payloads after that.
        if not data or data == 'close':
            break
        conct.send(data)
    conct.close()
# Accept clients forever, handling each connection on its own thread.
while 1:
    conn, addr = s.accept()
    start_new_thread(clientthread, (conn,))
# NOTE(review): unreachable — the loop above never exits.
s.close()
from django.db import models
from django.contrib.auth.models import User
class post(models.Model):
    """Blog post: image plus thumbnail, title/slug, body text, date, author."""
    imagen = models.ImageField(upload_to='fotos')     # full-size image
    miniatura = models.ImageField(upload_to='fotos')  # thumbnail
    titulo = models.CharField(max_length=100)
    slug = models.SlugField(max_length=100)
    cuerpo = models.TextField()
    fecha = models.DateField()
    # on_delete is required from Django 2.0 on; CASCADE matches the old
    # implicit default (deleting a user deletes their posts).
    autor = models.ForeignKey(User, on_delete=models.CASCADE)

    def __unicode__(self):
        return self.titulo
|
__author__ = 'ferdous'
import random
def run():
    """Print 300 random system-dictionary words wrapped as HTML <li> links."""
    # Context manager closes the file; the original leaked the handle.
    with open('/usr/share/dict/words') as words_file:
        lines = words_file.readlines()
    # The loop index was unused, so iterate a plain count of 300.
    for _ in range(300):
        # rstrip() drops the trailing newline from the dictionary entry.
        print(''.join(['<li><a href="#">', random.choice(lines).rstrip(), '</a></li>']))
if __name__ == "__main__":
    # Entry point when executed directly (no-op on import).
    run()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-09-19 08:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: drops the Choice/Question poll models and
    adds a deployment-environment field ('env') to App and AppHost."""

    dependencies = [
        ('nova', '0019_task_svn_url'),
    ]

    operations = [
        # Remove the FK before both poll models are deleted below.
        migrations.RemoveField(
            model_name='choice',
            name='question',
        ),
        # verbose_name is escaped Chinese for "deployment environment".
        migrations.AddField(
            model_name='app',
            name='env',
            field=models.CharField(default='test', max_length=25, verbose_name='\u90e8\u7f72\u73af\u5883'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='apphost',
            name='env',
            field=models.CharField(default='test', max_length=25, verbose_name='\u90e8\u7f72\u73af\u5883'),
            preserve_default=False,
        ),
        # Choices are production / pre-production / test environments.
        migrations.AlterField(
            model_name='asset',
            name='env',
            field=models.IntegerField(blank=True, choices=[(1, '\u751f\u4ea7\u73af\u5883'), (2, '\u51c6\u751f\u4ea7\u73af\u5883'), (3, '\u6d4b\u8bd5\u73af\u5883')], null=True, verbose_name='\u8fd0\u884c\u73af\u5883'),
        ),
        migrations.DeleteModel(
            name='Choice',
        ),
        migrations.DeleteModel(
            name='Question',
        ),
    ]
|
import urllib
import urllib.request
import re
import random
import time
import os
import pandas as pd
import json
import guesssp
import dataarrange
#import numpy
# Scrape the required content
# Pool of browser User-Agent strings; one is picked at random per request
# so the scraper's traffic resembles ordinary browsers.
user_agent = ["Mozilla/5.0 (Windows NT 10.0; WOW64)", 'Mozilla/5.0 (Windows NT 6.3; WOW64)',
              'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
              'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko',
              'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1500.95 Safari/537.36',
              'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; rv:11.0) like Gecko)',
              'Mozilla/5.0 (Windows; U; Windows NT 5.2) Gecko/2008070208 Firefox/3.0.1',
              'Mozilla/5.0 (Windows; U; Windows NT 5.1) Gecko/20070309 Firefox/2.0.0.3',
              'Mozilla/5.0 (Windows; U; Windows NT 5.1) Gecko/20070803 Firefox/1.5.0.12',
              'Opera/9.27 (Windows NT 5.2; U; zh-cn)',
              'Mozilla/5.0 (Macintosh; PPC Mac OS X; U; en) Opera 8.0',
              'Opera/8.0 (Macintosh; PPC Mac OS X; U; en)',
              'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.12) Gecko/20080219 Firefox/2.0.0.12 Navigator/9.0.0.6',
              'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Win64; x64; Trident/4.0)',
              'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Trident/4.0)',
              'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.2; .NET4.0C; .NET4.0E)',
              'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Maxthon/4.0.6.2000 Chrome/26.0.1410.43 Safari/537.1 ',
              'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.2; .NET4.0C; .NET4.0E; QQBrowser/7.3.9825.400)',
              'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:21.0) Gecko/20100101 Firefox/21.0 ',
              'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.92 Safari/537.1 LBBROWSER',
              'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0; BIDUBrowser 2.x)',
              'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.11 TaoBrowser/3.0 Safari/536.11']
def spider_score(gameid):
    """Scrape the current score and kick-off datetime for one match.

    Returns a flat list: the score cells first, then the
    'YYYY-MM-DD HH:MM' kick-off string; empty if the page was unreachable.
    """
    stock_total = []
    for page in range(1, 2):
        url = 'http://vip.win007.com/AsianOdds_n.aspx?id=' + str(gameid) + '&l=0'  # gameid
        # Random User-Agent so the request looks like browser traffic.
        request = urllib.request.Request(url,
                                         headers={"User-Agent": random.choice(user_agent)})
        try:
            response = urllib.request.urlopen(request)
        except urllib.error.HTTPError as e:  # error reporting
            print('page=', page, '', e.code)
            continue  # bug fix: 'response' would be undefined below, raising NameError
        except urllib.error.URLError as e:
            print('page=', page, '', e.reason)
            continue  # bug fix: same as above
        content = response.read().decode('utf-8')  # read the page body
        pattern = re.compile('<div class="vs"[\s\S]*')
        body = re.findall(pattern, str(content))
        pattern = re.compile('(?<=<div class="score">).*(?=<)')
        data_page = re.findall(pattern, body[0])  # score cells
        pattern = re.compile('\d{4}-\d{2}-\d{2} \d{2}:\d{2}')
        date = re.findall(pattern, body[0])  # kick-off datetime
        list_data = [date[0]]
        stock_total.extend(data_page)
        stock_total.extend(list_data)
        time.sleep(random.randrange(1, 2))  # random pause between pages to stay polite
    stock_last = stock_total[:]
    return stock_last
def gamespiderAsia( gameid , betid):
    """Scrape Asian-handicap odds history for one match/bookmaker, save it
    to data/<gameid>Asia<bookmaker>.xlsx, and — when the match appears
    finished — fetch the final score and push it into the database."""
    root = os.getcwd()  # current working directory (output root)
    stock_total=[]  # stock_total: raw cell data from all pages
    #gameid = 1910700
    for page in range(1,2):
        # One URL per bookmaker; betid selects 365bet / 10bet / aobet.
        url = ['http://vip.win007.com/changeDetail/handicap.aspx?id=' + str(gameid) + '&companyid=8&l=0',
               'http://vip.win007.com/changeDetail/handicap.aspx?id=' + str(gameid) + '&companyid=22&l=0',
               'http://vip.win007.com/changeDetail/handicap.aspx?id=' + str(gameid) + '&companyid=1&l=0']
        request=urllib.request.Request(url=url[betid],headers={"User-Agent":random.choice(user_agent)})  # random User-Agent from the pool
        try:
            response=urllib.request.urlopen(request)
        except urllib.error.HTTPError as e:  # error reporting
            print('page=',page,'',e.code)
        except urllib.error.URLError as e:
            print('page=',page,'',e.reason)
        # NOTE(review): if the request failed above, 'response' is undefined here.
        content=response.read().decode('GBK')  # read page body; the site is GBK-encoded
        pattern = re.compile('<span id="odds2">[\s\S]*')
        body = re.findall(pattern, str(content))
        # Extract each visible cell (Chinese text, letters, times, numbers).
        pattern = re.compile('(?<=>)([\u4E00-\u9FA5A-Za-z+\/+\:+\ (\-|\+)?\d+(\.\d+)?]+)(?=<)')
        data_page = re.findall(pattern, body[0])
        stock_total.extend(data_page)
        time.sleep(random.randrange(1,2))  # random pause between pages
    stock_last=stock_total[:]  # stock_last: the final flat data
    # Cells reading "封" (market closed) span three table columns; pad each
    # with two '-' placeholders so every row stays 7 cells wide.
    num = stock_last.count("封")
    for i in range((int)(len(stock_last)+num*2)):
        if stock_last[i] == "封":
            stock_last.insert(i + 1,'-')
            stock_last.insert(i + 2,'-')
    # Same padding for "即" (live) cells.
    num = stock_last.count("即")
    for i in range((int)(len(stock_last)+num*2)):
        if stock_last[i] == "即":
            stock_last.insert(i + 1,'-')
            stock_last.insert(i + 2,'-')
            i +=2  # no-op: the for loop rebinds i on the next pass
        i +=2  # no-op, kept from the original
    # Same padding for "早" (early) cells.
    num = stock_last.count("早")
    for i in range((int)(len(stock_last) + num * 2)):
        if stock_last[i] == "早":
            stock_last.insert(i + 1, '-')
            stock_last.insert(i + 2, '-')
    result = []  # result: the final table
    # Reshape the flat cell list into rows of 7 columns.
    Hang = (int)(len(stock_last)/7)
    for y in range(0, Hang):
        for x in range(0, 7):
            if x == 0:
                result.append([])
            result[y].append(stock_last[x + y * 7])
    # Match is "done" when odds have been quiet long enough and the game
    # clock is late; the id is then recorded via guesssp.spider_done.
    time_now = time.strftime("%Y%m%d%H%M", time.localtime())
    betstr = ['365bet', '10bet', 'aobet']
    df = pd.DataFrame(result)
    path = root + '\\' + 'data\\' + str(gameid) + 'Asia' + betstr[betid] + '.xlsx'
    df.to_excel(path, header=None)
    if len(result):
        deadtime = result[1][5]  # timestamp of the most recent odds change
        if (deadtime != '-'):
            # The scraped timestamp has no year; assume 2021.
            deadtime = '2021-' + deadtime
            deadtime_stamp = time.mktime(time.strptime(deadtime, "%Y-%m-%d %H:%M"))
            time_now_stamp = time.mktime(time.strptime(time_now, "%Y%m%d%H%M"))
            if (result[1][0] == '中场'):  # "half-time" marker
                gametime = 45
            else:
                gametime = (int)(result[1][0])
            time_diff = (int)(time_now_stamp - deadtime_stamp)
            # Finished: 60'+ and quiet for 40 min, or 85'+ and quiet for 10 min.
            if (((gametime >= 60) & (time_diff >= 2400))|((gametime >= 85) & (time_diff >= 600))):
                score = spider_score(gameid)
                guesssp.spider_done(gameid)  # mark this game id as done
                Homescore = (int)(score[0])
                Guestscore = (int)(score[1])
                Date = score[2]
                dataarrange.Singleupdata(Homescore, Guestscore, gameid, Date)
    return
def gamespiderEuro( gameid , betid):
    """Scrape 1x2 (European) odds history for one match/bookmaker and save
    it to data/<gameid>Euro<bookmaker>.xlsx."""
    stock_total=[]  # stock_total: raw cell data from all pages
    #gameid = 1910700
    for page in range(1,2):
        # One URL per bookmaker; betid selects 365bet / 10bet / aobet.
        url = ['http://vip.win007.com/changeDetail/1x2.aspx?id=' + str(gameid) + '&companyid=8&l=0',
               'http://vip.win007.com/changeDetail/1x2.aspx?id=' + str(gameid) + '&companyid=22&l=0',
               'http://vip.win007.com/changeDetail/1x2.aspx?id=' + str(gameid) + '&companyid=1&l=0']
        request=urllib.request.Request(url=url[betid],headers={"User-Agent":random.choice(user_agent)})  # random User-Agent from the pool
        try:
            response=urllib.request.urlopen(request)
        except urllib.error.HTTPError as e:  # error reporting
            print('page=',page,'',e.code)
        except urllib.error.URLError as e:
            print('page=',page,'',e.reason)
        # NOTE(review): if the request failed above, 'response' is undefined here.
        content=response.read().decode('GBK')  # read page body; the site is GBK-encoded
        pattern = re.compile('<div id="out">[\s\S]*')
        body = re.findall(pattern, str(content))
        # Extract each visible cell (Chinese text, letters, times, numbers).
        pattern = re.compile('(?<=>)([\u4E00-\u9FA5A-Za-z+\/+\:+\ (\-|\+)?\d+(\.\d+)?]+)(?=<)')
        data_page = re.findall(pattern, body[0])
        stock_total.extend(data_page)
        time.sleep(random.randrange(1,2))  # random pause between pages
    stock_last=stock_total[:]  # stock_last: the final flat data
    result = []  # result: the final table
    # Reshape the flat cell list into rows of 5 columns.
    Hang = (int)(len(stock_last)/5)
    for y in range(0, Hang):
        for x in range(0, 5):
            if x == 0:
                result.append([])
            result[y].append(stock_last[x + y * 5])
    # (A "mark as done after 10 quiet minutes" block existed here but was
    # commented out in the original.)
    # Current working directory (output root).
    root = os.getcwd()
    df = pd.DataFrame(result)
    betstr = ['365bet', '10bet', 'aobet']
    path = root + '\\' + 'data\\' + str(gameid) + 'Euro' + betstr[betid] + '.xlsx'
    df.to_excel(path, header=None)
    return
def gamespiderBS( gameid , betid):
    """Scrape over/under ("big/small") odds history for one match/bookmaker
    and save it to data/<gameid>BS<bookmaker>.xlsx."""
    root = os.getcwd()
    stock_total=[]  # stock_total: raw cell data from all pages
    #gameid = 1910700
    for page in range(1,2):
        # One URL per bookmaker; betid selects 365bet / 10bet / aobet.
        url = ['http://vip.win007.com/changeDetail/overunder.aspx?id=' + str(gameid) + '&companyid=8&l=0',
               'http://vip.win007.com/changeDetail/overunder.aspx?id=' + str(gameid) + '&companyid=22&l=0',
               'http://vip.win007.com/changeDetail/overunder.aspx?id=' + str(gameid) + '&companyid=1&l=0']
        request=urllib.request.Request(url=url[betid],headers={"User-Agent":random.choice(user_agent)})  # random User-Agent from the pool
        try:
            response = urllib.request.urlopen(request)
        except urllib.error.HTTPError as e:  # error reporting
            print('page=', page, '', e.code)
        except urllib.error.URLError as e:
            print('page=', page, '', e.reason)
        # NOTE(review): if the request failed above, 'response' is undefined here.
        content = response.read().decode('GBK')  # read page body; site encoding is GBK (not utf-8)
        pattern = re.compile('<span id="odds2">[\s\S]*')
        body = re.findall(pattern, str(content))
        # Extract each visible cell (Chinese text, letters, times, numbers).
        pattern = re.compile('(?<=>)([\u4E00-\u9FA5A-Za-z+\/+\:+\ (\-|\+)?\d+(\.\d+)?]+)(?=<)')
        data_page = re.findall(pattern, body[0])
        stock_total.extend(data_page)
        time.sleep(random.randrange(1, 2))  # random pause between pages
    stock_last = stock_total[:]  # stock_last: the final flat data
    # Cells reading "封" (market closed) span three table columns; pad each
    # with two '-' placeholders so every row stays 7 cells wide.
    num = stock_last.count("封")
    for i in range((int)(len(stock_last) + num * 2)):
        if stock_last[i] == "封":
            stock_last.insert(i + 1, '-')
            stock_last.insert(i + 2, '-')
    # Same padding for "即" (live) cells.
    num = stock_last.count("即")
    for i in range((int)(len(stock_last) + num * 2)):
        if stock_last[i] == "即":
            stock_last.insert(i + 1, '-')
            stock_last.insert(i + 2, '-')
            i += 2  # no-op: the for loop rebinds i on the next pass
        i += 2  # no-op, kept from the original
    # Same padding for "早" (early) cells.
    num = stock_last.count("早")
    for i in range((int)(len(stock_last) + num * 2)):
        if stock_last[i] == "早":
            stock_last.insert(i + 1, '-')
            stock_last.insert(i + 2, '-')
    result = []  # result: the final table
    # Reshape the flat cell list into rows of 7 columns.
    Hang = (int)(len(stock_last) / 7)
    for y in range(0, Hang):
        for x in range(0, 7):
            if x == 0:
                result.append([])
            result[y].append(stock_last[x + y * 7])
    betstr = ['365bet', '10bet', 'aobet']
    df = pd.DataFrame(result)
    path = root + '\\' + 'data\\' + str(gameid) + 'BS' + betstr[betid] + '.xlsx'
    df.to_excel(path, header=None)
    return
def spider_determine(gameid):
    """Decide, from the match's kick-off time, whether to scrape odds now,
    record the final score, or mark the game as done."""
    date = spider_score(gameid)
    if(len(date)==3):
        # [home, away, kickoff] — the score is available.
        Homescore = date[0]
        Guestscore = date[1]
        Date = date[2]
    elif(len(date)==1):
        Date = date[0]  # only the kick-off datetime
    # NOTE(review): if spider_score returned neither 1 nor 3 items, 'Date'
    # is undefined below and strptime raises NameError.
    time_now = time.strftime("%Y%m%d%H%M", time.localtime())
    time_now_stamp = time.mktime(time.strptime(time_now, "%Y%m%d%H%M"))
    starttime_stamp = time.mktime(time.strptime(Date, "%Y-%m-%d %H:%M"))
    time_diff = (int)(time_now_stamp - starttime_stamp)
    # Kick-off more than 6 days ago: give up on this game id.
    if(time_diff>=518400):
        guesssp.spider_done(gameid)
    # More than 2 hours past kick-off and a score exists: record the result.
    if (time_diff > 7200):
        if (len(date) == 3):
            guesssp.spider_done(gameid)
            dataarrange.Singleupdata(Homescore, Guestscore, gameid, Date)
    # From 4 hours before kick-off to 40 minutes after: scrape all odds.
    if((time_diff>=-14400) & (time_diff<=2400)):
        gamespiderAsia(gameid, 0)
        gamespiderEuro(gameid, 0)
        gamespiderBS(gameid, 0)
        dataarrange.EuroandAsiaUpdata(gameid)
#spider_determine(1998672)
|
# Print every ordered pair of counters in 1..6 whose values sum to 7.
for cont1 in range(1, 7):
    # start of the outer counter
    for cont2 in range(1, 7):
        # start of the inner counter
        if cont1 + cont2 == 7:
            print(cont1, cont2)
        # end of the inner counter
    # end of the outer counter
version_info = (2, 4, 3, 'dev')

# Map each pre-release level to its PEP 440 suffix letter(s).
_specifier_ = {'alpha': 'a', 'beta': 'b', 'candidate': 'rc', 'final': '', 'dev': 'dev'}

# Build the version postfix from the pre-release segment of version_info:
# a bare 'dev' (no serial) becomes 'dev0'; otherwise the specifier letter
# plus the serial number; final releases get no postfix.
postfix = ''
if version_info[3] != 'final':
    if version_info[3] == 'dev' and len(version_info) < 5:
        postfix = 'dev0'
    else:
        postfix = '{}{}'.format(_specifier_[version_info[3]], version_info[4])

__version__ = '{}.{}.{}{}'.format(version_info[0], version_info[1], version_info[2], postfix)

# The version of the attribute spec that this package
# implements. This is the value used in
# _model_module_version/_view_module_version.
#
# Update this value when attributes are added/removed from
# the widget models, or if the serialized format changes.
#
# The major version needs to match that of the JS package.
EXTENSION_SPEC_VERSION = '^2.4.1'
|
from django.conf.urls import url,include
from . import views
# URL routes for the billing app: generate a bill, group bills, list the
# bills of one appointment, and render a single invoice.
urlpatterns = [
    url(r'^billgenrate/$',views.billgenerate.as_view(), name="bill"),
    url(r'^billgroup/',views.billgroup.as_view(),name='billgroup'),
    url(r'^bills/(?P<appointment_id>[0-9]+)/$', views.billsview, name='billstemplate'),
    # billid has the shape <letters>-<alnum>-<digits>, e.g. "INV-AB12-3".
    url(r'^billing/(?P<id>[0-9]+)/(?P<group>[a-zA-Z0-9]+)/(?P<billid>[a-zA-Z]+-[a-zA-Z0-9]+-[0-9]+)/$', views.billingview, name='billinvoice'),
]
|
#!/usr/bin/python3
"""defines class Base"""
class Base:
    """Base class for project models; hands out sequential ids.

    A private class-level counter tracks how many instances have been
    auto-assigned an id.
    """

    # Count of auto-assigned ids (class-wide, name-mangled private).
    __nb_objects = 0

    def __init__(self, id=None):
        """Use the given id, or assign the next sequential one.

        Args:
            id (int): explicit identifier; when None, the class counter is
                incremented and used instead.
        """
        if id is not None:
            self.id = id
        else:
            # Bug fix: increment the CLASS counter. The original wrote
            # 'self.__nb_objects += 1', which created a per-instance
            # attribute, so every auto-assigned id came out as 1.
            Base.__nb_objects += 1
            self.id = Base.__nb_objects
|
# Canny Edge and Hough Transform Example:
#
# This example demonstrates using the Canny edge detector
# And the Hough transform to find straight lines in an image.
import sensor, image, time
sensor.reset()  # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE)  # or sensor.RGB565
sensor.set_framesize(sensor.QQVGA)  # or sensor.QVGA (or others)
clock = time.clock()  # Tracks FPS.
while(True):
    clock.tick()  # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot()  # Take a picture and return the image.
    # Fast edge detection using the Canny operator.
    img.find_edges(image.EDGE_CANNY, threshold=(50, 80))  # Find edges
    # NOTE(review): this calls binary() on the 'image' MODULE with no
    # threshold list — almost certainly meant img.binary([...]); confirm.
    image.binary()
    # Line detection (disabled):
    #lines = img.find_lines(threshold=50) # Find lines.
    #for l in lines:
    #    img.draw_line(l, color=(127)) # Draw/mark each detected line.
|
#!/usr/bin/env python3
#
# Development Order #4:
#
# Determine the duration of a specified test.
#
import datetime
import sys
import pscheduler
logger = pscheduler.Log(prefix='tool-ethr', quiet=True)

# Test spec arrives as JSON on stdin; exit with an error on bad input.
json = pscheduler.json_load(exit_on_error=True)

# Duration: How long the test should run
# TODO: Need to make a better study of this.
duration = json.get('duration', None)
if duration:
    # Convert the ISO 8601 duration (e.g. 'PT30S') to whole seconds.
    delta = pscheduler.iso8601_as_timedelta(duration)
    duration = int(pscheduler.timedelta_as_seconds(delta))
else:
    # TODO: This should be a default
    duration = 20

# Warm-up period to omit from results; defaults to none ('P0D').
omit_delta = pscheduler.iso8601_as_timedelta(json.get("omit", "P0D"))
omit = int(pscheduler.timedelta_as_seconds(omit_delta))

# TODO: The 3 should be a default like it is for iperf*
# Total = test time + omitted warm-up + 3s tool slop + 2s margin.
full_duration = duration + omit + 3 + 2

logger.debug("final duration = %ss" % (full_duration))

# Report the computed duration back as an ISO 8601 string.
pscheduler.succeed_json({
    "duration": 'PT%dS' % (full_duration)
})
|
# Read an integer and report whether it is composite: print "yes" when it
# has more than two divisors, otherwise "no". Inputs <= 1 print nothing.
a = int(input())
divisor_count = 0
if a > 1:
    for candidate in range(1, a + 1):
        if a % candidate == 0:
            divisor_count += 1
    print("yes" if divisor_count > 2 else "no")
|
"""
* You should write your code inside a function
* Your function should take the input(s) as argument(s)
* Your function should return the answer as a data-structure
* You can validate/test your code by calling your function and printing the data-structure it returns
* Your function should return the same output if it is called multiple times in a row
https://public.karat.io/content/q093/wordlist.txt
The text file at the URL above lists common words in English, together with a count of their occurrence in a certain text. It is tab-delimited and newline-separated. The file is sorted with most common words at the top.
\r\n
Given two strings s1 and s2, we will call (s1, s2) a "step" if you can form s2 by adding exactly one letter to s1 and possibly rearranging the letters of s1.
For example:
(OF, FOR) is a step
(OF, IF) is not a step
(OF, OCT) is not a step
(ERA, EAR) is not a step
(SHE, SHEEP) is not a step
(TEE, TEST) is not a step
Given the 1000-word wordlist we just generated, produce an index
w -> { w1 | (w, w1) is a step }
that associates to each word all the words in the wordlist that are a step
away from it.
index = step_index(wordlist)
# Expected output (pseudocode):
index['ERA'] = ['REAL', 'RARE', 'AREA', 'READ', 'RATE',
'BEAR', 'NEAR', 'RACE',
'HEAR', 'YEAR', 'DEAR', 'FEAR', 'CARE']
index['JOY'] = []
"""
# all_words = []
# read file
# while i read it
# if the word has length 2 <= word <= ab
# all words append this current word [word, occur]
# def step_index(wordlist):
# for word in wordlist:
# pass
import urllib.request
def one_step(s1, wordlist):
    """Return the words in *wordlist* that are one "step" away from s1.

    (s1, s2) is a step when s2 can be formed by adding exactly one letter
    to s1 and possibly rearranging — i.e. removing one letter from the
    sorted letters of s2 yields the sorted letters of s1.

    Args:
        s1: the starting word.
        wordlist: list of [word, count] entries as produced by get_list().

    Returns:
        Matching words, in wordlist order.
    """
    s1_sorted = sorted(s1)
    matches = []
    for word in wordlist:
        candidate = word[0]
        if len(candidate) == len(s1) + 1:
            s2_sorted = sorted(candidate)
            # Bug fix: the original required sorted(s1) to be a PREFIX of
            # sorted(s2), which misses valid steps such as (ERA, BEAR).
            # Instead, try deleting each letter of s2 and compare the
            # remaining multiset against s1's.
            for i in range(len(s2_sorted)):
                if s2_sorted[:i] + s2_sorted[i + 1:] == s1_sorted:
                    matches.append(candidate)
                    break
    return matches
def get_list(n, k):
    """
    Download the word list and return the N most common words of length
    [2..K] together with their occurrence counts, as [word, count] pairs.
    The source file is already sorted most-common-first.
    len(file) = m; worst = O(m), best = o(n)
    """
    url = "https://public.karat.io/content/q093/wordlist.txt"
    raw_text = urllib.request.urlopen(url).read().decode('utf8')
    selected = []
    # Lines are CRLF-separated; fields are tab-delimited.
    for line in raw_text.split("\r\n"):
        if len(selected) == n:
            break
        entry = line.split("\t")
        if 2 <= len(entry[0]) <= k:
            selected.append(entry)
    return selected
# Demo: list the one-letter "steps" from 'ERA' among the 1000 most common
# words of length 2..5.
result = (get_list(1000, 5))
# print(result)
index = one_step('ERA', result)
print(index)
|
# Calculate Profit
# ---------------------------------------
# Unit prices and stock level for the product being sold.
profite = {
    "costPrice": 55,    # purchase cost per unit
    "sellPrice": 65,    # sale price per unit
    "invontory": 1500   # units in stock (sic: "inventory")
}


def totalProfite(profite):
    """Return the total profit: per-unit margin times the inventory size."""
    margin = profite["sellPrice"] - profite["costPrice"]
    return margin * profite["invontory"]


print(totalProfite(profite))
# 15000  (bug fix: the original comment claimed 150000, but
#         (65 - 55) * 1500 = 15000)
from discord.ext import commands
import commons.errors as errors
class Events(commands.Cog):
    """Miscellaneous bot event handlers and owner utility commands."""

    def __init__(self, bot):
        self.bot = bot

    @commands.Cog.listener()
    async def on_message(self, message):
        """Dispatch the help event when the bot itself is mentioned
        (messages from other bots are ignored)."""
        if message.author.bot:
            return
        if self.bot.user in message.mentions:
            self.bot.dispatch("help", "help", message.guild.id, message.channel)

    @commands.command()
    @commands.is_owner()
    async def reload(self, ctx):
        """Owner-only: ask the bot to reload itself via the reload event."""
        self.bot.dispatch("reload")
        await ctx.send("Reload completed!")

    @reload.error
    async def reload_error(self, ctx, error):
        # Silently ignore non-owners invoking reload; log everything else.
        if isinstance(error, commands.NotOwner):
            return
        errors.error_print(error)

    @commands.command()
    async def ping(self, ctx):
        """Simple liveness check."""
        await ctx.send("pong!")

    @ping.error
    async def ping_error(self, ctx, error):
        errors.error_print(error)
def setup(bot):
    # discord.py extension entry point: register the Events cog.
    bot.add_cog(Events(bot))
from __future__ import print_function,division
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch.nn.init as init
from utils.utils_trans import *
#from voxel_net2 import add_conv_stage
def weights_init(m):
    """Apply Xavier-uniform initialization to the weights of Conv modules.

    Intended for use with nn.Module.apply(); biases are left untouched.
    """
    if 'Conv' in m.__class__.__name__:
        init.xavier_uniform(m.weight.data)
def add_double_conv_stage(dim_in, dim_out, kernel_size=3, stride=1, padding=1, bias=True, useBN=False):
    """Two stacked Conv2d layers (dim_in->dim_out->dim_out), each followed
    by BatchNorm + LeakyReLU(0.1) when useBN is set, otherwise plain ReLU."""
    conv_a = nn.Conv2d(dim_in, dim_out, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias)
    conv_b = nn.Conv2d(dim_out, dim_out, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias)
    if useBN:
        layers = [
            conv_a, nn.BatchNorm2d(dim_out), nn.LeakyReLU(0.1),
            conv_b, nn.BatchNorm2d(dim_out), nn.LeakyReLU(0.1),
        ]
    else:
        layers = [conv_a, nn.ReLU(), conv_b, nn.ReLU()]
    return nn.Sequential(*layers)
def add_conv_stage(dim_in, dim_out, kernel_size=3, stride=1, padding=1, bias=True, useBN=False):
    """Single Conv2d block: Conv -> [BatchNorm ->] activation.

    With useBN=True the activation is LeakyReLU(0.1); otherwise plain ReLU.
    """
    conv = nn.Conv2d(dim_in, dim_out, kernel_size=kernel_size, stride=stride,
                     padding=padding, bias=bias)
    if useBN:
        return nn.Sequential(conv, nn.BatchNorm2d(dim_out), nn.LeakyReLU(0.1))
    return nn.Sequential(conv, nn.ReLU())
def upsample(input_fm, output_fm):
    """Learned 2x spatial upsampling: stride-2 ConvTranspose2d (no bias)
    followed by ReLU."""
    layers = [
        nn.ConvTranspose2d(input_fm, output_fm, 2, 2, 0, bias=False),
        nn.ReLU(),
    ]
    return nn.Sequential(*layers)
###########################################################################################
## single-view predict network
class singleNet_verydeep(nn.Module):
    """U-Net-style encoder/decoder that predicts an occupancy volume from a
    single 3-channel image; the output passes through a sigmoid."""

    def __init__(self, useBN=True):
        super(singleNet_verydeep, self).__init__()
        # Encoder: 3 -> 32 -> 64 -> 128 -> 256 -> 512 -> 512 -> 512 -> 512,
        # downsampled by max-pooling between stages (see forward()).
        self.conv1 = add_conv_stage(3, 32, useBN=useBN)
        self.conv2 = add_conv_stage(32, 64, useBN=useBN)
        self.conv3 = add_conv_stage(64, 128, useBN=useBN)
        self.conv4 = add_conv_stage(128, 256, useBN=useBN)
        self.conv5 = add_conv_stage(256, 512, useBN=useBN)
        self.conv6 = add_conv_stage(512, 512, useBN=useBN)
        self.conv7 = add_conv_stage(512, 512, useBN=useBN)
        self.conv8 = add_conv_stage(512, 512, useBN=useBN)
        # Decoder stages; their inputs are upsampled features concatenated
        # with the matching encoder skip connection (hence doubled channels).
        self.conv7m = add_conv_stage(1024, 512, useBN=useBN)
        self.conv6m = add_conv_stage(1024, 512, useBN=useBN)
        self.conv5m = add_conv_stage(1024, 512, useBN=useBN)
        self.conv4m = add_conv_stage(512, 256, useBN=useBN)
        self.conv3m = add_conv_stage(256, 128, useBN=useBN)
        self.conv2m = add_conv_stage(128, 64, useBN=useBN)
        self.max_pool = nn.MaxPool2d(2)
        # Learned 2x upsampling between decoder stages.
        self.upsample87 = upsample(512, 512)
        self.upsample76 = upsample(512, 512)
        self.upsample65 = upsample(512, 512)
        self.upsample54 = upsample(512, 256)
        self.upsample43 = upsample(256, 128)
        self.upsample32 = upsample(128, 64)
        ## weight initialization: zero all conv/deconv biases
        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
                if m.bias is not None:
                    m.bias.data.zero_()

    def forward(self, x):
        # Encoder path (max-pool halves the spatial size before each stage).
        conv1_out = self.conv1(x)
        conv2_out = self.conv2(self.max_pool(conv1_out))
        conv3_out = self.conv3(self.max_pool(conv2_out))
        conv4_out = self.conv4(self.max_pool(conv3_out))
        conv5_out = self.conv5(self.max_pool(conv4_out))
        #####
        conv6_out = self.conv6(self.max_pool(conv5_out))
        conv7_out = self.conv7(self.max_pool(conv6_out))
        conv8_out = self.conv8(self.max_pool(conv7_out))  ##4096 2 2
        # Decoder path with encoder skip connections.
        conv8m_out = torch.cat((self.upsample87(conv8_out), conv7_out), 1)  ## 4096 4 4
        conv7m_out = self.conv7m(conv8m_out)  ## 2048 4 4
        conv7m_out_ = torch.cat((self.upsample76(conv7m_out), conv6_out), 1)  ## 2048 8 8
        conv6m_out = self.conv6m(conv7m_out_)
        conv6m_out_ = torch.cat((self.upsample65(conv6m_out), conv5_out), 1)
        conv5m_out = self.conv5m(conv6m_out_)
        #####
        conv5m_out = torch.cat((self.upsample54(conv5m_out), conv4_out), 1)
        conv4m_out = self.conv4m(conv5m_out)
        conv4m_out_ = torch.cat((self.upsample43(conv4m_out), conv3_out), 1)
        conv3m_out = self.conv3m(conv4m_out_)
        conv3m_out_ = torch.cat((self.upsample32(conv3m_out), conv2_out), 1)
        conv2m_out = self.conv2m(conv3m_out_)
        # Final pooled sigmoid map (F.sigmoid is the module's original call;
        # deprecated in newer torch in favor of torch.sigmoid).
        outputs = F.sigmoid(self.max_pool(conv2m_out))
        return outputs
####################################################################################
## mulit-view update network
####################################################################################
class MulitUpdateNet_verydeep(nn.Module):
    def __init__(self, useBN=True,use_GPU=True):
        """Build the multi-view update network.

        Wraps a frozen singleNet_verydeep for the initial single-view
        prediction, caches the view transforms from utils_trans as float
        tensors (on GPU when available), and builds a second U-Net that
        refines the prediction using the second view's image.
        """
        super(MulitUpdateNet_verydeep, self).__init__()
        self.GPU = use_GPU and torch.cuda.is_available()
        print ('use GPU or not:{}'.format(self.GPU))
        self.SingleNet=singleNet_verydeep()
        if self.GPU:
            self.SingleNet=self.SingleNet.cuda()
        # The single-view net is frozen: only the update net is trained.
        for param in self.SingleNet.parameters():
            param.requires_grad = False
        self.inv_trans=list()
        self.trans=list()
        # Pre-convert the numpy view transforms to float tensors once.
        if self.GPU:
            for trans in inv_transform_list:
                self.inv_trans.append(torch.from_numpy(trans).type(torch.FloatTensor).cuda())
            for trans in transform_list:
                self.trans.append(torch.from_numpy(trans).type(torch.FloatTensor).cuda())
        else:
            for trans in inv_transform_list:
                self.inv_trans.append(torch.from_numpy(trans).type(torch.FloatTensor))
            for trans in transform_list:
                self.trans.append(torch.from_numpy(trans).type(torch.FloatTensor))
        # Encoder/decoder mirroring singleNet_verydeep, plus conv_reduce to
        # merge image features with the projected voxel prediction.
        self.conv1 = add_conv_stage(3, 32, useBN=useBN)
        self.conv2 = add_conv_stage(32, 64, useBN=useBN)
        self.conv3 = add_conv_stage(64, 128, useBN=useBN)
        self.conv4 = add_conv_stage(128, 256, useBN=useBN)
        self.conv5 = add_conv_stage(256, 512, useBN=useBN)
        self.conv6 = add_conv_stage(512, 512, useBN=useBN)
        self.conv7 = add_conv_stage(512, 512, useBN=useBN)
        self.conv8 = add_conv_stage(512, 512, useBN=useBN)
        self.conv7m = add_conv_stage(1024, 512, useBN=useBN)
        self.conv6m = add_conv_stage(1024, 512, useBN=useBN)
        self.conv5m = add_conv_stage(1024, 512, useBN=useBN)
        self.conv4m = add_conv_stage(512, 256, useBN=useBN)
        self.conv3m = add_conv_stage(256, 128, useBN=useBN)
        self.conv2m = add_conv_stage(128, 64, useBN=useBN)
        self.max_pool = nn.MaxPool2d(2)
        self.upsample87 = upsample(512, 512)
        self.upsample76 = upsample(512, 512)
        self.upsample65 = upsample(512, 512)
        self.upsample54 = upsample(512, 256)
        self.upsample43 = upsample(256, 128)
        self.upsample32 = upsample(128, 64)
        self.conv_reduce=add_conv_stage(128,64,useBN=useBN)
        ## weight initialization: zero all conv/deconv biases
        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
                if m.bias is not None:
                    m.bias.data.zero_()
    ## define the trans fn in torch format
    def dense2sparse(self,dense_data):
        """
        dense format (N,64,64,64) -> sparse (coord) format: list of
        (num_nonzeros, 3) arrays with coordinates centered on the origin.
        dense_data: the pred Variable [N,64,64,64]
        return: list of numpy arrays of sparse coordinates
        """
        if dense_data.data.ndimension() != 4:
            print ('when dense->sparse ,the Dimension number is Wrong!')
            print ('current data shape', dense_data.data.size())
        num_sample = dense_data.data.size()[0]
        sparse_list = list()
        for i in xrange(num_sample):
            ## the center is the origin, so the coords (x,y,z) are floats
            sparse_data = torch.nonzero(dense_data[i].data).type(torch.FloatTensor) - 32.0 + 0.5
            # .numpy() moves the coordinates to a CPU numpy array
            sparse_data = sparse_data.numpy()
            sparse_list.append(sparse_data)
        return sparse_list
    def sparse2dense(self,sparse_list):
        """
        sparse (coord) format, list of (num_nonzeros,3) origin-centered
        tensors -> dense format (N,64,64,64)
        param: sparse_list: the list of sparse (coord) data
        return: Variable of dense data [N,64,64,64]
        """
        if sparse_list[0].ndimension() != 2 or sparse_list[0].size()[1] != 3:
            print ('when sparse->dense, the Dimension number is Wrong!')
            print ('current data shape:{}'.format(sparse_list[0].size()))
        num_sample = len(sparse_list)
        for i in xrange(num_sample):
            ## shift back so the center is the voxel-grid center; coords become ints
            sparse_list[i] = (sparse_list[i] + 32.0 - 0.5).type(torch.ByteTensor)
        dense_list = list()
        for idx, sparse_data in enumerate(sparse_list):
            dense_data = np.zeros((64, 64, 64), dtype=np.float32)
            xyz = sparse_data.numpy().T.astype(np.int)
            # Drop coordinates that fall outside the 64^3 grid after the
            # view transform.
            valid_ix = ~np.any((xyz < 0) | (xyz >= 64), 0)
            xyz = xyz[:, valid_ix]
            # Mark occupied voxels.
            dense_data[tuple(xyz)] = 1.0
            dense_list.append(dense_data[np.newaxis, :])
        dense_data = np.concatenate(dense_list, axis=0)
        dense_data = torch.from_numpy(dense_data)
        if self.GPU:
            dense_data = dense_data.cuda()
        return Variable(dense_data)  ##[N,64,64,64]
    ## when training, just iterate once
    ## after training, we can iterate 5 times to refine the output
    def forward(self,x1,x2,v12s,target1s=None):
        """
        :param x1: img1 (per the original comment (N,1,256,256); conv1 takes
            3 channels — TODO confirm the actual channel count)
        :param x2: img2, second view, same shape as x1
        :param v12s: list of view-index pairs e.g. [(1,2),(3,4),...], len == N
        :param target1s: just for evaluating self.SingleNet, [N,64,64,64]
        """
        # Step 1: initial voxel prediction from view 1, thresholded at 0.5.
        self.SingleNet.eval()
        preds = self.SingleNet(x1)  ##[N,64,64,64]
        preds = preds > 0.5
        # Step 2: re-project the voxels from view 1's frame into view 2's.
        preds = self.dense2sparse(preds)  ## list of (num_nonzero,3)
        preds_trans = list()
        for i in xrange(len(preds)):
            # Compose: undo view-1's transform, then apply view-2's.
            trans12=transform_list[v12s[i][1]].dot(inv_transform_list[v12s[i][0]])
            pred = trans12.dot(preds[i].T[::-1, :])  ##[xyz,n]
            pred = pred[::-1, :].T  ## [n,zyx]
            pred = torch.from_numpy(pred.astype(np.float32)).type(torch.FloatTensor)
            preds_trans.append(pred)
        preds = self.sparse2dense(preds_trans)  ## [N,64,64,64]
        # Step 3: U-Net over image 2 with the projected voxel channels
        # injected after conv2 and reduced back to 64 channels.
        conv1_out = self.conv1(x2)
        conv2_out = self.conv2(self.max_pool(conv1_out))
        conv2_out_cat = torch.cat((self.max_pool(conv2_out), preds), 1)
        conv2_reduce=self.conv_reduce(conv2_out_cat)
        conv3_out = self.conv3(conv2_reduce)
        conv4_out = self.conv4(self.max_pool(conv3_out))
        conv5_out = self.conv5(self.max_pool(conv4_out))
        #####
        conv6_out = self.conv6(self.max_pool(conv5_out))
        conv7_out = self.conv7(self.max_pool(conv6_out))
        conv8_out = self.conv8(self.max_pool(conv7_out))  ##512 2 2
        # Decoder with encoder skip connections.
        conv8m_out = torch.cat((self.upsample87(conv8_out), conv7_out), 1)  ## 512 4 4
        conv7m_out = self.conv7m(conv8m_out)  ## 512 4 4
        conv7m_out_ = torch.cat((self.upsample76(conv7m_out), conv6_out), 1)  ## 512 8 8
        conv6m_out = self.conv6m(conv7m_out_)
        conv6m_out_ = torch.cat((self.upsample65(conv6m_out), conv5_out), 1)
        conv5m_out = self.conv5m(conv6m_out_)
        #####
        conv5m_out = torch.cat((self.upsample54(conv5m_out), conv4_out), 1)
        conv4m_out = self.conv4m(conv5m_out)
        conv4m_out_ = torch.cat((self.upsample43(conv4m_out), conv3_out), 1)
        conv3m_out = self.conv3m(conv4m_out_)  ##(128,64,64)
        conv3m_out_ = torch.cat((self.upsample32(conv3m_out), conv2_out), 1)
        conv2m_out = self.conv2m(conv3m_out_)
        preds = F.sigmoid(self.max_pool(conv2m_out))
        return preds
    def predict(self,x1,x2,v12s,iter=5):
        """Iteratively refine a segmentation by round-tripping between two views.

        x1: view-1 image batch, used only for the initial SingleNet prediction.
        x2: view-2 image batch, fed to the U-Net refinement passes.
        v12s: per-sample (view1_idx, view2_idx) pairs indexing the module-level
            transform_list / inv_transform_list arrays.
        iter: number of refinement round-trips.
        Returns the final sigmoid probability volume.

        NOTE(review): `iter` shadows the builtin, and the inner loops reuse the
        outer loop variable `i` (harmless — the for iterator is unaffected —
        but confusing). NOTE(review): the second half of each round-trip also
        feeds x2 into conv1 although the comment says img1 — confirm intent.
        """
        ## x1: view1 img for initial prediction
        ## x2: view2 img for multi-view update
        self.SingleNet.eval()
        preds=self.SingleNet(x1) ##[N,64,64,64]
        #preds=preds.transpose(3,1)
        #print 'singlenet output',preds.data.is_cuda
        for i in xrange(iter):
            ## img2 + pred1 ->CNN =pred2
            # Threshold, convert to sparse voxel coords, map view1 -> view2 space.
            preds=self.dense2sparse(preds>0.5) ## list of (num_nonzero,3)
            preds_trans = list()
            for i in xrange(len(preds)):
                # Compose: undo view-1 transform, then apply view-2 transform.
                trans12 = transform_list[v12s[i][1]].dot(inv_transform_list[v12s[i][0]])
                pred = trans12.dot(preds[i].T[::-1, :]) ##[xyz,n]
                pred = pred[::-1, :].T ## [n,zyx]
                pred = torch.from_numpy(pred.astype(np.float32)).type(torch.FloatTensor)
                preds_trans.append(pred)
            preds=self.sparse2dense(preds_trans) ## [N,64,64,64] ->[N,C,H,W]
            #################################################################################
            # U-Net encoder over the view-2 image; the transformed prediction is
            # concatenated into the feature maps at the conv2 stage.
            conv1_out = self.conv1(x2)
            conv2_out = self.conv2(self.max_pool(conv1_out))
            conv2_out_cat = torch.cat((self.max_pool(conv2_out), preds), 1)
            conv3_out = self.conv3(conv2_out_cat)
            conv4_out = self.conv4(self.max_pool(conv3_out))
            conv5_out = self.conv5(self.max_pool(conv4_out))
            #####
            conv6_out = self.conv6(self.max_pool(conv5_out))
            conv7_out = self.conv7(self.max_pool(conv6_out))
            conv8_out = self.conv8(self.max_pool(conv7_out)) ##512 2 2
            conv8m_out = torch.cat((self.upsample87(conv8_out), conv7_out), 1) ## 512 4 4
            conv7m_out = self.conv7m(conv8m_out) ## 512 4 4
            conv7m_out_ = torch.cat((self.upsample76(conv7m_out), conv6_out), 1) ## 512 8 8
            conv6m_out = self.conv6m(conv7m_out_)
            conv6m_out_ = torch.cat((self.upsample65(conv6m_out), conv5_out), 1)
            conv5m_out = self.conv5m(conv6m_out_)
            #####
            conv5m_out = torch.cat((self.upsample54(conv5m_out), conv4_out), 1)
            conv4m_out = self.conv4m(conv5m_out)
            conv4m_out_ = torch.cat((self.upsample43(conv4m_out), conv3_out), 1)
            conv3m_out = self.conv3m(conv4m_out_) ##(128,64,64)
            conv3m_out_ = torch.cat((self.upsample32(conv3m_out), conv2_out), 1)
            conv2m_out = self.conv2m(conv3m_out_)
            preds = F.sigmoid(self.max_pool(conv2m_out))
            ###############################################################################
            ## img1 + pred2 ->CNN =pred3
            # Second half of the round-trip: map the new prediction back into
            # view-1 space (note the inverted transform order) and refine again.
            preds = self.dense2sparse(preds > 0.5) ## list of (1,num_nonzero,3)
            preds_trans = list()
            for i in xrange(len(preds)):
                trans21 = transform_list[v12s[i][0]].dot(inv_transform_list[v12s[i][1]])
                pred = trans21.dot(preds[i].T[::-1, :]) ##[xyz,n]
                pred = pred[::-1, :].T ## [n,zyx]
                pred = torch.from_numpy(pred.astype(np.float32)).type(torch.FloatTensor)
                preds_trans.append(pred)
            preds = self.sparse2dense(preds_trans) ## [N,64,64,64]
            ####################################################################################
            conv1_out = self.conv1(x2)
            conv2_out = self.conv2(self.max_pool(conv1_out))
            conv2_out_cat = torch.cat((self.max_pool(conv2_out), preds), 1)
            conv3_out = self.conv3(conv2_out_cat)
            conv4_out = self.conv4(self.max_pool(conv3_out))
            conv5_out = self.conv5(self.max_pool(conv4_out))
            #####
            conv6_out = self.conv6(self.max_pool(conv5_out))
            conv7_out = self.conv7(self.max_pool(conv6_out))
            conv8_out = self.conv8(self.max_pool(conv7_out)) ##512 2 2
            conv8m_out = torch.cat((self.upsample87(conv8_out), conv7_out), 1) ## 512 4 4
            conv7m_out = self.conv7m(conv8m_out) ## 512 4 4
            conv7m_out_ = torch.cat((self.upsample76(conv7m_out), conv6_out), 1) ## 512 8 8
            conv6m_out = self.conv6m(conv7m_out_)
            conv6m_out_ = torch.cat((self.upsample65(conv6m_out), conv5_out), 1)
            conv5m_out = self.conv5m(conv6m_out_)
            #####
            conv5m_out = torch.cat((self.upsample54(conv5m_out), conv4_out), 1)
            conv4m_out = self.conv4m(conv5m_out)
            conv4m_out_ = torch.cat((self.upsample43(conv4m_out), conv3_out), 1)
            conv3m_out = self.conv3m(conv4m_out_) ##(128,64,64)
            conv3m_out_ = torch.cat((self.upsample32(conv3m_out), conv2_out), 1)
            conv2m_out = self.conv2m(conv3m_out_)
            preds = F.sigmoid(self.max_pool(conv2m_out))
        return preds
|
# Interactive base-conversion tool: reads an integer and prints it in the
# base chosen by the user (binary / octal / hexadecimal).
numero = int(input('Digite um número inteiro: '))
print("""Escolha uma das bases de conversões
[ 1 ] Converter para BINÁRIO
[ 2 ] Converter para OCTAL
[ 3 ] Converter para HEXADECIMAL""")
opção = int(input('Sua opção: '))
if opção == 1:
    print(f'O número {numero} convertido para BINÁRIO é {bin(numero)[2:]}')
elif opção == 2:
    # BUG FIX: was oct(numero[2:]) — slicing an int raises TypeError.
    # The [2:] belongs on oct()'s result to strip the '0o' prefix,
    # mirroring the bin()/hex() branches.
    print(f'O número {numero} convertido para OCTAL é {oct(numero)[2:]}')
elif opção == 3:
    print(f'O número {numero} convertido para HEXADECIMAL é {hex(numero)[2:]}')
else:
    print('Digite uma opção Válida')
|
from django.conf.urls import url
from .views import *
# Route table for the "footy" app: match listing/creation, joining/leaving
# matches, profiles, and auth. Uses Django 1.x regex-based url() patterns;
# (?P<pk>[0-9]+) captures the numeric primary key for detail views.
app_name = "footy"
urlpatterns = [
    url(r'^$', ShowMatchesView.as_view(), name="show_matches"),
    url(r'^new_match/$', CreateEventView.as_view(), name="new_match"),
    url(r'^add_location/$', AddLocationView.as_view(), name="add_location"),
    url(r'^joined_match/(?P<pk>[0-9]+)/$', JoinMatchView.as_view(), name="join_match"),
    url(r'^leave_match/(?P<pk>[0-9]+)/$', LeaveMatchView.as_view(), name="leave_match"),
    url(r'^my_matches/$', UserMatchesView.as_view(), name="my_matches"),
    url(r'^nearby_matches/$', NearByMatchesView.as_view(), name="nearby_matches"),
    url(r'^profile/(?P<pk>[0-9]+)/$', ProfileView.as_view(), name="profile"),
    url(r'^profile/$', MyProfileView.as_view(), name="my_profile"),
    url(r'^logout/$', LogoutView.as_view(), name="logout"),
    url(r'^login/$', LoginView.as_view(), name="login"),
    url(r'^signup/$', CreateUserView.as_view(), name="signup"),
]
#------------------CONTROL PID--------------------
import math
import time
#controller direction variables
#DIRECT = 0
#REVERSE = 1
class PID(object):
    """Discrete PID controller sampled every ``SampleTime`` milliseconds.

    controller_direction: 0 = DIRECT, 1 = REVERSE (gains are negated).
    The output and the integral term are clamped to [OutMin, OutMax]
    (default 0..100). Typical use:

        pid = PID(setpoint, kp, ki, kd, 0)
        pid.GetInputPID(measurement)
        pid.ComputePID()
        out = pid.GetOutputPID()
    """

    def __init__(self, c_setpoint, kp, ki, kd, controller_direction):
        # Process input/output are unknown until GetInputPID()/ComputePID().
        self.control_input = None
        self.control_output = None
        self.control_setpoint = c_setpoint
        self.kp = kp
        self.ki = ki
        self.kd = kd
        # Possible controller_direction values: 0 (DIRECT) | 1 (REVERSE)
        self.control_dir = controller_direction
        # Integral accumulator; starts as the (still unknown) output.
        self.ITerm = self.control_output
        self.LastError = None
        self.LastInput = self.control_input
        # Output limits; set properly by SetOutputLimits() below.
        self.OutMin = None
        self.OutMax = None
        # Always in AUTO mode (no manual mode). Sample time in milliseconds.
        self.SampleTime = 100
        # Default output range 0..100; call SetOutputLimits() again to change.
        self.SetOutputLimits(0, 100)
        # Scale the user gains to the sample period (and direction).
        self.SetTunning(kp, ki, kd)
        self.SetControllerDirection(controller_direction)
        # Pretend a full sample period has already elapsed so the first
        # ComputePID() call computes immediately.
        millis = int(round(time.time() * 1000))
        self.LastTime = millis - self.SampleTime

    def SetOutputLimits(self, Range_Min, Range_Max):
        """Set output bounds and clamp the current output/integral to them."""
        self.OutMin = Range_Min
        self.OutMax = Range_Max
        # BUG FIX: guard against None — output/ITerm are unset before the
        # first compute, and None cannot be ordered against ints in Python 3.
        if self.control_output is not None:
            if self.control_output > self.OutMax:
                self.control_output = self.OutMax
            if self.control_output < self.OutMin:
                self.control_output = self.OutMin
        if self.ITerm is not None:
            if self.ITerm > self.OutMax:
                # BUG FIX: original assigned to a local `ITerm`, silently
                # leaving self.ITerm un-clamped.
                self.ITerm = self.OutMax
            if self.ITerm < self.OutMin:
                self.ITerm = self.OutMin

    def SetTunning(self, SetKp, SetKi, SetKd):
        """Store gains scaled to the fixed sample period (negated if REVERSE)."""
        # Time per sample in seconds: 100 ms -> 0.1 s
        TimePerSample = (float(self.SampleTime) / float(1000))
        # BUG FIX: kp was assigned SetKd instead of SetKp.
        self.kp = SetKp
        self.ki = SetKi * TimePerSample
        self.kd = SetKd / TimePerSample
        # If control direction is REVERSE (value = 1), negate all gains.
        if self.control_dir == 1:
            self.kp = (0 - self.kp)
            self.ki = (0 - self.ki)
            self.kd = (0 - self.kd)

    # This method can be called at any moment in the execution time #
    def SetControllerDirection(self, UserControlDirection):
        """Switch DIRECT/REVERSE, negating the gains on a real change."""
        if UserControlDirection != self.control_dir:
            self.kp = (0 - self.kp)
            self.ki = (0 - self.ki)
            self.kd = (0 - self.kd)
            # BUG FIX: record the new direction; the original never updated
            # control_dir, so calling twice with the same direction flipped
            # the gains twice.
            self.control_dir = UserControlDirection

    def GetInputPID(self, user_input):
        """Feed the latest process measurement into the controller."""
        self.control_input = user_input

    def ComputePID(self):
        """Recompute the output if at least one sample period has elapsed."""
        now = int(round(time.time() * 1000))
        TimeChange = now - self.LastTime
        if TimeChange >= self.SampleTime:
            compute_input = self.control_input
            compute_error = (self.control_setpoint - compute_input)
            # Lazily initialise the integral accumulator.
            if self.ITerm is None:
                self.ITerm = 0
            self.ITerm = self.ITerm + (self.ki * compute_error)
            # Anti-windup: keep the integral inside the output limits.
            if self.ITerm > self.OutMax:
                self.ITerm = self.OutMax
            if self.ITerm < self.OutMin:
                self.ITerm = self.OutMin
            # First sample: no history, so the derivative term is zero.
            if self.LastError is None:
                self.LastError = compute_error
            # Derivative on error: dE = Error - lastError
            dError = (compute_error - self.LastError)
            pid_output = (self.kp * compute_error) + self.ITerm - (self.kd * dError)
            # Clamp the final output.
            if pid_output > self.OutMax:
                pid_output = self.OutMax
            if pid_output < self.OutMin:
                pid_output = self.OutMin
            self.control_output = pid_output
            self.LastError = compute_error
            self.LastTime = now

    # ****** Call After ComputePID() ******
    def GetOutputPID(self):
        """Return the most recently computed output."""
        return self.control_output

    # ****** Call at any Moment ********
    def GetSetPoint(self):
        return self.control_setpoint

    # ******* Call only after ComputePID() *****
    def GetCurrentError(self):
        """Return the error used by the last ComputePID() call."""
        return self.LastError
|
import elektra
import pandas as pd
import filecmp
import datetime as dt
# Smoke-test script for the elektra power-price helpers: exercises block-price
# creation, hourly scrubbing, block conversion/translation, and DST detection
# against the local lmps.csv fixture. Expected results are noted inline.
flow_date = dt.datetime(2020, 10, 17)
### Create Prices (make a daily from lmps)
print('\n\n--- Create Block Prices ---')
prices = pd.read_csv('lmps.csv')
result = elektra.create_prices(flow_date, 'M.P4F8', 'INDIANA.HUB', 'miso', '2x16', 'daily', prices)
print(result)
#### Scrub Hourly Prices (make sure there are enough hourlies)
print('\n\n--- Scrub Hourly Prices ---')
result = elektra.scrub_hourly_prices(flow_date,'M.YERX', '116013753', 'pjm', prices)
print(result)
### Convert
print('\n\n--- Convert Blocks ---')
result = elektra.convert(flow_date, '7x24', '2x16') # 16: (October 17 2020 is a Saturday, and has 16 peak hours)
print(result)
result = elektra.convert(flow_date, '7x24', '5x16') # 0: (October 17 2020 is a Saturday, and has 0 weekday peak hours)
print(result)
result = elektra.convert(flow_date, '5x16', '2x16') # 0: (October 17 2020 is a Saturday, and there could not be a 5x16 input block)
print(result)
### Translate Blocks
print('\n\n--- Translate Blocks ---')
flow_date = dt.datetime(2020, 10, 1)
result = elektra.translateBlocks('pjm', 20, 'monthly', flow_date, '7x24', ['5x16', '2x16'], 'mwh')
print(result)
### Is DST Transition?
print('\n\n--- DST Transition ---')
flow_date = dt.datetime(2021, 3, 14)
is_tx, short_day, long_day = elektra.is_dst_transition(flow_date)
print(is_tx) # True; this is one of the transition dates
print(short_day) # True; this is the spring ("spring forward") DST transition date
print(long_day) # False; that would be the "fall back" date
import numpy as np
import pandas as pd
import keras
from keras.datasets import fashion_mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.utils import np_utils
import matplotlib.pyplot as plt
# Train a small CNN classifier on Fashion-MNIST and plot training curves.
np.random.seed(7)  # reproducible weight init / shuffling
(X_train,y_train),(X_test, y_test) = fashion_mnist.load_data()
print("X_train shape:", X_train.shape, "y_train shape:", y_train.shape)
print("X_test shape:", X_test.shape, "y_test shape:", y_test.shape)
plt.imshow(X_train[1])
print("Class : ",y_train[1])
#Reshape input data to (samples, height, width, channels) for Conv2D
X_train = X_train.reshape(X_train.shape[0], 28, 28, 1)
X_test = X_test.reshape(X_test.shape[0], 28, 28, 1)
#Normalize inputs from 0-255 to 0-1
X_train = X_train.astype('float32') / 255
X_test = X_test.astype('float32') / 255
#One hot encoding of outputs
y_train = np_utils.to_categorical(y_train, 10)
y_test = np_utils.to_categorical(y_test, 10)
num_classes = y_test.shape[1]
#Build CNN model: two conv/pool/dropout stages followed by a dense classifier
model = Sequential()
model.add(Conv2D(64, (2,2), input_shape=(28,28,1), activation = "relu"))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(32, (2,2), activation = "relu"))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation="relu"))
model.add(Dropout(0.3))
model.add(Dense(10,activation="softmax"))
model.summary()
#Compile the model
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
#Fit the model
history = model.fit(X_train,y_train, epochs=20,batch_size=128,verbose=1)
scores = model.evaluate(X_test,y_test)
print("\n%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
# BUG FIX: loss is not a percentage — the original printed scores[0]*100
# with a '%' suffix, which misreports the loss by 100x.
print("\n%s: %.4f" % (model.metrics_names[0], scores[0]))
# Plot accuracy result
# NOTE(review): newer Keras records this metric under 'accuracy', not 'acc' —
# confirm against the installed version.
plt.plot(history.history['acc'])
plt.title('model accuracy plot')
plt.ylabel('accuracy')
plt.xlabel('epoch')
# BUG FIX: only the training curve is plotted; the legend previously claimed
# a 'test' series that does not exist.
plt.legend(['train'], loc='lower right')
plt.show()
# Plot loss result
plt.plot(history.history['loss'])
plt.title('model loss plot')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train'], loc='upper right')
plt.show()
|
'''
Created on Sep 15, 2015
@author: Jonathan Yu
'''
def minutesNeeded(m):
    """Return total minutes for m units: 60 for the first, 25 for each extra."""
    extra_units = m - 1
    return 60 + 25 * extra_units


if __name__ == '__main__':
    # Library module: nothing to execute directly.
    pass
# Accumulate the integers 0..100, reporting the running total each pass.
total = 0
for counter in range(101):
    total += counter
    print('loop counter is: ' + str(counter) + ' and total is: ' + str(total))
print('final total is: ' + str(total))
|
import random
import time
import string
import threading
import _thread
from socket import *
import ast
import sys
import hashlib
import config
from socket import error
import datetime
import pickle
import os.path
from os import path
# Genesis ledger: every client (A-E) starts with 100 units.
initialBalances = {'A': 100, 'B': 100, 'C': 100, 'D': 100, 'E': 100}
# Seconds to wait for Paxos responses before declaring the proposal failed.
timeOutDuration = 10
def calculateBalances(currentState, initial_balances=None):
    """Replay every transaction in the blockchain and return final balances.

    currentState: state dict whose 'blockChain' is a list of
        (header, [transactions]) blocks; each transaction is a
        (sender, receiver, amount) triple (amount may be a string).
    initial_balances: optional starting {client: balance} mapping; defaults
        to the module-level initialBalances (backward compatible).
    Returns a new {client: balance} dict; inputs are not mutated.
    """
    if initial_balances is None:
        initial_balances = initialBalances
    currentBalances = initial_balances.copy()
    for block in currentState['blockChain']:
        # block = (header, [transactions]); only the transactions matter here
        for transaction in block[1]:
            sender = transaction[0]
            receiver = transaction[1]
            amt = transaction[2]
            # amounts may arrive as strings over the wire — coerce to int
            currentBalances[receiver] = currentBalances[receiver] + int(amt)
            currentBalances[sender] = currentBalances[sender] - int(amt)
    return currentBalances
def separateMessages(message):
    """Split a '%'-delimited network payload into individual message strings.

    Trailing fragments of 3 characters or fewer are treated as padding and
    dropped, matching the wire format used by the relay.
    """
    pieces = []
    buffer = message
    while len(buffer) > 3:
        delim = buffer.find('%')
        pieces.append(buffer[:delim])
        tail = buffer[delim:]
        buffer = buffer[delim + 1:] if len(tail) > 3 else ''
    return pieces
def turnLetterIntoNum(letter):
    """Map a client letter A-E (case-insensitive) to its server index 0-4.

    Returns None for any other letter, matching the original fall-through
    behaviour of the nested if chain this replaces.
    """
    return {'A': 0, 'B': 1, 'C': 2, 'D': 3, 'E': 4}.get(letter.upper())
def balGreaterThanOrEqual(bal, BallotNum):
    """Return True when ballot `bal` >= `BallotNum` lexicographically.

    Ballots are (round/depth, sequence, proc_num) triples; components are
    coerced with int() so string-encoded numbers compare numerically.

    BUG FIX: the original wrapped int() around whole comparisons —
    e.g. int(bal[1] > BallotNum[1]) and int(bal[0] == BallotNum[0]) —
    converting the boolean result instead of the operands, so string
    components would have been compared lexically, not numerically.
    """
    lhs = (int(bal[0]), int(bal[1]), int(bal[2]))
    rhs = (int(BallotNum[0]), int(BallotNum[1]), int(BallotNum[2]))
    return lhs >= rhs
def saveState(currentState):
    """Persist a crash-recovery snapshot of this process's state to disk.

    Only durable fields (blockChain, proc_num, ...) are kept as-is; all
    transient Paxos fields are reset so a restarted process comes back in a
    clean, in-sync state. Writes pickle data to save<proc_num>.txt.
    """
    # Renamed local: the original shadowed the function's own name.
    snapshot = currentState.copy()
    snapshot['state'] = 'N/A'
    snapshot['acceptVal'] = 'N/A'
    snapshot['acceptBal'] = 'N/A'
    snapshot['value'] = 'N/A'
    snapshot['BallotNum'] = (0, 0, currentState['proc_num'])
    snapshot['mostRecentResponse'] = 'N/A'
    snapshot['messagesReceived'] = []
    snapshot['transactions'] = []
    snapshot['inSync'] = True
    # BUG FIX: the original passed an unclosed open() straight to
    # pickle.dump, leaking the file handle.
    with open('save' + str(currentState['proc_num']) + '.txt', "wb") as f:
        pickle.dump(snapshot, f)
def readState(currentState):
    """Load this process's saved snapshot, if one exists on disk.

    Returns the unpickled state from save<proc_num>.txt, or the passed-in
    state unchanged when no save file is present.
    """
    filename = "save" + str(currentState['proc_num']) + ".txt"
    if path.exists(filename):
        # BUG FIX: the original never closed the file it opened.
        with open(filename, "rb") as f:
            currentState = pickle.load(f)
    return currentState
def sendPropAck(message,currentState,NWSock):
    """Paxos phase-1 promise: echo the proposer's ballot plus our previously
    accepted (ballot, value) pair ('N/A' when nothing has been accepted).
    Messages are str(dict) payloads terminated by '%'."""
    newMessage = {}
    newMessage['type'] = 'prop_ack'
    newMessage['bal'] = message['bal']
    newMessage['acceptBal'] = currentState['acceptBal']
    newMessage['acceptVal'] = currentState['acceptVal']
    newMessage['destination'] = message['sender']
    newMessage['sender'] = message['destination']
    NWSock.send(bytes(str(newMessage)+ '%', encoding='utf8'))
    return 0
def sendAccAck(message,NWSock):
    """Paxos phase-2 acknowledgement: accept the proposer's (ballot, value)
    and send it back to the proposer over the relay socket."""
    newMessage = {}
    newMessage['type'] = 'acc_ack'
    newMessage['bal'] = message['bal']
    print('Sent acc ack message with ballowNum: ' + str(newMessage['bal']))
    newMessage['value'] = message['value']
    newMessage['destination'] = message['sender']
    newMessage['sender'] = message['destination']
    NWSock.send(bytes(str(newMessage)+ '%', encoding='utf8'))
    return 0
def sendPropMessages(currentState,NWSock,newBlock):
    """Start Paxos phase 1: broadcast a 'prop' (prepare) for newBlock.

    The ballot is (block depth, local sequence + 1, proc_num). Our own copy
    is delivered directly via receiveMessage() instead of the socket.
    Side effects: clears messagesReceived, enters 'waiting for prop_ack',
    and records newBlock as our proposed value.
    """
    newMessage = {}
    newMessage['type'] = 'prop'
    newMessage['bal'] = (getDepthNumFromBlock(newBlock),currentState['BallotNum'][1] +1,currentState['proc_num'])
    #print(newMessage['bal'])
    newMessage['sender'] = currentState['proc_num']
    currentState['messagesReceived'] = []
    currentState['state'] = 'waiting for prop_ack'
    currentState['value'] = newBlock
    print('Sending prop messages with ballot num: ' + str(newMessage['bal']))
    for server in [0,1,2,3,4]:
        newMessage['destination'] = server
        if newMessage['destination'] == newMessage['sender']:
            receiveMessage(newMessage,currentState,NWSock)
        else:
            NWSock.send(bytes(str(newMessage) + '%', encoding='utf8'))
    return 0
def sendDecisionMessages(currentState,NWSock):
    """Broadcast the decided (ballot, value) to all five servers once a
    majority of acc_acks has arrived; our own copy goes straight through
    receiveMessage()."""
    newMessage = {}
    newMessage['type'] = 'decision'
    newMessage['bal'] = currentState['messagesReceived'][0]['bal']
    newMessage['value'] = currentState['messagesReceived'][0]['value']
    newMessage['sender'] = currentState['messagesReceived'][0]['destination']
    for server in [0,1,2,3,4]:
        newMessage['destination'] = server
        if newMessage['destination'] == newMessage['sender']:
            receiveMessage(newMessage,currentState,NWSock)
        else:
            NWSock.send(bytes(str(newMessage) + '%', encoding='utf8'))
    return 0
def sendAccMessages(currentState,NWSock):
    """Paxos phase 2: broadcast 'acc' after a majority of promises.

    Per the Paxos rule, if any promise carried a previously accepted value,
    propose the one with the highest accepted ballot; otherwise propose our
    own value. Clears messagesReceived and enters 'waiting for acc_ack'.
    """
    value = None
    # Sentinel ballot lower than any real (depth, seq, proc) triple.
    b = (len(currentState['blockChain']) + 1,-999,-999)
    for message in currentState['messagesReceived']:
        if message['acceptVal'] != 'N/A':
            if balGreaterThanOrEqual(message['acceptBal'],b):
                value = message['acceptVal']
                b= message['acceptBal']
    newMessage = {}
    newMessage ['type'] = 'acc'
    newMessage ['bal'] = currentState['messagesReceived'][0]['bal']
    print("sending acc messages with ballot number: " + str(newMessage['bal']))
    if value is not None:
        newMessage['value'] = value
    else:
        newMessage['value'] = currentState['value']
    newMessage['sender'] = currentState['messagesReceived'][0]['destination']
    currentState['messagesReceived'] = []
    currentState['state'] = 'waiting for acc_ack'
    for server in [0,1,2,3,4]:
        newMessage['destination'] = server
        if newMessage['destination'] == newMessage['sender']:
            receiveMessage(newMessage,currentState,NWSock)
        else:
            NWSock.send(bytes(str(newMessage)+ '%' , encoding='utf8'))
def sendSync(currentState,NWSock):
    """Ask every peer for all blocks past our current chain length
    (used when a decision arrives out of order or a proposal times out)."""
    newMessage = {}
    newMessage['type'] = 'sync'
    newMessage['blockChainLength'] = len(currentState['blockChain'])
    newMessage['bal'] = currentState['BallotNum']
    newMessage['sender'] = currentState['proc_num']
    for server in [0,1,2,3,4]:
        newMessage['destination'] = server
        if newMessage['destination'] == newMessage['sender']:
            # no need to sync with ourselves
            pass
        else:
            NWSock.send(bytes(str(newMessage)+ '%' , encoding='utf8'))
def sendSyncResponse(currentState,message,NWSock):
    """Answer a 'sync' request with every block the requester is missing
    (our chain sliced from their reported length onward)."""
    newMessage = {}
    newMessage['type'] = 'sync-response'
    newMessage['data'] = currentState['blockChain'][message['blockChainLength']:]
    newMessage['bal'] = currentState['BallotNum']
    newMessage['sender'] = message['destination']
    newMessage['destination'] = message['sender']
    NWSock.send(bytes(str(newMessage) + '%', encoding='utf8'))
def sendTransSet(currentState,NWSock):
    """Report our pending (un-mined) transaction set; destination -1
    addresses the client rather than a peer server."""
    newMessage = {}
    newMessage['type'] = 'trans-set'
    newMessage['sender'] = currentState['proc_num']
    newMessage['destination'] = -1
    newMessage['transactions'] = currentState['transactions']
    NWSock.send(bytes(str(newMessage) + '%', encoding='utf8'))
def sendBalance(currentState,NWSock):
    """Report the balances implied by our current chain (plus its depth)
    to the client (destination -1)."""
    newMessage = {}
    newMessage['type'] = 'balances'
    newMessage['sender'] = currentState['proc_num']
    newMessage['destination'] = -1
    newMessage['balances'] = calculateBalances(currentState)
    newMessage['depth'] = len(currentState['blockChain'])
    NWSock.send(bytes(str(newMessage) + '%', encoding='utf8'))
def sendBlockChain(currentState,NWSock):
    """Send our full blockchain to the client (destination -1)."""
    newMessage = {}
    newMessage['type'] = 'blockchain'
    newMessage['sender'] = currentState['proc_num']
    newMessage['destination'] = -1
    newMessage['blockChain'] = currentState['blockChain']
    NWSock.send(bytes(str(newMessage) + '%', encoding='utf8'))
def rejectTrans(transaction,NWSock,currentState):
    """Notify the client (destination -1) that a transaction was rejected
    because it would overdraw its sender's balance."""
    newMessage = {}
    newMessage['type'] = 'failure'
    newMessage['sender'] = currentState['proc_num']
    newMessage['destination'] = -1
    newMessage['msg'] = 'Transaction Failed'
    newMessage['data'] = transaction
    NWSock.send(bytes(str(newMessage)+ '%' , encoding='utf8'))
def receiveDecision(currentState,message,NWSock):
    """Handle a Paxos 'decision': append the decided block if it extends the
    chain and its transactions are valid; otherwise re-sync or (as the
    proposer) reject the offending transactions back to the client.
    """
    # If we proposed this value, the round is over regardless of outcome.
    if currentState['proc_num'] == message['sender']:
        currentState['state'] = 'N/A'
    #print(message['value'])
    if len(currentState['blockChain']) + 1 != getDepthNumFromBlock(message['value']):
        # print('New block is not the next value')
        # Same depth as our tip: duplicate decision, just refresh the timer.
        if len(currentState['blockChain']) == getDepthNumFromBlock(message['value']):
            currentState['mostRecentResponse'] = datetime.datetime.now()
        else:
            print('Received decision out of order, updating blockchain now for all blocks past block depth:' + str(len(currentState['blockChain'])))
            sendSync(currentState,NWSock)
    else:
        # The block is the next in the chain. Now we validate the transactions
        validityCheck = checkIfTransactionsAreValid(currentState,NWSock, message['value'])
        #print(validityCheck)
        if validityCheck == [True,True]:
            #print('VALID')
            #if decided value is from this proc_num
            print('Received validated block decision: ' + str(message['value']))
            print('')
            # print('SAVE STATE OUTPUT BELOW:')
            # because we added a new block, we have to reset paxos states
            currentState['state'] = 'N/A'
            currentState['value'] = 'N/A'
            currentState['acceptBal'] = 'N/A'
            currentState['acceptVal'] = 'N/A'
            currentState['mostRecentResponse'] = 'N/A'
            currentState['messagesReceived'] = []
            currentState['blockChain'].append(message['value'])
            currentState['BallotNum'] = (len(currentState['blockChain']),0,-1)
            # The owner of the mined transactions drops them from its queue.
            if currentState['proc_num'] == turnLetterIntoNum(message['value'][1][0][0]):
                trans = currentState['transactions'][:2]
                currentState['transactions'].remove(trans[0])
                currentState['transactions'].remove(trans[1])
            saveState(currentState)
        else:
            print('Invalid transactions')
            currentState['acceptVal']= 'N/A'
            currentState['acceptBal']= 'N/A'
            # if transactions are not valid:
            # print(str(currentState['transactions']))
            # Only the proposer rejects the bad transactions back to the client.
            if currentState['proc_num'] == message['sender']:
                # print('Making state N/A')
                currentState['state'] = 'N/A'
                currentState['mostRecentResponse'] = "N/A"
                trans = currentState['transactions'][:2]
                if validityCheck[1] == False:
                    rejectTrans(trans[1],NWSock,currentState)
                    currentState['transactions'].remove(trans[1])
                    # print('1 BAD ' + str(currentState['transactions']))
                if validityCheck[0] == False:
                    rejectTrans(trans[0],NWSock,currentState)
                    currentState['transactions'].remove(trans[0])
                    # print('0 BAD ' + str(currentState['transactions']))
        # print('NEW BLOCKCHAIN CREATED:')
        # for block in currentState['blockChain']:
        #     print(str(block))
        # if some transactions are invalid:
        # I dont think we need to use this:
        # currentState['BallotNum'] = (len(blockChain), currentState['BallotNum'][1],currentState['proc_num'])
def receiveMessage(message,currentState,NWSock):
    """Dispatch one decoded network message to the matching handler.

    Refactor: the original ten-level nested if/else ladder is flattened to a
    single elif chain; dispatch order and conditions are unchanged.
    Also, inSync is now set after the sync-response loop so an empty
    response (already up to date) still clears the out-of-sync flag.
    """
    if message['type'] == 'prop_ack' and currentState['state'] == 'waiting for prop_ack':
        print('Received prop_ack with ballot number: ' + str(message['bal']) )
        currentState['messagesReceived'].append(message)
        currentState['mostRecentResponse'] = datetime.datetime.now()
        # a majority (3 of 5) of promises lets us start the accept phase
        if len(currentState['messagesReceived']) >= 3:
            sendAccMessages(currentState,NWSock)
    elif message['type'] == 'acc_ack' and currentState['state'] == 'waiting for acc_ack':
        print('Received acc_ack with ballot number: ' + str(message['bal']) )
        currentState['messagesReceived'].append(message)
        currentState['mostRecentResponse'] = datetime.datetime.now()
        if len(currentState['messagesReceived']) >= 3:
            sendDecisionMessages(currentState,NWSock)
    elif message['type'] == 'prop':
        print('Received prop with ballot number: ' + str(message['bal']) )
        # promise only ballots at least as high as any we've already seen
        if balGreaterThanOrEqual(message['bal'],currentState['BallotNum']):
            currentState['BallotNum'] = message['bal']
            sendPropAck(message,currentState,NWSock)
    elif message['type'] == 'acc':
        print('Received acc with ballot number: ' + str(message['bal']) )
        if balGreaterThanOrEqual(message['bal'],currentState['BallotNum']):
            currentState['acceptBal'] = message['bal']
            currentState['acceptVal'] = message['value']
            sendAccAck(message,NWSock)
    elif message['type'] == 'decision':
        print('Received decision from message with ballot number: ' + str(message['bal']) )
        receiveDecision(currentState,message,NWSock)
    elif message['type'] == 'sync':
        print('Received request to sync from process ' + str(message['sender']))
        sendSyncResponse(currentState,message,NWSock)
    elif message['type'] == 'sync-response':
        print('Received data from server: ' + str(message['sender']))
        for block in message['data']:
            # append only blocks that extend the chain contiguously
            if getDepthNumFromBlock(block) == len(currentState['blockChain']) + 1:
                currentState['blockChain'].append(block)
                print('Updated the blockchain')
                saveState(currentState)
        currentState['inSync'] = True
    elif message['type'] == 'transaction':
        print('Received transaction request from client')
        currentState['transactions'].append(message['transaction'])
        print('Transaction List is now: ' + str(currentState['transactions']))
    elif message['type'] == 'print_set':
        sendTransSet(currentState,NWSock)
    elif message['type'] == 'print_balance' and currentState['inSync']:
        sendBalance(currentState,NWSock)
    elif message['type'] == 'print_blockchain' and currentState['inSync']:
        sendBlockChain(currentState,NWSock)
    return 0
#added blockChain field for initialization
def initiateCurrentState(proc_num):
    """Build the default in-memory state for this process, then overlay any
    previously saved snapshot from disk via readState()."""
    currentState = {
        'state': 'N/A',
        'acceptVal': 'N/A',
        'acceptBal': 'N/A',
        # 'N/A' is the default until a block is successfully mined
        'value': 'N/A',
        'BallotNum': (0, 0, proc_num),
        'proc_num': proc_num,
        'mostRecentResponse': 'N/A',
        'messagesReceived': [],
        'transactions': [],
        'blockChain': [],
        'inSync': True,
    }
    return readState(currentState)
def get_random_string():
    """Return a random 10-character alphanumeric nonce."""
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.choice(alphabet) for _ in range(10))
def isValidBlock(block):
    """Proof-of-work test: a block is valid when the sha256 hex digest of
    its repr ends in '0' or '1' (so roughly 1 in 8 nonces succeeds)."""
    digest = hashlib.sha256(str(block).encode()).hexdigest()
    return digest[-1] in ('0', '1')
# TODO for ajit: put in the correct way to access depth number in the block
# Everything in slashes below needs to be replaced with the right thing based on how you organize the data structures
def getDepthNumFromBlock(block):
    """Return the chain depth stored in a block's header, where a block is
    ((depth, prev_hash, nonce), transactions)."""
    header = block[0]
    return header[0]
# checked: this function works
# do we want to return bal as well?
def checkIfTransactionsAreValid(currentState,NWSock,new_block):
    """Validate the two transactions in new_block against current balances.

    Applies them in order so the second transaction sees the first one's
    debit. Returns [bool, bool], one flag per transaction; a transaction is
    invalid when it would drive its sender's balance negative.
    NWSock is unused here (kept for call-site symmetry); `rec` is likewise
    extracted but unused.
    """
    trans = new_block[1]
    bal= dict(calculateBalances(currentState))
    transCorrect = [True,True]
    # print(str(trans))
    for transact in [0,1]:
        sender = trans[transact][0]
        rec = trans[transact][1]
        amt = trans[transact][2]
        # print(str(amt))
        # print(type(sender), type(rec), type(amt)) #type string
        # print(type(bal[sender])) #type int
        # print(str(bal))
        # print(str(int(bal[sender]) - int(amt)))
        # only debit (not credit) is applied here: we just need to know
        # whether each sender can afford its transfer
        if bal[sender] - int(amt) < 0:
            transCorrect[transact] = False
        else:
            bal[sender] = bal[sender] - int(amt)
            # print(str(bal[sender]))
    return transCorrect
#TODO ajit: Create the block from currentState. Everything you need to make it is there.
# We will only be generating the block once every round until it works. Youll need to calculate hash of previous block and stuff too
#tested and works
def createBlock(currentState):
    """Assemble a candidate block from the two oldest pending transactions.

    Returns ((depth, prev_hash, nonce), [tx0, tx1]); prev_hash is the sha256
    hex digest of the previous block's repr, or the literal 'NULL' for the
    genesis block. The nonce comes from get_random_string().
    """
    pending = currentState['transactions'][:2]
    chain = currentState['blockChain']
    nonce = get_random_string()
    depth_newblock = len(chain) + 1
    prevBlockStr = 'NULL'
    if depth_newblock > 1:
        prevBlockStr = hashlib.sha256(str(chain[-1]).encode()).hexdigest()
    head_of_block = (depth_newblock, prevBlockStr, nonce)
    return (head_of_block, [pending[0], pending[1]])
def connectToNetwork(proc_num):
    """Connect to the local relay server and identify this process.

    Busy-polls until the relay accepts the connection, sends our process
    number as the handshake, then switches the socket to non-blocking mode
    so the main loop can poll recv() without stalling.
    """
    NWSock = socket(AF_INET, SOCK_STREAM)
    while True:
        try:
            NWSock.connect(('127.0.0.1', config.serverPortNumber))
            break
        except:
            # deliberate best-effort retry until the relay is listening
            pass
    NWSock.send(bytes(str(proc_num), encoding='utf8'))
    NWSock.setblocking(0)
    return NWSock
def blockEquals(block1,block2):
    """Return True when two blocks share header depth and prev-hash and have
    identical transaction triples; '' (the "no block" sentinel) never equals
    anything.
    NOTE(review): the header nonce (header[2]) is not compared — confirm
    that is intentional.
    """
    if block1 =='' or block2 == '':
        return False
    if block1[0][0] == block2[0][0] and block1[0][1] == block2[0][1]:
        for transaction1,transaction2 in zip(block1[1],block2[1]):
            if transaction1[0] != transaction2[0] or transaction1[1] != transaction2[1] or transaction1[2] != transaction2[2]:
                return False
    else:
        return False
    return True
def run(proc_num):
    """Main event loop for one blockchain server process.

    Each iteration: (1) drain and dispatch any incoming relay messages,
    (2) if at least two transactions are pending and we are in sync, build a
    candidate block and, once its nonce passes proof-of-work, propose it via
    Paxos, and (3) time out stalled proposals by requesting a chain sync.
    """
    currentState = initiateCurrentState(proc_num)
    # print(currentState)
    # last block we already proposed; avoids re-proposing the same block
    lastValidBlock = ''
    NWSock = connectToNetwork(currentState['proc_num'])
    print('NW Connected')
    while True:
        messageString = ''
        try:
            messageString = NWSock.recv(4096).decode('utf-8')
        except:
            # non-blocking socket: nothing to read this tick
            pass
        if messageString != '':
            messages = separateMessages(messageString)
            for message in messages:
                messageDict = ast.literal_eval(message)
                # print('Received message' + str(messageDict))
                receiveMessage(messageDict,currentState,NWSock)
        # if currentState['mostRecentResponse'] != "N/A":
        #     currentTime = datetime.datetime.now()
        #     #get time passed since this response
        #     if (currentTime - currentState['mostRecentResponse']).seconds > timeOutDuration:
        #         print('Not received any responses to request. Proposition failed. Attempting to update blockChain')
        #         sendSync(currentState,NWSock)
        #         lastValidBlock = ''
        if(len(currentState['transactions']) > 1 and currentState['inSync']):
            #creates block based on current state.
            block = createBlock(currentState)
            if blockEquals(block,lastValidBlock):
                # print('Block NOT equals')
                #this means that we have already calculated the right nonce, and because we create block from current state, we know that the block is valid for being the next value
                # so a block hasnt been proposed yet and this block is able to be the next one if paxos is down.
                pass
            else:
                if isValidBlock(block):
                    print("Found a valid block for transactions: " + str(currentState['transactions'][0]) +' ' + str(currentState['transactions'][1]) )
                    sendPropMessages(currentState,NWSock,block)
                    lastValidBlock = block
        # receive message and then process if received
        if currentState['mostRecentResponse'] != "N/A":
            currentTime = datetime.datetime.now()
            #get time passed since this response
            if (currentTime - currentState['mostRecentResponse']).seconds > timeOutDuration:
                print('Not received any responses to request. Proposition failed. Requesting blockchain update before recalculating block')
                sendSync(currentState,NWSock)
                lastValidBlock = ''
                currentState['mostRecentResponse'] = "N/A"
                currentState['inSync'] = False
### MAIN STARTS HERE
if __name__ == "__main__":
    # Guarded so importing this module for its helpers does not start a node.
    # Usage: python <script> <proc_num>
    run(proc_num=int(sys.argv[1]))
"""
#------------------------------------------------------------------------------
# Recording and processing OpenCV data for experiments - record_camera_live.py
#
# Track a payload, save the position data, and output the processed data
#
# Created: 4/27/17 - Daniel Newman -- danielnewman09@gmail.com
#
# Modified:
# * 4/27/17 - DMN -- danielnewman09@gmail.com
# - Added documentation for this script
#------------------------------------------------------------------------------
"""
# import the necessary packages
from __future__ import print_function
import numpy as np
import os
import sys
import argparse
import pdb
from matplotlib import pyplot as plt
# pip install imutils
import imutils
from imutils.video import VideoStream
#conda install -c menpo opencv=2.4.11
import cv2
# Add my local path to the relevant modules list
# Add my local path to the relevant modules list
# NOTE(review): assumes the working directory path contains 'Daniel Newman';
# otherwise split() returns a single element and the appended path is wrong.
path = os.getcwd()
rootpath = path.split('Daniel Newman')
rootpath = rootpath[0] + 'Daniel Newman/Python Modules'
sys.path.append(rootpath)
# Import my python modules
import Generate_Plots as genplt
# Use lab plot style
#plt.style.use('Crawlab')
# define the lower and upper boundaries of the desired color in the HSV
# These colors will determine the filter that the program uses to locate the payload and axis
colorLower = (10, 100, 100)
colorUpper = (39, 255, 255)
# This is the minimum normalized error. Used to
# Determine location of saving amplitudes
min_omega_error = 0.7
# Degrees to radians conversion
DEG_TO_RAD = np.pi / 180
# Conversion used to translate raw x and y coordinates to length displacements
X_conversion = 300. # pixels per meter
Y_conversion = 300. # pixels per meters
# Get the current directory (same assumption: cwd contains 'Cherry_Picker_Crane')
path = os.getcwd()
rootpath = path.split('Cherry_Picker_Crane')
rootpath = rootpath[0] + 'Cherry_Picker_Crane/'
################################################################################################
################################################################################################
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--picamera", type=int, default=-1,
help="whether or not the Raspberry Pi camera should be used")
ap.add_argument("-f", "--fps", type=float, default=20,
help="FPS of output video")
args = vars(ap.parse_args())
print('\n *************************************** \n ')
# Ask the user for the given input shaper being recorded
shaper = raw_input('Input Shaper: ')
deflection = raw_input('Nominal deflection (in degrees): ')
omega_error = raw_input('Normalized Frequency Error: ')
# Give the user the control to begin the recording process
print('\n *************************************** \n ')
enter = raw_input('Press Enter to begin recording')
print('\n *************************************** \n ')
# End user inputs
################################################################################################
################################################################################################
# Begin code for writing and processing video
# initialize the video stream and allow the camera
# sensor to warmup
print("[INFO] warming up camera...")
# NOTE(review): two capture objects are opened -- an imutils VideoStream and a
# raw cv2.VideoCapture -- but only `camera` is read from in the main loop.
vs = VideoStream(usePiCamera=args["picamera"] > 0).start()
camera = cv2.VideoCapture(0)
# If the user entered "Test", the program will save the video to a special location
# Otherwise, videos will be saved to a location determined by the user given parameters
if shaper != 'Test':
    folder = 'Data/' + deflection + 'deg_' + shaper + '/freq_error_' + omega_error
else:
    folder = 'Calibrate'
# This command ensures that the folder gets made by the program if it doesn't already exist
if not os.path.exists(folder):
    os.makedirs(folder)
# These are the empty arrays that will be used for data collection
# (row 0 of each is a dummy zero row; real samples are appended per frame).
theta = np.zeros((1,2))
lengths = np.zeros((1,2))
ball = np.zeros((1,3))
pin = np.zeros((1,3))
# 'Count' is a dirty way of iterating through every frame and appending the data arrays
# This could be more efficiently done
count = 0.
# Look for other video files for trial numbers and add to the existing set
# This means that the program will automatically iterate through trial numbers
file_paths = np.array([os.path.join(dirpath, f)
                       for dirpath, dirnames, files in os.walk(folder)
                       for f in files if f.endswith('.pdf')])
if len(file_paths) > 0:
    trial_numbers = np.zeros(len(file_paths))
    for i in range(len(file_paths)):
        # NOTE(review): `dirpath` leaks out of the comprehension above only
        # under Python 2; this line is a NameError on Python 3. Also assumes
        # a single-digit trial number at a fixed offset in the file name.
        trial_numbers[i] = int(file_paths[i][len(dirpath) + 7])
    trial = int(np.amax(trial_numbers) + 1)
else:
    trial = 1
# Initialize variables that will be used to display relevant data on the video frame.
# Initializing these variables ensures that they are defined for display.
curr_angle = 0.
cable_length = 0.
del_x = 0.
del_y = 0.
y1 = 0
y2 = 0
# loop over frames from the video stream
while True:
    # grab the frame from the video stream and resize it to have a
    # maximum width of 600 pixels
    (grabbed,frame) = camera.read()
    frame = imutils.resize(frame, width=420)
    key = cv2.waitKey(1) & 0xFF
    # convert the frame to the HSV color space
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # construct a mask for the desired color, then perform
    # a series of dilations and erosions to remove any small
    # blobs left in the mask
    mask = cv2.inRange(hsv, colorLower, colorUpper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    # find contours in the mask and initialize the current
    # (x, y) center of the ball
    areaArray = []
    # [-2] keeps compatibility across OpenCV versions' findContours returns.
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)[-2]
    for i, c in enumerate(cnts):
        area = cv2.contourArea(c)
        areaArray.append(area)
    center = None
    # only proceed if two contours were found
    # This program assumes that the only filtered contours are the pivot point and payload.
    # If one item is lost temporarily, the program will throw a warning and skip the time increment
    if len(cnts) >= 2:
        # find the largest contour in the mask, then use
        # it to compute the minimum enclosing circle and
        # centroid
        c = max(cnts, key=cv2.contourArea)
        ((x1, y1), radius1) = cv2.minEnclosingCircle(c)
        M1 = cv2.moments(c)
        center1 = (int(M1["m10"] / M1["m00"]), int(M1["m01"] / M1["m00"]))
        # only proceed if the radius meets a minimum size
        if radius1 > 5:
            # draw the circle and centroid on the frame,
            # then update the list of tracked points
            cv2.circle(frame, (int(x1), int(y1)), int(radius1),
                       (0, 255, 255), 2)
            # NOTE(review): `center` is always None here -- this looks like it
            # was meant to be `center1`; confirm against cv2.circle usage.
            cv2.circle(frame, center, 5, (0, 0, 255), -1)
        # find the smallest contour in the mask, then use
        # it to compute the minimum enclosing circle and
        # centroid
        #first sort the array by area
        sorteddata = sorted(zip(areaArray, cnts), key=lambda x: x[0], reverse=True)
        #find the nth largest contour [n-1][1], in this case 2
        secondlargestcontour = sorteddata[1][1]
        p = secondlargestcontour
        #p = max(cnts, key=cv2.contourArea)
        ((x2, y2), radius2) = cv2.minEnclosingCircle(p)
        M2 = cv2.moments(p)
        center2 = (int(M2["m10"] / M2["m00"]), int(M2["m01"] / M2["m00"]))
        # only proceed if the radius meets a minimum size
        if radius2 > 5:
            # draw the circle and centroid on the frame,
            # then update the list of tracked points
            cv2.circle(frame, (int(x2), int(y2)), int(radius2),
                       (0, 255, 255), 2)
            # NOTE(review): same None `center` issue as above (center2?).
            cv2.circle(frame, center, 5, (0, 0, 255), -1)
        # Compute the displacements of the ball and pivot point
        del_x = np.round(int(x1) - int(x2)).astype(int)
        del_y = np.round(int(y1) - int(y2)).astype(int)
        # Convert pixel displacements to meters.
        del_x /= X_conversion
        del_y /= Y_conversion
        del_x = np.round(del_x,2)
        del_y = np.round(del_y,2)
        # Compute the current angle
        curr_angle = np.round(np.arctan2(del_x,del_y) / DEG_TO_RAD,2)
        # Compute the current cable length
        cable_length = np.round(np.sqrt(del_x**2 + del_y**2),2)
        # Append (timestamp, x, y) for the payload ("ball").
        ball = np.append(
            ball,
            np.hstack(
                (count * 1/args["fps"], x1,y1)
            ).reshape(1,3),
            axis=0
        )
        # Append (timestamp, x, y) for the pivot ("pin").
        pin = np.append(
            pin,
            np.hstack(
                (count * 1/args["fps"], x2, y2)
            ).reshape(1,3),
            axis=0
        )
        # Append (timestamp, angle).
        theta = np.append(
            theta,
            np.hstack(
                (count * 1/args["fps"], curr_angle)
            ).reshape(1,2),
            axis=0
        )
        # Append (timestamp, cable length).
        lengths = np.append(
            lengths,
            np.hstack(
                (count * 1/args["fps"], cable_length)
            ).reshape(1,2),
            axis=0
        )
    else:
        # If one of the objects was not detected in this frame
        print('\n Lost Track of Object at t = {}'.format(count * 1/args["fps"]))
    # Print important information to the CV frame
    cv2.putText(
        frame,'Current Angle: {}'.format(curr_angle),
        (10,30), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(255,255,255),1
    )
    cv2.putText(
        frame,'Current Length: {}'.format(cable_length),
        (10,60), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(255,255,255),1
    )
    cv2.putText(
        frame,'Delta X: {}'.format(del_x),
        (300,30), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(255,255,255),1
    )
    cv2.putText(
        frame,'Delta Y: {}'.format(np.round(int(y1) - int(y2)).astype(int)),
        (300,60), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(255,255,255),1
    )
    # Iterate the count
    count += 1
    # show the frame to our screen
    cv2.imshow("Frame", frame)
    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break
# Save the data
theta_filename = folder + '/trial_{}'.format(trial) + '_angular_data.txt'
np.savetxt(theta_filename, np.round(theta,2), delimiter=',')
# NOTE(review): missing underscore before 'length_data' -- files come out as
# e.g. 'trial_1length_data.txt', inconsistent with the other three names.
# Left unchanged in case downstream tooling expects it.
lengths_filename = folder + '/trial_{}'.format(trial) + 'length_data.txt'
np.savetxt(lengths_filename, np.round(lengths,2), delimiter=',')
ball_filename = folder + '/trial_{}'.format(trial) + '_ball_data.txt'
np.savetxt(ball_filename, np.round(ball,2), delimiter=',')
pin_filename = folder + '/trial_{}'.format(trial) + '_pin_data.txt'
np.savetxt(pin_filename, np.round(pin,2), delimiter=',')
# do a bit of cleanup
print("[INFO] cleaning up...")
cv2.destroyAllWindows()
vs.stop()
#writer.release()
# End code for writing and processing video
################################################################################################
################################################################################################
# Parse the data
file_name = 'trial_{}'.format(trial) + '_angular_data.txt'
local_path = folder + '/'
import csv
from scipy import signal
from scipy.optimize import curve_fit
# Grab the data from the file
data = np.genfromtxt(local_path + file_name, delimiter=',', skip_header=0)
# The mid point of the data. Used to ensure that the
# processing portion grabs a steady-state swing
middle = np.round(len(data[:,1])/2).astype(int)
# Determine the approximate period and corresponding number of steps
# For the given pendulum (simple-pendulum small-angle formula, g = 9.81)
period = 2*np.pi / np.sqrt(9.81/cable_length)
period_step = np.round(period * args["fps"]).astype(int)
# Get the approximate amplitudes from the data
peak = (np.amax(data[middle:middle + period_step,1]) - np.amin(data[middle:middle + period_step,1]))/2
rows_to_omit_end = 0.
# Iterate through the last data values.
# If the angular value exceeds the approximate amplitude,
# eliminate that row of data. Continue until the
# 'steady-state' swing is all that remains
for ii in np.arange(1,len(data[:,0])):
    test_peak = data[-ii,1]
    if np.abs(test_peak - peak)/peak < 1:
        rows_to_omit_end = ii
        break
# Take the 'rows_to_omit_end' and back that number out by 5 seconds.
# Eliminate the rest of the data.
# NOTE(review): if the loop above never breaks, rows_to_omit_end stays 0 and
# the slice below becomes [...:-0], i.e. an EMPTY array. Also both bounds are
# floats (5 * args["fps"]), which Python 3 numpy rejects as slice indices.
rows_to_omit_beginning = len(data[:,0]) - rows_to_omit_end - 5 * args["fps"]
full_time = data[:,0]
full_vib = data[:,1]
# Clip the undesired rows from the data
data = data[rows_to_omit_beginning:-rows_to_omit_end,:]
# Extract the time from the angular data
time = data[:, 0]
# There are two types of data files, here we determine which we're opening
#if np.shape(data)[1] is 2:
vib = data[:, 1]
# If a nonzero average is detected, recenter the data
vib = np.subtract(vib,np.average(vib))
#else:
#    vib = data[:, 4]
# End data parsing
################################################################################################
################################################################################################
# Do a sine fit of the vibration data
# We'll use this function to smooth the data
def smooth(x, window_len=11, window='hanning'):
    """Smooth a 1-D signal by convolving it with a window of the given size.

    The signal is padded at both ends with reflected copies of itself (one
    window length each) so transients at the edges are minimized.

    input:
        x: the input signal (1-D numpy array)
        window_len: the dimension of the smoothing window; should be an odd integer
        window: one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman';
            a flat window produces a moving average.
    output:
        the smoothed signal. NOTE: length(output) != length(input); to
        correct, take y[(window_len/2-1):-(window_len/2)].

    From: http://wiki.scipy.org/Cookbook/SignalSmooth
    """
    if x.ndim != 1:
        raise ValueError("smooth only accepts 1 dimension arrays.")
    if x.size < window_len:
        raise ValueError("Input vector needs to be bigger than window size.")
    if window_len < 3:
        # Too small to smooth meaningfully; return the signal unchanged.
        return x
    if window not in ('flat', 'hanning', 'hamming', 'bartlett', 'blackman'):
        # Fixed message typo ("is on of" -> "is one of").
        raise ValueError("Window is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
    # Reflect-pad both ends by window_len-1 samples.
    s = np.r_[x[window_len-1:0:-1], x, x[-1:-window_len:-1]]
    if window == 'flat':  # moving average
        w = np.ones(window_len, 'd')
    else:
        # getattr replaces the original eval('np.'+window+'(window_len)');
        # same behavior for the whitelisted names, without eval.
        w = getattr(np, window)(window_len)
    # Normalize the window so the smoothed signal keeps the input's scale.
    y = np.convolve(w/w.sum(), s, mode='valid')
    return y
# Now that we've defined the smoothing function, let's use it
window_size = 7
vib_smoothed = smooth(vib, window_size)
# We can also remove any nonzero trends in the data
#vib_detrend = signal.detrend(vib_smoothed)
# Let's compare the raw data with it after our simple processing.
# [window_size-2:-1] trims the reflect-padding added by smooth() so the
# processed trace lines up with the raw time vector.
genplt.compare_responses(time,
                         vib,'Raw',
                         vib_smoothed[window_size-2:-1],'Processed',
                         #vib_detrend[window_size-2:-1],'Smoothed',
                         xlabel='Time (s)',ylabel='Vibration (deg)',
                         name_append=file_name[0:-4],folder=local_path
                         )
genplt.compare_responses(full_time,
                         full_vib,'Raw',
                         xlabel='Time (s)',ylabel='Vibration (deg)',
                         name_append=file_name[0:-4] + '_full',folder=local_path
                         )
vib_smoothed = vib_smoothed[window_size-2:-1]
# End sine fit of data
################################################################################################
################################################################################################
# CRAWLAB data functions
# First define all the functions we'll need.
def log_dec(peak1, peak2, num_cycles):
    """Estimate a damping ratio with the logarithmic-decrement method.

    Inputs:
        peak1 = amplitude of the first peak
        peak2 = amplitude of the Nth peak
        num_cycles = number of periods between the two peaks

    Output:
        zeta = the damping ratio

    Originally by Joshua Vaughan (03/28/14), updated for Python 3 03/17/16.
    """
    import numpy as np

    # Logarithmic decrement per cycle.
    log_decrement = np.log(peak1 / peak2) / num_cycles
    # Standard relation between log decrement and damping ratio.
    return 1 / np.sqrt(1 + (2 * np.pi / log_decrement)**2)
def get_local_Extrema(time, data):
    """Find the local maxima and minima of a response.

    Inputs:
        time = time array corresponding to the data
        data = response data array (a single dimension/state at a time)

    Output:
        (localMaxes, localMax_Times, localMins, localMin_Times) -- the
        amplitudes and times of the strict local maxima and minima.

    Originally by Joshua Vaughan (03/28/14), updated for Python 3 03/17/16.
    """
    from scipy import signal

    # Strict comparisons: plateaus are not reported as extrema.
    max_indexes = signal.argrelextrema(data, np.greater)
    min_indexes = signal.argrelextrema(data, np.less)

    return (data[max_indexes], time[max_indexes],
            data[min_indexes], time[min_indexes])
def get_zero_crossings(time, data):
    """Return the times at which a response crosses zero.

    Inputs:
        time = time array corresponding to the data
        data = response data array (a single dimension/state at a time)

    Output:
        zeros = list of the times immediately before each sign change

    Originally by Joshua Vaughan (03/28/14), updated for Python 3 03/17/16.
    """
    # A crossing is any adjacent pair whose signs differ; report the time of
    # the earlier sample of the pair.
    return [time[idx]
            for idx in range(len(time) - 1)
            if np.sign(data[idx]) != np.sign(data[idx + 1])]
def CRAWLAB_fft(data, time, plotflag):
    ''' Function to get the FFT for a response
    #
    # Inputs:
    #   time = time array corresponding to the data
    #   data = the response data array (only pass a single dimension/state at at time)
    #   plotflag = will plot the FFT if nonzero
    #
    # Output:
    #   fft_freq = an array of the freqs used in the FFT
    #   fft_mag = an array of the amplitude of the FFT at each freq in fft_freq
    #
    # Created: 03/28/14
    #   - Joshua Vaughan
    #   - joshua.vaughan@louisiana.edu
    #   - http://www.ucs.louisiana.edu/~jev9637
    #
    # Modified:
    #   * 03/17/16 - JEV - joshua.vaughan@louisiana.edu
    #       - updated for Python 3
    ######################################################################################
    '''
    from scipy.fftpack import fft

    # correct for any DC offset
    offset = np.mean(data)
    # Get the sampling time (assumes uniformly sampled `time`)
    sample_time = time[1] - time[0]
    # Get the length of the dataset
    n = len(data)
    # Calculate the FFT of the data, removing the offset and using a Hanning Window
    fft_mag = fft((data - offset) * np.hanning(len(data)))
    # Define the frequency range of the output (0 .. Nyquist)
    fft_freq = np.linspace(0.0, 1.0 / (2.0*sample_time), int(np.ceil(n/2)))
    # Only return the "useful" (one-sided) part of the fft, scaled to amplitude
    fft_mag = 2.0/n * np.abs(fft_mag[0:int(np.ceil(n/2))])
    # If plotflag is nonzero (True), plot the FFT before returning the magnitude and phase
    if plotflag:
        # Plot the relationshiop
        # Many of these setting could also be made default by the .matplotlibrc file
        fig = plt.figure(figsize=(6,4))
        ax = plt.gca()
        plt.subplots_adjust(bottom=0.17,left=0.17,top=0.96,right=0.96)
        plt.setp(ax.get_ymajorticklabels(),fontsize=18)
        plt.setp(ax.get_xmajorticklabels(),fontsize=18)
        ax.spines['right'].set_color('none')
        ax.spines['top'].set_color('none')
        ax.xaxis.set_ticks_position('bottom')
        ax.yaxis.set_ticks_position('left')
        ax.grid(True, linestyle=':', color='0.75')
        ax.set_axisbelow(True)
        plt.xlabel('Frequency (Hz)', fontsize=22, labelpad=8)
        plt.ylabel('FFT magnitude', fontsize=22, labelpad=10)
        plt.plot(fft_freq, fft_mag, linewidth=2, linestyle='-')
        # Adjust the page layout filling the page using the new tight_layout command
        plt.tight_layout(pad=0.5)
        # NOTE(review): plt.show() blocks until the window is closed.
        plt.show()
        plt.cla()
        plt.clf()
    return fft_freq, fft_mag
# Should return the zero crossings
zeros = get_zero_crossings(time, vib_smoothed)
# Get the peaks of the response
localMaxes, localMax_Times, localMins, localMin_Times = get_local_Extrema(time, vib_smoothed)
# Peak-to-peak amplitude / 2, averaged over all detected extrema.
Vib_amp = (np.average(localMaxes) - np.average(localMins))/2
print('Vibration Amplitude: {}'.format(Vib_amp))
# Write the header values
ampsfile = 'Data/Amplitudes/'+ shaper + '_' + deflection + 'deg' + '.csv'
# Determine the appropriate column for the amplitude
# (columns are laid out in 0.1 steps of normalized frequency error,
# starting at min_omega_error).
column = np.round((float(omega_error) - min_omega_error) / 0.1).astype(int)
# Do the reading
# NOTE(review): the amplitudes CSV must already exist and have at least
# `trial` rows and `column`+1 columns; otherwise this raises.
file1 = open(ampsfile, 'r')
reader = csv.reader(file1)
file_rows = []
for i,row in enumerate(reader):
    # new_row aliases row (no copy) -- fine, rows are rewritten wholesale.
    new_row = row
    if i == trial:
        new_row[column] = Vib_amp
    file_rows.append(new_row)
file1.close() # <---IMPORTANT
# Do the writing (rewrites the whole file with the one cell updated)
file2 = open(ampsfile, 'w')
writer = csv.writer(file2)
writer.writerows(file_rows)
file2.close()
################################################################################################
################################################################################################
# Get the FFT
# We can also use the FFT to get the natural frequency
freq, mag = CRAWLAB_fft(vib_smoothed,time,False)
# Let's plot the FFT manually so that we can scale the axes to our liking
# Plot the relationship
# Many of these setting could also be made default by the .matplotlibrc file
fig = plt.figure(figsize=(6,4))
ax = plt.gca()
plt.subplots_adjust(bottom=0.17,left=0.17,top=0.96,right=0.96)
plt.setp(ax.get_ymajorticklabels(),fontsize=18)
plt.setp(ax.get_xmajorticklabels(),fontsize=18)
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.grid(True,linestyle=':',color='0.75')
ax.set_axisbelow(True)
plt.xlabel('Frequency (Hz)',fontsize=22,labelpad=8)
plt.ylabel('FFT magnitude',fontsize=22,labelpad=10)
plt.plot(freq, mag, linewidth=2, linestyle='-', label = 'FFT')
# Plot and output the location of the highest peak - The fundamental frequency
freq_index = np.argmax(mag)
print('\nThe highest magnitude peak occurs at {:.4f} Hz.\n'.format(freq[freq_index]))
# Vertical marker line at the fundamental frequency.
plt.plot([freq[freq_index], freq[freq_index]], [0, 1.1 * np.max(mag)],
         linewidth = 2, linestyle = '-.', label = r'$\omega_n \approx$ {:.2f}Hz'.format(freq[freq_index]))
plt.xlim(0,5)
# Create the legend, then fix the fontsize
leg = plt.legend(loc='upper right', ncol = 1, fancybox=True)
ltext = leg.get_texts()
plt.setp(ltext, family='serif', fontsize=20)
# Adjust the page layout filling the page using the new tight_layout command
plt.tight_layout(pad=0.5)
# save the figure as a high-res pdf in the current folder
plt.savefig(local_path + file_name + '_FFT_magnitude.pdf')
plt.clf()
plt.cla()
|
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from unittest import main
from qiita_pet.test.tornado_test_base import TestHandlerBase
class VAMPSHandlerTests(TestHandlerBase):
    """Placeholder test case for the VAMPS handler; no tests written yet."""
    # TODO: Missing tests
    pass


if __name__ == "__main__":
    main()
|
# By Rakeen Rouf
import matplotlib.pyplot as plt
import math
import time
import matplotlib.animation as animation
import pandas as pd
import numpy as np
from scipy.spatial import distance
class FogDataPlotter:
    """Animated plotter for acoustic-emission "fog" data.

    On the first animation frame it establishes a baseline (mean vector and
    pseudo-inverse covariance) from 'fog_plot_data.txt'; on later frames it
    plots cumulative Mahalanobis distance, cumulative energy, and duration /
    amplitude histograms.
    """

    def __init__(self):
        # Animation frame number most recently processed (0 = baseline pass).
        self.fig_iter_num = 0
        # Baseline mean vector of the feature columns (set on first frame).
        self.v = 0
        # Pseudo-inverse of the feature covariance matrix (set on first frame).
        self.iv = 0

    def update_data(self, curr, ax1, ax2, ax3, ax4):
        """Re-read 'fog_plot_data.txt' and redraw all four axes.

        curr is the FuncAnimation frame number; ax1..ax4 are the subplot axes.
        """
        start = time.time()
        dataa = pd.read_csv('fog_plot_data.txt')
        time_data = dataa['SSSSSSSS.mmmuuun']
        data_shape = dataa.shape
        # Bug fix: this was np.empty(), so rows never assigned below -- the
        # whole array on the baseline pass, and row 0 on every later pass --
        # fed uninitialized memory into np.cumsum. Zeros make those rows
        # contribute nothing instead.
        msd = np.zeros(data_shape[0])
        if self.fig_iter_num == 0:
            # First frame: establish the baseline statistics only.
            msd_data = dataa.drop(['SSSSSSSS.mmmuuun'], axis=1).values
            # Baseline mean from the first 14 rows -- presumably the
            # quiescent period; confirm with the acquisition setup.
            self.v = np.mean(msd_data[0:14, :], axis=0)
            cov = np.cov(msd_data, rowvar=False)
            cov[np.isnan(cov)] = 0
            # pinv tolerates a singular covariance matrix.
            self.iv = np.linalg.pinv(cov)
        else:
            msd_data = dataa.drop(['SSSSSSSS.mmmuuun'], axis=1).values
            for x in range(1, data_shape[0]):
                msd[x] = distance.mahalanobis(msd_data[x, :], self.v, self.iv)
        cmsd = np.cumsum(msd)
        self.fig_iter_num = curr  # updates figure number
        for ax in (ax1, ax2, ax3, ax4):  # Lets Clear the plot for the animation
            ax.clear()
        try:  # Lets try to update our animation
            ax1.plot(time_data, cmsd, 'b', linewidth=2)
            plt.setp(ax1.xaxis.get_majorticklabels())
            ax1.set_ylabel('Cumulative Mahalanobis Distance')
            ax1.set_xlabel('Time')
            ax3.plot(time_data, dataa['ABS-ENERGY'].cumsum(), 'b', linewidth=2)
            plt.setp(ax3.xaxis.get_majorticklabels())
            # Fixed label typo ("Cumilative" -> "Cumulative").
            ax3.set_ylabel('Cumulative Energy')
            ax3.set_xlabel('Time')
            dur_data = dataa['DURATION']
            min_dur = int(math.floor(dur_data.min()))
            max_dur = int(math.floor(dur_data.max()))
            ax4.hist(dur_data, bins=range(min_dur, max_dur + 100, 100), facecolor='blue', alpha=0.5,
                     edgecolor='black', linewidth=1.2, density=True)
            ax4.set_xlabel('Duration')
            amp_data = dataa['AMP']
            min_amp = int(math.floor(amp_data.min()))
            max_amp = int(math.floor(amp_data.max()))
            ax2.hist(amp_data, bins=range(min_amp, max_amp + 1, 1), facecolor='blue', alpha=0.5,
                     edgecolor='black', linewidth=1.2, density=True)
            ax2.set_xlabel('Amplitude')
            print(time.time() - start)
        except Exception as e:
            # Best-effort redraw: report and keep the animation alive.
            print(repr(e))
if __name__ == '__main__':
    """
    Assumptions
    The first 7 lines of the txt files is not necessary in the plots
    A txt file will always be available
    Only Id=1 rows are used in calculations and plots
    Data is dumped at least after 5 seconds after inception
    """
    data_plotter = FogDataPlotter()
    # data_plotter.loaddata()
    # 2x2 grid: ax1 Mahalanobis, ax2 amplitude hist, ax3 energy, ax4 duration hist.
    fig, ((axx1, axx2), (axx3, axx4)) = plt.subplots(2, 2)
    # Keep a reference to the animation so it isn't garbage-collected.
    simulation = animation.FuncAnimation(fig, data_plotter.update_data, repeat=False, fargs=(axx1, axx2, axx3, axx4))
    plt.tight_layout()
    plt.show()
|
from aiohttp.web import View, Response
class BaseView(View):
    """Minimal aiohttp class-based view."""

    async def get(self, *args, **kwargs):
        # Plain 200 response with an "OK" body.
        return Response(body=b"OK")
|
# @Title: 有序矩阵中第K小的元素 (Kth Smallest Element in a Sorted Matrix)
# @Author: 2464512446@qq.com
# @Date: 2020-07-02 16:10:43
# @Runtime: 252 ms
# @Memory: 19.2 MB
class Solution:
    def kthSmallest(self, matrix: List[List[int]], k: int) -> int:
        """Return the k-th smallest value in an n x n matrix whose rows and
        columns are sorted ascending.

        Min-heap merge of the n sorted rows: pop the global minimum k-1
        times, each time pushing the popped element's right neighbor.
        O(k log n) time, O(n) heap.

        Bug fix: the original had unreachable leftover code from a
        binary-search variant after the return statement, referencing
        undefined names (left/right/row/col); it has been removed.
        """
        n = len(matrix)
        # Seed the heap with the first element of every row: (value, row, col).
        pq = [(matrix[i][0], i, 0) for i in range(n)]
        heapq.heapify(pq)
        for _ in range(k - 1):
            num, x, y = heapq.heappop(pq)
            if y != n - 1:
                heapq.heappush(pq, (matrix[x][y + 1], x, y + 1))
        return heapq.heappop(pq)[0]

    def find(self, matrix, mid, row, col):
        """Count entries <= mid by walking from the bottom-left corner.

        Helper for the binary-search-on-value variant of this problem; kept
        for callers even though kthSmallest no longer uses it. O(row + col).
        """
        count = 0
        i, j = row - 1, 0
        while i >= 0 and j < col:
            if matrix[i][j] <= mid:
                # Everything above (i, j) in this column is also <= mid.
                count += i + 1
                j += 1
            else:
                i -= 1
        return count
|
# Module Maker
# Reads from a json file of modules and generates both latex and html code for copy pasting.
# Designed December 2020 by Joe Manlove
# Version 1.0 completed 12/11/2020, latex is properly generated in file.
# Version 1.0.2 completed 12/18/2020, html is properly generated in file
import json
from module import Module
# JSON resources
# https://realpython.com/python-json/
# https://www.w3schools.com/js/js_json_syntax.asp
# https://stackoverflow.com/questions/28325994/how-are-multiple-objects-in-a-single-json-file-separated
# https://stackoverflow.com/questions/26068291/use-json-data-to-initialize-an-object-in-python
import os
# path to directory containing script
PATH = os.path.dirname(__file__) + '/'
# change the input file name variable to the json you'd like to generate from
INPUT_FILE_NAME = "differential_equations/differential_equations_sp20.json"
# INPUT_FILE_NAME = "linear_algebra/linear_algebra_sp20.json"
INPUT_FILE_PATH = os.path.join(PATH, INPUT_FILE_NAME)
# removes the json extension
OUTPUT_NAME = INPUT_FILE_NAME.replace(".json", '')
HTML_OUTPUT_PATH = PATH + OUTPUT_NAME + '.html'
TEX_OUTPUT_PATH = PATH + OUTPUT_NAME + '.tex'
# collect module information from json file
# (expects a JSON array whose entries are keyword dicts for Module(**datum))
with open(INPUT_FILE_PATH, "r") as read_file:
    data = json.load(read_file)
# Generate the LaTeX output; `i` is the 1-based module number passed to each
# Module's renderer.
with open(TEX_OUTPUT_PATH, 'w') as file:
    print("% Start of code generated using Learning Module Creator\n", file=file)
    i = 1
    for datum in data:
        # print(datum)
        module = Module(**datum)
        module.latex(file, i)
        i += 1
    print("% End of code generated using Learning Module Creator\n", file=file)
# single file generation (HTML mirror of the LaTeX pass above)
with open(HTML_OUTPUT_PATH, 'w') as file:
    # this is an HTML comment.
    print("<!-- Start of code generated using Learning Module Creator -->\n", file=file)
    i = 1
    for datum in data:
        # print(datum)
        module = Module(**datum)
        module.html(file, i)
        i += 1
    print("<!-- End of code generated using Learning Module Creator -->\n", file=file)
|
'''Instrcciones Raice'''
'''Creacion de exepciones porpias(Mas adelante)'''
import math
def evaluaedad(edad):
    """Classify an age into a (Spanish) message.

    Raises TypeError for negative ages; returns None for ages >= 100
    (original behavior preserved).
    """
    if edad < 0:
        raise TypeError("La edad no puede ser menor que 0 ")
    # Ordered upper bounds and their messages; first match wins.
    for limite, mensaje in ((20, "eres muy jover"),
                            (40, "eres jover"),
                            (100, "Cuidate")):
        if edad < limite:
            return mensaje
    return None
def calcularaiz(num1):
    """Return the square root of num1; raise ValueError if it is negative."""
    if num1 < 0:
        raise ValueError("El numero no puede ser negativo")
    # Guard clause above makes the original else branch unnecessary.
    return math.sqrt(num1)
# Demo: classify a fixed age, then take the square root of a user-entered number.
print(evaluaedad(6))
# Uncaught ValueError from calcularaiz (negative input) or from int() (bad
# text) will terminate the program here.
ap1=(int(input("Introcuce un número : ")))
print(calcularaiz(ap1))
print ("Programa terminado")
|
import Player
import random
class Computer(Player.Player):
    """Computer-controlled rock-paper-scissors player with a randomized
    fixed strategy: per-instance thresholds for rock/paper/scissors are drawn
    once at construction and reused for every hand.
    """

    def __init__(self, health, i):
        self.name = 'Computer{}'.format(i)
        super().__init__(health, self.name)
        # Three random weights, normalized into cumulative thresholds on
        # [0, 1): x < rockL -> rock, rockL < x <= paperL -> paper, else
        # scissors. NOTE(review): if all three randints were 0, t would be 0
        # and this would divide by zero (probability ~1e-9).
        rand1 = random.randint(0, 1000)
        rand2 = random.randint(0, 1000)
        rand3 = random.randint(0, 1000)
        t = rand1 + rand2 + rand3
        prob1 = rand1 / t
        prob2 = rand2 / t
        prob3 = rand3 / t
        self.rockL = prob1
        self.paperL = prob2 + prob1
        print("{} {}".format(self.rockL, self.paperL))

    def getHand(self):
        """Draw and print a 2-card hand according to this player's thresholds."""
        hand = []
        for i in range(2):
            x = random.random()
            if x <= self.rockL:
                hand.append('r')
            elif x <= self.paperL:
                hand.append('p')
            else:
                hand.append('s')
        print("\t{}: {}".format(self.name, hand))
        return hand

    def getLimits(self):
        """Return the cumulative [rock, paper] thresholds."""
        return [self.rockL, self.paperL]
        # Removed a stray dead `pass` statement that followed this return.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.