import os
import glob
import h5py
import keras
import numpy as np
from tkinter import *
from tkinter import ttk
from PIL import Image,ImageTk
from keras.models import load_model
from keras.preprocessing.image import load_img, img_to_array
from keras.applications.imagenet_utils import preprocess_input
from keras.models import Model
from Name import *
def main():
# load the model weights from the h5 file
model = load_model('./logs/easy1.h5')
img_rows = 300 # height
img_cols = 300 # width
for img_location in glob.glob('./data/test/*.png'): # restrict the test image path and image format
img = load_img(img_location)
img = img_to_array(img)
# image preprocessing
img = preprocess_input(np.array(img).reshape(-1,img_cols,img_rows,3))
# out = model.predict(img)
# img = Image.open(img_location)
# img = np.array(img)
img_name = os.path.splitext(os.path.basename(img_location))[0] # basename without extension (str.strip/rstrip remove character sets, not substrings)
# img = img.reshape(-1, img_rows, img_cols, 3)
# img = img.astype('float64')
# img /= 255
# intermediate_layer_model = Model(inputs=model.input,
# outputs=model.get_layer('dense_1').output)
# intermediate_output = intermediate_layer_model.predict(img)
# print(intermediate_output)
# print(type(intermediate_output))
# file=open('feature.txt','w')
# file.write(str(intermediate_output)); # open the file, creating it if it does not exist
pre_name = model.predict_classes(img) # returns the predicted class indices
print(pre_name)
pre = model.predict(img)
for i in pre_name:
for j in pre:
name = Name.get(i)
#print(name)
# if name != "Audrey_Landers":
acc = np.max(j) * 100
print("\nPicture name is [%s]\nPredicted as [%s] with [%.2f%%]\n" % (img_name, name, acc))
MainFrame = Tk()
MainFrame.title(img_name)
MainFrame.geometry('300x300')
img = Image.open(img_location)
img = ImageTk.PhotoImage(img)
label_img = ttk.Label(MainFrame, image = img)
label_img.pack()
MainFrame.mainloop()
if __name__ == '__main__':
main()
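# Editor's note: Sequential.predict_classes was removed in recent Keras /
# TensorFlow releases. A minimal replacement sketch, assuming the model's last
# layer outputs one probability per class:
def predict_class_ids(model, batch):
    # equivalent of the predict_classes call used in main()
    return np.argmax(model.predict(batch), axis=-1)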
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
import numpy as np
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
class Plotter(object):
def __init__(self, glw, hist_brush=(0,255,0,150), zoom_pen=(0,128,0,150), show_title=True):
super(Plotter, self).__init__()
self.glw = glw
self.show_title = show_title
self.init_plots(hist_brush, zoom_pen)
def init_plots(self, hist_brush, zoom_pen):
self.hists = []
self.hist_plots = []
for i in range(2):
plt = self.glw.addPlot(row=0, col=i)
if self.show_title:
plt.setTitle('pol%d hist' % i)
plt.getAxis('left').setStyle(tickTextHeight=5)
plt.setXRange(-128,127)
self.hists.append(plt.plot(stepMode=True, fillLevel=0, brush=hist_brush))
self.hist_plots.append(plt)
self.waves = []
self.wave_plots = []
for i in range(2):
plt = self.glw.addPlot(row=1, col=i)
if self.show_title:
plt.setTitle('pol%d waveform' % i)
plt.setYRange(-128,127)
plt.getAxis('left').setTicks([[(-128, '-128'), (-64, '-64'), (0, '0'), (64, '64'), (128, '128')]])
self.waves.append(plt.plot())
self.wave_plots.append(plt)
self.specs = []
self.spec_plots = []
for i in range(4):
plt = self.glw.addPlot(row=2+i//2, col=i%2)
if self.show_title:
titles = ['AA', 'BB', 'CR', 'CI']
plt.setTitle(titles[i])
plt.showGrid(y=True)
plt.setYRange(0, 32)
plt.getAxis('left').setTicks([[(0, '0'), (8, '2^8'), (16, '2^16'), (24, '2^24'), (32, '2^32')]])
self.specs.append(plt.plot())
self.spec_plots.append(plt)
self.zooms = []
self.spec_vbrs = []
for i in range(4):
plt = self.spec_plots[i]
vbr = pg.ViewBox()
plt.showAxis('right')
plt.scene().addItem(vbr)
axis = plt.getAxis('right')
axis.linkToView(vbr)
vbr.setXLink(plt)
axis.setPen(zoom_pen)
axis.setGrid(False)
if i < 2:
vbr.setYRange(0, 255)
axis.setTicks([[(0, '0'), (64, '64'), (128, '128'), (192, '192'), (256, '256')]])
else:
vbr.setYRange(-128,127)
axis.setTicks([[(-128, '-128'), (-64, '-64'), (0, '0'), (64, '64'), (128, '128')]])
curve = pg.PlotCurveItem(pen=zoom_pen)
vbr.addItem(curve)
plt.getViewBox().sigResized.connect(self.update_view)
self.zooms.append(curve)
self.spec_vbrs.append(vbr)
tickfont = QtGui.QFont()
tickfont.setPointSize(7)
tickwidth = QtGui.QFontMetrics(tickfont).width('256') + 1
for plt in self.hist_plots + self.wave_plots + self.spec_plots:
plt.getAxis('left').setTickFont(tickfont)
plt.getAxis('right').setTickFont(tickfont)
plt.getAxis('right').setWidth(tickwidth)
plt.getAxis('bottom').setTickFont(tickfont)
def update_view(self, vbl):
for i in range(len(self.spec_plots)):
if vbl is self.spec_plots[i].getViewBox():
vbr = self.spec_vbrs[i]
vbr.setGeometry(vbl.sceneBoundingRect())
vbr.linkedViewChanged(vbl, vbr.XAxis)
break
@staticmethod
def rms(x):
return np.sqrt(x.dot(x) / x.size)
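# e.g. rms(np.array([3.0, 4.0])) == sqrt((9 + 16) / 2) ≈ 3.54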
def update_plots(self, adc, spec, bitsel=(1, 1, 1, 1)):
for i in range(2):
self.waves[i].setData(adc[i][0:1024])
# if self.show_title:
# self.wave_plots[i].setTitle('MEAN %6.2f' % np.mean(adc[i]))
y, x = np.histogram(adc[i], adc[i].max() - adc[i].min() + 1)
self.hists[i].setData(x, y)
if self.show_title:
self.hist_plots[i].setTitle('RMS %.2f' % self.rms(adc[i]))
for i in range(2):
self.specs[i].setData(np.log2(np.fabs(spec[i]) + 1))
self.zooms[i].setData((spec[i] >> bitsel[i]*8) & 0xFF)
for i in range(2,4):
self.specs[i].setData(np.log2(np.fabs(spec[i]) + 1))
self.zooms[i].setData((spec[i] >> bitsel[i]*8))
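# Note on the code above: bitsel picks which byte of each 32-bit accumulator is
# drawn on the right-hand zoom axis; AA/BB are shown as unsigned bytes (0-255)
# while CR/CI keep their sign, matching the axis ranges configured in init_plots.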
if __name__ == '__main__':
import sys
import time
import struct
import logging
import os.path
import katcp_wrapper
def init_logger():
logname = os.path.splitext(os.path.basename(__file__))[0]
log = logging.getLogger(logname)
log.setLevel(logging.DEBUG)
# log.setLevel(logging.INFO)
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
handler.setFormatter(logging.Formatter('%(asctime)s %(name)s - %(levelname)s - %(message)s', '%Y-%m-%d %H:%M:%S'))
log.addHandler(handler)
# katcp_wrapper.log.setLevel(logging.DEBUG)
katcp_wrapper.log.setLevel(logging.INFO)
katcp_wrapper.log.addHandler(handler)
return log
def setup_gui(title, width = 640, height = 480):
global app, mw, plotter
mw = QtGui.QWidget()
mw.resize(width, height)
mw.setWindowTitle(title)
vbox = QtGui.QVBoxLayout(mw)
vbox.setContentsMargins(0, 0, 0, 0)
glw = pg.GraphicsLayoutWidget(mw)
vbox.addWidget(glw)
plotter = Plotter(glw, show_title=False)
mw.show()
def split_snapshot(snap):
length = snap['length']
samples = struct.unpack('%db' % length, snap['data'])
segments = np.array(samples).reshape(-1, 4)
p0 = segments[0::2, :].flatten()
p1 = segments[1::2, :].flatten()
return p0, p1
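# As written, split_snapshot deinterleaves the two polarisations: the snapshot
# is read as signed bytes in 4-sample segments, even segments going to pol0 and
# odd segments to pol1.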
def plot_anim(unit):
global plotter, fpga
prefix = 'u{:d}_'.format(unit)
adc_name = 'zdok{:d}_scope'.format(unit)
snap = fpga.snapshot_get(adc_name, man_trig=True, man_valid=True)
adc = split_snapshot(snap)
spec = []
stokes = ['AA', 'BB', 'CR', 'CI']
for i in range(4):
scope_name = prefix + 'x4_vacc_scope_' + stokes[i]
snap = fpga.snapshot_get(scope_name, man_valid=True)
spec.append(np.array(struct.unpack('>%di' % (snap['length']/4), snap['data'])))
bitsel = fpga.read_uint('u{:d}_bit_select'.format(unit))
plotter.update_plots(adc, spec, (bitsel & 3, bitsel >> 2 & 3, bitsel >> 4 & 3, bitsel >> 6 & 3))
if len(sys.argv) < 2 or len(sys.argv) > 3:
print(("Usage: {0:s} roach_board [unit]\n" +
"e.g. {0:s} r1745 0").format(os.path.basename(sys.argv[0])))
exit()
log = init_logger()
roach = sys.argv[1]
katcp_port = 7147
unit = int(sys.argv[2]) if len(sys.argv) > 2 else 0
if not unit in (0, 1):
log.critical('Invalid unit number %d', unit)
exit()
try:
log.info('Connecting to server %s on port %i ... ' % (roach, katcp_port))
fpga = katcp_wrapper.FpgaClient(roach, katcp_port, timeout=1)
time.sleep(0.1)
if not fpga.is_connected():
log.error('ERROR connecting to server %s on port %i.\n' % (roach, katcp_port))
exit()
app = QtGui.QApplication([])
setup_gui(roach + ' - {:d}'.format(unit))
plot_anim(unit)
timer = QtCore.QTimer()
timer.timeout.connect(lambda: plot_anim(unit))
timer.start(1000)
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
finally:
if fpga:
fpga.stop()
|
"""Docker Sproc
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import os
import socket
import sys
import time
import click
import docker
import six
from treadmill import cli
from treadmill import exc
from treadmill import utils
from treadmill import supervisor
from treadmill.appcfg import abort as app_abort
_LOGGER = logging.getLogger(__name__)
def _read_environ(envdirs):
"""Read a list of environ directories and return a full environ ``dict``.
:returns:
``dict`` - Environ dictionary.
"""
environ = {}
for envdir in envdirs:
environ.update(supervisor.read_environ_dir(envdir))
return environ
def _get_image_user(image_attrs):
"""User is in Config data
"""
config = image_attrs.get('Config', {})
return config.get('User', None)
def _create_container(client, name, image_name, cmd, **args):
"""Create docker container from given app.
"""
# if success, pull returns an image object
image = client.images.pull(image_name)
container_args = {
'name': name,
'image': image_name,
'command': list(cmd),
'detach': True,
'stdin_open': True,
'tty': True,
'network_mode': 'host',
'pid_mode': 'host',
'ipc_mode': 'host',
# XXX: uts mode not supported by python lib yet
# 'uts_mode': 'host',
}
# assign user argument
user = _get_image_user(image.attrs)
if user is None or user == '':
uid = os.getuid()
gid = os.getgid()
container_args['user'] = '{}:{}'.format(uid, gid)
# add additional container args
for key, value in six.iteritems(args):
if value is not None:
container_args[key] = value
try:
# The container might exist already
# TODO: start existing container with different ports
container = client.containers.get(name)
container.remove(force=True)
except docker.errors.NotFound:
pass
_LOGGER.info('Run docker: %r', container_args)
return client.containers.create(**container_args)
def _transform_volumes(volumes):
"""Transform the volume mapping from a list to the dict format recognized by the docker lib.
"""
dict_volume = {}
for volume in volumes:
# Example format:
# /var/tmp:/dest_var_tmp:rw => {
#     '/var/tmp': {
#         'bind': '/dest_var_tmp',
#         'mode': 'rw'
# }
# }
(target, source, mode) = volume.split(':', 2)
dict_volume[target] = {'bind': source, 'mode': mode}
return dict_volume
class DockerSprocClient(object):
"""Docker Treadmill Sproc client
"""
__slots__ = (
'client',
'param',
'tm_env',
)
def __init__(self, param=None):
self.client = None
if param is None:
self.param = {}
else:
self.param = param
# wait for dockerd ready
time.sleep(1)
def _get_client(self):
"""Gets the docker client.
"""
if self.client is not None:
return self.client
self.client = docker.from_env(**self.param)
return self.client
def run(self, name, image, cmd, **args):
"""Run the container: pull the image, create and start it, then stream its logs until it exits.
"""
client = self._get_client()
try:
if 'volumes' in args:
args['volumes'] = _transform_volumes(args['volumes'])
if 'envdirs' in args:
args['environment'] = _read_environ(args.pop('envdirs'))
container = _create_container(
client, name, image, cmd, **args
)
except docker.errors.ImageNotFound:
raise exc.ContainerSetupError(
'Image {0} was not found'.format(image),
app_abort.AbortedReason.IMAGE
)
container.start()
container.reload()
logs_gen = container.logs(
stdout=True,
stderr=True,
stream=True,
follow=True
)
_LOGGER.info('Container %s is running', name)
while container.status == 'running':
try:
for log_lines in logs_gen:
sys.stderr.write(log_lines)
except socket.error:
pass
container.reload()
rc = container.wait()
if os.WIFSIGNALED(rc):
# Process died with a signal in docker
sig = os.WTERMSIG(rc)
os.kill(os.getpid(), sig)
else:
utils.sys_exit(os.WEXITSTATUS(rc))
def init():
"""Top level command handler."""
@click.command()
@click.option('--name', required=True, help='name of container')
@click.option('--image', required=True, help='container image')
@click.argument('cmd', nargs=-1)
@click.option('--user', required=False,
help='userid in the form UID:GID')
@click.option('--envdirs', type=cli.LIST, required=False, default='',
help='List of environ directories to pass into the container.')
@click.option('--read-only', is_flag=True, default=False,
help='Mount the docker image read-only')
@click.option('--volume', multiple=True, required=False,
help='Specify each volume as TARGET:SOURCE:MODE')
def configure(name, image, cmd, user, envdirs, read_only, volume):
"""Configure local manifest and schedule app to run."""
service_client = DockerSprocClient()
service_client.run(
# mandatory parameters
name, image, cmd,
# optional parameters
user=user,
envdirs=envdirs,
read_only=read_only,
volumes=volume,
)
return configure
|
import logging
import sys
import inject
import datetime
sys.path.insert(0,'../../../python')
from model.config import Config
logging.getLogger().setLevel(logging.DEBUG)
from autobahn.asyncio.wamp import ApplicationSession
from asyncio import coroutine
'''
python3 persist.py dni name lastname city country address genre birthdate residence_city
python3 persist.py 31111120 "Pepe Pipo" "Pompin Pampon" "La Plata" "Argentina" "31 Nro 94" "Masculino" "25/09/1984" "La Plata"
'''
def config_injector(binder):
binder.bind(Config,Config('server-config.cfg'))
inject.configure(config_injector)
config = inject.instance(Config)
class WampMain(ApplicationSession):
def __init__(self,config=None):
logging.debug('instanciando WampMain')
ApplicationSession.__init__(self, config)
@coroutine
def onJoin(self, details):
logging.info("********** PERSISTIR USUARIO **********")
if len(sys.argv) < 10:
sys.exit("Error de parámetros")
birthdate = datetime.datetime.strptime(sys.argv[8], "%d/%m/%Y").date()
user = {
'dni':sys.argv[1],
'name':sys.argv[2],
'lastname':sys.argv[3],
'city':sys.argv[4],
'country':sys.argv[5],
'address':sys.argv[6],
'genre':sys.argv[7],
'birthdate':birthdate,
'residence_city':sys.argv[9],
'version':0
}
userId = yield from self.call('users.persistUser', user)
logging.info(userId)
if __name__ == '__main__':
from autobahn.asyncio.wamp import ApplicationRunner
from autobahn.wamp.serializer import JsonSerializer
url = config.configs['server_url']
realm = config.configs['server_realm']
debug = config.configs['server_debug']
json = JsonSerializer()
runner = ApplicationRunner(url=url,realm=realm,debug=debug, debug_wamp=debug, debug_app=debug, serializers=[json])
runner.run(WampMain)
|
# Generated by Django 3.2.4 on 2021-06-30 22:54
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='About',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_add', models.DateTimeField(auto_now_add=True)),
('date_update', models.DateTimeField(auto_now=True)),
('status', models.BooleanField(default=True)),
('titre', models.CharField(max_length=255)),
('description', models.TextField()),
('image', models.FileField(upload_to='')),
],
options={
'verbose_name': 'About',
'verbose_name_plural': 'Abouts',
},
),
migrations.CreateModel(
name='Banner',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_add', models.DateTimeField(auto_now_add=True)),
('date_update', models.DateTimeField(auto_now=True)),
('status', models.BooleanField(default=True)),
('titre', models.CharField(max_length=255)),
('description', models.TextField()),
('image', models.FileField(upload_to='')),
],
options={
'verbose_name': 'Banner',
'verbose_name_plural': 'Banners',
},
),
migrations.CreateModel(
name='Configuration',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_add', models.DateTimeField(auto_now_add=True)),
('date_update', models.DateTimeField(auto_now=True)),
('status', models.BooleanField(default=True)),
('entete_protect', models.CharField(max_length=255)),
('entete_news', models.CharField(max_length=255)),
('entete_newsletter', models.CharField(max_length=255)),
('copyrights', models.CharField(max_length=255)),
],
options={
'verbose_name': 'Configuration',
'verbose_name_plural': 'Configurations',
},
),
migrations.CreateModel(
name='Doctor',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_add', models.DateTimeField(auto_now_add=True)),
('date_update', models.DateTimeField(auto_now=True)),
('status', models.BooleanField(default=True)),
('titre', models.CharField(max_length=255)),
('description', models.TextField()),
('image', models.FileField(upload_to='')),
('lien', models.URLField()),
],
options={
'verbose_name': 'Doctor',
'verbose_name_plural': 'Doctors',
},
),
migrations.CreateModel(
name='News',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_add', models.DateTimeField(auto_now_add=True)),
('date_update', models.DateTimeField(auto_now=True)),
('status', models.BooleanField(default=True)),
('titre', models.CharField(max_length=255)),
('description', models.TextField()),
('image', models.FileField(upload_to='')),
],
options={
'verbose_name': 'News',
'verbose_name_plural': 'Newss',
},
),
migrations.CreateModel(
name='Newsletter',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_add', models.DateTimeField(auto_now_add=True)),
('date_update', models.DateTimeField(auto_now=True)),
('status', models.BooleanField(default=True)),
('email', models.EmailField(max_length=254)),
],
options={
'verbose_name': 'Newsletter',
'verbose_name_plural': 'Newsletters',
},
),
migrations.CreateModel(
name='Protect',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_add', models.DateTimeField(auto_now_add=True)),
('date_update', models.DateTimeField(auto_now=True)),
('status', models.BooleanField(default=True)),
('image', models.FileField(upload_to='')),
],
options={
'verbose_name': 'Protect',
'verbose_name_plural': 'Protects',
},
),
migrations.CreateModel(
name='Website',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_add', models.DateTimeField(auto_now_add=True)),
('date_update', models.DateTimeField(auto_now=True)),
('status', models.BooleanField(default=True)),
('description_protect', models.TextField()),
('description_news', models.TextField()),
('phone', models.CharField(max_length=255)),
('email', models.CharField(max_length=255)),
('map', models.CharField(max_length=255)),
],
options={
'verbose_name': 'Website',
'verbose_name_plural': 'Websites',
},
),
migrations.CreateModel(
name='Optionprotect',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_add', models.DateTimeField(auto_now_add=True)),
('date_update', models.DateTimeField(auto_now=True)),
('status', models.BooleanField(default=True)),
('nom', models.CharField(max_length=255)),
('protect', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ProtectOption', to='website.protect')),
],
options={
'verbose_name': 'Optionprotect',
'verbose_name_plural': 'Optionprotects',
},
),
]
|
import sys, sqlite3, os.path
from zipfile import ZipFile
if __name__ == '__main__':
args = sys.argv
def result(query):
with open('files-part1.txt', 'a') as output:
res = cur.execute(query)
for row in res:
output.write(str(row) + '\n')
#print (res.description)
directory = args[1]
with ZipFile(directory + '.zip', 'r') as zipObj:
stufflist = zipObj.namelist()
directories = []
onlyfiles = []
extensions = []
for item in stufflist:
temp = item.split('/')
fname = temp[-1]
check = list(fname)
ftemp = fname.split('.')
if fname.startswith('.') or fname.startswith('_'):
continue
elif len(ftemp) >= 2:
ext = ftemp[-1]
else:
ext = 'None'
#print(full, item)
temp2 = temp[0:len(temp)-1]
#path = currentdir + '/' + '/'.join(temp2)
path = '\\'.join(temp2)
extensions.append(ext)
onlyfiles.append(fname)
directories.append(path)
#print(path)
#print(onlyfiles[-7], 'and ', directories[-7], 'and ', extensions[-7])
if os.path.exists('filesdb'):
os.remove('filesdb')
conn = sqlite3.connect('filesdb')
cur = conn.cursor()
cur.execute("create table files (ext text, path text, fname text)")
for i in range(len(onlyfiles)):
statement = "insert into files values(?, ?, ?)"
cur.execute(statement, (extensions[i], directories[i], onlyfiles[i]))
results = result('select * from files')
conn.commit()
conn.close()
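# Editor's sketch (not part of the original script): summarise the generated
# database by file extension; the table name and schema match those created above.
def summarize_extensions(db_path='filesdb'):
    conn2 = sqlite3.connect(db_path)
    try:
        return conn2.execute(
            "select ext, count(*) from files group by ext order by count(*) desc"
        ).fetchall()
    finally:
        conn2.close()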
|
# import nbformat
import os
import os.path
from jupyter_core.paths import jupyter_path
from lxml import html
from nbconvert.exporters.html import HTMLExporter
from nbconvert.filters.markdown_mistune import markdown2html_mistune as markdown2html
from traitlets.config import Config
class NBSGHTMLExporter(HTMLExporter):
export_from_notebook = "NBSG HTML"
template_name = "nbteach_html"
extra_template_basedirs = [os.path.join(os.path.dirname(os.path.dirname(__file__)), "templates")]
def from_notebook_node(self, nb, resources=None, **kw):
for cell in nb.cells:
self.preprocess_multiple_choice(cell)
return super(NBSGHTMLExporter, self).from_notebook_node(nb, resources)
def preprocess_multiple_choice(self, cell):
if not self.is_multiple_choice(cell):
return
response = cell.metadata.nbsimplegrader.response
if response is None:
return
source_html = markdown2html(cell.source.strip())
wrapped = html.fragment_fromstring(source_html, create_parent='div')
element = wrapped.xpath(f'.//ul/li[{int(response)}]')[0]
element.set("class", " nbteach__mc--chosen")
wrapped_string = html.tostring(wrapped).decode()
cell.source = wrapped_string[5:-7]
def is_multiple_choice(self, cell):
return "nbsimplegrader" in cell.metadata and cell.metadata.nbsimplegrader.type == "multiple-choice"
|
# -*- coding: utf-8 -*-
class Solution:
def partitionLabels(self, S):
last_occurrences = {c: i for i, c in enumerate(S)}
result = []
current_first, current_last = 0, 0
for i, c in enumerate(S):
current_last = max(current_last, last_occurrences[c])
if i == current_last:
result.append(i - current_first + 1)
current_first = i + 1
return result
if __name__ == "__main__":
solution = Solution()
assert [9, 7, 8] == solution.partitionLabels("ababcbacadefegdehijhklij")
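# Worked example (editor's note): the greedy scan closes a partition whenever
# the current index reaches the furthest last-occurrence seen so far, so the
# test string splits into "ababcbaca", "defegde" and "hijhklij".
assert [9, 7, 8] == [len(p) for p in ("ababcbaca", "defegde", "hijhklij")]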
|
#!/bin/python3
import sys
if len(sys.argv) != 3:
print("Usage: ", sys.argv[0], "dump-file", "dest-dir")
exit(0)
f = open(sys.argv[1], "rb")
heap = f.read()
f.close()
keys = 0
key_path = sys.argv[2] + "/key_"
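# Heuristic used below: a 16-byte-aligned offset is kept as a candidate key only
# if none of its first 32 bytes equals its immediate neighbour, i.e. the region
# looks high-entropy rather than zero padding or repeated filler.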
for off in range((len(heap)//16) - 1):
display = True
for i in range(31):
display &= (heap[(off*0x10)+i] != heap[(off*0x10)+i+1])
if display:
keys += 1
s = ''
s += "{0:08x}".format(off*0x10) + " "
for i in range(32):
if i == 8:
s += ' '
elif i == 16:
s += '\n'
s += "{0:08x}".format(off*0x10+16) + " "
elif i == 24:
s += ' '
s += "{0:02x}".format(heap[(off*0x10)+i]) + ' '
s = s[:-1]
print(s, end='\n\n')
output = open(key_path + "{0:02d}".format(keys), "wb")
output.write(heap[(off*0x10):(off*0x10)+0x20])
output.close()
print("Found", keys, "keys!")
|
__winc_id__ = 'ae539110d03e49ea8738fd413ac44ba8'
__human_name__ = 'files'
def main():
#----------------------------------------------------------------------
#Question 1
def clean_cache():
import os.path, os, shutil
current_directory = str(os.getcwd())
cache_folder_in_directory = current_directory + '\\cache'
cache_folder_exists = os.path.exists(cache_folder_in_directory)
if cache_folder_exists == True:
shutil.rmtree(cache_folder_in_directory)
os.mkdir(cache_folder_in_directory)
#Question 2
def cache_zip(path_to_zip: str, path_to_cache_dir: str):
from zipfile import ZipFile
with ZipFile(path_to_zip, 'r') as zip_to_unpack:
zip_to_unpack.extractall(path_to_cache_dir)
#Question 3
def cached_files():
import os, os.path
cache_folder = os.getcwd() + '\\cache'
list_of_files_in_cache = os.scandir(cache_folder)
list_of_absolute_paths = []
for file in list_of_files_in_cache:
absolute_path = os.path.abspath(file)
list_of_absolute_paths.append(absolute_path)
return list_of_absolute_paths
#Question 4
def find_password(list_of_file_paths: list):
for text_file in list_of_file_paths:
with open(text_file, 'r') as f:
lines = f.readlines()
for line in lines:
if 'password' in line:
line_with_password = line[line.find(' ')+1:-1]
break
return line_with_password
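# Sketch of how the helpers above are meant to chain together (they are only
# defined here, nothing calls them yet); 'data.zip' is a hypothetical archive:
#     clean_cache()
#     cache_zip('data.zip', os.path.join(os.getcwd(), 'cache'))
#     print(find_password(cached_files()))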
#-----------------------------------------------------------------------
if __name__ == '__main__':
main()
|
# -*-coding=utf-8-*-
__author__ = 'rocchen'
from lxml import html
from lxml import etree
import urllib2, requests
def lxml_test():
url = "http://www.caixunzz.com"
req = urllib2.Request(url=url)
resp = urllib2.urlopen(req)
#print(resp.read())
'''
parse_body=html.fromstring(resp.read())
href=parse_body.xpath('//a[@class="label"]/@href')
print(href)
#not working from above
'''
tree = etree.HTML(resp.read())
href = tree.xpath('//a[@class="label"]/@href')
#print(href.tag)
for i in href:
#print(html.tostring(i))
#print(type(i))
print(i)
print(type(href))
#not working yet
session = requests.session()
import cookielib
session.cookies = cookielib.LWPCookieJar(filename="cookies")
agent = 'Mozilla/5.0 (Windows NT 5.1; rv:33.0) Gecko/20100101 Firefox/33.0'
headers = {'Host': 'www.zhihu.com',
'Referer': 'https://www.zhihu.com',
'User-Agent': agent}
def lxml_text():
url = 'https://www.zhihu.com/question/20401952/answer/21764432'
s = requests.get(url, headers=headers).text
#print(s)
tree = etree.HTML(s)
content = tree.xpath('//div[@class="zm-editable-content clearfix"]')
print(content)
for i in content:
print(i.xpath('string(.)'))
def lxml_case():
r=requests.get('http://30daydo.com')
t=etree.HTML(r.text)
s=t.xpath('//div[@class="aw-item article"]')
print(s)
#lxml_text()
#lxml_case()
def lxml_case2():
# example taken from a website
str1='''
<bookstore>
<book>
<title>Harry Potter</title>
<author>J K. Rowling</author>
<year>2005</year>
<price>29.99</price>
</book>
</bookstore>
'''
tree=etree.HTML(str1)
t1=tree.xpath('bookstore')
print(t1)
def lxml_case3():
text = '''
<div>
<ul>
<li class="item-0"><a href="link1.html">first item</a></li>
<li class="item-1"><a href="link2.html">second item</a></li>
<li class="item-inactive"><a href="link3.html">third item><span>Hello world</span></a></li>
<li class="item-1"><a href="link4.html">fourth item</a></li>
<li class="item-0"><a href="link5.html">fifth item</a>
<li class="de-item-0"><a href="link5.html">fifth item</a>
</ul>
</div>
'''
tree=etree.HTML(text)
html_s=etree.tostring(tree)
print(html_s)
#print(tree.xpath('//li//span/text()')[0])
'''
reg_case=tree.xpath('//*[starts-with(@class,"item")]')
for i in reg_case:
print(i.xpath('.//a/@href'))
'''
result=tree.xpath(r'//*[re:match(@class, "item-0")]', namespaces={'re': 'http://exslt.org/regular-expressions'})
print(result)
for i in result[0]:
print(i.xpath('.//a/@href'))
lxml_case3()
|
# coding=utf-8
import os
import argparse
import numpy as np
from PIL import Image, ImageDraw, ImageOps
import cv2
import json
import pickle
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
from torchvision.utils import save_image
class VtonDataset(data.Dataset):
"""
Dataset class for virtual try-on (VTON).
"""
def __init__(self, args, root_dir, datamode = "train", pair_list_path = "train_pairs.csv" ):
super(VtonDataset, self).__init__()
self.args = args
# transform for RGB images: [-1,1]
self.transform = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] )
#self.transform = transforms.Compose( [ transforms.ToTensor(), transforms.Lambda(lambda x: (x*2)-1.0) ] )
# transform for mask images: [0,1]
self.transform_mask = transforms.Compose( [ transforms.ToTensor(), ] )
self.transform_mask_wNorm = transforms.Compose( [ transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,)) ] )
self.image_height = self.args.image_height
self.image_width = self.args.image_width
self.dataset_dir = os.path.join( root_dir, datamode )
self.cloth_names = []
self.poseA_names = []
self.poseB_names = []
with open( pair_list_path, "r" ) as f:
for line in f.readlines():
names = line.strip().split(",")
poseA_name = names[0]
poseB_name = names[1]
cloth_name = names[2]
self.poseA_names.append(poseA_name)
self.poseB_names.append(poseB_name)
self.cloth_names.append(cloth_name)
if( self.args.debug ):
print( "self.dataset_dir :", self.dataset_dir)
print( "self.poseA_names[0:5] :", self.poseA_names[0:5])
print( "self.poseB_names[0:5] :", self.poseB_names[0:5])
print( "self.cloth_names[0:5] :", self.cloth_names[0:5])
print( "len(self.poseA_names) :", len(self.poseA_names))
print( "len(self.poseB_names) :", len(self.poseB_names))
print( "len(self.cloth_names) :", len(self.cloth_names))
def __len__(self):
return len(self.cloth_names)
def get_cloth_part( self, parsing_img, pose_tsr ):
"""
Get the ground-truth clothing tensor (the clothing region of the person image) from the human parsing image.
"""
parsing_np = np.array(parsing_img)
# ground-truth clothing
cloth_pos = [5,6,7]
cloth_mask = np.zeros(parsing_np.shape).astype(np.float32)
for pos in cloth_pos:
cloth_mask += (parsing_np == pos).astype(np.float32)
cloth_mask_tsr = torch.from_numpy(cloth_mask)
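# Editor's note: pose_tsr is normalized to [-1, 1], so adding (1 - mask) below
# paints every non-clothing pixel white (+1) while keeping clothing pixels;
# labels 5/6/7 above are presumably the upper-clothes/dress/coat classes of a
# LIP-style parsing label map.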
cloth_tsr = pose_tsr * cloth_mask_tsr + (1 - cloth_mask_tsr)
cloth_mask_tsr = cloth_mask_tsr.view(1, self.image_height, self.image_width)
return cloth_tsr, cloth_mask_tsr
def get_body_shape( self, parsing_img, downsampling_size = 16 ):
"""
Get a BodyShape tensor, blurred by downsampling, from the human parsing image.
"""
parsing_np = np.array(parsing_img)
bodyshape_mask_np = (parsing_np > 0).astype(np.float32)
bodyshape_mask_img = Image.fromarray((bodyshape_mask_np*255).astype(np.uint8))
bodyshape_mask_img = bodyshape_mask_img.resize((self.image_width // downsampling_size, self.image_height // downsampling_size), Image.BILINEAR)
bodyshape_mask_img = bodyshape_mask_img.resize((self.image_width, self.image_height), Image.BILINEAR)
bodyshape_mask_tsr = self.transform_mask_wNorm(bodyshape_mask_img)
return bodyshape_mask_tsr
def get_agnotic( self, parsing_img, pose_tsr, agnostic_type ):
"""
Get an agnostic-shape tensor from the human parsing image.
"""
parsing_np = np.array(parsing_img)
if( agnostic_type == "agnostic1" ):
# face only
gmm_agnostic_pos = [1,2,4,13]
elif( agnostic_type == "agnostic2" ):
# face + no neck + arms (hands only for long sleeves) + lower body
gmm_agnostic_pos = [1,2,3,4,8,9,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27]
elif( agnostic_type == "agnostic3" ):
# face + no neck + lower body
gmm_agnostic_pos = [1,2,4,8,9,12,13,16,17,18,19]
else:
gmm_agnostic_pos = [1,2,4,13]
gmm_agnostic_mask = np.zeros(parsing_np.shape).astype(np.float32)
for pos in gmm_agnostic_pos:
gmm_agnostic_mask += (parsing_np == pos).astype(np.float32)
gmm_agnostic_mask_tsr = torch.from_numpy(gmm_agnostic_mask)
gmm_agnostic_tsr = pose_tsr * gmm_agnostic_mask_tsr - (1 - gmm_agnostic_mask_tsr)
return gmm_agnostic_tsr
def get_keypoints( self, pose_keypoints_dir, pose_name ):
"""
Get the pose keypoints tensors.
"""
if( os.path.exists(os.path.join(self.dataset_dir, pose_keypoints_dir, pose_name.replace(".jpg",".png").replace(".png","_keypoints.json")) ) ):
format = "json"
elif( os.path.exists(os.path.join(self.dataset_dir, pose_keypoints_dir, pose_name.replace(".jpg",".png").replace(".png",".pkl"))) ):
format = "pkl"
else:
format = "json"
if( format == "pkl" ):
# read the pose keypoint coordinates from the pkl file
keypoints_dat = - np.ones((18, 2), dtype=int) # x,y coordinates of the keypoints
with open(os.path.join(self.dataset_dir, pose_keypoints_dir, pose_name.replace(".jpg",".png").replace(".png",".pkl") ), 'rb') as f:
pose_label = pickle.load(f)
for i in range(18):
if pose_label['subset'][0, i] != -1:
keypoints_dat[i, :] = pose_label['candidate'][int(pose_label['subset'][0, i]), :2]
keypoints_dat = np.asarray(keypoints_dat)
else:
with open(os.path.join(self.dataset_dir, pose_keypoints_dir, pose_name.replace(".jpg",".png").replace(".png","_keypoints.json") ), 'rb') as f:
pose_label = json.load(f)
keypoints_dat = pose_label['people'][0]['pose_keypoints_2d']
#keypoints_dat = pose_label['people'][0]['pose_keypoints']
keypoints_dat = np.array(keypoints_dat)
if( self.args.debug ):
print( "keypoints_dat.shape : ", keypoints_dat.shape ) # shape = (54,)
# handle malformed keypoints output from OpenPose
if( keypoints_dat.shape[0] >= 3 ):
keypoints_dat = keypoints_dat.reshape((-1,3))
if( self.args.debug ):
print( "keypoints_dat.shape : ", keypoints_dat.shape ) # shape = (18, 3)
else:
keypoints_dat = np.zeros( (18, 3) )
print( "[Warning] pose_keypoints_2d format from OpenPose is not correct; using a zero tensor (18,3) to avoid an error." )
# build the keypoints tensor that is concatenated into the network input, plus a tensor for visualization
point_num = 18
if( self.image_height == 256 ):
r = 5
elif( self.image_height == 512 ):
r = 10
elif( self.image_height == 1024 ):
r = 20
else:
r = 5
pose_keypoints_tsr = torch.zeros(point_num, self.image_height, self.image_width) # keypoints tensor concatenated into the network input
pose_keypoints_img = Image.new('L', (self.image_width, self.image_height)) # keypoints rendered as an image
pose_keypoints_img_draw = ImageDraw.Draw(pose_keypoints_img)
for i in range(point_num):
one_map = Image.new('L', (self.image_width, self.image_height))
draw = ImageDraw.Draw(one_map)
point_x = keypoints_dat[i, 0]
point_y = keypoints_dat[i, 1]
if( format == "pkl" ):
point_x = point_x * self.image_width / 762
point_y = point_y * self.image_height / 1000
if point_x > 1 and point_y > 1:
draw.rectangle((point_x - r, point_y - r, point_x + r, point_y + r), 'white', 'white')
pose_keypoints_img_draw.rectangle((point_x - r, point_y - r, point_x + r, point_y + r), 'white', 'white')
one_map = self.transform_mask_wNorm(one_map)
pose_keypoints_tsr[i] = one_map[0]
pose_keypoints_img_tsr = self.transform_mask_wNorm(pose_keypoints_img) # tensor of the keypoints image
return pose_keypoints_tsr, pose_keypoints_img_tsr
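# Note: each of the 18 keypoints is rendered as a (2r x 2r) white square in its
# own channel of pose_keypoints_tsr, while pose_keypoints_img_tsr collapses all
# keypoints into a single-channel image for visualization.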
def get_tom_wuton_agnotic( self, parsing_img, pose_img ):
"""
Get a WUTON-style agnostic tensor.
"""
parsing_np = np.array(parsing_img)
tom_agnostic_wErase_pos = [5, 6, 7, 10]
tom_agnostic_woErase_pos = [1, 2, 3, 4, 8, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27]
pose_np = np.array(pose_img).astype(np.float32) / 255 # 0.0f ~ 1.0f
# background
pose_agnotic_bg_np = (parsing_np == 0).astype(np.float32)
pose_agnotic_bg_np_RGB = np.zeros( (pose_agnotic_bg_np.shape[0], pose_agnotic_bg_np.shape[1], 3) ).astype(np.float32)
pose_agnotic_bg_np_RGB[:,:,0], pose_agnotic_bg_np_RGB[:,:,1], pose_agnotic_bg_np_RGB[:,:,2] = pose_agnotic_bg_np, pose_agnotic_bg_np, pose_agnotic_bg_np
pose_agnotic_np = (pose_np * pose_agnotic_bg_np_RGB)
#save_image( torch.from_numpy(pose_agnotic_bg_np), "_debug/pose_agnotic_bg_np.png" )
# regions outside the gray (erased) area
pose_agnotic_woErase_np = np.zeros(parsing_np.shape).astype(np.float32)
for pos in tom_agnostic_woErase_pos:
pose_agnotic_woErase_np += (parsing_np == pos).astype(np.float32)
pose_agnotic_woErase_np_RGB = np.zeros( (pose_agnotic_woErase_np.shape[0], pose_agnotic_woErase_np.shape[1], 3) ).astype(np.float32)
pose_agnotic_woErase_np_RGB[:,:,0], pose_agnotic_woErase_np_RGB[:,:,1], pose_agnotic_woErase_np_RGB[:,:,2] = pose_agnotic_woErase_np, pose_agnotic_woErase_np, pose_agnotic_woErase_np
pose_agnotic_np += (pose_np * pose_agnotic_woErase_np_RGB)
#save_image( torch.from_numpy(pose_agnotic_woErase_np), "_debug/pose_agnotic_woErase_np.png" )
# gray (erased) regions
pose_agnotic_wErase_np = np.zeros(parsing_np.shape).astype(np.float32)
for pos in tom_agnostic_wErase_pos:
pose_agnotic_wErase_np += (parsing_np == pos).astype(np.float32)
kernel = np.ones((self.args.wuton_agnotic_kernel_size, self.args.wuton_agnotic_kernel_size), np.uint8)
pose_agnotic_wErase_np = cv2.dilate(pose_agnotic_wErase_np, kernel)
pose_agnotic_wErase_np_RGB = np.zeros( (pose_agnotic_wErase_np.shape[0],pose_agnotic_wErase_np.shape[1],3) ).astype(np.float32)
pose_agnotic_wErase_np_RGB[:,:,0], pose_agnotic_wErase_np_RGB[:,:,1], pose_agnotic_wErase_np_RGB[:,:,2] = pose_agnotic_wErase_np, pose_agnotic_wErase_np, pose_agnotic_wErase_np
# paste in the gray regions
color = 100
pose_agnotic_wErase_img = Image.fromarray(np.uint8(pose_agnotic_wErase_np_RGB*color), mode="RGB").convert('RGB')
pose_agnotic_wErase_mask_img = Image.fromarray(np.uint8(pose_agnotic_wErase_np * 255), mode="L").convert("L")
#pose_agnotic_wErase_img.save( "_debug/pose_agnotic_wErase_img.png" )
#pose_agnotic_wErase_mask_img.save( "_debug/pose_agnotic_wErase_mask_img.png" )
pose_agnotic_img = Image.fromarray(np.uint8(pose_agnotic_np*255), mode="RGB").convert('RGB') # 0 ~ 255
pose_agnotic_img = Image.composite(pose_agnotic_wErase_img, pose_agnotic_img, pose_agnotic_wErase_mask_img)
# paste in the face region
face_pos = [1, 2, 4, 13]
pose_agnotic_face_np = np.zeros(parsing_np.shape).astype(np.float32)
for pos in face_pos:
pose_agnotic_face_np += (parsing_np == pos).astype(np.float32)
pose_agnotic_face_np_RGB = np.zeros( (pose_agnotic_face_np.shape[0],pose_agnotic_face_np.shape[1],3) ).astype(np.float32)
pose_agnotic_face_np_RGB[:,:,0], pose_agnotic_face_np_RGB[:,:,1], pose_agnotic_face_np_RGB[:,:,2] = pose_agnotic_face_np, pose_agnotic_face_np, pose_agnotic_face_np
pose_agnotic_face_np_RGB = ( pose_np * pose_agnotic_face_np_RGB ) * 255
pose_agnotic_face_img = Image.fromarray(np.uint8(pose_agnotic_face_np_RGB), mode="RGB").convert('RGB')
pose_agnotic_face_mask_img = Image.fromarray(np.uint8(pose_agnotic_face_np*255), mode="L").convert("L")
pose_agnotic_img = Image.composite(pose_agnotic_face_img, pose_agnotic_img, pose_agnotic_face_mask_img)
#pose_agnotic_img.save( "_debug/pose_agnotic_img.png" )
# inverted mask image
pose_agnotic_woErase_mask_img = ImageOps.invert(pose_agnotic_wErase_mask_img)
# cast to Tensor and normalize
pose_wuton_agnotic_tsr = self.transform(pose_agnotic_img)
pose_wuton_agnotic_woErase_mask_tsr = self.transform_mask(pose_agnotic_woErase_mask_img)
return pose_wuton_agnotic_tsr, pose_wuton_agnotic_woErase_mask_tsr
def __getitem__(self, index):
cloth_name = self.cloth_names[index]
poseA_name = self.poseA_names[index]
poseB_name = self.poseB_names[index]
#---------------------
# cloth
#---------------------
cloth_img = Image.open( os.path.join(self.dataset_dir, "cloth", cloth_name) ).convert("RGB")
cloth_tsr = self.transform(cloth_img)
#---------------------
# cloth mask
#---------------------
cloth_mask_img = Image.open( os.path.join(self.dataset_dir, "cloth_mask", cloth_name) ).convert("L")
cloth_mask_tsr = self.transform_mask(cloth_mask_img)
#---------------------
# pose img
#---------------------
poseA_img = Image.open( os.path.join(self.dataset_dir, "poseA", poseA_name) ).convert("RGB")
poseA_tsr = self.transform(poseA_img)
poseB_img = Image.open( os.path.join(self.dataset_dir, "poseB", poseB_name) ).convert("RGB")
poseB_tsr = self.transform(poseB_img)
#---------------------
# pose parsing
#---------------------
poseA_parsing_img = Image.open( os.path.join(self.dataset_dir, "poseA_parsing", poseA_name) ).convert("L")
poseA_parsing_tsr = self.transform_mask(poseA_parsing_img)
poseB_parsing_img = Image.open( os.path.join(self.dataset_dir, "poseB_parsing", poseB_name) ).convert("L")
poseB_parsing_tsr = self.transform_mask(poseB_parsing_img)
# ground-truth clothing
poseA_cloth_tsr, poseA_cloth_mask_tsr = self.get_cloth_part( poseA_parsing_img, poseA_tsr )
poseB_cloth_tsr, poseB_cloth_mask_tsr = self.get_cloth_part( poseB_parsing_img, poseB_tsr )
# BodyShape
poseA_bodyshape_mask_tsr = self.get_body_shape( poseA_parsing_img, self.args.poseA_bodyshape_downsampling_size )
poseB_bodyshape_mask_tsr = self.get_body_shape( poseB_parsing_img, self.args.poseB_bodyshape_downsampling_size )
# GMM agnostic shapes
poseA_gmm_agnostic_tsr = self.get_agnotic( poseA_parsing_img, poseA_tsr, self.args.gmm_agnostic_type )
poseB_gmm_agnostic_tsr = self.get_agnotic( poseB_parsing_img, poseB_tsr, self.args.gmm_agnostic_type )
poseA_tom_agnostic_tsr = self.get_agnotic( poseA_parsing_img, poseA_tsr, self.args.tom_agnostic_type )
poseB_tom_agnostic_tsr = self.get_agnotic( poseB_parsing_img, poseB_tsr, self.args.tom_agnostic_type )
#---------------------
# pose keypoints
#---------------------
poseA_keypoints_tsr, poseA_keypoints_img_tsr = self.get_keypoints( "poseA_keypoints", poseA_name )
poseB_keypoints_tsr, poseB_keypoints_img_tsr = self.get_keypoints( "poseB_keypoints", poseB_name )
#---------------------
# pose human identity (TOM agnotic)
#---------------------
poseA_wuton_agnotic_tsr, poseA_wuton_agnotic_woErase_mask_tsr = self.get_tom_wuton_agnotic( poseA_parsing_img, poseA_img )
poseB_wuton_agnotic_tsr, poseB_wuton_agnotic_woErase_mask_tsr = self.get_tom_wuton_agnotic( poseB_parsing_img, poseB_img )
#---------------------
# Grid image
#---------------------
grid_img = Image.open('grid.png')
grid_tsr = self.transform(grid_img)
results_dict = {
"cloth_name" : cloth_name, # clothing file name
"poseA_name" : poseA_name, # file name of the person image (pose A)
"poseB_name" : poseB_name, # file name of the person image (pose B)
"cloth_tsr" : cloth_tsr, # clothing image
"cloth_mask_tsr" : cloth_mask_tsr, # clothing mask image
"poseA_tsr" : poseA_tsr, # person image (pose A)
"poseA_parsing_tsr" : poseA_parsing_tsr, # human parsing image (pose A)
"poseA_cloth_tsr" : poseA_cloth_tsr, # ground-truth clothing (pose A)
"poseA_cloth_mask_tsr" : poseA_cloth_mask_tsr, # ground-truth clothing mask (pose A)
"poseA_bodyshape_mask_tsr" : poseA_bodyshape_mask_tsr, # downsampled BodyShape (pose A)
"poseA_gmm_agnostic_tsr" : poseA_gmm_agnostic_tsr, # agnostic shape fed to the GMM (pose A)
"poseA_tom_agnostic_tsr" : poseA_tom_agnostic_tsr, # agnostic shape fed to the TOM (pose A)
"poseA_keypoints_tsr" : poseA_keypoints_tsr, # keypoints fed to the network (pose A)
"poseA_keypoints_img_tsr" : poseA_keypoints_img_tsr, # keypoints for visualization (pose A)
"poseA_wuton_agnotic_tsr" : poseA_wuton_agnotic_tsr, # WUTON-style agnostic image (pose A)
"poseA_wuton_agnotic_woErase_mask_tsr" : poseA_wuton_agnotic_woErase_mask_tsr, # mask of everything except the gray region of the WUTON agnostic image (pose A)
"poseB_tsr" : poseB_tsr, # person image (pose B)
"poseB_parsing_tsr" : poseB_parsing_tsr, # human parsing image (pose B)
"poseB_cloth_tsr" : poseB_cloth_tsr, # ground-truth clothing (pose B)
"poseB_cloth_mask_tsr" : poseB_cloth_mask_tsr, # ground-truth clothing mask (pose B)
"poseB_bodyshape_mask_tsr" : poseB_bodyshape_mask_tsr, # downsampled BodyShape (pose B)
"poseB_gmm_agnostic_tsr" : poseB_gmm_agnostic_tsr, # agnostic shape fed to the GMM (pose B)
"poseB_tom_agnostic_tsr" : poseB_tom_agnostic_tsr, # agnostic shape fed to the TOM (pose B)
"poseB_keypoints_tsr" : poseB_keypoints_tsr, # keypoints fed to the network (pose B)
"poseB_keypoints_img_tsr" : poseB_keypoints_img_tsr, # keypoints for visualization (pose B)
"poseB_wuton_agnotic_tsr" : poseB_wuton_agnotic_tsr, # WUTON-style agnostic image (pose B)
"poseB_wuton_agnotic_woErase_mask_tsr" : poseB_wuton_agnotic_woErase_mask_tsr, # mask of everything except the gray region of the WUTON agnostic image (pose B)
"grid_tsr" : grid_tsr,
}
return results_dict
class VtonDataLoader(object):
def __init__(self, dataset, batch_size = 1, shuffle = True):
super(VtonDataLoader, self).__init__()
self.data_loader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
shuffle=shuffle
)
self.dataset = dataset
self.batch_size = batch_size
self.data_iter = self.data_loader.__iter__()
def next_batch(self):
try:
batch = self.data_iter.__next__()
except StopIteration:
self.data_iter = self.data_loader.__iter__()
batch = self.data_iter.__next__()
return batch
|
from pydub import AudioSegment
import os
import librosa
import matplotlib.pyplot as plt
from scipy.io import wavfile
import numpy as np
def gen_spectrumgram(folder, filename):
spec_filename = filename[:len(filename) - 4] + ".png"
samplingFrequency, signalData = wavfile.read(folder + filename)
# Plot the signal read from wav file
window = np.hamming(64)
plt.specgram(signalData, NFFT=64, Fs=samplingFrequency, window=window, noverlap=48, cmap='jet')
plt.axis('off')
frame = plt.gca()
frame.axes.get_yaxis().set_visible(False)
frame.axes.get_xaxis().set_visible(False)
# check whether the directory exists
directory = folder + 'specgram/'
if not os.path.exists(directory):
os.makedirs(directory)
plt.savefig(directory + spec_filename, bbox_inches="tight")
plt.clf()
def audio_seg(folder, filename, start, end, total):
ds_filename = filename[:len(filename)-4] + "_ds.wav"
digit = filename[len(filename) - 6: len(filename) - 5]
seg_filename = filename[:len(filename) - 4] + "_" + str(digit) + ".wav"
# audio file down sampling
y, s = librosa.load(folder + filename, sr=5000)
librosa.output.write_wav(folder + ds_filename, y, s)
# audio file segmentation
myaudio = AudioSegment.from_file(folder + ds_filename)
a_len = len(myaudio)
start_time = int(a_len * start / total)
end_time = int(a_len * end / total)
word = myaudio[start_time:end_time]
# check whether the directory exists
directory = folder + 'result/'
if not os.path.exists(directory):
os.makedirs(directory)
word.export(directory + seg_filename, format="wav")
# delete the downsampled file
os.remove(folder + ds_filename)
def seg_wav_files():
filepath = "../dataset/"
foldernames = os.listdir(filepath)
for folder in foldernames:
filenames = os.listdir(filepath + folder)
for filename in filenames:
if len(filename) > 5 and filename[-4:] == ".wav":
align_filename = filepath + folder + '/align/' + filename[:len(filename) - 4] + '.align'
print(align_filename)
# read the align file
try:
f = open(align_filename, 'r')
content = f.readlines()
digits = content[5].split(" ")
last = content[7].split(" ")
start = digits[0]
end = digits[1]
total = last[1]
print("%s - %s - %s" % (start, end, total))
print(filepath + folder + '/')
audio_seg(filepath + folder + '/', filename, int(start), int(end), int(total))
f.close()
digit = filename[len(filename) - 6: len(filename) - 5]
seg_filename = filename[:len(filename) - 4] + "_" + str(digit) + ".wav"
gen_spectrumgram(filepath + folder + '/result/', seg_filename)
except IOError as err:
print('File Error:' + str(err))
print(filepath + folder + '/' + filename)
if __name__ == '__main__':
seg_wav_files()
|
#!/usr/bin/env python
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the robustness and resiliency of vtworkers."""
from collections import namedtuple
import urllib
import urllib2
import logging
import unittest
from vtdb import keyrange_constants
import base_sharding
import environment
import tablet
import utils
KEYSPACE_ID_TYPE = keyrange_constants.KIT_UINT64
class ShardTablets(namedtuple('ShardTablets', 'master replicas rdonlys')):
"""ShardTablets is a container for all the tablet.Tablets of a shard.
`master` should be a single Tablet, while `replicas` and `rdonlys` should be
lists of Tablets of the appropriate types.
"""
@property
def all_tablets(self):
"""Returns a list of all the tablets of the shard.
Does not guarantee any ordering on the returned tablets.
Returns:
List of all tablets of the shard.
"""
return [self.master] + self.replicas + self.rdonlys
@property
def replica(self):
"""Returns the first replica Tablet instance for the shard, or None."""
if self.replicas:
return self.replicas[0]
else:
return None
@property
def rdonly(self):
"""Returns the first rdonly Tablet instance for the shard, or None."""
if self.rdonlys:
return self.rdonlys[0]
else:
return None
def __str__(self):
return """master %s
replicas:
%s
rdonlys:
%s
""" % (self.master,
'\n'.join(' %s' % replica for replica in self.replicas),
'\n'.join(' %s' % rdonly for rdonly in self.rdonlys))
# initial shard, covers everything
shard_master = tablet.Tablet()
shard_replica = tablet.Tablet()
shard_rdonly1 = tablet.Tablet()
# split shards
# range '' - 80
shard_0_master = tablet.Tablet()
shard_0_replica = tablet.Tablet()
shard_0_rdonly1 = tablet.Tablet()
# range 80 - ''
shard_1_master = tablet.Tablet()
shard_1_replica = tablet.Tablet()
shard_1_rdonly1 = tablet.Tablet()
all_shard_tablets = ShardTablets(shard_master, [shard_replica], [shard_rdonly1])
shard_0_tablets = ShardTablets(
shard_0_master, [shard_0_replica], [shard_0_rdonly1])
shard_1_tablets = ShardTablets(
shard_1_master, [shard_1_replica], [shard_1_rdonly1])
def init_keyspace():
"""Creates a `test_keyspace` keyspace with a sharding key."""
utils.run_vtctl(
['CreateKeyspace', '-sharding_column_name', 'keyspace_id',
'-sharding_column_type', KEYSPACE_ID_TYPE, 'test_keyspace'])
def setUpModule():
try:
environment.topo_server().setup()
setup_procs = [
shard_master.init_mysql(),
shard_replica.init_mysql(),
shard_rdonly1.init_mysql(),
shard_0_master.init_mysql(),
shard_0_replica.init_mysql(),
shard_0_rdonly1.init_mysql(),
shard_1_master.init_mysql(),
shard_1_replica.init_mysql(),
shard_1_rdonly1.init_mysql(),
]
utils.wait_procs(setup_procs)
init_keyspace()
logging.debug('environment set up with the following shards and tablets:')
logging.debug('=========================================================')
logging.debug('TABLETS: test_keyspace/0:\n%s', all_shard_tablets)
logging.debug('TABLETS: test_keyspace/-80:\n%s', shard_0_tablets)
logging.debug('TABLETS: test_keyspace/80-:\n%s', shard_1_tablets)
except:
tearDownModule()
raise
def tearDownModule():
utils.required_teardown()
if utils.options.skip_teardown:
return
teardown_procs = [
shard_master.teardown_mysql(),
shard_replica.teardown_mysql(),
shard_rdonly1.teardown_mysql(),
shard_0_master.teardown_mysql(),
shard_0_replica.teardown_mysql(),
shard_0_rdonly1.teardown_mysql(),
shard_1_master.teardown_mysql(),
shard_1_replica.teardown_mysql(),
shard_1_rdonly1.teardown_mysql(),
]
utils.wait_procs(teardown_procs, raise_on_error=False)
environment.topo_server().teardown()
utils.kill_sub_processes()
utils.remove_tmp_files()
shard_master.remove_tree()
shard_replica.remove_tree()
shard_rdonly1.remove_tree()
shard_0_master.remove_tree()
shard_0_replica.remove_tree()
shard_0_rdonly1.remove_tree()
shard_1_master.remove_tree()
shard_1_replica.remove_tree()
shard_1_rdonly1.remove_tree()
class TestBaseSplitClone(unittest.TestCase, base_sharding.BaseShardingTest):
"""Abstract test base class for testing the SplitClone worker."""
def __init__(self, *args, **kwargs):
super(TestBaseSplitClone, self).__init__(*args, **kwargs)
self.num_insert_rows = utils.options.num_insert_rows
def run_shard_tablets(
self, shard_name, shard_tablets, create_table=True):
"""Handles all the necessary work for initially running a shard's tablets.
This encompasses the following steps:
1. (optional) Create the db
2. Start vttablets and let them initialize themselves
3. Wait for the appropriate vttablet state
4. Force reparent to the master tablet
5. RebuildKeyspaceGraph
6. (optional) Run the initial schema setup
Args:
shard_name: the name of the shard to start tablets in
shard_tablets: an instance of ShardTablets for the given shard
create_table: boolean, True iff we should create a table on the tablets
"""
# Start tablets.
#
# NOTE: The future master has to be started with type 'replica'.
shard_tablets.master.start_vttablet(
wait_for_state=None, init_tablet_type='replica',
init_keyspace='test_keyspace', init_shard=shard_name,
binlog_use_v3_resharding_mode=False)
for t in shard_tablets.replicas:
t.start_vttablet(
wait_for_state=None, init_tablet_type='replica',
init_keyspace='test_keyspace', init_shard=shard_name,
binlog_use_v3_resharding_mode=False)
for t in shard_tablets.rdonlys:
t.start_vttablet(
wait_for_state=None, init_tablet_type='rdonly',
init_keyspace='test_keyspace', init_shard=shard_name,
binlog_use_v3_resharding_mode=False)
# Block until tablets are up and we can enable replication.
# All tablets should be NOT_SERVING until we run InitShardMaster.
for t in shard_tablets.all_tablets:
t.wait_for_vttablet_state('NOT_SERVING')
# Reparent to choose an initial master and enable replication.
utils.run_vtctl(
['InitShardMaster', '-force', 'test_keyspace/%s' % shard_name,
shard_tablets.master.tablet_alias], auto_log=True)
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
# Enforce a health check instead of waiting for the next periodic one.
# (saves up to 1 second execution time on average)
for t in shard_tablets.replicas:
utils.run_vtctl(['RunHealthCheck', t.tablet_alias])
for t in shard_tablets.rdonlys:
utils.run_vtctl(['RunHealthCheck', t.tablet_alias])
# Wait for tablet state to change after starting all tablets. This allows
# us to start all tablets at once, instead of sequentially waiting.
# NOTE: Replication has to be enabled first or the health check will
# set a replica or rdonly tablet back to NOT_SERVING.
for t in shard_tablets.all_tablets:
t.wait_for_vttablet_state('SERVING')
create_table_sql = (
'create table worker_test('
'id bigint unsigned,'
'msg varchar(64),'
'keyspace_id bigint(20) unsigned not null,'
'primary key (id),'
'index by_msg (msg)'
') Engine=InnoDB'
)
if create_table:
utils.run_vtctl(['ApplySchema',
'-sql=' + create_table_sql,
'test_keyspace'],
auto_log=True)
def copy_schema_to_destination_shards(self):
for keyspace_shard in ('test_keyspace/-80', 'test_keyspace/80-'):
utils.run_vtctl(['CopySchemaShard',
'--exclude_tables', 'unrelated',
shard_rdonly1.tablet_alias,
keyspace_shard],
auto_log=True)
def _insert_values(self, vttablet, id_offset, msg, keyspace_id, num_values):
"""Inserts values into MySQL along with the required routing comments.
Args:
vttablet: the Tablet instance to modify.
id_offset: offset for the value of `id` column.
msg: the value of `msg` column.
keyspace_id: the value of `keyspace_id` column.
num_values: number of rows to be inserted.
"""
# For maximum performance, multiple values are inserted in one statement.
# However, when the statements are too long, queries will timeout and
# vttablet will kill them. Therefore, we chunk it into multiple statements.
def chunks(full_list, n):
"""Yield successive n-sized chunks from full_list."""
for i in xrange(0, len(full_list), n):
yield full_list[i:i+n]
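# e.g. chunks([1, 2, 3, 4, 5], 2) yields [1, 2], [3, 4], [5]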
max_chunk_size = 100*1000
k = utils.uint64_to_hex(keyspace_id)
for chunk in chunks(range(1, num_values+1), max_chunk_size):
logging.debug('Inserting values for range [%d, %d].', chunk[0], chunk[-1])
values_str = ''
for i in chunk:
if i != chunk[0]:
values_str += ','
values_str += "(%d, '%s', 0x%x)" % (id_offset + i, msg, keyspace_id)
vttablet.mquery(
'vt_test_keyspace', [
'begin',
'insert into worker_test(id, msg, keyspace_id) values%s '
'/* vtgate:: keyspace_id:%s */' % (values_str, k),
'commit'],
write=True)
def insert_values(self, vttablet, num_values, num_shards, offset=0,
keyspace_id_range=2**64):
"""Inserts simple values, one for each potential shard.
Each row is given a message that contains the shard number, so we can easily
verify that the source and destination shards have the same data.
Args:
vttablet: the Tablet instance to modify.
num_values: The number of values to insert.
num_shards: the number of shards that we expect to have.
offset: amount that we should offset the `id`s by. This is useful for
inserting values multiple times.
keyspace_id_range: the number of distinct values that the keyspace id
can have.
"""
shard_width = keyspace_id_range / num_shards
shard_offsets = [i * shard_width for i in xrange(num_shards)]
# TODO(mberlin): Change the "id" column values from the keyspace id to a
# counter starting at 1. The incrementing ids must
# alternate between the two shards. Without this, the
# vtworker chunking won't be well balanced across shards.
for shard_num in xrange(num_shards):
self._insert_values(
vttablet,
shard_offsets[shard_num] + offset,
'msg-shard-%d' % shard_num,
shard_offsets[shard_num],
num_values)
def assert_shard_data_equal(
self, shard_num, source_tablet, destination_tablet):
"""Asserts source and destination tablets have identical shard data.
Args:
shard_num: The shard number of the shard that we want to verify.
source_tablet: Tablet instance of the source shard.
destination_tablet: Tablet instance of the destination shard.
"""
select_query = (
'select * from worker_test where msg="msg-shard-%s" order by id asc' %
shard_num)
# Make sure all the right rows made it from the source to the destination
source_rows = source_tablet.mquery('vt_test_keyspace', select_query)
destination_rows = destination_tablet.mquery(
'vt_test_keyspace', select_query)
self.assertEqual(source_rows, destination_rows)
# Make sure that there are no extra rows on the destination
count_query = 'select count(*) from worker_test'
destination_count = destination_tablet.mquery(
'vt_test_keyspace', count_query)[0][0]
self.assertEqual(destination_count, len(destination_rows))
def run_split_diff(self, keyspace_shard, source_tablets, destination_tablets):
"""Runs a vtworker SplitDiff on the given keyspace/shard.
Sets all former rdonly slaves back to rdonly.
Args:
keyspace_shard: keyspace/shard to run SplitDiff on (string)
source_tablets: ShardTablets instance for the source shard
destination_tablets: ShardTablets instance for the destination shard
"""
_ = source_tablets, destination_tablets
logging.debug('Running vtworker SplitDiff for %s', keyspace_shard)
_, _ = utils.run_vtworker(
['-cell', 'test_nj',
'--use_v3_resharding_mode=false',
'SplitDiff',
'--min_healthy_rdonly_tablets', '1',
keyspace_shard], auto_log=True)
def setUp(self):
"""Creates shards, starts the tablets, and inserts some data."""
try:
self.run_shard_tablets('0', all_shard_tablets)
# create the split shards
self.run_shard_tablets(
'-80', shard_0_tablets, create_table=False)
self.run_shard_tablets(
'80-', shard_1_tablets, create_table=False)
logging.debug('Start inserting initial data: %s rows',
self.num_insert_rows)
self.insert_values(shard_master, self.num_insert_rows, 2)
logging.debug(
'Done inserting initial data, waiting for replication to catch up')
utils.wait_for_replication_pos(shard_master, shard_rdonly1)
logging.debug('Replication on source rdonly tablet is caught up')
except:
self.tearDown()
raise
def tearDown(self):
"""Does the minimum to reset topology and tablets to their initial states.
When benchmarked, this seemed to take around 30% of the time of
(setupModule + tearDownModule).
FIXME(aaijazi): doing this in parallel greatly reduces the time it takes.
See the kill_tablets method in tablet.py.
"""
for shard_tablet in [all_shard_tablets, shard_0_tablets, shard_1_tablets]:
for t in shard_tablet.all_tablets:
t.reset_replication()
t.set_semi_sync_enabled(master=False)
t.clean_dbs()
# _vt.vreplication should be dropped to avoid interference between
# test cases
t.mquery('', 'drop table if exists _vt.vreplication')
t.kill_vttablet()
# we allow failures here as some tablets will be gone sometimes
# (the master tablets after an emergency reparent)
utils.run_vtctl(['DeleteTablet', '-allow_master', t.tablet_alias],
auto_log=True, raise_on_error=False)
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
for shard in ['0', '-80', '80-']:
utils.run_vtctl(
['DeleteShard', '-even_if_serving', 'test_keyspace/%s' % shard],
auto_log=True)
class TestBaseSplitCloneResiliency(TestBaseSplitClone):
"""Tests that the SplitClone worker is resilient to particular failures."""
def setUp(self):
try:
super(TestBaseSplitCloneResiliency, self).setUp()
self.copy_schema_to_destination_shards()
except:
self.tearDown()
raise
def verify_successful_worker_copy_with_reparent(self, mysql_down=False):
"""Verifies that vtworker can successfully copy data for a SplitClone.
Order of operations:
1. Run a background vtworker
2. Wait until the worker successfully resolves the destination masters.
3. Reparent the destination tablets
4. Wait until the vtworker copy is finished
5. Verify that the worker was forced to reresolve topology and retry writes
due to the reparent.
6. Verify that the data was copied successfully to both new shards
Args:
mysql_down: boolean. If True, we take down the MySQL instances on the
destination masters at first, then bring them back and reparent away.
Raises:
AssertionError if things didn't go as expected.
"""
worker_proc, worker_port, worker_rpc_port = utils.run_vtworker_bg(
['--cell', 'test_nj', '--use_v3_resharding_mode=false'],
auto_log=True)
# --max_tps is only specified to enable the throttler and ensure that the
# code is executed. But the intent here is not to throttle the test, hence
# the rate limit is set very high.
# --chunk_count is 2 because rows are currently ordered by primary key such
# that all rows of the first shard come first and then the second shard.
# TODO(mberlin): Remove --offline=false once vtworker ensures that the
# destination shards are not behind the master's replication
# position.
args = ['SplitClone',
'--offline=false',
'--destination_writer_count', '1',
'--min_healthy_rdonly_tablets', '1',
'--max_tps', '9999']
# Make the clone as slow as necessary such that there is enough time to
# run PlannedReparent in the meantime.
# TODO(mberlin): Once insert_values is fixed to uniformly distribute the
# rows across shards when sorted by primary key, remove
# --chunk_count 2, --min_rows_per_chunk 1 and set
# --source_reader_count back to 1.
args.extend(['--source_reader_count', '2',
'--chunk_count', '2',
'--min_rows_per_chunk', '1',
'--write_query_max_rows', '1'])
args.append('test_keyspace/0')
workerclient_proc = utils.run_vtworker_client_bg(args, worker_rpc_port)
if mysql_down:
# vtworker is blocked at this point. This is a good time to test that its
# throttler server is reacting to RPCs.
self.check_throttler_service('localhost:%d' % worker_rpc_port,
['test_keyspace/-80', 'test_keyspace/80-'],
9999)
utils.poll_for_vars(
'vtworker', worker_port,
'WorkerState == cloning the data (online)',
condition_fn=lambda v: v.get('WorkerState') == 'cloning the'
' data (online)')
logging.debug('Worker is in copy state, Shutting down mysqld on destination masters.')
utils.wait_procs(
[shard_0_master.shutdown_mysql(),
shard_1_master.shutdown_mysql()])
# If MySQL is down, we wait until vtworker retried at least once to make
# sure it reached the point where a write failed due to MySQL being down.
# There should be two retries at least, one for each destination shard.
utils.poll_for_vars(
'vtworker', worker_port,
'WorkerRetryCount >= 2',
condition_fn=lambda v: v.get('WorkerRetryCount') >= 2)
logging.debug('Worker has retried at least once per shard, starting reparent now')
# Bring back masters. Since we test with semi-sync now, we need at least
# one replica for the new master. This test is already quite expensive,
# so we bring back the old master as a replica rather than having a third
# replica up the whole time.
logging.debug('Restarting mysqld on destination masters')
utils.wait_procs(
[shard_0_master.start_mysql(),
shard_1_master.start_mysql()])
# Reparent away from the old masters.
utils.run_vtctl(
['PlannedReparentShard', '-keyspace_shard', 'test_keyspace/-80',
'-new_master', shard_0_replica.tablet_alias], auto_log=True)
utils.run_vtctl(
['PlannedReparentShard', '-keyspace_shard', 'test_keyspace/80-',
'-new_master', shard_1_replica.tablet_alias], auto_log=True)
else:
# NOTE: There is a race condition around this:
# It's possible that the SplitClone vtworker command finishes before the
# PlannedReparentShard vtctl command, which we start below, succeeds.
# Then the test would fail because vtworker did not have to retry.
#
# To workaround this, the test takes a parameter to increase the number of
# rows that the worker has to copy (with the idea being to slow the worker
# down).
# You should choose a value for num_insert_rows, such that this test
# passes for your environment (trial-and-error...)
# Make sure that vtworker got past the point where it picked a master
# for each destination shard ("finding targets" state).
utils.poll_for_vars(
'vtworker', worker_port,
'WorkerState == cloning the data (online)',
condition_fn=lambda v: v.get('WorkerState') == 'cloning the'
' data (online)')
logging.debug('Worker is in copy state, starting reparent now')
utils.run_vtctl(
['PlannedReparentShard', '-keyspace_shard', 'test_keyspace/-80',
'-new_master', shard_0_replica.tablet_alias], auto_log=True)
utils.run_vtctl(
['PlannedReparentShard', '-keyspace_shard', 'test_keyspace/80-',
'-new_master', shard_1_replica.tablet_alias], auto_log=True)
utils.wait_procs([workerclient_proc])
# Verify that we were forced to re-resolve and retry.
worker_vars = utils.get_vars(worker_port)
self.assertGreater(worker_vars['WorkerRetryCount'], 1,
"expected vtworker to retry each of the two reparented"
" destination masters at least once, but it didn't")
self.assertNotEqual(worker_vars['WorkerRetryCount'], {},
"expected vtworker to retry, but it didn't")
utils.kill_sub_process(worker_proc, soft=True)
# Wait for the destination RDONLYs to catch up or the following offline
# clone will try to insert rows which already exist.
# TODO(mberlin): Remove this once SplitClone supports it natively.
utils.wait_for_replication_pos(shard_0_replica, shard_0_rdonly1)
utils.wait_for_replication_pos(shard_1_replica, shard_1_rdonly1)
# Run final offline clone to enable filtered replication.
_, _ = utils.run_vtworker(['-cell', 'test_nj',
'--use_v3_resharding_mode=false',
'SplitClone',
'--online=false',
'--min_healthy_rdonly_tablets', '1',
'test_keyspace/0'], auto_log=True)
# Make sure that everything is caught up to the same replication point
self.run_split_diff('test_keyspace/-80', all_shard_tablets, shard_0_tablets)
self.run_split_diff('test_keyspace/80-', all_shard_tablets, shard_1_tablets)
self.assert_shard_data_equal(0, shard_master, shard_0_tablets.replica)
self.assert_shard_data_equal(1, shard_master, shard_1_tablets.replica)
class TestReparentDuringWorkerCopy(TestBaseSplitCloneResiliency):
def __init__(self, *args, **kwargs):
super(TestReparentDuringWorkerCopy, self).__init__(*args, **kwargs)
self.num_insert_rows = utils.options.num_insert_rows_before_reparent_test
def test_reparent_during_worker_copy(self):
"""Simulates a destination reparent during a worker SplitClone copy.
The SplitClone command should be able to gracefully handle the reparent and
end up with the correct data on the destination.
Note: this test has a small possibility of flaking, due to the timing issues
involved. It's possible for the worker to finish the copy step before the
reparent succeeds, in which case there are assertions that will fail. This
seems better than having the test silently pass.
"""
self.verify_successful_worker_copy_with_reparent()
class TestMysqlDownDuringWorkerCopy(TestBaseSplitCloneResiliency):
def test_mysql_down_during_worker_copy(self):
"""This test simulates MySQL being down on the destination masters."""
self.verify_successful_worker_copy_with_reparent(mysql_down=True)
class TestVtworkerWebinterface(unittest.TestCase):
def setUp(self):
# Run vtworker without any optional arguments to start in interactive mode.
self.worker_proc, self.worker_port, _ = utils.run_vtworker_bg([])
def tearDown(self):
utils.kill_sub_process(self.worker_proc)
def test_webinterface(self):
worker_base_url = 'http://localhost:%d' % int(self.worker_port)
# Wait for /status to become available.
timeout = 10
while True:
done = False
try:
urllib2.urlopen(worker_base_url + '/status').read()
done = True
except urllib2.URLError:
pass
if done:
break
timeout = utils.wait_step(
'worker /status webpage must be available', timeout)
# Run the command twice to make sure it's idempotent.
for _ in range(2):
# Run Ping command.
try:
urllib2.urlopen(
worker_base_url + '/Debugging/Ping',
data=urllib.urlencode({'message': 'pong'})).read()
raise Exception('Should have thrown an HTTPError for the redirect.')
except urllib2.HTTPError as e:
self.assertEqual(e.code, 307)
# Wait for the Ping command to finish.
utils.poll_for_vars(
'vtworker', self.worker_port,
'WorkerState == done',
condition_fn=lambda v: v.get('WorkerState') == 'done')
# Verify that the command logged something and it's available at /status.
status = urllib2.urlopen(worker_base_url + '/status').read()
self.assertIn(
"Ping command was called with message: 'pong'", status,
'Command did not log output to /status: %s' % status)
# Reset the job.
urllib2.urlopen(worker_base_url + '/reset').read()
status_after_reset = urllib2.urlopen(worker_base_url + '/status').read()
self.assertIn(
'This worker is idle.', status_after_reset,
'/status does not indicate that the reset was successful')
class TestMinHealthyRdonlyTablets(TestBaseSplitCloneResiliency):
def split_clone_fails_not_enough_health_rdonly_tablets(self):
"""Verify vtworker errors if there aren't enough healthy RDONLY tablets."""
_, stderr = utils.run_vtworker(
['-cell', 'test_nj',
'--wait_for_healthy_rdonly_tablets_timeout', '1s',
'--use_v3_resharding_mode=false',
'SplitClone',
'--min_healthy_rdonly_tablets', '2',
'test_keyspace/0'],
auto_log=True,
expect_fail=True)
self.assertIn('findTargets() failed: FindWorkerTablet() failed for'
' test_nj/test_keyspace/0: not enough healthy RDONLY'
' tablets to choose from in (test_nj,test_keyspace/0),'
' have 1 healthy ones, need at least 2', stderr)
def add_test_options(parser):
parser.add_option(
'--num_insert_rows', type='int', default=100,
help='The number of rows, per shard, that we should insert before '
'resharding for this test.')
parser.add_option(
'--num_insert_rows_before_reparent_test', type='int', default=4500,
help='The number of rows, per shard, that we should insert before '
'running TestReparentDuringWorkerCopy (supersedes --num_insert_rows in '
'that test). There must be enough rows such that SplitClone takes '
'several seconds to run while we run a planned reparent.')
if __name__ == '__main__':
utils.main(test_options=add_test_options)
|
from bs4 import BeautifulSoup
import requests
import re
class Scrape:
def __init__(self):
self.headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.13 Safari/537.36",
"Referer": "http://tools.iedb.org/bcell/"
}
self.url = "http://tools.iedb.org/bcell/"
def get_result(self, seq="", method="Parker"):
if seq == "":
            raise Exception("No protein sequence provided")
session = requests.Session()
response = session.get(self.url, headers=self.headers)
soup = BeautifulSoup(response.text, "lxml")
result = soup.find_all("input", attrs={"name": "csrfmiddlewaretoken"})[0]
token = result["value"]
        print("Token obtained:", token)
data = {
"csrfmiddlewaretoken": token,
"pred_tool": "bcell",
"source": "html",
"form_name": "submission_form",
"sequence_text": seq,
"method": method,
"submit": "Submit",
"swissprot": ""
}
print("Waiting for response")
response = session.post(self.url, headers=self.headers, data=data)
print("Got")
soup1 = BeautifulSoup(response.text, "lxml")
res = soup1.find_all("div", attrs={"id": "content"})[0].text
        pat = r"(-?\d+\.\d{3})"
result = re.compile(pat).findall(res)
return result
if __name__ == "__main__":
Sc = Scrape()
print(Sc.get_result(seq="HAAVWNAQEAQADFAK"))
|
def tail_swap(arr):
fmt = '{}:{}'.format
(head, tail), (head_2, tail_2) = (a.split(':') for a in arr)
return [fmt(head, tail_2), fmt(head_2, tail)]
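# Hedged usage sketch: each input string is split on ':' into a head and a tail,
# and the two tails are swapped between the strings.
if __name__ == '__main__':
    print(tail_swap(['nice:strings', 'cool:words']))  # ['nice:words', 'cool:strings']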
|
# Generated by Django 2.1.4 on 2018-12-28 18:26
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Address',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('active', models.BooleanField(default=1)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('type', models.CharField(choices=[(1, 'HOME'), (2, 'Work'), (0, 'Other')], max_length=20)),
('street_line1', models.CharField(blank=True, max_length=100)),
('street_line2', models.CharField(blank=True, max_length=100)),
('city', models.CharField(blank=True, max_length=100)),
('state', models.CharField(blank=True, max_length=100)),
('zipcode', models.CharField(blank=True, max_length=5)),
('country', models.CharField(blank=True, max_length=100)),
('created_by', models.ForeignKey(default='0', on_delete=django.db.models.deletion.PROTECT, related_name='address_Creator', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(default='0', on_delete=django.db.models.deletion.PROTECT, related_name='address_modified_by', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='class1',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=30)),
],
options={
'db_table': '_App1_class1',
},
),
migrations.CreateModel(
name='class2',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=30)),
('class1s', models.ManyToManyField(to='base.class1')),
],
options={
'db_table': '_App1_class2',
},
),
migrations.CreateModel(
name='Mule',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('active', models.BooleanField(default=1)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('name', models.CharField(blank=True, max_length=30)),
('address', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='base.Address')),
('created_by', models.ForeignKey(default='0', on_delete=django.db.models.deletion.PROTECT, related_name='mule_Creator', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Person',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('active', models.BooleanField(default=1)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('name', models.CharField(blank=True, max_length=30)),
('created_by', models.ForeignKey(default='0', on_delete=django.db.models.deletion.PROTECT, related_name='person_Creator', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(default='0', on_delete=django.db.models.deletion.PROTECT, related_name='person_modified_by', to=settings.AUTH_USER_MODEL)),
('userId', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Rider',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('active', models.BooleanField(default=1)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('name', models.CharField(blank=True, max_length=30)),
('address', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='base.Address')),
('created_by', models.ForeignKey(default='0', on_delete=django.db.models.deletion.PROTECT, related_name='rider_Creator', to=settings.AUTH_USER_MODEL)),
('member', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='base.Person')),
('modified_by', models.ForeignKey(default='0', on_delete=django.db.models.deletion.PROTECT, related_name='rider_modified_by', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Service',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('active', models.BooleanField(default=1)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('name', models.CharField(blank=True, max_length=30)),
('address', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='base.Address')),
('created_by', models.ForeignKey(default='0', on_delete=django.db.models.deletion.PROTECT, related_name='service_Creator', to=settings.AUTH_USER_MODEL)),
('member', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='base.Person')),
('modified_by', models.ForeignKey(default='0', on_delete=django.db.models.deletion.PROTECT, related_name='service_modified_by', to=settings.AUTH_USER_MODEL)),
('parentService', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='base.Service')),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='mule',
name='member',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='base.Person'),
),
migrations.AddField(
model_name='mule',
name='modified_by',
field=models.ForeignKey(default='0', on_delete=django.db.models.deletion.PROTECT, related_name='mule_modified_by', to=settings.AUTH_USER_MODEL),
),
]
|
######### CLASS TREE #########
import Node
import json
class Tree:
def __init__(self, root):
self.root = root
def parse(self,node):
result = {}
if node.children:
for child in node.children:
if node.handled_data in result:
result[node.handled_data].append(self.parse(child))
else:
result[node.handled_data] = [self.parse(child)]
else:
return {node.handled_data:[]}
return result
def toList(self):
return self.root.build_list_with_child()
def toJson(self,node):
return json.dumps(self.parse(node))
######### END OF CLASS #########
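# Illustrative usage sketch (assumes the Node class exposes `children`,
# `handled_data` and `build_list_with_child()`; the constructor shown here
# is hypothetical):
#   root = Node.Node('root'); leaf = Node.Node('leaf')
#   root.children.append(leaf)
#   Tree(root).toJson(root)   # -> '{"root": [{"leaf": []}]}'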
|
#!env/bin/python
from dotenv import load_dotenv
from connection import Mercari, ROOT_PATH
import os
import json
from datetime import datetime
import argparse
from linebot import LineBotApi
from linebot.models import TextSendMessage
from linebot.exceptions import LineBotApiError
load_dotenv()
mercari_api = Mercari()
line_bot_api = LineBotApi(os.getenv("CONNECTION_TOKEN"))
parser = argparse.ArgumentParser(prog="main.py", description="Automatically search Mercari and send all unseen items to your line account")
parser.add_argument("keyword", help="Search keyword")
parser.add_argument("--price-min")
parser.add_argument("--price-max")
parser.add_argument("-e", "--electronics", help="Search all electronics", action="store_true")
parser.add_argument("-c", "--computers", help="Search specifically for computer related items", action="store_true")
parser.add_argument("-p", "--pc-parts", help="Search even more specifically for pc parts", action="store_true")
args = parser.parse_args()
def previously_viewed_item_check(item_list: list):
data_file_path = None
if os.name == "nt":
data_file_path = f"{ROOT_PATH}\\data.json"
elif os.name == "posix":
data_file_path = f"{ROOT_PATH}/data.json"
if not os.path.exists(data_file_path):
json_file = open(data_file_path, "w")
json.dump({}, json_file)
json_file.close()
json_file = open(data_file_path)
data = json.load(json_file)
json_file.close()
previously_viewed_items = [key for key in data.keys()]
new_items = []
for item in item_list:
if item[1] not in previously_viewed_items:
new_items.append(item)
if len(new_items) > 0:
print("There are unseen items.")
print(f"Sending new items to line_msg {new_items}")
json_file = open(data_file_path)
data = json.load(json_file)
json_file.close()
for item in new_items:
data[item[1]] = {"price": item[0], "viewed": str(datetime.now())}
with open(data_file_path, "w") as json_file:
json.dump(data, json_file)
json_file.close()
return new_items
print("There are no new items")
return False
def line_msg(data_to_send):
for item in data_to_send:
message = f"Hey Russell,\n\nThere is a new item.\n\nPrice: {item[0]}円\nLink: {item[1]}"
try:
line_bot_api.push_message(os.getenv("USER_ID"), TextSendMessage(text=message))
except LineBotApiError as e:
print(f"[ERROR]:{e}")
if __name__ == "__main__":
print("Checking Mercari for items")
results = mercari_api.fetch_items_pagination(
keyword=args.keyword,
price_min=args.price_min,
price_max=args.price_max,
e_flag=args.electronics,
c_flag=args.computers,
p_flag=args.pc_parts)
print(f"There are {len(results[0])} results")
print("Checking to see if items have been previously seen")
items_to_message = previously_viewed_item_check(results[0])
if items_to_message is not False:
line_msg(items_to_message)
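# Example invocation (keyword and price filters are illustrative; CONNECTION_TOKEN
# and USER_ID must be set in the .env file for LINE notifications to work):
#   python main.py "thinkpad x230" --price-min 5000 --price-max 20000 -c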
|
import requests
import json
import xlrd
import pymysql
def header():
    header = {'Content-Type': 'application/json'}
    return header
def token_header():
header = {'Content-Type': 'application/json'}
con_url = 'http://172.18.1.143:8888/admin/login/verify'
    con_body = {"captcha": "999999",  # verification code 999999
                "phone": "17633607554"}  # this phone should be the one obtained above
Confirm_login = requests.post(headers=header, url=con_url, data=json.dumps(con_body))
# Authorization = 'Admin ' + Confirm_login.text.split('"')[7]
token_header = {'Content-Type': 'application/json','Authorization': 'Admin eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJwaG9uZSI6IjE3NjMzNjA3NTU0IiwiaWQiOjIsInBhc3N3b3JkIjoiMjI0Y2YyYjY5NWE1ZThlY2FlY2ZiOTAxNTE2MWZhNGIiLCJleHAiOjE2MTkyMjgzMzMsImlzcyI6ImdvIHdlYiB0ZW1wbGF0ZSJ9.NcEk7NrD305NEXD2sPqH-RgEIeIL_jyJh7YFYe3fWv0'}
# token_header = {'Content-Type': 'application/json','Authorization':Authorization}
return token_header
def commect_table(row, col):
    # Read a single cell value from Sheet1 of the test-data workbook.
    data = xlrd.open_workbook(r'/Users/dingyanpan/Desktop/私有云接口自动化/数据管理.xls')
    table = data.sheet_by_name('Sheet1')
    value = table.cell_value(row, col)
    return value
|
import sys
import os
import re
import time
matchCpp = re.compile(r".*\.cpp")
matchExe = re.compile(r".*\.exe")
matchTxt = re.compile(r".*\.txt")
def toCompileCommand(file):
return "g++ -std=c++17 -O2 " + file + " -o " + file[:-3]+"exe"
def main():
command = ""
if len(sys.argv) == 2:
command = sys.argv[1]
else:
command = "build"
if command == "build":
if (len(sys.argv) >= 3):
shellCommand = toCompileCommand(sys.argv[2])
print(shellCommand)
os.system(shellCommand)
else:
for file in list(os.walk("."))[0][2]:
if matchCpp.match(file):
shellCommand = toCompileCommand(file)
print(shellCommand)
os.system(shellCommand)
time.sleep(2)
elif command == "clean":
for file in list(os.walk("."))[0][2]:
if matchExe.match(file) or matchTxt.match(file):
os.remove(file)
else:
        print("invalid argument " + command)
if __name__ == '__main__':
main()
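# Usage sketch (the script name is illustrative):
#   python make.py              # compile every .cpp file in the current directory
#   python make.py build a.cpp  # compile a single file
#   python make.py clean        # remove generated .exe and .txt files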
|
from msgs import fatal
# class to save user options
class userOptions(object):
def __init__(self):
self.opt = {}
def setGenType(self, fmt):
self.gentype = fmt
def getGenType(self):
return self.gentype
def addopt(self,k,v):
# print("adding {}:{}".format(k, v))
self.opt[k] = v
def getopt(self,k,v = ""):
if k in self.opt:
return self.opt[k]
else:
return v
def getOptEnum(self, k, map, default):
tagValue = self.getopt(k)
if tagValue == '':
return default
elif tagValue in map:
return map[tagValue]
else:
fatal("Option " + k + ": Illegal value '" + tagValue +
"'. Legal values are: " + ', '.join(k for k in map))
return None
def isOpt(self, k, default):
if k in self.opt:
if self.opt[k] == "true":
return True
elif self.opt[k] == "false":
return False
else:
fatal("Option " + k + ": must be true or false, not " + self.opt[k])
return default
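# Hedged usage sketch for userOptions:
if __name__ == '__main__':
    opts = userOptions()
    opts.addopt('pretty', 'true')
    print(opts.isOpt('pretty', False))        # True
    print(opts.getopt('missing', 'default'))  # 'default'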
|
"""
In information technology, a queue is a data structure whose elements are
accessed on a "first in, first out" (FIFO) basis. Adding an element (usually
called "enqueue", or "push") is only possible at the end of the queue, and
elements can only be taken from the front of the queue (usually called
"dequeue", or "pop"); the element taken is removed from the queue. So to
reach the most recently added element, we first have to "pull out" the
elements that were added before it.
Let's model a queue in Python. You are given a sequence of commands:
- "PUSH X" -- put X in the queue, where X is an uppercase letter.
- "POP" -- remove the element at the front of the queue. If the queue is
  empty, this command does nothing.
The queue contains only letters.
You need to process all the commands and collect the letters that remain in
the queue into a single word, from the front of the queue to the back.
Consider an example. Given the sequence of commands:
["PUSH A", "POP", "POP", "PUSH Z", "PUSH D", "PUSH O", "POP", "PUSH T"]
Command     Queue     Notes
------------------------------------------
PUSH A      A         Added "A" to the empty queue
POP                   Removed "A"
POP                   The queue is already empty
PUSH Z      Z
PUSH D      ZD
PUSH O      ZDO
POP         DO
PUSH T      DOT       Result
Input: a sequence of commands as a list (list) of strings (str).
Output: the contents of the queue as a string (str).
"""
import re
def letter_queue(commands):
    queue = []
    match_re = re.compile(r"(POP|PUSH)(?:(?:\Z)|(?:\s([A-Z]{1})\Z))")
    for command in commands:
        match_obj = match_re.match(command)
        if match_obj:
            if match_obj.group(1) == 'PUSH':
                queue.append(match_obj.group(2))
            elif match_obj.group(1) == 'POP':
                if queue:
                    queue.pop(0)
    return "".join(queue)
if __name__ == '__main__':
# These "asserts" using only for self-checking and not necessary for auto-testing
assert letter_queue(
["PUSH A", "POP", "POP", "PUSH Z", "PUSH D", "PUSH O", "POP",
"PUSH T"]) == "DOT", "dot example"
assert letter_queue(["POP", "POP"]) == "", "Pop, Pop, empty"
assert letter_queue(["PUSH H", "PUSH I"]) == "HI", "Hi!"
assert letter_queue([]) == "", "Nothing"
|
# more like quteutils
import os
def send_to_qute(msg):
with open(os.environ["QUTE_FIFO"], "w") as f:
f.write("{}\n".format(msg))
def qute_print_cmd(msg, cmd):
send_to_qute("{} '{}'".format(cmd, msg.translate("".maketrans("", "", "\"'"))))
def qute_print(msg):
qute_print_cmd(msg, "message-info")
def qute_eprint(msg):
qute_print_cmd(msg, "message-error")
def qute_wprint(msg):
qute_print_cmd(msg, "message-warning")
def qute_jseval(js, quiet=True):
send_to_qute("jseval {}{}".format("-q " if quiet else "", js))
def get_qute_page():
from bs4 import BeautifulSoup
with open(os.environ["QUTE_HTML"], "r") as html_file:
return BeautifulSoup(html_file, "lxml")
def started_from_qute():
return (
"QUTE_FIFO" in os.environ
and "QUTE_HTML" in os.environ
and "QUTE_URL" in os.environ
)
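# Hedged sketch: these helpers only do anything when the script is invoked as a
# qutebrowser userscript (the browser sets the QUTE_* environment variables).
if __name__ == '__main__':
    if started_from_qute():
        qute_print('hello from a userscript')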
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
class Animal(object):
def reply(self):
self.speak()
class Mammal(Animal):
def speak(self):
print('Mammal!')
class Cat(Mammal):
def speak(self):
print('meow')
class Dog(Mammal):
def speak(self):
print('wong')
class Primate(Mammal):
def speak(self):
print('Hello world!')
class Hacker(Primate):
pass
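# Demo: reply() is defined only on Animal and dispatches to the most derived
# speak() override of the actual instance.
if __name__ == '__main__':
    Cat().reply()     # meow
    Dog().reply()     # wong
    Hacker().reply()  # Hello world! (inherited from Primate)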
|
# Import necessary libraries
import serial
import time
# Create a variable that will create
# a communication between the Raspberry Pi
# and the Arduino
# Serial(arg1, arg2)
# The first argument, arg1 to the Serial variable
# is the port name.
# The second argument arg2 to the Serial variable
# is the baud rate.
# In this example, 'COM8' is the port where
# Arduino is connected. We are using Windows
# here. Refer to the document named READ ME (Step 4).docx
# on how to do this on a Raspberry Pi. But for now let us do
# this on Windows.
port=serial.Serial('COM8',9600)
port.setDTR(False)
time.sleep(1)
port.flushInput()
port.setDTR(True)
# try and except block
# The try and except blocks are
# used to handle errors.
# If any error is detected in the
# try block, execution stops and jumps
# to the except block.
# For example, if you are writing data to the serial port
# but have disconnected the hardware,
# then that connection no longer exists.
# If we did not have a try and except block, the program
# would throw an error and stop executing.
try:
    # Create an infinite loop to continuously run
while True:
        # Send a string to the connected device.
        # A newline '\n' is used as the terminator, meaning the receiver
        # reads only up to that point.
        # The 'b' prefix before the string means it is sent as bytes,
        # not as a plain string.
        port.write(b'Jezzamae\n')
print("Writing to port...")
# Delay for five seconds.
time.sleep(5)
except:
# Close the port if some error occurs.
# You can also press Ctrl+C to stop the infinite loop above
# and get to this line.
port.close()
|
import time
from Pages.base_page import BasePage
from Utils.locators import *
class ModalsPage(BasePage):
def __init__(self, driver):
self.locator = ModalsLocators
super().__init__(driver)
def launch_single_modal(self):
button = self.driver.find_element(*self.locator.launch_modal_button)
if button.is_enabled() is True:
time.sleep(2)
button.click()
else:
            print("Button is not displayed!")
time.sleep(3)
def get_single_modal_body(self):
body = self.driver.find_element(*self.locator.single_modal_title)
if body.is_enabled() is True:
time.sleep(3)
return body.text
else:
print("Modal body is not displayed!")
def click_save_single_modal(self):
button = self.driver.find_element(*self.locator.save_changes_button)
if button.is_enabled() is True:
time.sleep(3)
button.click()
else:
print("Button is not displayed!")
time.sleep(3)
def launch_first_multi_modals(self):
button = self.driver.find_element(*self.locator.launch_multi_modal_button1)
time.sleep(3)
if button.is_enabled() is True:
time.sleep(3)
button.click()
else:
            print("Button is not displayed!")
time.sleep(3)
def get_first_multi_modal_body(self):
time.sleep(3)
body = self.driver.find_element(*self.locator.first_modal_title)
if body.is_enabled() is True:
time.sleep(3)
return body.text
else:
            print("Modal body is not displayed!")
def launch_second_modal(self):
button = self.driver.find_element(*self.locator.launch_multi_modal_button2)
if button.is_enabled() is True:
time.sleep(3)
button.click()
else:
            print("Button is not displayed!")
def get_second_modal_title(self):
time.sleep(3)
body = self.driver.find_element(*self.locator.second_modal_title)
if body.is_enabled() is True:
time.sleep(3)
return body.text
else:
            print("Modal body is not displayed!")
def click_save_modal2_button(self):
time.sleep(3)
button = self.driver.find_element(*self.locator.save_changes_modal2_button)
if button.is_enabled() is True:
time.sleep(3)
button.click()
else:
            print("Button is not displayed!")
def click_save_modal1_button(self):
time.sleep(3)
button = self.driver.find_element(*self.locator.save_changes_modal1_button)
if button.is_enabled() is True:
time.sleep(3)
button.click()
else:
print("Button is not displayed!")
def click_close_modal2_button(self):
time.sleep(3)
button = self.driver.find_element(*self.locator.close_modal2_button)
if button.is_enabled() is True:
time.sleep(3)
button.click()
else:
            print("Button is not displayed!")
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Flatten3d(nn.Module):
def forward(self, x):
N, C, D, H, W = x.size()
return x.view(N, -1)
class OctNet(nn.Module):
def __init__(self):
super(OctNet, self).__init__()
def forward(self, cubes):
return self.cnn(cubes).squeeze()
def train_step(self, cubes, targets):
logits = self(cubes)
loss = self.loss_fn(logits, targets)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
return loss
class Oct200Net(OctNet):
def __init__(self):
super(Oct200Net, self).__init__()
self.cnn = nn.Sequential(
nn.Conv3d(1, 16, kernel_size=8),
nn.BatchNorm3d(num_features=16),
nn.LeakyReLU(),
nn.MaxPool3d(kernel_size=4),
nn.Conv3d(16, 16, kernel_size=4),
nn.BatchNorm3d(num_features=16),
nn.LeakyReLU(),
nn.MaxPool3d(kernel_size=4),
Flatten3d(),
nn.Linear(224, 1)
)
self.optimizer = torch.optim.Adam(self.parameters())
self.loss_fn = nn.BCEWithLogitsLoss()
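# Illustrative training-loop sketch (the OCT volume dimensions are assumed to be
# whatever the data pipeline provides such that the flattened features match the
# 224-unit Linear layer; `loader` is hypothetical):
#   net = Oct200Net()
#   for cubes, targets in loader:            # cubes: (N, 1, D, H, W), targets: (N,)
#       loss = net.train_step(cubes, targets.float())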
|
num_int = int(input("Enter a number "))
num_int += 2
num_int *= 3
num_int -= 6
num_int /= 3
print("Number is: ", num_int)
|
def can_permute_palindrome(s):
"""
:type s: str
:rtype: bool
"""
letters = {}
for letter in s:
if letters.has_key(letter):
letters[letter] += 1
else:
letters.setdefault(letter, 1)
odd_allowed = True
for key in letters:
if letters[key] & 1 == 1:
if len(s) & 1 == 0:
return False
elif odd_allowed:
odd_allowed = False
else:
return False
return True
print can_permute_palindrome("carerac")
|
import numpy as np
from collections import defaultdict
def string_to_index_list(s, char_to_index, end_token):
"""Converts a sentence into a list of indexes (for each character).
"""
    return [char_to_index[char] for char in s]  # note: end_token is not appended here
def getIndexList(data,char_to_index):
a = data.split("<end>\n<start>")
for i in range(len(a)):
if i == 0:
a[i] = a[i] + '<end>'
continue
if i == (len(a)-1):
a[i] = '<start>' + a[i]
continue
a[i] = '<start>' + a[i] +'<end>'
result = []
for a_i in a:
result.append([char_to_index[c] for c in a_i])
return result
def load_data(filename, idx_dict = None):
data = open(filename).read()
vocab_size = 0
if idx_dict == None:
#mapping character to index
char_to_index = {ch: i for (i, ch) in enumerate(sorted(list(set(data))))}
print("Number of unique characters in our whole tunes database = {}".format(len(char_to_index)))
index_to_char = {i: ch for (ch, i) in char_to_index.items()}
#all_characters = np.asarray([char_to_index[c] for c in data], dtype = np.int32)
#print("Total number of characters = "+str(all_characters.shape[0]))
vocab_size = len(char_to_index) + 1
end_token = vocab_size
char_to_index['EOS'] = end_token
        index_to_char[end_token] = 'EOS'
idx_dict = { 'char_to_index': char_to_index,
'index_to_char': index_to_char,
'end_token': end_token}
index_list = getIndexList(data,idx_dict['char_to_index'])
#return all_characters , vocab_size, idx_dict
return index_list, vocab_size,idx_dict
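# Hedged usage sketch (assumes a text corpus of tunes delimited by <start>/<end>
# markers; 'tunes.txt' is an illustrative filename):
#   index_list, vocab_size, idx_dict = load_data('tunes.txt')
#   first_tune = ''.join(idx_dict['index_to_char'][i] for i in index_list[0])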
|
#Li Xin
#Student number: 014696390
#xin.li@helsinki.fi
import socket
import traceback
def listy(host, port):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((host, int(port)))
s.listen(5)
loop = True
    except Exception:
        traceback.print_exc()
        loop = False
#create cmsg file or clear the previous content of it
f = open('cmsg', 'w')
f.close()
while loop:
try:
conn, addr = s.accept()
msg = conn.recv(1024).decode("utf8")
f = open('cmsg', 'a')
f.write(msg + '\n')
f.close()
        except Exception:
            traceback.print_exc()
            loop = False
            continue
        if msg[0] == 'G':
            s.close()
            loop = False
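# Example: listen on localhost:5005, appending each received message to the
# 'cmsg' file, until a message starting with 'G' (e.g. "GOODBYE") arrives.
# The port number here is illustrative.
#   listy('127.0.0.1', 5005)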
|
from django.db import models
from datetime import datetime
# Create your models here.
class Post(models.Model):
user_id = models.IntegerField()
owner = models.CharField(max_length=20, default="???")
title = models.CharField(max_length=200)
pub_date = models.DateTimeField('date_published')
body = models.TextField()
views = models.IntegerField(default=0)
def __str__(self):
return self.title
def increaseViews(self):
self.views +=1
self.save()
class Comment(models.Model):
post = models.ForeignKey('freeboard.Post', on_delete=models.CASCADE, related_name='comments')
author = models.CharField(max_length=200)
text = models.TextField()
def __str__(self):
return self.text
|
"""Models related to domain aliases management."""
from reversion import revisions as reversion
from django.contrib.contenttypes.fields import GenericRelation
from django.db import models
from django.utils.encoding import smart_str
from django.utils.translation import gettext as _, gettext_lazy
from modoboa.core import models as core_models, signals as core_signals
from modoboa.lib.exceptions import BadRequest, Conflict
from .base import AdminObject
from .domain import Domain
class DomainAliasManager(models.Manager):
def get_for_admin(self, admin):
"""Return the domain aliases belonging to this admin.
The result is a ``QuerySet`` object, so this function can be used
to fill ``ModelChoiceField`` objects.
"""
if admin.is_superuser:
return self.get_queryset()
return self.get_queryset().filter(owners__user=admin)
class DomainAlias(AdminObject):
"""Domain aliases."""
name = models.CharField(gettext_lazy("name"), max_length=100, unique=True,
help_text=gettext_lazy("The alias name"))
target = models.ForeignKey(
Domain, verbose_name=gettext_lazy("target"),
help_text=gettext_lazy("The domain this alias points to"),
on_delete=models.CASCADE
)
enabled = models.BooleanField(
gettext_lazy("enabled"),
help_text=gettext_lazy("Check to activate this alias"),
default=True
)
owners = GenericRelation(core_models.ObjectAccess)
objects = DomainAliasManager()
class Meta:
app_label = "admin"
def __str__(self):
return smart_str(self.name)
def from_csv(self, user, row):
"""Create a domain alias from a CSV row
Expected format: ["domainalias", domain alias name, targeted domain,
enabled]
:param user: a ``User`` object
:param row: a list containing the alias definition
"""
if len(row) < 4:
raise BadRequest(_("Invalid line"))
self.name = row[1].strip().lower()
for model in [DomainAlias, Domain]:
if model.objects.filter(name=self.name).exists():
raise Conflict
domname = row[2].strip()
try:
self.target = Domain.objects.get(name=domname)
except Domain.DoesNotExist:
raise BadRequest(_("Unknown domain %s") % domname)
core_signals.can_create_object.send(
sender="import", context=self.target, object_type="domain_aliases")
self.enabled = row[3].strip().lower() in ["true", "1", "yes", "y"]
self.save(creator=user)
def to_csv_row(self):
"""Export to row that can be included in a CSV file."""
return ["domainalias", self.name,
self.target.name, self.enabled]
def to_csv(self, csvwriter):
"""Export a domain alias using CSV format
:param csvwriter: a ``csv.writer`` object
"""
csvwriter.writerow(self.to_csv_row())
reversion.register(DomainAlias)
|
# Problem 1
def solution(a, b, n):
answer = 0
while n >= a:
answer += n // a * b
n = n // a * b + n % a
return answer
# Problem 2
def solution(s):
answer = []
queue = []
for i in range(len(s)):
if s[i] not in queue:
answer.append(-1)
queue += s[i]
else:
for j in range(len(queue) - 1, -1, -1):
if queue[j] == s[i]:
answer.append(i - j)
queue += s[i]
break
return answer
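# Note: both problems define a function named `solution`, so only the second
# definition is live at module level. A hedged check of problem 2: for each
# character, the answer is the distance back to its previous occurrence,
# or -1 if it has not appeared before.
if __name__ == '__main__':
    assert solution('banana') == [-1, -1, -1, 2, 2, 2]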
|
import urllib
import time
import os
import numpy as np
import pandas as pd
import csv
import h5py
import pyodbc
from netCDF4 import Dataset
from ftplib import FTP
from datetime import time, timedelta, date
import datetime
import schedule
global geoid
geoid = 0
def declaring_variables():
# declaring all used date variables
global yesterday
global yyyymmdd
global yyyymmdd_slash
global year
yesterday = date.today() - timedelta(days=3)
# print yesterday
yyyymmdd = datetime.datetime.strptime(str(yesterday), "%Y-%m-%d").strftime("%Y%m%d")
yyyymmdd_slash = datetime.datetime.strptime(str(yesterday), "%Y-%m-%d").strftime("%Y/%m/%d")
year = yesterday.year
# variables for downloading and working dir
global directory
directory = "C:/geospatial/database/temp/"
global down_file
down_file = "early_gridmet_" + yyyymmdd + '.nc'
global df4
df4 = pd.DataFrame()
global crop_list
crop_list = ('soybeans', 'corn', 'wheat', 'cotton') # type: Tuple[str, str, str, str]
global var_list
var_list = []
def downloading():
# downloading the file
urllib.urlretrieve('https://www.northwestknowledge.net/metdata/data/early/' + str(year) + '/' + down_file,
directory + down_file[14:])
def processing():
    # df4 is shared with cleaning(), so declare it global up front
    global df4
    # creating lists and dicts to loop in variables
df_list = list()
# creating list for empy dataframes
for n in crop_list:
df_list.append("df_concat_" + str(n))
for i in range(0, 4):
exec '%s=%s' % (str(df_list[i]), 'pd.DataFrame()')
crop_dict = {}
# creating list of csv values
for n in crop_list:
crop_dict.update({"df_" + n + "_ref": "C:/geospatial/gis/aoi/" + n + "_Georref.csv"})
for k in crop_dict.keys():
globals()[k] = pd.read_csv("C:/geospatial/gis/aoi/" + k.split("_")[1] + "_Georref.csv",
sep=";")
# creating empty dfs for vars
df_vars = pd.DataFrame()
# loading the NC file
dataset = Dataset(directory + down_file[14:])
# filling a list with variables available
for i in dataset.variables:
var_list.append(str(i))
del var_list[0:4]
i = 0
# creating an intial DF to serve as reference
a = dataset.variables['precipitation_amount'][:]
m, n = a.shape
r, c = np.mgrid[:m, :n]
out = np.column_stack((r.ravel()[:], c.ravel()[:], a.ravel()[:]))
df_latlon = pd.DataFrame(out)
df_latlon.columns = ['lat', 'long', 'precipitation_amount']
df_latlon.drop(['precipitation_amount'], axis=1, inplace=True)
# creating a dataframe with all variables
for variable in var_list:
# extracting the values from the NC file
var = (variable)
b = str(var)
a = dataset.variables[b][:]
m, n = a.shape
R, C = np.mgrid[:m, :n]
out = np.column_stack((R.ravel()[:], C.ravel()[:], a.ravel()[:]))
# creating the dataframe
df = pd.DataFrame(out)
df.columns = ['lat', 'long', b]
df_cut = df[b]
df_cut.columns = [b]
df_vars = pd.concat([df_vars, df_cut], axis=1)
# cleaning the DF with all variables on it
df_variables = pd.concat([df_latlon, df_vars], axis=1)
df_variables['index1'] = df_variables.index
# removing useless areas
df2 = df_variables.loc[df_variables['long'] > 300]
df3 = df2.loc[df2['long'] < 1050]
# reshaping the x,y values
df3['lat'] = df3['lat'] * 0.0416666666
df3['long'] = df3['long'] * 0.0416666666
df3['lat'] = df3['lat'] * (-1)
df3['lat'] = df3['lat'] + 49.40000000000000
df3['long'] = df3['long'] - 124.7666666333333
# removing useless latitudes
df4 = df3.loc[df3['lat'] > 29.5]
# inserting a new column with date
df4['date'] = yyyymmdd_slash
def cleaning():
crop_dict = {}
df_concat = pd.DataFrame()
for n in crop_list:
crop_dict.update({"df_" + n + "_ref": "C:/geospatial/gis/aoi/" + n + "_Georref.csv"})
for k in crop_dict.keys():
globals()[k] = pd.read_csv("C:/geospatial/gis/aoi/" + k.split("_")[1] + "_Georref.csv",
sep=";")
column_list = ['GeoId', 'lat_caller', 'long_caller', 'lat_other', 'long_other']
for dataframe in crop_dict.values():
crop = dataframe.split("/")[4].split("_")[0]
df = pd.read_csv(dataframe, sep=";")
df_joined = df.join(df4.set_index('index1'), on='GeoId', lsuffix='_caller', rsuffix='_other')
df_avg = df_joined.groupby("County").mean()
df_avg['date'] = yyyymmdd_slash
# removing useless columns
for column in column_list:
df_avg.drop([column], axis=1, inplace=True)
# final dataframe organized
df_concat = pd.concat([df_concat, df_avg])
df_concat['test'] = df_concat.index
df_concat[['State', 'County']] = df_concat['test'].str.split('_', expand=True)
df_concat['State'] = df_concat['State'].replace('01', 'Alabama') \
.replace('04', 'Arizona') \
.replace('05', 'Arkansas') \
.replace('06', 'California') \
.replace('08', 'Colorado') \
.replace('09', 'Connecticut') \
.replace('12', 'Florida') \
.replace('13', 'Georgia') \
.replace('17', 'Illinois') \
.replace('18', 'Indiana') \
.replace('19', 'Iowa') \
.replace('20', 'Kansas') \
.replace('21', 'Kentucky') \
.replace('22', 'Louisiana') \
.replace('23', 'Maine') \
.replace('24', 'Maryland') \
.replace('25', 'Massachusetts') \
.replace('26', 'Michigan') \
.replace('27', 'Minnesota') \
.replace('28', 'Mississippi') \
.replace('29', 'Missouri') \
.replace('30', 'Montana') \
.replace('31', 'Nebraska') \
.replace('32', 'Nevada') \
.replace('33', 'New Hampshire') \
.replace('34', 'New Jersey') \
.replace('35', 'New Mexico') \
.replace('36', 'New York') \
.replace('37', 'North Carolina') \
.replace('38', 'North Dakota') \
.replace('39', 'Ohio') \
.replace('40', 'Oklahoma') \
.replace('41', 'Oregon') \
.replace('42', 'Pennsylvania') \
.replace('44', 'Rhode Island') \
.replace('45', 'South Carolina') \
.replace('46', 'South Dakota') \
.replace('47', 'Tennessee') \
.replace('48', 'Texas') \
.replace('49', 'Utah') \
.replace('50', 'Vermont') \
.replace('51', 'Virginia') \
.replace('53', 'Washington') \
.replace('54', 'West Virginia') \
.replace('55', 'Wisconsin') \
.replace('56', 'Wyoming') \
.replace('16', 'Idaho')
df_concat.drop('test', axis=1, inplace=True)
df_concat.to_csv(directory + "csv/" + yyyymmdd + "_" + "weather" + "_" + crop + ".csv", index=False)
# cleaning_dfs
df_concat = pd.DataFrame()
df_avg = pd.DataFrame()
df_joined = pd.DataFrame()
def publishing():
global geoid
concat_dict = {"1": "df_concat_soybeans", "2": 'df_concat_corn', "3": 'df_concat_wheat', "4": 'df_concat_cotton'}
conn = pyodbc.connect(
'DRIVER={ODBC Driver 17 for SQL Server};SERVER=localhost;DATABASE=Geospatial;Trusted_Connection=Yes')
cursor = conn.cursor()
for dataframe in concat_dict.values():
try:
# loop trhu the dataframes
crop = dataframe.split("_")[2]
dataframe = pd.read_csv(directory + "csv/" + yyyymmdd + "_" + "weather" + "_" + crop + ".csv")
shape = dataframe.shape[0]
index = 0
# looping in each variable
while index < shape:
for n in var_list[:17]:
try:
date = dataframe.iloc[index]['date']
state = dataframe.iloc[index]['State']
county = dataframe.iloc[index]['County']
value = dataframe.iloc[index][n]
country = "US"
geoid = str(state) + "_" + county
index += 1
# print geoid, country, state, county, value, n, crop, date
cursor.execute(
"INSERT INTO dbo.allvariables(geoid, country, state, county, value, variable, crop, date) VALUES (?,?,?,?,?,?,?,?)",
geoid, country, state, county, value, n, crop, date)
conn.commit()
except:
# print "Values *" + str(geoid), str(country), str(state), str(county), str(value), str(n), str(crop), str(date) + " could not be published!"
                        index += 1  # keep advancing so a failing row cannot stall the loop
continue
index = 0
continue
except:
# print "File not worked for crop " + dataframe + "!"
continue
def main():
print "Starting the update of Weather data at " + str(datetime.datetime.now())
declaring_variables()
downloading()
processing()
cleaning()
publishing()
print "Updated latest Weather data at " + str(datetime.datetime.now())
# schedule.every().day.at("21:10").do(main)
main()
# while 1:
# schedule.run_pending()
# time.sleep(1)
|
from flip_a_coin import *
def two_sided_p_value(x: float, mu: float = 0, sigma: float = 1) -> float:
if x >= mu:
return 2 * normal_probability_above(x, mu, sigma)
else:
return 2 * normal_probability_below(x, mu, sigma)
two_sided_p_value(529.5, mu_0, sigma_0)
|
import matplotlib.pyplot as plt
import numpy as np
from . EquationException import EquationException
from . PrescribedParameter import PrescribedParameter
from . PrescribedInitialParameter import PrescribedInitialParameter
from . UnknownQuantity import UnknownQuantity
from .. TransportSettings import TransportSettings
TYPE_PRESCRIBED = 1
TYPE_SELFCONSISTENT = 2
RECOMBINATION_RADIATION_INCLUDED = True
RECOMBINATION_RADIATION_NEGLECTED = False
class ColdElectronTemperature(PrescribedParameter,PrescribedInitialParameter,UnknownQuantity):
def __init__(self, settings, ttype=TYPE_PRESCRIBED, temperature=None, radius=0, times=0, recombination=RECOMBINATION_RADIATION_NEGLECTED):
"""
Constructor.
"""
super().__init__(settings=settings)
self.setType(ttype=ttype)
self.temperature = None
self.radius = None
self.times = None
self.transport = TransportSettings(kinetic=False)
self.recombination = recombination
if (ttype == TYPE_PRESCRIBED) and (temperature is not None):
self.setPrescribedData(temperature=temperature, radius=radius, times=times)
elif ttype == TYPE_SELFCONSISTENT:
self.setInitialProfile(temperature=temperature, radius=radius)
###################
# SETTERS
###################
def setInitialProfile(self, temperature, radius=0):
"""
Sets the initial temperature profile T=T(r) for when the temperature is
evolved self-consistently.
:param temperature: Scalar or vector giving the initial temperature profile.
:param radius: If ``temperature`` is a vector, contains the corresponding radial grid on which ``temperature`` is defined.
"""
_data, _rad = self._setInitialData(data=temperature, radius=radius)
self.temperature = _data
self.radius = _rad
self.times = None
self.verifySettingsPrescribedInitialData()
def setPrescribedData(self, temperature, radius=0, times=0):
"""
Prescribes a temperature evolution in time and space.
:param temperature: Scalar, vector or matrix giving the temperature throughout the simulation.
:param radius: If ``temperature`` is a function of radius, contains the radial grid on which it is defined.
:param times: If ``temperature`` is a function of time, contains the time grid on which it is defined.
"""
_t, _rad, _tim = self._setPrescribedData(temperature, radius, times)
self.temperature = _t
self.radius = _rad
self.times = _tim
self.verifySettingsPrescribedData()
def setType(self, ttype):
"""
Specifies whether to evolve the electron temperature according to a
prescribed function, or self-consistently.
:param ttype: Type of evolution. Can take one of the following values:
- ``TYPE_PRESCRIBED``: Evolve according to prescribed function.
- ``TYPE_SELFCONSISTENT``: Evolve self-consistently.
"""
if ttype == TYPE_PRESCRIBED:
self.type = ttype
elif ttype == TYPE_SELFCONSISTENT:
self.type = ttype
# Set T=0 if 'setInitialProfile' has not been previously called
# (if 'setInitialProfile()' has been called, 'self.radius != None'
# and 'self.times == None')
if (self.radius) is None or (self.times is not None):
self.setInitialProfile(temperature=-1)
else:
raise EquationException("T_cold: Unrecognized cold electron temperature type: {}".format(self.type))
def setRecombinationRadiation(self, recombination=RECOMBINATION_RADIATION_NEGLECTED):
"""
Specify whether or not to include recombination radiation when evolving
the temperature self-consistently.
"""
self.recombination = recombination
def fromdict(self, data):
self.type = data['type']
if self.type == TYPE_PRESCRIBED:
self.temperature = data['data']['x']
self.radius = data['data']['r']
self.times = data['data']['t']
elif self.type == TYPE_SELFCONSISTENT:
self.temperature = data['init']['x']
self.radius = data['init']['r']
if 'transport' in data:
self.transport.fromdict(data['transport'])
else:
raise EquationException("T_cold: Unrecognized cold electron temperature type: {}".format(self.type))
if 'recombination' in data:
self.recombination = data['recombination']
self.verifySettings()
def todict(self):
"""
Returns a Python dictionary containing all settings of
this ColdElectrons object.
"""
data = { 'type': self.type }
data['recombination'] = self.recombination
if self.type == TYPE_PRESCRIBED:
data['data'] = {
'x': self.temperature,
'r': self.radius,
't': self.times
}
elif self.type == TYPE_SELFCONSISTENT:
data['init'] = {
'x': self.temperature,
'r': self.radius
}
data['transport'] = self.transport.todict()
else:
raise EquationException("T_cold: Unrecognized cold electron temperature type: {}".format(self.type))
return data
def verifySettings(self):
"""
Verify that the settings of this unknown are correctly set.
"""
if self.type == TYPE_PRESCRIBED:
if type(self.temperature) != np.ndarray:
raise EquationException("T_cold: Temperature prescribed, but no temperature data provided.")
elif type(self.times) != np.ndarray:
raise EquationException("T_cold: Temperature prescribed, but no time data provided, or provided in an invalid format.")
elif type(self.radius) != np.ndarray:
raise EquationException("T_cold: Temperature prescribed, but no radial data provided, or provided in an invalid format.")
self.verifySettingsPrescribedData()
elif self.type == TYPE_SELFCONSISTENT:
if type(self.temperature) != np.ndarray:
raise EquationException("T_cold: Temperature prescribed, but no temperature data provided.")
elif type(self.radius) != np.ndarray:
raise EquationException("T_cold: Temperature prescribed, but no radial data provided, or provided in an invalid format.")
self.verifySettingsPrescribedInitialData()
self.transport.verifySettings()
else:
raise EquationException("T_cold: Unrecognized equation type specified: {}.".format(self.type))
def verifySettingsPrescribedData(self):
self._verifySettingsPrescribedData('T_cold', self.temperature, self.radius, self.times)
def verifySettingsPrescribedInitialData(self):
self._verifySettingsPrescribedInitialData('T_cold', data=self.temperature, radius=self.radius)
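# Illustrative usage sketch (assumes a DREAM settings object exposing this class
# as `ds.eqsys.T_cold`; the names and values below are examples only):
#   r  = np.linspace(0, 0.5, 10)
#   T0 = 100 * np.ones(r.shape)
#   ds.eqsys.T_cold.setType(TYPE_SELFCONSISTENT)
#   ds.eqsys.T_cold.setInitialProfile(temperature=T0, radius=r)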
|
"""
Create dense matrix from 10x MTX output
Michael Heskett
"""
import os
import csv
import scipy
import pandas as pd
import scipy.io
from sys import argv
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Convert 10x files to a dense table")
parser.add_argument("--data",
type=str,
metavar="[10x out dir]",
required=True,
help="directory containing genes.tsv barcodes.tsv and matrix.tsv")
parser.add_argument("--name",
type=str,
metavar="[sample_name]",
required=True,
help="name of the sample")
parser.add_argument('--output',
type=str,
metavar="[output dir]",
required=True,
help="where to write output")
args = parser.parse_args()
data = {}
data["name"] = args.name
for filename in os.listdir(args.data):
if os.path.basename(filename) == "barcodes.tsv":
data["barcodes"] = [row[0] for row in csv.reader(open(args.data + filename), delimiter="\t")]
print("Loaded barcodes")
elif os.path.basename(filename) == "genes.tsv":
data["genes"] = [row[1] for row in csv.reader(open(args.data + filename), delimiter="\t")]
#data["ensemble_id"] = [row[0] for row in csv.reader(open(args.data + filename), delimiter="\t")]
print("Loaded genes")
elif os.path.basename(filename) == "matrix.mtx":
data["matrix"] = scipy.io.mmread(args.data + filename).todense()
print("Loaded matrix")
#if len(data) != 5:
#print("missing argument")
#quit()
# Write dense matrix to CSV file
with open(args.output + args.name + ".txt", 'w') as csvfile:
writer = csv.writer(csvfile, delimiter='\t', quotechar='"')
writer.writerow([''] + data["barcodes"])
for i in range(data["matrix"].shape[0]):
writer.writerow([data["genes"][i]] + data["matrix"][i,:].tolist()[0])
csvfile.close()
print("Wrote dense matrix to CSV file")
# Read the CSV and remove duplicate rows, keeping the higher expressed row
print("Loading dense matrix")
df = pd.read_table(args.output + args.name + ".txt", header=0, sep='\t')
names = df.columns.values[0]
print("Loaded dense matrix")
df = df.join(pd.Series(df.sum(axis="columns"), name="sums"))\
.sort_values(by=[names,"sums"], ascending=[True,False])\
.drop_duplicates(subset=names, keep="first")\
.drop(labels="sums", axis="columns")\
.set_index(names)\
.astype("int")\
.transpose()\
.to_csv(args.output + args.name + "_dedup.txt", sep="\t", header=True, index=True)
print("Dropped duplicate genes, transposed, wrote to tab separated table")
quit()
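# Example invocation (paths and names are illustrative; note that --data and
# --output are concatenated directly with filenames, so both should end with '/'):
#   python mtx_to_dense.py --data filtered_matrices/hg19/ --name sample1 --output out/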
|
import numpy as np
#use GLoVe
#imput string arr size [N, sentence_length_not_same]
#imput numpy arr size [N, max_size, glove_embedded_size]
def embedder(sentence_batch,max_size,glove_embedded_size,glove_dict,UNK_vec,EOS_vec):
N = len(sentence_batch)
embedded_sentences = np.zeros((N,max_size,glove_embedded_size))
for sentence_index in range(N):
word_count = 0
#word translation
for word_index in range(len(sentence_batch[sentence_index])):
word = sentence_batch[sentence_index][word_index]
if word in glove_dict:
embedded_sentences[sentence_index][word_index] = glove_dict[word]
else:
embedded_sentences[sentence_index][word_index] = UNK_vec #not in dict then use UNK
word_count += 1
#error handling
if word_count >= max_size:
print("ERROR: word_count ", word_count, " >= max_size ", max_size)
        #padding with EOS (start right after the last real word)
        embedded_sentences[sentence_index][word_count:] = EOS_vec
return embedded_sentences
#pretrained answer classification
#input: a batch of embedded sentence of same dict
def classifier(embedded_answers, useful_param_to_predict):
N = embedded_answers.shape[0]
class_label = np.zeros((N,)) #trivial version, TO BE IMPLEMENTED (unsupervised learning on result of wr+pca)
return class_label
|
from django.contrib import admin
from .models import UserData, Profile, Post, Comment, Follow, Agree
# Register your models here.
admin.site.register(UserData)
admin.site.register(Post)
admin.site.register(Comment)
admin.site.register(Agree)
admin.site.register(Follow)
admin.site.register(Profile)
|
# Copyright (c) 2015 OpenStack Foundation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import datetime
import sqlalchemy as sa
from sqlalchemy.orm import exc as orm_exc
from sqlalchemy import sql
from neutron.db import common_db_mixin as common_db_api
from neutron.db.quota import models as quota_models
# Wrapper for utcnow - needed for mocking it in unit tests
def utcnow():
return datetime.datetime.utcnow()
class QuotaUsageInfo(collections.namedtuple(
'QuotaUsageInfo', ['resource', 'tenant_id', 'used', 'reserved', 'dirty'])):
@property
def total(self):
"""Total resource usage (reserved and used)."""
return self.reserved + self.used
class ReservationInfo(collections.namedtuple(
'ReservationInfo', ['reservation_id', 'tenant_id',
'expiration', 'deltas'])):
"""Information about a resource reservation."""
def get_quota_usage_by_resource_and_tenant(context, resource, tenant_id,
lock_for_update=False):
"""Return usage info for a given resource and tenant.
:param context: Request context
:param resource: Name of the resource
:param tenant_id: Tenant identifier
:param lock_for_update: if True sets a write-intent lock on the query
:returns: a QuotaUsageInfo instance
"""
query = common_db_api.model_query(context, quota_models.QuotaUsage)
query = query.filter_by(resource=resource, tenant_id=tenant_id)
if lock_for_update:
query = query.with_lockmode('update')
result = query.first()
if not result:
return
return QuotaUsageInfo(result.resource,
result.tenant_id,
result.in_use,
result.reserved,
result.dirty)
def get_quota_usage_by_resource(context, resource):
query = common_db_api.model_query(context, quota_models.QuotaUsage)
query = query.filter_by(resource=resource)
return [QuotaUsageInfo(item.resource,
item.tenant_id,
item.in_use,
item.reserved,
item.dirty) for item in query]
def get_quota_usage_by_tenant_id(context, tenant_id):
query = common_db_api.model_query(context, quota_models.QuotaUsage)
query = query.filter_by(tenant_id=tenant_id)
return [QuotaUsageInfo(item.resource,
item.tenant_id,
item.in_use,
item.reserved,
item.dirty) for item in query]
def set_quota_usage(context, resource, tenant_id,
in_use=None, reserved=None, delta=False):
"""Set resource quota usage.
:param context: instance of neutron context with db session
:param resource: name of the resource for which usage is being set
:param tenant_id: identifier of the tenant for which quota usage is
being set
:param in_use: integer specifying the new quantity of used resources,
or a delta to apply to current used resource
:param reserved: integer specifying the new quantity of reserved resources,
or a delta to apply to current reserved resources
    :param delta: Specifies whether in_use or reserved are absolute numbers
        or deltas (defaults to False)
"""
query = common_db_api.model_query(context, quota_models.QuotaUsage)
query = query.filter_by(resource=resource).filter_by(tenant_id=tenant_id)
usage_data = query.first()
with context.session.begin(subtransactions=True):
if not usage_data:
# Must create entry
usage_data = quota_models.QuotaUsage(
resource=resource,
tenant_id=tenant_id)
context.session.add(usage_data)
# Perform explicit comparison with None as 0 is a valid value
if in_use is not None:
if delta:
in_use = usage_data.in_use + in_use
usage_data.in_use = in_use
if reserved is not None:
if delta:
reserved = usage_data.reserved + reserved
usage_data.reserved = reserved
# After an explicit update the dirty bit should always be reset
usage_data.dirty = False
return QuotaUsageInfo(usage_data.resource,
usage_data.tenant_id,
usage_data.in_use,
usage_data.reserved,
usage_data.dirty)
def set_quota_usage_dirty(context, resource, tenant_id, dirty=True):
"""Set quota usage dirty bit for a given resource and tenant.
    :param resource: a resource for which quota usage is tracked
:param tenant_id: tenant identifier
:param dirty: the desired value for the dirty bit (defaults to True)
:returns: 1 if the quota usage data were updated, 0 otherwise.
"""
query = common_db_api.model_query(context, quota_models.QuotaUsage)
query = query.filter_by(resource=resource).filter_by(tenant_id=tenant_id)
return query.update({'dirty': dirty})
def set_resources_quota_usage_dirty(context, resources, tenant_id, dirty=True):
"""Set quota usage dirty bit for a given tenant and multiple resources.
:param resources: list of resource for which the dirty bit is going
to be set
:param tenant_id: tenant identifier
:param dirty: the desired value for the dirty bit (defaults to True)
:returns: the number of records for which the bit was actually set.
"""
query = common_db_api.model_query(context, quota_models.QuotaUsage)
query = query.filter_by(tenant_id=tenant_id)
if resources:
query = query.filter(quota_models.QuotaUsage.resource.in_(resources))
# synchronize_session=False needed because of the IN condition
return query.update({'dirty': dirty}, synchronize_session=False)
def set_all_quota_usage_dirty(context, resource, dirty=True):
"""Set the dirty bit on quota usage for all tenants.
:param resource: the resource for which the dirty bit should be set
:returns: the number of tenants for which the dirty bit was
actually updated
"""
query = common_db_api.model_query(context, quota_models.QuotaUsage)
query = query.filter_by(resource=resource)
return query.update({'dirty': dirty})
def create_reservation(context, tenant_id, deltas, expiration=None):
# This method is usually called from within another transaction.
# Consider using begin_nested
with context.session.begin(subtransactions=True):
expiration = expiration or (utcnow() + datetime.timedelta(0, 120))
resv = quota_models.Reservation(tenant_id=tenant_id,
expiration=expiration)
context.session.add(resv)
for (resource, delta) in deltas.items():
context.session.add(
quota_models.ResourceDelta(resource=resource,
amount=delta,
reservation=resv))
# quota_usage for all resources involved in this reservation must
# be marked as dirty
set_resources_quota_usage_dirty(
context, deltas.keys(), tenant_id)
return ReservationInfo(resv['id'],
resv['tenant_id'],
resv['expiration'],
dict((delta.resource, delta.amount)
for delta in resv.resource_deltas))
def get_reservation(context, reservation_id):
query = context.session.query(quota_models.Reservation).filter_by(
id=reservation_id)
resv = query.first()
if not resv:
return
return ReservationInfo(resv['id'],
resv['tenant_id'],
resv['expiration'],
dict((delta.resource, delta.amount)
for delta in resv.resource_deltas))
def remove_reservation(context, reservation_id, set_dirty=False):
delete_query = context.session.query(quota_models.Reservation).filter_by(
id=reservation_id)
# Not handling MultipleResultsFound as the query is filtering by primary
# key
try:
reservation = delete_query.one()
except orm_exc.NoResultFound:
# TODO(salv-orlando): Raise here and then handle the exception?
return
tenant_id = reservation.tenant_id
resources = [delta.resource for delta in reservation.resource_deltas]
num_deleted = delete_query.delete()
if set_dirty:
# quota_usage for all resource involved in this reservation must
# be marked as dirty
set_resources_quota_usage_dirty(context, resources, tenant_id)
return num_deleted
def get_reservations_for_resources(context, tenant_id, resources,
expired=False):
"""Retrieve total amount of reservations for specified resources.
:param context: Neutron context with db session
:param tenant_id: Tenant identifier
:param resources: Resources for which reserved amounts should be fetched
:param expired: False to fetch active reservations, True to fetch expired
reservations (defaults to False)
:returns: a dictionary mapping resources with corresponding deltas
"""
if not resources:
# Do not waste time
return
now = utcnow()
resv_query = context.session.query(
quota_models.ResourceDelta.resource,
quota_models.Reservation.expiration,
sql.func.sum(quota_models.ResourceDelta.amount)).join(
quota_models.Reservation)
if expired:
exp_expr = (quota_models.Reservation.expiration < now)
else:
exp_expr = (quota_models.Reservation.expiration >= now)
resv_query = resv_query.filter(sa.and_(
quota_models.Reservation.tenant_id == tenant_id,
quota_models.ResourceDelta.resource.in_(resources),
exp_expr)).group_by(
quota_models.ResourceDelta.resource,
quota_models.Reservation.expiration)
return dict((resource, total_reserved)
for (resource, exp, total_reserved) in resv_query)
def remove_expired_reservations(context, tenant_id=None):
now = utcnow()
resv_query = context.session.query(quota_models.Reservation)
if tenant_id:
tenant_expr = (quota_models.Reservation.tenant_id == tenant_id)
else:
tenant_expr = sql.true()
resv_query = resv_query.filter(sa.and_(
tenant_expr, quota_models.Reservation.expiration < now))
return resv_query.delete()
|
from django.shortcuts import render
from rest_framework import routers, serializers, viewsets
from django.http import HttpResponse
from eleicao.models import *
from eleicao.serializers import *
# Create your views here.
class EleicaoViewSet(viewsets.ModelViewSet):
queryset = Eleicao.objects.all()
serializer_class = EleicaoSerializer
class CandidatoViewSet(viewsets.ModelViewSet):
queryset = Candidato.objects.all()
serializer_class = CandidatoSerializer
class TokenViewSet(viewsets.ModelViewSet):
queryset = Token.objects.all()
serializer_class = TokenSerializer
class VagaViewSet(viewsets.ModelViewSet):
queryset = Vaga.objects.all()
serializer_class = VagaSerializer
class EleitorViewSet(viewsets.ModelViewSet):
queryset = Eleitor.objects.all()
serializer_class = EleitorSerializer
class VotacaoViewSet(viewsets.ModelViewSet):
queryset = Votacao.objects.all()
serializer_class = VotacaoSerializer
|
# -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from snapshottest import Snapshot
snapshots = Snapshot()
snapshots['AttendanceTestCase::test_nonexisting_user_cannot_subscribe_to_event 1'] = {
'data': {
'attendEvent': None
},
'errors': [
{
'locations': [
{
'column': 13,
'line': 3
}
],
'message': 'AndelaUserProfile matching query does not exist.',
'path': [
'attendEvent'
]
}
]
}
snapshots['AttendanceTestCase::test_user_can_attend_an_event 1'] = {
'data': {
'attendEvent': None
},
'errors': [
{
'locations': [
{
'column': 13,
'line': 3
}
],
'message': "''",
'path': [
'attendEvent'
]
}
]
}
snapshots['AttendanceTestCase::test_user_can_change_event_status 1'] = {
'data': {
'attendEvent': None
},
'errors': [
{
'locations': [
{
'column': 13,
'line': 3
}
],
'message': "''",
'path': [
'attendEvent'
]
}
]
}
snapshots['AttendanceTestCase::test_user_cannot_subscribe_to_nonexisting_event 1'] = {
'data': {
'attendEvent': None
},
'errors': [
{
'locations': [
{
'column': 13,
'line': 3
}
],
'message': 'Event matching query does not exist.',
'path': [
'attendEvent'
]
}
]
}
|
# -*- coding: utf-8 -*-
# @Time : 2019-12-24
# @Author : mizxc
# @Email : xiangxianjiao@163.com
from mongoengine import *
class Plan(EmbeddedDocument):
title = StringField(max_length=1000, required=True)
level = StringField(max_length=100, required=True)
isDone = BooleanField(default=False)
class YearlyPlan(Document):
title = StringField(max_length=1000, required=True)
startTime = DateTimeField(required=True)
endTime = DateTimeField(required=True)
plans = ListField(EmbeddedDocumentField(Plan))
doneCount = IntField(default=0)
summarize = StringField(max_length=20000)
class MonthlyPlan(Document):
title = StringField(max_length=1000, required=True)
startTime = DateTimeField(required=True)
endTime = DateTimeField(required=True)
plans = ListField(EmbeddedDocumentField(Plan))
doneCount = IntField(default=0)
summarize = StringField(max_length=20000)
yearlyPlan = ReferenceField(YearlyPlan)
class WeeklyPlan(Document):
title = StringField(max_length=1000, required=True)
startTime = DateTimeField(required=True)
endTime = DateTimeField(required=True)
plans = ListField(EmbeddedDocumentField(Plan))
doneCount = IntField(default=0)
summarize = StringField(max_length=20000)
monthlyPlan = ReferenceField(MonthlyPlan)
class DailyPlan(Document):
title = StringField(max_length=1000, required=True)
whichDay = StringField(max_length=100, required=True)
plans = ListField(EmbeddedDocumentField(Plan))
doneCount = IntField(default=0)
summarize = StringField(max_length=20000)
weeklyPlan = ReferenceField(WeeklyPlan)
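# Usage sketch (assumes a reachable MongoDB instance; the database name and
# field values below are illustrative, not part of this module):
#   from datetime import datetime
#   from mongoengine import connect
#   connect('plans_db')
#   year = YearlyPlan(title='2020 goals',
#                     startTime=datetime(2020, 1, 1),
#                     endTime=datetime(2020, 12, 31),
#                     plans=[Plan(title='read 12 books', level='normal')])
#   year.save()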
|
def traverse_list(self):
if self.start_node is None:
print("List has no element")
return
else:
n = self.start_node
while n is not None:
print(n.item, " ")
n = n.nref
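# Context sketch: traverse_list assumes a singly linked list shaped roughly like
# the classes below (names inferred from the attributes used above, not given in
# the original fragment).
#   class Node:
#       def __init__(self, item):
#           self.item = item
#           self.nref = None  # reference to the next node
#   class LinkedList:
#       def __init__(self):
#           self.start_node = None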
|
# Generated by Django 3.0.6 on 2020-06-07 16:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('musicRun', '0008_auto_20200607_1729'),
]
operations = [
migrations.AlterField(
model_name='song',
name='duration',
field=models.IntegerField(),
),
migrations.AlterField(
model_name='spotifyuser',
name='songs',
field=models.ManyToManyField(blank=True, to='musicRun.Song'),
),
]
|
import sys
from io import StringIO
class CaptureOutErr(object):
"""Context manager to capture the content of stdout and stderr.
Example:
>>> with CaptureOutErr() as cm:
>>> ...run_code()
>>> print(cm)
"""
def __enter__(self):
self.stdout = []
self.stderr = []
self._out = StringIO()
self._err = StringIO()
sys.stdout = self._out
sys.stderr = self._err
return self
def __exit__(self, *args):
self.stdout.extend(self._out.getvalue().splitlines())
self.stderr.extend(self._err.getvalue().splitlines())
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
|
from rest_framework import routers
from sneakers_colors_sizes_rel.api import SneakersColorsSizesRelViewSet, SneakersColorsSizesRelSearch
router = routers.DefaultRouter()
router.register('api/v0/sneak_col_siz_rel', SneakersColorsSizesRelViewSet, 'sneak_col_siz_rel')
urlpatterns = router.urls
|
import pymongo
import time
import datetime
from pymongo import MongoClient
from datetime import datetime
from bson.json_util import dumps
class UploadDB:
client = ""
db = ""
def __init__(self):
self.client = MongoClient()
self.db = self.client.AppInsight_DB
        # Database attributes are not callable in pymongo; issue the
        # repairDatabase command explicitly instead.
        self.db.command('repairDatabase')
    def insertData(self):
        self.db.command('repairDatabase')
ts = datetime.now().strftime('%Y%m%d%H%M%S')
print(ts)
print(time.time())
media = "10.182.2.117"
record = {}
isodate1 = datetime.fromtimestamp(time.time(),None)
isodate2 = datetime.fromtimestamp(1466679309,None)
print("hi")
def getPerformanceData(self,mediaIP):
collection = self.db.AppInsight
response = []
data = collection.find({"media":mediaIP}).sort([("_id",-1)]).limit(1)
print('final ===%s'%data)
return dumps(data)
connect = UploadDB()
connect.insertData()
#connect.db.AppInsight_DB.drop()
|
# api.py
from flask import Flask, request, render_template
from preprocessing import preprocessing
from werkzeug.utils import secure_filename
from openpyxl import load_workbook
import pickle
import mysql.connector
import os
import json
import simplejson
db = mysql.connector.connect(
host="localhost",
user="root",
passwd="",
database = "skripsi"
)
app = Flask(__name__)
vectorizer = pickle.load(open("vectorizer.b", "rb"))
ex = pickle.load(open("ex.b", "rb"))
def parseSentiment(sentiment):
if sentiment == 1.0:
return "1"
else:
return "0"
# return "0"
# localhost:5000/
# @app.route("/")
# def homepage():
# html = "<h1>Hello</h1>"
# html += "Prediksi sentimen"
# return html
# localhost:5000/help
@app.route('/')
def index():
return render_template('index.html')
@app.route("/help")
def help():
html = "<h1>Help</h1>"
return html
@app.route("/predict")
def predict():
input_user = request.args.get("input", "")
preproces = preprocessing(str(input_user))
X = vectorizer.transform([preproces])
sentiment = ex.predict(X)
cursor = db.cursor()
# sql = "INSERT INTO tanggapan (pesan, sentimen) VALUES (%s, %s)"
# val = (input_user, parseSentiment(sentiment[0]))
# cursor.execute(sql, val)
db.commit()
html = "<h1>Predict</h1>"
html += "Input: " + input_user
html += "<br>"
html += "Hasil preprocessing: " + preproces
html += "<br>"
html += "Hasil prediksi: " + parseSentiment(sentiment[0])
print("{} data ditambahkan".format(cursor.rowcount))
return html
# example of how to handle a POST request
# parameter input name 'test'
@app.route("/test", methods=["POST"])
def test():
return request.form["test"]
ALLOWED_EXTENSION = set(['xlsx'])
app.config['UPLOAD_FOLDER'] = 'uploads'
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSION
@app.route('/input', methods=['GET', 'POST'])
def upload():
if request.method == 'POST':
file = request.files['file']
if 'file' not in request.files:
# return render_template('input.html')
return 'file tidak dapat di simpan' + ' <a href="/input">kembali</a>'
if file.filename == '':
return render_template('input.html')
if file and allowed_file(file.filename):
# filename = secure_filename(file.filename)
filename = 'data'+'.xlsx'
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
# return 'file ' + filename +' di simpan' + ' <a href="/uploads">kembali</a>'
return render_template('input.html')
@app.route('/proses')
def proses():
dat_file = "uploads/data.xlsx"
wb = load_workbook(dat_file)
sheet = wb.active
jum_baris = sheet.max_row
# jum_baris = 15
for i in range(2, jum_baris):
waktu = sheet.cell(row=i,column=1)
nim = sheet.cell(row=i,column=3)
pelatihan = sheet.cell(row=i,column=5)
pesan = sheet.cell(row=i,column=9)
        preproces = preprocessing(str(pesan.value))
X = vectorizer.transform([preproces])
sentiment = ex.predict(X)
senti = parseSentiment(sentiment[0])
# print(i, "=", sentiment[0],"=",pesan.value," | ",senti)
# senti = None
# senti = parseSentiment(sentiment[0])
cursor = db.cursor()
sql = "INSERT INTO tanggapan (waktu, nim, pelatihan, pesan, sentimen) VALUES (%s, %s, %s, %s, %s)"
val = (waktu.value, nim.value, pelatihan.value, pesan.value, senti)
# preproces = None
# X = None
# sentiment = None
# senti = None
cursor.execute(sql, val)
db.commit()
# html = "<h1>Predict</h1>"
# html += "Input: " + pesan.value
# html += "<br>"
# html += "Hasil preprocessing: " + preproces
# html += "<br>"
# print("{} data ditambahkan".format(cursor.rowcount))
# return "0"
return render_template('rekap.html')
@app.route('/rekap')
def rekap():
cursor = db.cursor()
# sql = "SELECT waktu, pelatihan, sentimen FROM tanggapan"
# sql = "SELECT waktu, pelatihan, COUNT(sentimen) as sentimen FROM tanggapan WHERE sentimen=1 GROUP BY waktu"
sql = "SELECT pesan_id, waktu, pesan_asli, replace(replace(sentimen, '1', 'positif'), '0', 'negatif') FROM tanggapan GROUP BY waktu, sentimen"
# val = (pesan_id.value, waktu.value, nim.value, pelatihan.value, pesan.value, 0)
cursor.execute(sql)
rv = cursor.fetchall()
return render_template("rekap.html", value=rv)
# @app.route('proses')
# def proses():
# # Open the workbook and define the worksheet
# book = xlrd.open_workbook("files/data.xlsx")
# sheet = book.sheet_by_index(1)
# return html
@app.route('/grafik')
def grafik():
return render_template('grafik.php')
# @app.route('/graph')
# def graph():
# pcen = db.cursor()
# sql_pcen = "SELECT round((SELECT COUNT(*) FROM `tanggapan` WHERE sentimen = '1') / (SELECT COUNT(*) FROM `tanggapan`) * 100) as positif, round((SELECT COUNT(*) FROM `tanggapan` WHERE sentimen = '0') / (SELECT COUNT(*) FROM `tanggapan`) * 100) as negatif from tanggapan LIMIT 1"
# pcen.execute(sql_pcen)
# pce = pcen.fetchall()
# arr=[]
# for product in pce:
# vals = {}
# vals['Positif']=product[0]
# vals['Negatif']=product[1]
# arr.append(vals)
# jsongr = json.dumps(arr)
# return jsongr
if __name__ == "__main__":
app.run(debug=True, port=5000)
|
from gevent.pool import Pool
from gevent.queue import JoinableQueue as Queue
import logging as log
import time
class GeventPool(object):
"""
Caller should ensure that gevent monkey patch is run, if necessary.
"""
def __init__(self, size):
self._running = True
self._queue = Queue()
self._pool = Pool(size)
        for i in range(size):
self._pool.spawn(self._run, i)
def _run(self, greenlet_id):
while True:
func, args, kwargs = self._queue.get()
if not self._running:
return
try:
func(*args, **kwargs)
except:
log.exception('gevent_pool_exception|greenlet_id=%s,func=%s,args=%s,kwargs=%s',
greenlet_id, func.__name__, str(args), str(kwargs))
finally:
self._queue.task_done()
def add_task(self, func, args=(), kwargs=None):
if not self._running:
raise RuntimeError('cannot add task to stopped pool')
if kwargs is None:
kwargs = {}
self._queue.put((func, args, kwargs))
def join(self):
"""
Blocks until all tasks in queue are processed
"""
self._queue.join()
def stop(self):
"""
Terminates all greenlets in pool
"""
self._running = False
for _ in self._pool:
self._queue.put((None, None, None))
self._pool.join()
def join_and_stop(self):
self.join()
self.stop()
es_pool = GeventPool(2)
if __name__ == '__main__':
def func1(a):
print(a)
start = time.time()
for i in range(10):
es_pool.add_task(func1, args=('task 1',))
#es_pool.join()
print("done")
|
import rips
import time
import grpc
import math
import os
from operator import itemgetter
from ecl.eclfile import EclFile
from ecl.grid import EclGrid
from ecl.summary import EclSum
import matplotlib.pyplot as plt
# Define the function to calculate energy changes in well bottomholes
def energywell():
# Connect to ResInsight
resinsight = rips.Instance.find()
case = resinsight.project.cases()[0]
num_tsteps = len(case.time_steps())
name = case.name
grids = case.grids()
for grid in grids:
dimension = grid.dimensions()
Nx = dimension.i
Ny = dimension.j
Nz = dimension.k
class well:
def __init__(self, name, idx, welltype):
self.name = name
self.idx = idx
self.type = welltype
# Read EGRID, RST and INIT files
summary_file = EclSum("%s.UNSMRY" % name)
egrid_file = EclFile("%s.EGRID" % name)
rst_file = EclFile("%s.UNRST" % name)
timestep_width = []
days = []
for tstep in range(num_tsteps):
if tstep==0:
width = rst_file.iget_restart_sim_days(tstep)
else:
width = rst_file.iget_restart_sim_days(tstep) - rst_file.iget_restart_sim_days(tstep-1)
timestep_width.append(width)
days.append(rst_file.iget_restart_sim_days(tstep))
# Read ACTNUM numbers from EGRID file
actnum = egrid_file["ACTNUM"][0]
active_cells = []
for i in range(len(actnum)):
if actnum[i] == 1:
active_cells.append(i)
# Convert summary file timesteps to restart file timesteps
summary_days = summary_file.days
idx_trim = []
for d in days[2:]:
add = summary_days.index(d)
idx_trim.append(add)
energy_balance = []
energy_external = []
energy_internal = []
energy_dissipated = []
for tstep in range(2,num_tsteps):
print("Timestep", tstep, "of", num_tsteps)
# List active wells in the timestep
zwel = rst_file.iget_named_kw("ZWEL",tstep)
nwells = int(len(zwel)/3)
well_list = []
for wel in range(nwells):
welname = rst_file["ZWEL"][tstep][wel*3]
welname = welname.rstrip()
niwelz = int(len(rst_file["IWEL"][tstep]) / nwells)
if rst_file["IWEL"][tstep][wel*niwelz + 6] == 1:
weltype = "PROD"
else:
weltype = "INJE"
ncwmax = int(len(rst_file["ICON"][tstep]) / (25*nwells))
welidx = []
for ncw in range(ncwmax):
numicon = 25*(wel*ncwmax + ncw)
weli = rst_file["ICON"][tstep][numicon+1]
welj = rst_file["ICON"][tstep][numicon+2]
welk = rst_file["ICON"][tstep][numicon+3]
if weli != 0:
welidx.append(int(welk-1)*(Nx*Ny) + int(welj-1)*(Nx) + int(weli-1))
well_list.append(well(welname,welidx,weltype))
# Read results into list
porv = case.active_cell_property('STATIC_NATIVE', 'PORV', 0)
pres = case.active_cell_property('DYNAMIC_NATIVE', 'PRESSURE', tstep)
bo = case.active_cell_property('DYNAMIC_NATIVE', 'BO', tstep)
bw = case.active_cell_property('DYNAMIC_NATIVE', 'BW', tstep)
bg = case.active_cell_property('DYNAMIC_NATIVE', 'BG', tstep)
# Fetch results from the reservoir energy calculation
edis = case.active_cell_property('GENERATED', 'Energy Dissipation', tstep)
eint = case.active_cell_property('GENERATED', 'Internal Energy Change', tstep)
# Calculate energy changes in well bottomholes
e_external = []
e_internal_well = []
ed_well = []
for wel in well_list:
idx = idx_trim[tstep-2]
pcel = case.active_cell_property('DYNAMIC_NATIVE', 'PRESSURE', tstep)
for wel_idx in wel.idx:
wel_idx_act = active_cells.index(wel_idx)
pwel = pcel[wel_idx_act]
bo_wel = bo[wel_idx_act]
bw_wel = bw[wel_idx_act]
bg_wel = bg[wel_idx_act]
bhp = summary_file["WBHP:%s" % wel.name][idx].value
if wel.type == "PROD":
wpr = summary_file["CWPR:%s:%i" % (wel.name,wel_idx+1)][idx].value
opr = summary_file["COPR:%s:%i" % (wel.name,wel_idx+1)][idx].value
gpr = summary_file["CGPR:%s:%i" % (wel.name,wel_idx+1)][idx].value
e_external.append(-bhp * (wpr*bw_wel+opr*bo_wel+gpr*bg_wel) * 1e5/86400)
e_internal_well.append(-pwel * (wpr*bw_wel+opr*bo_wel+gpr*bg_wel) * 1e5/86400)
ed_well.append((wpr*bw_wel+opr*bo_wel+gpr*bg_wel) * (pwel-bhp) * 1e5/86400)
else:
wir = summary_file["CWIR:%s:%i" % (wel.name,wel_idx+1)][idx].value
gir = summary_file["CGIR:%s:%i" % (wel.name,wel_idx+1)][idx].value
e_external.append(bhp * (wir*bw_wel+gir*bg_wel) * 1e5/86400)
e_internal_well.append(pwel * (wir*bw_wel+gir*bg_wel) * 1e5/86400)
ed_well.append((wir*bw_wel+gir*bg_wel) * (bhp-pwel) * 1e5/86400)
edis_res = sum(edis)
edis_well = sum(ed_well)
e_dissipated = edis_res + edis_well
e_internal = sum(eint) + sum(e_internal_well)
# Calculate each component in energy balance
e_balance = (sum(e_external) - (e_dissipated + e_internal))
energy_balance.append(e_balance)
energy_dissipated.append(e_dissipated)
energy_external.append(sum(e_external))
energy_internal.append(e_internal)
days = days[2:]
return(days,energy_balance,energy_external,energy_internal,energy_dissipated)
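# Usage sketch (needs a running ResInsight instance with a case loaded; plots the
# returned energy terms with the matplotlib import above):
#   days, e_bal, e_ext, e_int, e_dis = energywell()
#   plt.plot(days, e_ext, label='external')
#   plt.plot(days, e_int, label='internal')
#   plt.plot(days, e_dis, label='dissipated')
#   plt.legend()
#   plt.xlabel('days')
#   plt.show()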
|
from pkg_resources import resource_filename
import tensorflow as tf
from keras.models import load_model
import numpy as np
import ctd_model
graph = tf.get_default_graph()
inference_model = None
labels = ['airplane','automobile','bird','cat','deer','dog','frog','horse','ship','truck']
def dask_setup(service=None):
model_path = resource_filename(ctd_model.__name__, "model.h5")
global inference_model
inference_model = load_model(model_path)
print("Model loaded")
def predict(img):
#preprocess
img = img[np.newaxis, ...].astype('float32') / 255
with graph.as_default():
prediction = inference_model.predict(img)
idx = np.argmax(prediction)
return labels[int(idx)], float(prediction[0, idx])
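# Usage sketch (assumes the packaged model.h5 is present; the input below is a
# dummy 32x32 RGB image, matching the CIFAR-10 classes listed in `labels`):
#   dask_setup()
#   label, confidence = predict(np.zeros((32, 32, 3), dtype=np.uint8))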
|
from flask import Flask,flash,render_template,request,redirect,url_for
from flask_mail import Mail,Message
from decouple import config
from flask_mysqldb import MySQL
from itsdangerous import URLSafeTimedSerializer, SignatureExpired
import MySQLdb.cursors
import re
import os
from dotenv import load_dotenv
load_dotenv()
from random import randint
app = Flask(__name__)
app.secret_key = b'_5#y2L"F4Q8z\n\xec]/'
mail = Mail(app)
app.config["MAIL_SERVER"]= config('MAIL_SERVER')
app.config["MAIL_PORT"] = 465
app.config["MAIL_USERNAME"] = '********************' //Put your own email id over here
app.config['MAIL_PASSWORD'] = '*****************' //Put your own email password
app.config['MAIL_USE_TLS'] = False
app.config['MAIL_USE_SSL'] = True
mail = Mail(app)
s = URLSafeTimedSerializer('Thisisasecret!')
app.config['MYSQL_HOST'] = config('MYSQL_HOST')
app.config['MYSQL_USER'] = config('MYSQL_USER')
app.config['MYSQL_PASSWORD'] = config('MYSQL_PASSWORD')
app.config['MYSQL_DB'] = config('MYSQL_DB')
app.config['MYSQL_CURSORCLASS'] = config('MYSQL_CURSORCLASS')
mysql = MySQL(app)
otp = randint(0, 99999999)
@app.route('/')
def index():
    return render_template("homepage.html")
@app.route('/verify/<email>',methods =['GET', 'POST'])
def verify(email):
#email = request.form["email"]
token = s.dumps(email, salt='email-confirm')
msg = Message('Hi welcome to justhire.com, Complete your sign up verification using this verification link below-',sender = 'ashwinikumarnit19@gmail.com', recipients = [email])
link = url_for('validate', token=token, _external=True)
msg.body = 'Your link is {}'.format(link)
mail.send(msg)
return '<h1>The email you entered is {}. The token is {}</h1>'.format(email, token)
@app.route('/validate/<token>')
def validate(token):
try:
email = s.loads(token, salt='email-confirm', max_age=3600)
except SignatureExpired:
return '<h1>The token is expired!</h1>'
return redirect(os.getenv('PORT_ONE'), code=302)
if __name__ == '__main__':
app.run(host="localhost", port=5001, debug=True)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__version__ = '1.0.1'
import os
import ujson
import uuid
import aiofiles
from sanic import Blueprint
from sanic import response
from sanic.log import logger
from sanic.request import Request
from sanic_jwt import inject_user, scoped, protected
from web_backend.nvlserver.helper.request_wrapper import populate_response_format
from web_backend.nvlserver.helper.process_request_args import proc_arg_to_int
from .service import (
get_support_list, get_support_list_count, create_support_element,
get_support_element, update_support_element, delete_support_element
)
api_support_blueprint = Blueprint('api_support', url_prefix='/api/support')
@api_support_blueprint.route('/', methods=['GET'])
@inject_user()
# @scoped(['user', 'billing', 'admin'], require_all=True, require_all_actions=True)
async def api_support_get(request: Request, user):
"""
:param request:
:param user:
:return:
"""
status = 500
ret_val = {'success': False, 'message': 'server.query_failed', 'data': None}
size = proc_arg_to_int(request.args.get('size', '1'), 1)
page = proc_arg_to_int(request.args.get('page', '1'), 1)
email = request.args.get('email', None)
offset = (page - 1) * size
if request.method == 'GET':
try:
if user:
if user.get('user_id', None):
support_list = await get_support_list(request, email=email, limit=size, offset=offset)
support_count = await get_support_list_count(request, email=email)
if support_list:
ret_val['success'] = True
ret_val['message'] = 'server.query_success'
res_data_formatted = await populate_response_format(
support_list, support_count, size=size, page=page)
ret_val['data'] = res_data_formatted
status = 200
else:
ret_val['success'] = True
ret_val['message'] = 'server.query_success'
ret_val['data'] = []
status = 200
else:
status = 400
ret_val['message'] = 'server.bad_request'
else:
status = 401
ret_val['message'] = 'server.unauthorized'
except Exception as al_err:
logger.error('Function api_support_get -> GET erred with: {}'.format(al_err))
return response.raw(
ujson.dumps(ret_val).encode(),
headers={'X-Served-By': 'sanic', 'Content-Type': 'application/json'},
status=status
)
@api_support_blueprint.route('/', methods=['POST'])
@inject_user()
# @scoped(['admin'], require_all=True, require_all_actions=True)
async def api_support_post(request: Request, user):
"""
:param request:
-- :param user:
:return:
"""
status = 500
ret_val = {'success': False, 'message': 'server.query_failed', 'data': None}
support_data_dir = request.app.support_store_dir
if request.method == 'POST':
try:
# if user:
if True:
# if user.get('user_id'):
if True:
# if user.get('user_id') and user.get('is_superuser'):
if True:
email = request.json.get('email', None)
user_id = request.json.get('user_id', None)
subject = request.json.get('subject', None)
message = request.json.get('message', False)
active = request.json.get('active', False)
file = request.files.get('file')
if file:
file_data = file.body
supp_file_name = request.json.get('file_name', '')
file_uuid = str(uuid.uuid4())
else:
supp_file_name = ''
file_data = None
file_uuid = None
if True:
if file_data:
async with aiofiles.open(
os.path.join(support_data_dir, file_uuid), mode='wb+') as f:
await f.write(file_data)
support = await create_support_element(
request, email=email, user_id=user_id, subject=subject,
file_uuid=file_uuid, file_name=supp_file_name, message=message, active=active)
if support:
ret_val['data'] = support
ret_val['success'] = True
status = 201
ret_val['message'] = 'server.object_created'
else:
status = 412
ret_val['message'] = 'server.query_condition_failed'
else:
status = 403
ret_val['message'] = 'server.forbidden'
else:
status = 400
ret_val['message'] = 'server.bad_request'
else:
status = 401
ret_val['message'] = 'server.unauthorized'
except Exception as al_err:
logger.error('Function api_support_post -> POST erred with: {}'.format(al_err))
return response.raw(
ujson.dumps(ret_val).encode(),
headers={'X-Served-By': 'sanic', 'Content-Type': 'application/json'},
status=status
)
@api_support_blueprint.route('/<support_id:int>', methods=['GET'])
@inject_user()
# @scoped(['user', 'billing', 'admin'], require_all=True, require_all_actions=True)
async def api_support_element_get(request: Request, user, support_id: int = 0):
"""
:param request:
:param user:
:param support_id:
:return:
"""
status = 500
ret_val = {'success': False, 'message': 'server.query_failed', 'data': None}
if request.method == 'GET':
try:
# if user:
if True:
# if user.get('user_id', None) and support_id:
if True:
support_element = await get_support_element(request, support_id)
if support_element:
ret_val['success'] = True
ret_val['message'] = 'server.query_success'
ret_val['data'] = support_element
status = 200
else:
ret_val['success'] = True
ret_val['message'] = 'server.query_success'
status = 200
else:
status = 400
ret_val['message'] = 'server.bad_request'
else:
status = 401
ret_val['message'] = 'server.unauthorized'
except Exception as al_err:
logger.error('Function api_support_element_get -> GET erred with: {}'.format(al_err))
return response.raw(
ujson.dumps(ret_val).encode(),
headers={'X-Served-By': 'sanic', 'Content-Type': 'application/json'},
status=status
)
@api_support_blueprint.route('/<support_id:int>', methods=['PUT'])
@inject_user()
# @scoped(['admin'], require_all=True, require_all_actions=True)
async def api_support_element_put(request: Request, user, support_id: int = 0):
"""
:param request:
:param user:
:param support_id:
:return:
"""
status = 500
ret_val = {'success': False, 'message': 'server.query_failed', 'data': None}
support_data_dir = request.app.support_store_dir
if request.method == 'PUT':
try:
# if user:
if True:
# if user.get('user_id'):
if True:
# TODO: IMPLEMENT USER ACCESS if user.get('is_superuser'):
if True and support_id:
email = request.json.get('email', None)
user_id = request.json.get('user_id', None)
subject = request.json.get('subject', None)
message = request.json.get('message', False)
active = request.json.get('active', False)
file = request.files.get('file')
if file:
file_data = file.body
supp_file_name = request.json.get('file_name', '')
file_uuid = str(uuid.uuid4())
else:
supp_file_name = ''
file_data = None
file_uuid = None
if True:
if file_data:
async with aiofiles.open(
os.path.join(support_data_dir, file_uuid), mode='wb+') as f:
await f.write(file_data)
support_element = await update_support_element(
request, support_id=support_id, email=email, user_id=user_id, subject=subject,
file_uuid=file_uuid, file_name=supp_file_name, message=message, active=active)
ret_val['success'] = True
ret_val['message'] = 'server.query_success'
ret_val['data'] = support_element
status = 202
ret_val['message'] = 'server.accepted'
else:
status = 412
ret_val['message'] = 'server.query_condition_failed'
else:
status = 400
ret_val['message'] = 'server.bad_request'
else:
status = 401
ret_val['message'] = 'server.unauthorized'
except Exception as al_err:
logger.error('Function api_support_element_put -> PUT erred with: {}'.format(al_err))
return response.raw(
ujson.dumps(ret_val).encode(),
headers={'X-Served-By': 'sanic', 'Content-Type': 'application/json'},
status=status
)
@api_support_blueprint.route('/<support_id:int>', methods=['DELETE'])
@inject_user()
# @scoped(['admin'], require_all=True, require_all_actions=True)
async def api_support_element_delete(request: Request, user, support_id: int = 0):
"""
:param request:
:param user:
:param support_id:
:return:
"""
status = 500
ret_val = {'success': False, 'message': 'server.query_failed', 'data': None}
if request.method == 'DELETE':
try:
# if user:
if True:
# if user.get('user_id'):
if True:
# TODO: IMPLEMENT USER ACCESS if user.get('is_superuser'):
if True and support_id:
support = await delete_support_element(request, support_id)
if support:
ret_val['success'] = True
ret_val['message'] = 'server.query_success'
ret_val['data'] = None
status = 202
ret_val['message'] = 'server.accepted'
else:
status = 412
ret_val['message'] = 'server.query_condition_failed'
else:
status = 400
ret_val['message'] = 'server.bad_request'
else:
status = 401
ret_val['message'] = 'server.unauthorized'
except Exception as al_err:
logger.error('Function api_support_element_delete -> DELETE erred with: {}'.format(al_err))
return response.raw(
ujson.dumps(ret_val).encode(),
headers={'X-Served-By': 'sanic', 'Content-Type': 'application/json'},
status=status
)
|
import binascii
from enum import Enum
import time
import struct
import os, shutil
"""
FPFF file type enum
"""
class FileType(Enum):
ASCII = 1
UTF8 = 2
WORDS = 3
DWORDS = 4
DOUBLES = 5
COORD = 6
REF = 7
PNG = 8
GIF87 = 9
GIF89 = 10
"""
Contains FPFF functions
"""
class FPFF():
@staticmethod
def reverse_bytearray(s):
rev = bytearray()
for i in range(len(s)-1, -1, -1):
rev.append(s[i])
return rev
@staticmethod
def remove_padding(data):
while data[0] == 0:
data.pop(0)
return data
@staticmethod
def add_padding(data, l):
if len(data) > l:
raise OverflowError("Data too large to be padded!")
while len(data) < l:
data.insert(0, 0)
return data
def __init__(self, file=None, author=None):
self.version = 1
self.timestamp = None
        self.author = author
self.sect_num = 0
self.stypes = list()
self.svalues = list()
if file != None:
self.read(file)
"""
Read in FPFF
"""
def read(self, file):
with open(file, "rb") as f:
data = bytearray(f.read())
magic = FPFF.reverse_bytearray(data[0:4])
self.version = int.from_bytes(data[4:8], "little")
self.timestamp = int.from_bytes(data[8:12], "little")
self.author = FPFF.remove_padding(FPFF.reverse_bytearray(data[12:20])).decode('ascii')
self.sect_num = int.from_bytes(data[20:24], "little")
self.stypes = list()
self.svalues = list()
# checks
if magic != b'\xbe\xfe\xda\xde':
raise ValueError("Not a valid FPFF stream.")
if self.version != 1:
raise ValueError("Unsupported version. Only version 1 is supported.")
if self.sect_num <= 0:
raise ValueError("Section length must be greater than 0.")
# read sections
count = 24
for i in range(self.sect_num):
stype = int.from_bytes(data[count:count+4], "little")
slen = int.from_bytes(data[count+4:count+8], "little")
count += 8
svalue = data[count:count+slen]
# ascii
if stype == 1:
self.stypes.append(FileType.ASCII)
self.svalues.append(svalue.decode('ascii'))
# utf-8
elif stype == 2:
self.stypes.append(FileType.UTF8)
self.svalues.append(svalue.decode('utf8'))
# words
elif stype == 3:
self.stypes.append(FileType.WORDS)
self.svalues.append([bytes(svalue[j:j+4]) for j in range(0, slen, 4)])
# dwords
elif stype == 4:
self.stypes.append(FileType.DWORDS)
self.svalues.append([bytes(svalue[j:j+8]) for j in range(0, slen, 8)])
# doubles
elif stype == 5:
self.stypes.append(FileType.DOUBLES)
self.svalues.append([int.from_bytes(svalue[j:j+8], "big") for j in range(0, slen, 8)])
# coord
elif stype == 6:
self.stypes.append(FileType.COORD)
self.svalues.append( (int.from_bytes(svalue[0:8],"big"), int.from_bytes(svalue[8:16],"big")) )
# ref
elif stype == 7:
self.stypes.append(FileType.REF)
self.svalues.append(int.from_bytes(svalue[0:4], "big"))
# png
elif stype == 8:
self.stypes.append(FileType.PNG)
sig = b'\x89\x50\x4E\x47\x0D\x0A\x1A\x0A'
out = sig + svalue[0:slen]
self.svalues.append(out)
#gif87a
elif stype == 9:
self.stypes.append(FileType.GIF87)
sig = b'\x47\x49\x46\x38\x37\x61'
out = sig + svalue[0:slen]
self.svalues.append(out)
#gif89a
elif stype == 10:
self.stypes.append(FileType.GIF89)
sig = b'\x47\x49\x46\x38\x39\x61'
out = sig + svalue[0:slen]
self.svalues.append(out)
else:
raise ValueError("Stream contained an unsupported type.")
count += slen
# validate
self.validate_fpff()
"""
Checks if imported FPFF is valid
"""
def validate_fpff(self):
for i in range(self.sect_num):
if self.stypes[i] == FileType.WORDS:
for w in self.svalues[i]:
if len(w) != 4:
raise ValueError("FPFF is not valid. Improper word length.")
elif self.stypes[i] == FileType.DWORDS:
for w in self.svalues[i]:
if len(w) != 8:
raise ValueError("FPFF is not valid. Improper dword length.")
elif self.stypes[i] == FileType.REF:
if self.svalues[i] > self.sect_num:
raise ValueError("FPFF is not valid. Reference out of bounds.")
"""
Write to FPFF file
"""
def write(self, file):
# convert to bytes
w_magic = bytearray(b'\xDE\xDA\xFE\xBE')
w_version = FPFF.reverse_bytearray(FPFF.add_padding(struct.pack(">I", self.version), 4))
w_timestamp = FPFF.reverse_bytearray(FPFF.add_padding(struct.pack(">I", int(time.time())), 4))
w_author = FPFF.reverse_bytearray(FPFF.add_padding(bytearray(self.author, 'ascii'), 8))
w_sect_num = FPFF.reverse_bytearray(FPFF.add_padding(struct.pack(">I", self.sect_num), 4))
w_sections = list()
for i in range(self.sect_num):
w_svalue = None
if self.stypes[i] == FileType.ASCII:
w_svalue = bytearray(self.svalues[i], 'ascii')
elif self.stypes[i] == FileType.UTF8:
w_svalue = bytearray(self.svalues[i], 'utf8')
elif self.stypes[i] == FileType.WORDS:
w_svalue = b''.join(self.svalues[i])
elif self.stypes[i] == FileType.DWORDS:
w_svalue = b''.join(self.svalues[i])
elif self.stypes[i] == FileType.DOUBLES:
w_svalue = bytearray()
                for b in self.svalues[i]:
                    # to_bytes avoids odd-length hex strings from hex() and pads to 8 bytes
                    w_svalue.extend(int(b).to_bytes(8, 'big'))
elif self.stypes[i] == FileType.COORD:
w_svalue = bytearray()
                w_svalue.extend(int(self.svalues[i][0]).to_bytes(8, 'big'))
                w_svalue.extend(int(self.svalues[i][1]).to_bytes(8, 'big'))
elif self.stypes[i] == FileType.REF:
                w_svalue = bytearray(int(self.svalues[i]).to_bytes(4, 'big'))
elif self.stypes[i] == FileType.PNG:
w_svalue = bytearray(self.svalues[i])
del w_svalue[:8]
elif self.stypes[i] == FileType.GIF87:
w_svalue = bytearray(self.svalues[i])
del w_svalue[:6]
elif self.stypes[i] == FileType.GIF89:
w_svalue = bytearray(self.svalues[i])
del w_svalue[:6]
w_slen = len(w_svalue)
w_section = bytearray()
w_section.extend(FPFF.reverse_bytearray(FPFF.add_padding(struct.pack(">I", int(self.stypes[i].value)), 4)))
w_section.extend(FPFF.reverse_bytearray(FPFF.add_padding(struct.pack(">I", w_slen), 4)))
w_section.extend(w_svalue)
w_sections.extend(w_section)
# construct and write
out_data = bytearray()
out_data.extend(w_magic)
out_data.extend(w_version)
out_data.extend(w_timestamp)
out_data.extend(w_author)
out_data.extend(w_sect_num)
out_data.extend(w_sections)
with open(file, 'wb') as f:
f.write(bytes(out_data))
f.close()
"""
Export FPFF data to folder
"""
def export(self, path):
# create path
dirpath = "/".join(path.split('/')[:-1])
dirname = path.split('/')[-1]
if dirpath != "":
dirpath += "/"
if os.path.exists(dirpath+dirname+"-data"):
shutil.rmtree(dirpath+dirname+"-data")
os.makedirs(dirpath+dirname+"-data")
# export files
for i in range(self.sect_num):
out_name = dirpath+dirname+"-data/"+dirname+"-"+str(i+1)
w_svalue = None
if self.stypes[i] in [FileType.ASCII, FileType.UTF8, FileType.WORDS, FileType.DWORDS, FileType.DOUBLES, FileType.COORD, FileType.REF]:
if self.stypes[i] == FileType.ASCII:
w_svalue = self.svalues[i]
elif self.stypes[i] == FileType.UTF8:
w_svalue = self.svalues[i]
elif self.stypes[i] == FileType.WORDS:
w_svalue = " ".join([ val.hex() for val in self.svalues[i]])
elif self.stypes[i] == FileType.DWORDS:
w_svalue = " ".join([ val.hex() for val in self.svalues[i]])
elif self.stypes[i] == FileType.DOUBLES:
w_svalue = " ".join([ str(val) for val in self.svalues[i]])
elif self.stypes[i] == FileType.COORD:
w_svalue = "LAT: " + str(self.svalues[i][0]) + "\nLON: " + str(self.svalues[i][1])
elif self.stypes[i] == FileType.REF:
w_svalue = "REF: " + str(self.svalues[i])
with open(out_name+".txt", 'w') as f:
f.write(w_svalue)
f.close()
else:
if self.stypes[i] == FileType.PNG:
with open(out_name+".png", 'wb') as f:
f.write(self.svalues[i])
f.close()
elif self.stypes[i] == FileType.GIF87:
with open(out_name+".gif", 'wb') as f:
f.write(self.svalues[i])
f.close()
elif self.stypes[i] == FileType.GIF89:
with open(out_name+".gif", 'wb') as f:
f.write(self.svalues[i])
f.close()
"""
Add data
"""
def add(self, obj_data, obj_type, i=0):
# rudimentry type check
if obj_type == FileType.ASCII and type(obj_data) == str:
self.svalues.insert(i, obj_data)
elif obj_type == FileType.UTF8 and type(obj_data) == str:
self.svalues.insert(i, obj_data)
elif obj_type == FileType.WORDS and type(obj_data) == list:
self.svalues.insert(i, obj_data)
elif obj_type == FileType.DWORDS and type(obj_data) == list:
self.svalues.insert(i, obj_data)
elif obj_type == FileType.DOUBLES and type(obj_data) == list:
self.svalues.insert(i, obj_data)
elif obj_type == FileType.COORD and type(obj_data) == tuple:
self.svalues.insert(i, obj_data)
elif obj_type == FileType.REF and type(obj_data) == int:
self.svalues.insert(i, obj_data)
elif obj_type == FileType.PNG and type(obj_data) == bytearray:
self.svalues.insert(i, obj_data)
elif obj_type == FileType.GIF87 and type(obj_data) == bytearray:
self.svalues.insert(i, obj_data)
elif obj_type == FileType.GIF89 and type(obj_data) == bytearray:
self.svalues.insert(i, obj_data)
else:
raise TypeError("Object data not valid for object type.")
self.stypes.insert(i, obj_type)
self.sect_num += 1
"""
Remove data
"""
def remove(self, i):
del self.svalues[i]
del self.stypes[i]
self.sect_num -= 1
def __repr__(self):
return str(self.stypes)
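if __name__ == '__main__':
    # Minimal round-trip sketch (writes a throwaway file in the working
    # directory; the file name is illustrative).
    demo = FPFF()
    demo.author = 'demo'
    demo.add('hello fpff', FileType.ASCII)
    demo.write('demo.fpff')
    loaded = FPFF('demo.fpff')
    print(loaded.stypes, loaded.svalues)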
|
# Determine if a quadratic equation has no, equal or distinct roots
import math
# Get coefficients of x^2, x and constant
a = int(input("a: "))
b = int(input("b: "))
c = int(input("c: "))
if (b*b - 4*a*c < 0):
print("No real roots")
elif (b*b - 4*a*c == 0): # equal roots
print(-b/(2*a))
else: # (b*b - 4*a*c > 0) i.e. distinct roots
print((-b+math.sqrt(b*b-4*a*c))/(2*a), (-b-math.sqrt(b*b-4*a*c))/(2*a))
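# Worked example: a=1, b=-3, c=2 gives discriminant b*b - 4*a*c = 9 - 8 = 1 > 0,
# so the two distinct roots printed are (3+1)/2 = 2.0 and (3-1)/2 = 1.0.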
|
import os
import torch
import torch.utils.data as data
import torchvision
from torchvision import transforms
def get_dataset(dataset_name, if_download, batch_size, num_workers):
# getting data for training; just CIFAR10(?)
transform_train = transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.ToTensor()
])
transform_test = transforms.Compose([
transforms.ToTensor()
])
# specify the directory name for saving
dir_name = os.path.join('data', dataset_name)
if dataset_name == 'cifar10':
print('cifar10')
trainset = torchvision.datasets.CIFAR10(root=dir_name, train=True, download=True, transform=transform_train)
trainloader = data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
testset = torchvision.datasets.CIFAR10(root=dir_name, train=False, download=True, transform=transform_test)
testloader = data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
if dataset_name == 'mnist':
print('mnist')
trainset = torchvision.datasets.MNIST(root=dir_name, train=True, download=True, transform=transform_train)
trainloader = data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
testset = torchvision.datasets.MNIST(root=dir_name, train=False, download=True, transform=transform_test)
testloader = data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
    if dataset_name == 'chest_xray':
        print('getting chest xray dataset')
        # Loading for this dataset is not implemented here; fail explicitly
        # instead of hitting a NameError on the return below.
        raise NotImplementedError('chest_xray loading is not implemented yet')
    return trainset, trainloader, testset, testloader
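# Usage sketch (downloads CIFAR-10 into ./data/cifar10 on first run; the batch
# size and worker count are illustrative):
#   trainset, trainloader, testset, testloader = get_dataset('cifar10', True, 128, 2)
#   images, labels = next(iter(trainloader))  # images: [128, 3, 32, 32]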
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__version__ = '1.0.1'
user_data_by_id_query = '''
SELECT usr.id AS user_id,
usr.email AS email,
usr.password AS password,
usr.fullname AS fullname,
usr.locked AS locked,
usr.active AS active,
usr.address AS address,
usr.city AS city,
usr.companyname AS companyname,
usr.country AS country,
usr.gendar AS gendar,
usr.postalcode AS postalcode,
usr.webpage AS webpage,
usr.vatid AS vatid,
usr.distance_unit AS distance_unit,
(usr.meta_information->'timezone_id'#>>'{}')::BIGINT AS timezone_id,
usr.meta_information->'timezone_name'#>>'{}' AS timezone_name,
(usr.meta_information->'map_pool_time'#>>'{}')::BIGINT AS map_pool_time,
lng.id AS language_id,
lng.name AS language_name,
lng.short_code AS language_short_code,
usr.account_type_id AS account_type_id,
coalesce(act.name, '') AS account_type_name
FROM public.user AS usr
LEFT OUTER JOIN public.language AS lng ON lng.id = usr.language_id
LEFT OUTER JOIN public.account_type as act on act.id = usr.account_type_id
WHERE usr.id = $1::BIGINT
AND usr.deleted is FALSE
AND usr.active is TRUE
AND usr.locked is FALSE
'''
user_login_by_email_query = '''
SELECT usr.id AS user_id,
usr.email AS email,
usr.password AS password,
usr.fullname AS fullname,
usr.locked AS locked,
usr.active AS active,
usr.distance_unit AS distance_unit,
(usr.meta_information->'timezone_id'#>>'{}')::BIGINT AS timezone_id,
usr.meta_information->'timezone_name'#>>'{}' AS timezone_name,
(usr.meta_information->'map_pool_time'#>>'{}')::BIGINT AS map_pool_time,
lng.id AS language_id,
lng.name AS language_name,
lng.short_code AS language_short_code,
usr.account_type_id AS account_type_id,
coalesce(act.name, '') AS account_type_name
FROM public.user AS usr
LEFT OUTER JOIN public.language AS lng ON lng.id = usr.language_id
LEFT OUTER JOIN public.account_type as act on act.id = usr.account_type_id
WHERE usr.email = $1::VARCHAR
AND usr.deleted is FALSE
AND usr.active is TRUE
AND usr.locked is FALSE
LIMIT 1;
'''
get_user_list_query = '''
SELECT usr.id AS id,
usr.email AS email,
usr.password AS password,
usr.fullname AS fullname,
usr.locked AS locked,
usr.active AS active,
usr.address AS address,
usr.city AS city,
usr.companyname AS companyname,
usr.country AS country,
usr.gendar AS gendar,
usr.postalcode AS postalcode,
usr.webpage AS webpage,
usr.vatid AS vatid,
usr.distance_unit AS distance_unit,
(usr.meta_information->'timezone_id'#>>'{}')::BIGINT AS timezone_id,
usr.meta_information->'timezone_name'#>>'{}' AS timezone_name,
(usr.meta_information->'map_pool_time'#>>'{}')::BIGINT AS map_pool_time,
lng.id AS language_id,
lng.name AS language_name,
lng.short_code AS language_short_code,
usr.account_type_id AS account_type_id,
coalesce(act.name, '') AS account_type_name
FROM public.user AS usr
LEFT OUTER JOIN public.language AS lng ON lng.id = usr.language_id
LEFT OUTER JOIN public.account_type as act on act.id = usr.account_type_id
WHERE usr.deleted is FALSE
AND (
$1::VARCHAR is NULL OR
usr.email ILIKE $1::VARCHAR || '%' OR
usr.email ILIKE '%' || $1::VARCHAR || '%' OR
usr.email ILIKE $1::VARCHAR || '%')
AND (
$2::VARCHAR is NULL OR
usr.fullname ILIKE $2::VARCHAR || '%' OR
usr.fullname ILIKE '%' || $2::VARCHAR || '%' OR
usr.fullname ILIKE $2::VARCHAR || '%')
'''
get_user_list_by_fullname_query = '''
SELECT usr.id AS id,
usr.fullname AS fullname
FROM public.user AS usr
WHERE usr.deleted is FALSE
AND ($1::VARCHAR is NULL OR usr.fullname ILIKE $1::VARCHAR || '%')
'''
get_user_list_count_query = '''
SELECT count(*) AS city_count
FROM public."user" AS usr
WHERE usr.deleted is FALSE
AND (
$1::VARCHAR is NULL OR
usr.email ILIKE $1::VARCHAR || '%' OR
usr.email ILIKE '%' || $1::VARCHAR || '%' OR
usr.email ILIKE $1::VARCHAR || '%')
AND (
$2::VARCHAR is NULL OR
usr.fullname ILIKE $2::VARCHAR || '%' OR
usr.fullname ILIKE '%' || $2::VARCHAR || '%' OR
usr.fullname ILIKE $2::VARCHAR || '%')
'''
get_user_element_query = '''
SELECT usr.id AS id,
usr.email AS email,
usr.fullname AS fullname,
usr.password AS password,
usr.fullname AS fullname,
usr.locked AS locked,
usr.active AS active,
usr.address AS address,
usr.city AS city,
usr.companyname AS companyname,
usr.country AS country,
usr.gendar AS gendar,
usr.postalcode AS postalcode,
usr.webpage AS webpage,
usr.vatid AS vatid,
usr.distance_unit AS distance_unit,
(usr.meta_information->'timezone_id'#>>'{}')::BIGINT AS timezone_id,
usr.meta_information->'timezone_name'#>>'{}' AS timezone_name,
(usr.meta_information->'map_pool_time'#>>'{}')::BIGINT AS map_pool_time,
lng.id AS language_id,
lng.name AS language_name,
lng.short_code AS language_short_code,
usr.account_type_id AS account_type_id,
coalesce(act.name, '') AS account_type_name
FROM public.user AS usr
LEFT OUTER JOIN public.language AS lng ON lng.id = usr.language_id
LEFT OUTER JOIN public.account_type as act on act.id = usr.account_type_id
WHERE usr.deleted is FALSE
AND usr.id = $1;
'''
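# Usage sketch: the $1-style positional placeholders above are asyncpg-style
# parameters; with an asyncpg connection (illustrative, not defined in this
# module) the queries would be run roughly like:
#   row = await conn.fetchrow(user_data_by_id_query, user_id)
#   rows = await conn.fetch(get_user_list_query, email_filter, fullname_filter)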
|
# -*- coding: utf-8 -*-
import urllib.parse
import scrapy
class PubMedSpider(scrapy.Spider):
name = 'pubmed'
def __init__(self, term='', *args, **kwargs):
super(PubMedSpider, self).__init__(*args, **kwargs)
self.term = term
def start_requests(self):
url = 'https://pubmed.ncbi.nlm.nih.gov/'
params = urllib.parse.quote_plus(self.term)
yield scrapy.Request(url=f'{url}/?term={params}', callback=self.parse_list)
def parse_list(self, response):
for next_page in response.xpath(
'//div[@class="docsum-content"]/a[@class="docsum-title"]/@href').getall():
if not next_page:
continue
yield response.follow(next_page, callback=self.parse)
def parse(self, response):
title = response.xpath('//h1[@class="heading-title"]/text()').get().strip()
abstract = ' '.join(
[s.strip() for s in response.xpath('//div[@id="enc-abstract"]//text()').getall()
if s.strip()]
)
        abstract = ' '.join(abstract.split()).strip()  # collapse repeated whitespace
authors = '; '.join(
[s.strip() for s
in response.xpath('//div[@class="authors-list"]/span/a/text()').getall()
if s.strip()])
orig_url = response.url
link = response.xpath('//a[@class="id-link"][@data-ga-action="DOI"]/@href').get()
yield {
'title': title,
'abstract': abstract,
'url': orig_url,
'authors': authors,
'link': link,
}
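# Usage sketch (run from inside a Scrapy project that contains this spider):
#   scrapy crawl pubmed -a term="machine learning" -o results.json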
|
from router_solver import *
import game_engine.constants
from game_engine.constants import *
from game_engine.spritesheet import *
class Character(pygame.sprite.Sprite):
def __init__(self, x, y, width, height, speed):
super().__init__()
self.x = x
self.y = y
self.width = width
self.height = height
self.__speed = speed
self.board_x = None
self.board_y = None
self.hat = 0
        # Stores the character sprite frames, keyed by hat and facing direction
        self.walking_frames = {hat: {"R": [], "L": []} for hat in range(5)}
self.moving = False
self.sprite_direction = "R"
self.jump_image = 0
self.image = None
self.rect = None
    # Builds the animation frames and the character's different hats
def construct_animation(self):
x = 1
y = 1
w = Constants.FROG_SPRITE_WIDTH // Constants.FROG_SPRITE_NUMBER
h = Constants.FROG_SPRITE_HEIGHT // Constants.FROG_HATS_NUMBER
sprite_sheet_l = SpriteSheet(Constants.FROG_IMAGE_L)
sprite_sheet_r = SpriteSheet(Constants.FROG_IMAGE_R)
# Get all images from spritesheet
        for _ in range(14):
            for hat in range(5):
                image_l = sprite_sheet_l.get_image(x, y, w, h)
                image_r = sprite_sheet_r.get_image(x, y, w, h)
                self.walking_frames[hat]["L"].append(image_l)
                self.walking_frames[hat]["R"].append(image_r)
                y += h
            x += w
            y = 1
self.image = self.walking_frames[self.hat][self.sprite_direction][0]
self.rect = self.image.get_rect()
################## SPRITE METHODS ##################
def update(self):
if self.moving:
self.image = self.walking_frames[self.hat][self.sprite_direction][
self.jump_image
]
if self.jump_image < 13:
self.jump_image += 1
else:
self.jump_image = 0
self.moving = False
################## GAMEPLAY METHODS ##################
    # Checks whether a position on the board is available
def available_position(self, board, x, y):
fixed_x = x // Constants.FROG_WIDTH
fixed_y = y // Constants.FROG_HEIGHT
position = board[fixed_y][fixed_x]
        if position is None:
            return True
        elif isinstance(position, Character):
            return False
elif position.type == "Fly":
position.eaten = True
return True
return False
    # Returns the position on the board
def fix_return_board_position(self):
x = self.x // Constants.FROG_WIDTH
y = self.y // Constants.FROG_HEIGHT
return [x, y]
################## ACTION FUNCTIONS ##################
def move_down(self, times, board):
times = int(times)
movement = self.height * times
new_y = self.y + movement
is_available = self.available_position(board, self.x, new_y)
if self.y + self.height + movement <= Constants.DISPLAY_HEIGHT and is_available:
self.y += movement
self.rect.y = self.y
self.moving = True
return self.fix_return_board_position()
def move_up(self, times, board):
times = int(times)
movement = self.height * times
new_y = self.y - movement
is_available = self.available_position(board, self.x, new_y)
if self.y - movement >= 0 and is_available:
self.y -= movement
self.rect.y = self.y
self.moving = True
return self.fix_return_board_position()
def move_right(self, times, board):
times = int(times)
movement = self.width * times
new_x = self.x + movement
is_available = self.available_position(board, new_x, self.y)
if self.x + self.width + movement <= Constants.DISPLAY_WIDTH and is_available:
self.sprite_direction = "R"
self.x += movement
self.rect.x = self.x
self.moving = True
return self.fix_return_board_position()
def move_left(self, times, board):
times = int(times)
movement = self.width * times
new_x = self.x - movement
is_available = self.available_position(board, new_x, self.y)
if self.x - self.width - movement >= 0 and is_available:
self.sprite_direction = "L"
self.x -= movement
self.rect.x = self.x
self.moving = True
return self.fix_return_board_position()
    # Changes the character's hat
def change_hat(self, hat_id):
self.hat = int(hat_id)
|
#list
squares = [1, 4, 9, 16, 25]
print (squares)
print (squares[0])
print (squares[-1])
print (squares[-3:])
print ('______________________________________')
print (squares + [36,49,64,81, 100])
print ('______________________________________')
cubes = [1,8,27,65,125]
print (cubes)
cubes [3] = 64
print (cubes)
cubes.append(216)
cubes.append(7 ** 3)
print (cubes)
print ('______________________________________')
letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
print (letters)
letters[2:5] = ['C', 'D', 'E']
print (letters)
letters[2:5] = []
print (letters)
letters[:] = []
print (letters)
print ('______________________________________')
letters = ['a', 'b', 'c', 'd']
print (len (letters))
print ('______________________________________')
a = ['a', 'b', 'c']
n = [1,2,3]
x = [a,n]
print (x)
print (x[0])
print (x[0][1])
print ('______________________________________')
a, b = 0, 1
while a < 10:
print (a)
a, b = b, a+b
print (' ')
i = 256*256
print ('The value of i is', i)
print ('______________________________________')
a, b = 0, 1
while a < 1000:
print (a, end=',')
a,b = b, a+b
print (' ')
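print ('______________________________________')
# A short, hedged extension of the list examples above: the same squares can be
# built with a list comprehension instead of typing the values by hand.
squares_again = [n ** 2 for n in range(1, 6)]
print (squares_again)             # [1, 4, 9, 16, 25]
print (squares_again == squares)  # True, since squares was never modified above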
|
#!/usr/bin/python
import os
import pygame
from glm import ivec2, vec2
from game.base.inputs import Inputs
from game.base.signal import Signal
from game.constants import SPRITES_DIR, DEBUG
from game.base.stats import Stats
from game.states.game import Game
from game.states.intro import Intro
from game.states.menu import Menu
from game.states.credits import Credits
from game.states.intermission import Intermission
import time
class App:
STATES = {
"intro": Intro,
"game": Game,
"menu": Menu,
"intermission": Intermission,
"credits": Credits,
}
# MAX_KEYS = 512
def __init__(self, initial_state):
"""
The main beginning of our application.
Initializes pygame and the initial state.
"""
# pygame.mixer.pre_init(44100, 16, 2, 4096)
pygame.init()
self.size = ivec2(1920, 1080) / 2
"""Display size"""
self.cache = {}
"""Resources with filenames as keys"""
pygame.display.set_caption("Butterfly Destroyers")
self.screen = pygame.display.set_mode(self.size)
self.on_event = Signal()
self.quit = False
self.clock = pygame.time.Clock()
self.inputs = Inputs()
self.time = 0
self.dirty = True
self.data = {} # data persisting between modes
# self.keys = [False] * self.MAX_KEYS
self._state = None
self.last_state = None
self.next_state = initial_state
self.process_state_change()
def load(self, filename, resource_func):
"""
        Attempts to load a resource from the cache; otherwise loads and caches it.
        :param resource_func: a function that loads the resource if it is
            not already available in the cache
"""
if filename not in self.cache:
r = self.cache[filename] = resource_func()
return r
return self.cache[filename]
def load_img(self, filename, scale=1, flipped=False):
"""
Load the image at the given path in a pygame surface.
The file name is the name of the file without the full path.
Files are looked for in the SPRITES_DIR
Results are cached.
Scale is an optional integer to scale the image by a given factor.
"""
def load_fn():
img = pygame.image.load(os.path.join(SPRITES_DIR, filename))
if scale != 1:
w, h = img.get_size()
img = pygame.transform.scale(img, ivec2(vec2(w, h) * scale))
if flipped:
img = pygame.transform.flip(img, True, False)
return img
return self.load((filename, scale, flipped), load_fn)
# def pend(self):
# self.dirty = True
def run(self):
"""
Main game loop.
Runs until the `quit` flag is set
Runs update(dt) and render() of the current game state (default: Game)
"""
last_t = time.time_ns()
accum = 0
self.fps = 0
frames = 0
dt = 0
self.inputs.event([])
while (not self.quit) and self.state:
cur_t = time.time_ns()
dt += (cur_t - last_t) / (1000 * 1000 * 1000)
# if dt < 0.001:
# time.sleep(1 / 300)
# continue # accumulate dt for skipped frames
last_t = cur_t
accum += dt
frames += 1
if accum > 1:
self.fps = frames
frames = 0
accum -= 1
events = pygame.event.get()
for event in events:
if event.type == pygame.QUIT:
return 0
self.on_event(event)
self.inputs.event(events)
if self.state is None:
break
if DEBUG:
print("FRAME, dt =", dt, "FPS,", self.fps)
self.inputs.update(dt)
if self.update(dt) is False:
break
if self.render() is False:
break
dt = 0 # reset to accumulate
def add_event_listener(self, obj):
slot = self.on_event.connect(obj.event)
obj.slots.append(slot)
return slot
def update(self, dt):
"""
Called every frame to update our game logic
:param dt: time since last frame in seconds
:return: returns False to quit gameloop
"""
if not self.state:
return False
if self.next_state:
self.process_state_change()
self.state.update(dt)
def render(self):
"""
Called every frame to render our game state and update pygame display
:return: returns False to quit gameloop
"""
# if not self.dirty:
# return
# self.dirty = False
if self.state is None:
return False
self.state.render()
pygame.display.update()
@property
def state(self):
return self._state
@state.setter
def state(self, s):
"""
Schedule state change on next frame
"""
self.next_state = s
def process_state_change(self):
"""
Process pending state changes
"""
lvl = None
try:
lvl = int(self.next_state)
pass
except ValueError:
pass
if lvl:
stats = self.data["stats"] = self.data.get("stats", Stats())
stats.level = lvl
self.next_state = "game"
if self.next_state:
self._state = self.STATES[self.next_state.lower()](self)
self.next_state = None
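# A hedged usage sketch: how the application might be started. "intro" is one of
# the keys registered in App.STATES above; the project's real entry point may
# differ from this.
if __name__ == "__main__":
    app = App("intro")
    app.run()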
|
#Chris Hasty 17.2 Exercise ( Books Database )
import sqlite3
import pandas as pd
connection = sqlite3.connect('books.db')
pd.options.display.max_columns =10
pd.read_sql('SELECT * FROM authors', connection, index_col=['id'])
pd.read_sql('SELECT * From titles', connection)
df = pd.read_sql('SELECT * FROM author_ISBN', connection)
df.head()
pd.read_sql('SELECT first, last FROM authors', connection)
pd.read_sql("""SELECT title, edition, copyright FROM titles WHERE copyright > '2016'""", connection)
pd.read_sql("""SELECT id, first, last FROM authors WHERE last LIKE 'D%'""", connection, index_col=['id'])
pd.read_sql("""SELECT id, first, last FROM authors WHERE last LIKE '_b%'""", connection, index_col=['id'])
pd.read_sql("SELECT title FROM titles ORDER BY title ASC",connection)
pd.read_sql("""SELECT id, first, last FROM authors ORDER BY last,first""", connection, index_col=['id'])
pd.read_sql("""SELECT id, first, last FROM authors ORDER BY last DESC,first ASC""", connection, index_col=['id'])
pd.read_sql("""SELECT isbn, title, edition, copyright FROM titles WHERE title LIKE '%How to Program' ORDER BY title""", connection)
pd.read_sql("""SELECT first, last, isbn FROM authors INNER JOIN author_ISBN ON authors.id = author_ISBN.id ORDER BY
last, first""", connection).head()
cursor = connection.cursor()
cursor = cursor.execute("""INSERT INTO authors (first, last) VALUES ('Sue','Red')""")
pd.read_sql('SELECT id, first, last FROM authors', connection, index_col=['id'])
cursor = cursor.execute("""UPDATE authors SET last = 'Black' WHERE last = 'Red' AND first='Sue'""")
cursor.rowcount
pd.read_sql('SELECT id, first, last FROM authors', connection, index_col=['id'])
cursor.execute('DELETE FROM authors WHERE id=6')
cursor.rowcount
pd.read_sql('SELECT id, first, last FROM authors', connection, index_col=['id'])
connection.close()
# pd.read_sql("""SELECT title, edition FROM titles ORDER BY edition DESC""", connection).head(3)
# pd.read_sql("""SELECT * FROM authors WHERE first LIKE 'A%'""", connection)
# pd.read_sql("""SELECT isbn, title, edition, copyright FROM titles WHERE title NOT LIKE '%How to Program' ORDER BY
# title""", connection)
|
# coding=utf-8
from myspider.items import NewsItem
from scrapy.http import Request
import scrapy
import re
class jandan_article(scrapy.Spider):
name = 'jandan_article'
allowed_domains = ['jandan.net']
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate, sdch',
'Accept-Language': 'zh-Cn,zh;q=0.8',
'Cache-Control': 'max-age=0',
'Connection': 'keep-alive',
'Host': 'jandan.net'
}
custom_settings = {
'ITEM_PIPELINES': {
'myspider.pipeline.mongo_pipeline.MongoPipeline': 1,
}
}
def start_requests(self):
yield Request(url='https://jandan.net', headers=self.headers, callback=self.parse)
def parse(self, response):
sel = scrapy.Selector(response)
links_in_a_page = sel.xpath("//a[@href]")
for link_sel in links_in_a_page:
link = str(link_sel.re('href="(.*?)"')[0])
link = response.urljoin(link)
if re.match(r'https://jandan.net/page/\d{1,4}$', link):
yield Request(url=link, headers=self.headers, callback=self.parse)
elif re.match(r'https://jandan.net/20\d{2}/(0[1-9]|1[0-2])/(0[1-9]|[1-2][0-9]|3[0-1])/(.*?).html$', link):
yield Request(url=link, headers=self.headers, callback=self.parse_detail)
def parse_detail(self, response):
item = NewsItem()
item['url'] = response.url
item['title'] = response.xpath("//title/text()").extract_first().replace('\r\n', '')
item['author'] = response.xpath("//a[@class='post-author']/text()").extract_first()
item['content'] = "". join(response.xpath("//div[@class='post f']//p | //div[@class='post f']/h4").extract())
item['pic'] = response.xpath("//div[@id='content']//img/@data-original").extract_first()
item['pdate'] = response.xpath("//div[@class='time_s']/text()").extract_first().replace('@ ', '').replace(' , ', ' ')
yield item
|
import requests
r = requests.get("https://lpu.in")
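# Hedged follow-up to the request above: inspect the response object.
# raise_for_status() raises requests.HTTPError for 4xx/5xx responses, so it is
# wrapped in a try/except here.
print("status:", r.status_code)
try:
    r.raise_for_status()
    print(r.text[:200])
except requests.HTTPError as err:
    print("request failed:", err)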
|
Import('*')
try:
env_gskiplist # env value to override for settings
except:
env_gskiplist = Environment()
env_gskiplist.Append(LIBS='glib2-0')
env_gskiplist.Append(CFLAGS=['-pthread','--std=c99'])
static_gskiplist = env_gskiplist.StaticLibrary("libgsimplecache", [Glob("*.c")])
OS_gskiplist = env_gskiplist.SharedObject(target="gsimplecacheOS.os", source =[Glob("*.c")])
env_gskiplist.Depends(OS_gskiplist, Glob("*.h"))
env_gskiplist.Depends(static_gskiplist, Glob("*.h"))
Alias("OS_gskiplist", OS_gskiplist)
Return('static_gskiplist', 'OS_gskiplist')
|
# Copyright 2021 Pulser Development Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the abstract register class."""
from __future__ import annotations
from abc import ABC, abstractmethod
from collections.abc import Iterable, Mapping
from collections.abc import Sequence as abcSequence
from typing import (
TYPE_CHECKING,
Any,
NamedTuple,
Optional,
Type,
TypeVar,
Union,
cast,
)
import numpy as np
from numpy.typing import ArrayLike
from pulser.json.utils import obj_to_dict
from pulser.register.weight_maps import DetuningMap
if TYPE_CHECKING:
from pulser.register.register_layout import RegisterLayout
T = TypeVar("T", bound="BaseRegister")
QubitId = Union[int, str]
class _LayoutInfo(NamedTuple):
"""Auxiliary class to store the register layout information."""
layout: RegisterLayout
trap_ids: tuple[int, ...]
class BaseRegister(ABC):
"""The abstract class for a register."""
@abstractmethod
def __init__(self, qubits: Mapping[Any, ArrayLike], **kwargs: Any):
"""Initializes a custom Register."""
if not isinstance(qubits, dict):
raise TypeError(
"The qubits have to be stored in a dictionary "
"matching qubit ids to position coordinates."
)
if not qubits:
raise ValueError(
"Cannot create a Register with an empty qubit " "dictionary."
)
self._ids: tuple[QubitId, ...] = tuple(qubits.keys())
self._coords = [np.array(v, dtype=float) for v in qubits.values()]
self._dim = self._coords[0].size
self._layout_info: Optional[_LayoutInfo] = None
self._init_kwargs(**kwargs)
def _init_kwargs(self, **kwargs: Any) -> None:
if kwargs:
if kwargs.keys() != {"layout", "trap_ids"}:
raise ValueError(
"If specifying 'kwargs', they must only be 'layout' and "
"'trap_ids'."
)
layout: RegisterLayout = kwargs["layout"]
trap_ids: tuple[int, ...] = tuple(kwargs["trap_ids"])
self._validate_layout(layout, trap_ids)
self._layout_info = _LayoutInfo(layout, trap_ids)
@property
def qubits(self) -> dict[QubitId, np.ndarray]:
"""Dictionary of the qubit names and their position coordinates."""
return dict(zip(self._ids, self._coords))
@property
def qubit_ids(self) -> tuple[QubitId, ...]:
"""The qubit IDs of this register."""
return self._ids
@property
def layout(self) -> Optional[RegisterLayout]:
"""The layout used to define the register."""
return self._layout_info.layout if self._layout_info else None
def find_indices(self, id_list: abcSequence[QubitId]) -> list[int]:
"""Computes indices of qubits.
This can especially be useful when building a Pulser Sequence
with a parameter denoting qubits.
Example:
Let ``reg`` be a register with qubit Ids "a", "b" and "c":
>>> reg.find_indices(["a", "b", "c", "a"])
It returns ``[0, 1, 2, 0]``, following the qubit order of the
register.
Then, it is possible to use these indices when building a
sequence, typically by assigning them to an array of variables
that can be provided as an argument to ``target_index``
and ``phase_shift_index``.
Args:
id_list: IDs of the qubits to find.
Returns:
Indices of the qubits to denote, only valid for the
given mapping.
"""
if not set(id_list) <= set(self.qubit_ids):
raise ValueError(
"The IDs list must be selected among the IDs of the register's"
" qubits."
)
return [self.qubit_ids.index(id_) for id_ in id_list]
@classmethod
def from_coordinates(
cls: Type[T],
coords: np.ndarray,
center: bool = True,
prefix: Optional[str] = None,
labels: Optional[abcSequence[QubitId]] = None,
**kwargs: Any,
) -> T:
"""Creates the register from an array of coordinates.
Args:
coords: The coordinates of each qubit to include in the
register.
center: Whether or not to center the entire array around the
origin.
prefix: The prefix for the qubit ids. If defined, each qubit
id starts with the prefix, followed by an int from 0 to N-1
(e.g. prefix='q' -> IDs: 'q0', 'q1', 'q2', ...).
labels: The list of qubit ids. If defined, each qubit id will be
set to the corresponding value.
Returns:
A register with qubits placed on the given coordinates.
"""
if center:
coords = coords - np.mean(coords, axis=0) # Centers the array
if prefix is not None:
pre = str(prefix)
qubits = {pre + str(i): pos for i, pos in enumerate(coords)}
if labels is not None:
raise NotImplementedError(
"It is impossible to specify a prefix and "
"a set of labels at the same time"
)
elif labels is not None:
if len(coords) != len(labels):
raise ValueError(
f"Label length ({len(labels)}) does not"
f"match number of coordinates ({len(coords)})"
)
qubits = dict(zip(cast(Iterable, labels), coords))
else:
qubits = dict(cast(Iterable, enumerate(coords)))
return cls(qubits, **kwargs)
def _validate_layout(
self, register_layout: RegisterLayout, trap_ids: tuple[int, ...]
) -> None:
"""Sets the RegisterLayout that originated this register."""
trap_coords = register_layout.coords
if register_layout.dimensionality != self._dim:
raise ValueError(
"The RegisterLayout dimensionality is not the same as this "
"register's."
)
if len(set(trap_ids)) != len(trap_ids):
raise ValueError("Every 'trap_id' must be a unique integer.")
if len(trap_ids) != len(self._ids):
raise ValueError(
"The amount of 'trap_ids' must be equal to the number of atoms"
" in the register."
)
for reg_coord, trap_id in zip(self._coords, trap_ids):
if np.any(reg_coord != trap_coords[trap_id]):
raise ValueError(
"The chosen traps from the RegisterLayout don't match this"
" register's coordinates."
)
def define_detuning_map(
self, detuning_weights: Mapping[QubitId, float]
) -> DetuningMap:
"""Defines a DetuningMap for some qubits of the register.
Args:
detuning_weights: A mapping between the IDs of the targeted qubits
and detuning weights (between 0 and 1, their sum must be equal
to 1).
Returns:
A DetuningMap associating detuning weights to the trap coordinates
of the targeted qubits.
"""
if not set(detuning_weights.keys()) <= set(self.qubit_ids):
raise ValueError(
"The qubit ids linked to detuning weights have to be defined"
" in the register."
)
return DetuningMap(
[self.qubits[qubit_id] for qubit_id in detuning_weights],
list(detuning_weights.values()),
)
@abstractmethod
def _to_dict(self) -> dict[str, Any]:
"""Serializes the object.
During deserialization, it will be reconstructed using
'from_coordinates', so that it uses lists instead of a dictionary
(in JSON, lists elements keep their types, but dictionaries keys do
not).
"""
cls_dict = obj_to_dict(
None,
_build=False,
_name=self.__class__.__name__,
_module=self.__class__.__module__,
)
kwargs = (
{} if self._layout_info is None else self._layout_info._asdict()
)
return obj_to_dict(
self,
cls_dict,
[np.ndarray.tolist(qubit_coords) for qubit_coords in self._coords],
False,
None,
self._ids,
**kwargs,
_submodule=self.__class__.__name__,
_name="from_coordinates",
)
def __eq__(self, other: Any) -> bool:
if type(other) is not type(self):
return False
return list(self._ids) == list(other._ids) and all(
(
np.allclose( # Accounts for rounding errors
self._coords[i],
other._coords[other._ids.index(id)],
)
for i, id in enumerate(self._ids)
)
)
|
import urllib.request, math, os
def results(query, kwparse, fromYear, fromMonth, toYear, toMonth, numberofentries):
query = str(numberofentries) + "_" + query
if not os.path.exists(query):
        os.makedirs(query)  # making a separate directory for storing webpages
else:
print("Folder with similar name already exsists. Please delete the folder and Run again.")
exit()
start = 0
# Determine the max number of pages (dividing by 10 as on website we get 10 results per page)
pagelimit = numberofentries / 10
pagelimit = math.ceil(pagelimit) #rounding up number to next greater integer
for page in range (0, pagelimit):
url = 'https://www.oldbaileyonline.org/search.jsp?gen=1&form=searchHomePage&_divs_fulltext='
url += query
url += '&kwparse=' + kwparse
url += '&fromYear=' + fromYear
url += '&fromMonth=' + fromMonth
url += '&toYear=' + toYear
url += '&toMonth=' + toMonth
url += '&start=' + str(start)
url += '&count=0'
response = urllib.request.urlopen(url)
content = response.read()
filename = query + '/' + 'SearchResults' + str(start) #save the results with different file names
f = open(filename + ".html", 'wb')
f.write(content)
        f.close()
start = start + 10
|
import csv
import numpy as np
import Config
def load_data(filename):
lines = open(filename, 'r').readlines()
lines = lines[1:] #removing the header
tokens = []
data = []
labels = []
for line in lines:
data.append(line.split("\t")[1])
tempLabel = line.split("\t")[2]
if float(tempLabel)>0.5:
tempLabel = [1,0];
else:
tempLabel = [0,1];
labels.append(tempLabel)
# tokens.append(word_tokenize(line))
# print(train_data)
# print(train_labels)
count=0;
    for i in range(len(data)):
if(i>=1):
# print labels[-i]
count = count+1;
if(count == 5):
break;
# print len(labels)
# print len(data)
# print len(labels)
if(filename.find("dev")!=-1):
data = data[:int(0.1*len(data))]
labels = labels[:int(0.1*len(labels))]
# print len(data)
# print len(labels)
return [data,labels]
load_data('data/train.csv')
def batch_iteration(data,batch_size,no_epochs,shuffle=True):
# explicitly feeding keep_prob as well
# feed_dict = {self.train_inputs: batch_inputs, self.train_labels: batch_labels, self.keep_prob: 0.5}
# data=np.array(data);
# dataSize = len(data);
# data = list(zip(trainFeats, trainLabels))
data_size = len(data)
    num_batches_per_epoch = int((len(data) - 1) / batch_size) + 1
batchTrainList = []
# if shuffle:
# shuffle_indices = np.random.permutation(np.arange(data_size))
# shuffled_data = data[shuffle_indices]
# else:
# shuffled_data = data
for batch_num in range(num_batches_per_epoch):
start_index = batch_num * batch_size
end_index = min((batch_num + 1) * batch_size, data_size)
yield data[start_index:end_index]
# for epoch in xrange(num_batches_per_epoch):
# # for batchNum in xrange(numBatchesPerEpoch):
# start = (epoch * batch_size) % len(trainFeats)
# end = ((epoch + 1) * batch_size) % len(trainFeats)
# print "start is:",start;
# print "end is:",end;
# if end < start:
# start -= end
# end = len(trainFeats)
# batch_inputs, batch_labels = trainFeats[start:end], trainLabels[start:end]
# batchTrainList.append([batch_inputs,batch_labels])
print "bliiiiiiiiiiiiiiiiiiiiiii"
# return batchTrainList;
# yield data[startIndex:endIndex];
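# A hedged usage sketch of batch_iteration with a small in-memory list; the real
# training code presumably feeds it the [data, labels] pair returned by load_data.
# Note that importing this module already runs load_data('data/train.csv') above,
# so that file must exist for the script to reach this point.
if __name__ == "__main__":
    for batch in batch_iteration(list(range(10)), batch_size=4, no_epochs=1):
        print(batch)  # [0..3], then [4..7], then [8, 9]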
|
# Generated by Django 2.2.2 on 2019-06-20 11:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('info', '0003_auto_20190620_0822'),
]
operations = [
migrations.AlterField(
model_name='news',
name='published_at',
field=models.DateTimeField(auto_now=True),
),
]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#The MIT License (MIT)
#
#Copyright (c) <2013> <Colin Duquesnoy and others, see AUTHORS.txt>
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
#
"""
Contains the mode that watches for external changes to the opened file.
"""
import os
from pyqode.core import logger
from pyqode.core.mode import Mode
from pyqode.qt import QtCore, QtGui
class FileWatcherMode(Mode):
"""
    FileWatcher mode. (Watches for external changes to the opened file)
This mode adds the following properties to :attr:`pyqode.core.QCodeEdit.settings`
====================== ====================== ======= ====================== ================
Key Section Type Default value Description
====================== ====================== ======= ====================== ================
autoReloadChangedFiles General bool False Auto reload files that changed externally.
====================== ====================== ======= ====================== ================
"""
#: Mode identifier
IDENTIFIER = "fileWatcherMode"
#: Mode description
DESCRIPTION = "Watch the editor's file and take care of the reloading."
@property
def autoReloadChangedFiles(self):
return self.editor.settings.value("autoReloadChangedFiles")
@autoReloadChangedFiles.setter
def autoReloadChangedFiles(self, value):
self.editor.settings.setValue("autoReloadChangedFiles", value)
def __init__(self):
super(FileWatcherMode, self).__init__()
self.__fileSystemWatcher = QtCore.QFileSystemWatcher()
self.__flgNotify = False
self.__changeWaiting = False
def __notifyChange(self):
"""
        Notifies the user of the external change, or reloads the changed file
        without asking if autoReloadChangedFiles is True.
"""
self.__flgNotify = True
auto = self.editor.settings.value("autoReloadChangedFiles")
if (auto or QtGui.QMessageBox.question(
self.editor, "File changed",
"The file <i>%s</i> has has changed externally.\n"
"Do you want reload it?" % os.path.basename(
self.editor.filePath),
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No) ==
QtGui.QMessageBox.Yes):
self.editor.openFile(self.editor.filePath)
self.__changeWaiting = False
self.__flgNotify = False
def __onFileChanged(self, path):
"""
On file changed, notify the user if we have focus, otherwise delay the
notification to the focusIn event
"""
content, encoding = self.editor.readFile(
path, encoding=self.editor.fileEncoding)
if content == self.editor.toPlainText():
logger.debug("FileWatcherMode: Internal change, skipping")
return
self.__changeWaiting = True
if self.editor.hasFocus() and self.__flgNotify:
self.__notifyChange()
@QtCore.Slot()
def __onEditorFilePathChanged(self):
"""
Change the watched file
"""
path = self.editor.filePath
if len(self.__fileSystemWatcher.files()):
self.__fileSystemWatcher.removePaths(
self.__fileSystemWatcher.files())
if path and path not in self.__fileSystemWatcher.files():
self.__fileSystemWatcher.addPath(path)
@QtCore.Slot()
def __onEditorFocusIn(self):
"""
Notify if there are pending changes
"""
if self.__changeWaiting:
self.__notifyChange()
def _onInstall(self, editor):
"""
Adds autoReloadChangedFiles settings on install.
"""
Mode._onInstall(self, editor)
self.editor.settings.addProperty("autoReloadChangedFiles", False)
def _onStateChanged(self, state):
"""
        Connects/disconnects the file watcher and editor signals when the mode
        is enabled/disabled.
"""
if state is True:
# self.editor.textSaved.connect(self.__onEditorTextSaved)
# self.editor.textSaving.connect(self.__onEditorTextSaving)
self.__fileSystemWatcher.fileChanged.connect(self.__onFileChanged)
self.editor.newTextSet.connect(self.__onEditorFilePathChanged)
self.editor.focusedIn.connect(self.__onEditorFocusIn)
else:
# self.editor.textSaved.disconnect(self.__onEditorTextSaved)
# self.editor.textSaving.connect(self.__onEditorTextSaving)
self.editor.newTextSet.disconnect(self.__onEditorFilePathChanged)
self.editor.focusedIn.disconnect(self.__onEditorFocusIn)
self.__fileSystemWatcher.removePath(self.editor.filePath)
self.__fileSystemWatcher.fileChanged.disconnect(self.__onFileChanged)
if __name__ == '__main__':
from pyqode.core import QGenericCodeEdit
class Example(QGenericCodeEdit):
def __init__(self):
QGenericCodeEdit.__init__(self, parent=None)
self.installMode(FileWatcherMode())
self.openFile(__file__)
self.resize(QtCore.QSize(1000, 600))
import sys
app = QtGui.QApplication(sys.argv)
e = Example()
e.show()
sys.exit(app.exec_())
|
from django.shortcuts import render, redirect ,HttpResponse
import random
def index(request):
if "numbers" not in request.session:
request.session ["numbers"]=random.randint(1, 100)
print( request.session ["numbers"])
return render(request,"index.html")
def guess(request):
    if request.method == "POST":
        request.session['user'] = int(request.POST['number'])
        if request.session['user'] == request.session["numbers"]:
            return redirect("/")
        elif request.session['user'] > request.session["numbers"]:
            return redirect("/")
        elif request.session['user'] < request.session["numbers"]:
            return redirect("/")
    return redirect("/")
def back(request):
return redirect("/")
def reset(request):
request.session.clear()
return redirect("/")
|
import tensorflow as tf
import numpy as np
import os
import sys
from PIL import Image, ImageOps
from utils import get_shape, batch_norm, lkrelu
class Discriminator(object):
def __init__(self, inputs, is_training, stddev=0.02, center=True, scale=True, reuse=None):
self._is_training = is_training
self._stddev = stddev
with tf.variable_scope('D', initializer=tf.truncated_normal_initializer(stddev=self._stddev), reuse=reuse):
self._center = center
self._scale = scale
self._prob = 0.5
self._inputs = inputs
self._discriminator = self._build_discriminator(inputs, reuse=reuse)
def build_layer(self, name, inputs, k, bn=True, use_dropout=False):
layer = dict()
with tf.variable_scope(name):
layer['filters'] = tf.get_variable('filters', [4, 4, get_shape(inputs)[-1], k])
layer['conv'] = tf.nn.conv2d(inputs, layer['filters'], strides=[1, 2, 2, 1], padding='SAME')
layer['bn'] = batch_norm(layer['conv'], center=self._center, scale=self._scale, training=self._is_training) if bn else layer['conv']
layer['dropout'] = tf.nn.dropout(layer['bn'], self._prob) if use_dropout else layer['bn']
layer['fmap'] = lkrelu(layer['dropout'], slope=0.2)
return layer
def _build_discriminator(self, inputs, reuse=None):
discriminator = dict()
discriminator['l1'] = self.build_layer('l1', inputs, 64, bn=False)
discriminator['l2'] = self.build_layer('l2', discriminator['l1']['fmap'], 128)
discriminator['l3'] = self.build_layer('l3', discriminator['l2']['fmap'], 256)
discriminator['l4'] = self.build_layer('l4', discriminator['l3']['fmap'], 512)
        with tf.variable_scope('l5'):
l5 = dict()
l5['filters'] = tf.get_variable('filters', [4, 4, get_shape(discriminator['l4']['fmap'])[-1], 1])
l5['conv'] = tf.nn.conv2d(discriminator['l4']['fmap'], l5['filters'], strides=[1, 1, 1, 1], padding="SAME")
l5['bn'] = batch_norm(l5['conv'], center=self._center, scale=self._scale, training=self._is_training)
l5['fmap'] = tf.nn.sigmoid(l5['bn'])
discriminator['l5'] = l5
return discriminator
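# A hedged usage sketch (TF1 graph mode, matching the class above): build the
# discriminator on a placeholder batch of 256x256 RGB images and fetch the final
# per-patch probability map. The 256x256 input shape is an assumption made only
# for this example.
if __name__ == '__main__':
    images = tf.placeholder(tf.float32, [None, 256, 256, 3])
    d = Discriminator(images, is_training=True)
    prob_map = d._discriminator['l5']['fmap']
    print(prob_map)  # shape (?, 16, 16, 1) after four stride-2 layers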
|
import unittest
from katas.kyu_6.evil_autocorrect_prank import autocorrect
class AutocorrectPrankTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(autocorrect('u'), 'your sister')
def test_equals_2(self):
self.assertEqual(autocorrect('you'), 'your sister')
def test_equals_3(self):
self.assertEqual(autocorrect('Youuuuu'), 'your sister')
def test_equals_4(self):
self.assertEqual(autocorrect('youtube'), 'youtube')
def test_equals_5(self):
self.assertEqual(autocorrect(
'You u youville utube you youyouyou uuu raiyou united youuuu u y'
'ou'),
'your sister your sister youville utube your sister youyouyou uu'
'u raiyou united your sister your sister your sister')
|
import numpy as np
import scipy as sp
import matplotlib.pyplot as py
import random as rd
import math as m
global ylim
global xlim
global nb_cust
global kNN
global clim
global Capacity
ylim = 200
xlim = 200
clim = 20
nb_cust = 10
kNN = 5
Capacity = 75
# Creation of a test instance
inst_test2 = [(0, 0), (11, 120), (-142, -149), (-83, 39), (-168, -46), (-83, -146), (4, -99),
(32, -16), (-117, 12), (-132, 33), (51, 44), (-29, 76), (-98, -33), (-26, -190),
(-89, 128), (124, -95), (-108, -1), (24, -158), (-115, -106), (80, -160), (-167, 3), (185, -72)]
r1_test2 = [0, 3, 6, 9, 12, 15, 18, 21]
r2_test2 = [0, 1, 4, 7, 10, 13, 16, 19]
r3_test3 = [0, 2, 5, 8, 11, 14, 17, 20]
edge2_1 = (9,12)
# Creation of a random instance
def create_instance(n):
inst = [(0, 0)]
demand = []
route1 = [0]
route2 = [0]
route3 = [0]
for i in range(n):
x = rd.randint(-xlim, xlim)
y = rd.randint(-ylim, ylim)
c = rd.randint(0, clim)
inst.append((x, y))
demand.append(c)
if i % 3 == 0:
route1.append(i)
elif i % 3 == 1:
route2.append(i)
else:
route3.append(i)
return inst, route1, route2, route3,demand
# Print the routes
def print_instance(inst):
dep = inst[0]
cust = inst[1:]
py.plot(dep[0], dep[1], color='blue', marker='o')
for i in cust:
py.plot(i[0], i[1], color='red', marker='o')
def print_route(route, inst, c):
x = []
y = []
for i in range(len(route)):
x.append(inst[route[i]][0])
y.append(inst[route[i]][1])
x.append(inst[route[0]][0])
y.append(inst[route[0]][1])
py.plot(x, y, label="route " + str(c))
def print_routes(routes, inst):
c = 1
for i in routes:
print_route(i, inst, c)
c += 1
def print_current_sol(routes, inst):
print_instance(inst)
print_routes(routes, inst)
# Compute the cost of a solution
def distance(p1, p2):
return m.sqrt((p2[0]-p1[0])**2 + (p2[1]-p1[1])**2)
def cost_sol(routes, inst):
c = 0
for r in routes:
for i in range(len(r)-1):
a = inst[r[i]]
b = inst[r[i+1]]
c += distance(a, b)
c += distance(inst[r[len(r)-1]], inst[r[0]])
return c
# Compute the kNN for each node
def voisins(k, inst):
v = []
for i in range(len(inst)):
vi = []
couples = []
for j in range(len(inst)):
if i != j:
vi.append([distance(inst[i], inst[j]), j])
vi.sort()
for l in vi:
couples.append(l[1])
v.append(couples[:k])
return v
# Find the route, which contains customer i
def find_route(i, routes):
for k in range(len(routes)):
if i in routes[k]:
return routes[k]
# Return the nearest route of the edge given. Can return 0
def another_route(a, voisins, routes):
r1 = find_route(a, routes)
for i in voisins[a]:
r2 = find_route(i, routes)
if r2 != r1:
return ((r1, r2), i)
return ()
#Compute the saving of the new edge
def saving(i, ri, j, rj, inst):
ri.append(0)
rj.append(0)
s = distance(inst[ri[i]], inst[ri[i+1]])
s += distance(inst[ri[i]], inst[ri[i-1]])
s -= distance(inst[ri[i+1]], inst[ri[i-1]])
s += distance(inst[rj[j]], inst[rj[j+1]])
s -= distance(inst[ri[i]], inst[rj[j]])
s -= distance(inst[ri[i]], inst[rj[j+1]])
ri.pop()
rj.pop()
return s
# to do: don't violate the capacity constraint
def eval_cand(edge, voisins, routes, inst):
(a, b) = edge
if b != 0:
(r1, r2), v = another_route(b, voisins, routes)
i_v, i = r2.index(v), r1.index(b)
else:
(r1, r2), v = another_route(a, voisins, routes)
i_v, i = r2.index(v), r1.index(a)
return (saving(i, r1, i_v, r2, inst), (i, i_v), (r1, r2))
def best_cand(route, np, voisins, routes, inst):
S = []
for p in route:
i = route.index(p)
if p != np:
S.append(eval_cand((route[i-1], p), voisins, routes, inst))
S.sort()
return S[-1]
def ejection_chain(l, edge, voisins, routes, inst):
print(cost_sol(routes, inst))
s, I, R = eval_cand(edge, voisins, routes, inst)
R[1].insert(I[1]+1, R[0][I[0]])
R[0].remove(R[0][I[0]])
print(cost_sol(routes, inst))
for k in range(l-1):
curr_route = R[1]
s, I, R = best_cand(curr_route, R[0][I[0]], voisins, routes, inst)
R[1].insert(I[1]+1, R[0][I[0]])
R[0].remove(R[0][I[0]])
print(cost_sol(routes, inst))
return routes
routes = [r1_test2, r2_test2,r3_test3]
print_current_sol(routes, inst_test2)
py.plot([inst_test2[edge2_1[0]][0], inst_test2[edge2_1[1]][0]], [
inst_test2[edge2_1[0]][1], inst_test2[edge2_1[1]][1]], color='black', label='chosen')
py.title("Test de l'opérateur ejection_chain")
py.legend()
py.show()
v = voisins(kNN, inst_test2)
new_routes = ejection_chain(14,edge2_1,v,routes,inst_test2)
print(new_routes)
print_current_sol(new_routes,inst_test2)
py.show()
|
import rsa
keys_list = {}
publicKey, privateKey = rsa.newkeys(512)
keys_list[publicKey] = privateKey
message = "ankit"
enc = rsa.encrypt(message.encode(), publicKey)
print("origibal String: ", message)
print("encrypted String: ", enc)
decMessage = rsa.decrypt(enc, privateKey).decode()
print("decrypted message: ", decMessage)
|
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
data = pd.read_csv("all_data_100000_patches.csv")
#data at 100,000 updates in non-ecology mode
dataNoEco = data[data.loc[:,"ecology"]== "N" ]
#data at 100,000 updates in ecology mode
dataEco = data[data.loc[:, "ecology"]== "Y" ]
#Patches x Shannon Diversity
#sns.boxplot(x=data["Patches"], y=data["Shannon.Diversity"])
plot = sns.boxplot(x=dataEco["patches"], y=dataEco["shannondiv"])
plot.set( xlabel = 'Patches', ylabel = 'Shannon Diversity By Task Done')
plt.show()
#Patches x Average task Diversity
#sns.boxplot(x=data["Patches"], y=data["Average.Task.Diversity"])
sns.boxplot(x=dataEco["patches"], y=dataEco["avgtaskdiv"])
plt.show()
#Patches x Average Shannon Diversity
#sns.boxplot(x=data["Patches"], y=data["Average.Phenotype.Diversity"])
plot = sns.boxplot(x=dataEco["patches"], y=dataEco["avgshannondiv"])
plot.set(xlabel='Patches', ylabel = 'Average Shannon Diversity')
plt.show()
plot = sns.boxplot(x=dataEco["patches"], y=dataEco["avgshannondiv"])
plot.set(xlabel='Patches', ylabel = 'Average Shannon Diversity')
plt.show()
#Patches x Unique Phenotypes Task
#sns.boxplot(x=data["Patches"], y=data["Unique.Phenotypes.Task"])
sns.boxplot(x=dataEco["patches"], y=dataEco["uniquephenotypetask"])
plt.show()
#Patches x Unique Phenotype Count
#sns.boxplot(x=data["Patches"], y=data["Unique.Phenotype.Count"])
sns.boxplot(x=dataEco["patches"], y=dataEco["uniquephenotypecount"])
plt.show()
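# Hedged addition: besides showing figures interactively, a plot can be written to
# disk; get_figure() returns the matplotlib Figure that owns the seaborn Axes.
# The output filename below is only an example.
plot = sns.boxplot(x=dataEco["patches"], y=dataEco["uniquephenotypecount"])
plot.set(xlabel='Patches', ylabel='Unique Phenotype Count')
plot.get_figure().savefig("patches_vs_unique_phenotype_count.png", dpi=150)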
|
import sys, os
print("~/read_column.py [txtfile] [delimiter]")
with open(sys.argv[1]) as f:
fline = f.readline().split(sys.argv[2])
linenum=0
for line in fline:
linenum+=1
if '"' in line:print(linenum, ":", line.replace('"',''))
else:print(linenum, ":", line)
|
import threading
from socket import *
import sys
import os
import pyaudio
import wave
import glob
import tensorflow as tf
import numpy as np
from datetime import datetime
from header import *
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
if len(sys.argv) < 2:
print("compile error: please input total node number")
exit(1)
totalNodeNum = int(sys.argv[1])
info = []
posX = []
posY = []
for i in range(0,totalNodeNum):
info.append(0)
posX.append(-1)
posY.append(-1)
class ClientThread(threading.Thread):
def __init__(self, clientAddress, connectionSocket):
threading.Thread.__init__(self)
self.csocket = connectionSocket
self.caddr = clientAddress
def cal(self, nodeNum):
global info, totalNodeNum
predX = 0
predY = 0
tmp = 0
for i in range(0, totalNodeNum):
predX += int(info[i])*int(posX[i])
predY += int(info[i])*int(posY[i])
tmp += int(info[i])
if tmp != 0:
predX = predX / tmp
predY = predY / tmp
else:
predX = 'not found'
predY = 'not found'
now = datetime.now()
time = now.strftime('%H:%M:%S:%f')
print("-----",nodeNum,">",time,": Drone's location: (", predX, ",", predY, ")-----")
def run(self):
global info, posX, posY
print("*****Client Address", self.caddr[0], "connected.*****")
message = self.csocket.recv(1024).decode()
modifiedMessage = message.split(':')
nodeNum = int(modifiedMessage[0])
posX[nodeNum] = int(modifiedMessage[1])
posY[nodeNum] = int(modifiedMessage[2])
while True:
try:
with tf.device("/gpu:0"):
start_t = datetime.now()
sess = tf.Session()
init = tf.global_variables_initializer()
tf.reset_default_graph()
sess.run(init)
printer(str(nodeNum)+">Start")
fileName = self.csocket.recv(35).decode()
printer(str(nodeNum)+">socket receive")
print(fileName)
while not os.path.exists(fileName):
continue
while os.path.getsize(fileName)/1024 < 15:
continue
printer(str(nodeNum)+">file receive")
files = glob.glob(fileName)
raw_data = load(files)
printer(str(nodeNum)+">file load")
# pre-processing
mfcc_data, y = mfcc4(raw_data, 1)
printer(str(nodeNum)+">MFCC")
X = np.concatenate((mfcc_data), axis=0)
X_input = X.reshape(-1, N_MFCC, N_FRAME, CHANNELS)
y = np.hstack(y)
n_labels = y.shape[0]
y_encoded = np.zeros((n_labels, N_UNIQ_LABELS))
y_encoded[np.arange(n_labels),y] = 1
X = tf.placeholder(tf.float32, shape=[None,N_MFCC*N_FRAME*CHANNELS])
X = tf.reshape(X, [-1, N_MFCC, N_FRAME, CHANNELS])
Y = tf.placeholder(tf.float32, shape=[None, N_UNIQ_LABELS])
keep_prob = tf.placeholder(tf.float32)
# CNN layer
logits = conv(X)
printer(str(nodeNum)+">Layer")
# cost optimizer needed??? -> time consuming
#cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=Y))
#optimizer = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE).minimize(cost)
#printer(str(nodeNum)+">cost-optimizer")
# model saver
sess = tf.Session()
saver = tf.train.Saver()
saver.restore(sess, '../model/CNN/cnn_model')
printer(str(nodeNum)+">Model saver")
# prediction
y_pred = sess.run(tf.argmax(logits,1),feed_dict={X:X_input,keep_prob:1})
#y_true = sess.run(tf.argmax(y_encoded,1))
from sklearn.metrics import accuracy_score
result = "%d" %((accuracy_score(y, y_pred)*100)%101)
printer(result)
info[nodeNum] = result
self.cal(nodeNum)
end_t = datetime.now()
print('one cycle time : ', (end_t-start_t))
except KeyboardInterrupt:
self.csocket.close()
print('bye bye~')
exit()
PORT = 21536
serverSocket = socket(AF_INET, SOCK_STREAM)
serverSocket.bind((ADDRESS, PORT))
print("The server is ready to receive on port", PORT)
while True:
try:
serverSocket.listen(1)
(connectionSocket, clientAddress) = serverSocket.accept()
newThread = ClientThread(clientAddress, connectionSocket)
newThread.start()
except KeyboardInterrupt:
connectionSocket.close()
print('Bye bye~')
exit()
exit()
|
#!/usr/bin/env python
# _*_ coding: utf-8 _*_
# @Time : 2021/4/8 19:07
# @Author :'liuyu'
# @Version:V 0.1
# @File :
# @desc :
import tensorflow as tf
import json
import os,re
import logging
logging.basicConfig(level= logging.INFO)
def calc_num_batches(total_num,batch_size):
return total_num // batch_size + int(total_num % batch_size != 0)
def convert_idx_to_token_tensor(inputs,idx2token):
def my_func(inputs):
return " ".join(idx2token[elem] for elem in inputs)
return tf.compat.v1.py_func(my_func,[inputs],tf.string)
def postprocess(hypotheses, idx2token):
'''Processes translation outputs.
hypotheses: list of encoded predictions
idx2token: dictionary
Returns
processed hypotheses
'''
_hypotheses = []
for h in hypotheses:
sent = "".join(idx2token[idx] for idx in h) # 合并peice 成vocab
sent = sent.split("</s>")[0].strip() # 去掉结尾符
sent = sent.replace("▁", " ") # remove bpe symbols # 单词间分隔符用空格代替
_hypotheses.append(sent.strip())
return _hypotheses
def save_hparams(hparams,path):
if not os.path.exists(path): os.makedirs(path)
hp = json.dumps(vars(hparams))
with open(os.path.join(path,"hparams"),'w') as fout:
fout.write(hp)
def save_variable_specs(fpath):
def _get_size(shp):
size = 1
for d in range(len(shp)):
size *=shp[d]
return size
params, num_params = [], 0
for v in tf.compat.v1.global_variables():
params.append("{}==={}".format(v.name, v.shape))
num_params += _get_size(v.shape)
print("num_params: ", num_params)
with open(fpath, 'w') as fout:
fout.write("num_params: {}\n".format(num_params))
fout.write("\n".join(params))
logging.info("Variables info has been saved.")
def get_hypotheses(num_batches,num_samples,sess,tensor,dict):
hypotheses = []
for _ in range(num_batches):
h = sess.run(tensor)
hypotheses.extend(h.tolist())
hypotheses = postprocess(hypotheses,dict)
return hypotheses[:num_samples]
def load_hparams(parser,path):
if not os.path.isdir(path):
path = os.path.dirname(path)
d = open(os.path.join(path,"hparams"),'r').read()
flag2val = json.loads(d)
for f,v in flag2val.items():
        setattr(parser, f, v)
def save_variable_specs(fpath):
def _get_size(shp):
size = 1
for d in range(len(shp)):
size *= shp[d]
return size
params,num_params =[],0
for v in tf.compat.v1.global_variables():
params.append("{}==={}".format(v.name,v.shape))
num_params += _get_size(v.shape)
print("num_params: ",num_params)
with open(fpath,'w') as fout:
fout.write("num_params: {}\n".format(num_params))
fout.write("\n".join(params))
logging.info("Variables info has been saved.")
def get_hypotheses(num_batches,num_samples,sess,tensor,dict):
hypotheses = []
for _ in range(num_batches):
h = sess.run(tensor)
hypotheses.extend(h.tolist())
return hypotheses[:num_samples]
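if __name__ == "__main__":
    # Hedged sanity check of calc_num_batches: the last, partial batch counts as
    # a full batch, so 103 samples at batch size 10 give 11 batches.
    assert calc_num_batches(103, 10) == 11
    assert calc_num_batches(100, 10) == 10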
|
import math
def f(x):
return pow(2,x+2)-4
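# Hedged example: f(x) = 2**(x+2) - 4 is strictly increasing and changes sign on
# [-2, 2], so a simple bisection search recovers its root (x = 0, since 2**2 = 4).
def find_root(lo=-2.0, hi=2.0, tol=1e-9):
    while hi - lo > tol:
        mid = (lo + hi) / 2
        if f(mid) < 0:
            lo = mid
        else:
            hi = mid
    return (lo + hi) / 2

if __name__ == "__main__":
    print(find_root())  # approximately 0.0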
|
from django_tables2 import tables, Column
from .models import ChangeLog
class ChangeLogListTable(tables.Table):
""" Describes ChangeLog list table (django-tables2 package) """
changed = Column(orderable=True,)
model = Column(orderable=False)
record_id = Column(orderable=False)
user = Column(orderable=False)
action_on_model = Column(orderable=False)
data = Column(orderable=False)
ipaddress = Column(exclude_from_export=True)
class Meta:
model = ChangeLog
template_name = "django_tables2/bootstrap.html"
exclude = ('ipaddress',)
|
"""
PRACTICE Test 3, problem 6.
Authors: David Mutchler, Valerie Galluzzi, Mark Hays, Amanda Stouder,
their colleagues and Muqing Zheng. October 2015.
""" # TODO: 1. PUT YOUR NAME IN THE ABOVE LINE.
import time
def main():
""" Calls the TEST functions in this module. """
# test_good_input()
test_sum_numbers()
def test_good_input():
""" Tests the good_input function. """
# ------------------------------------------------------------------
# We supplied tests for this function.
# ------------------------------------------------------------------
print()
print('--------------------------------------------------')
print('Testing the good_input function:')
print('--------------------------------------------------')
good_input()
def good_input():
"""
Repeatedly prompts for and gets a number from the user.
If the user enters anything that is NOT a number,
tells the user so and gives the user a chance to try again.
Stops when the user enters 0 and then prints the sum
of the numbers entered, with an appropriate message.
Sample run (with user input to the right of the colons):
Enter a number (0 to quit): 14
Enter a number (0 to quit): 3.33
Enter a number (0 to quit): blah
You entered a string that is NOT a number. Try again.
Enter a number (0 to quit): ok 6 ok?
You entered a string that is NOT a number. Try again.
Enter a number (0 to quit): 1.50
Enter a number (0 to quit): done
You entered a string that is NOT a number. Try again.
Enter a number (0 to quit): 0
The sum is: 18.33
"""
# ------------------------------------------------------------------
# TODO: 2. Implement and test this function.
# The testing code is already written for you (above).
# -----------------------------------------------------------------
sum = 0
while True:
entry_num = input('Enter a number (0 to quit):')
try:
if float(entry_num) == 0.0:
break
else:
sum += float(entry_num)
        except ValueError:
            print('You entered a string that is NOT a number. Try again.')
print('The sum is:', sum)
def test_sum_numbers():
""" Tests the sum_numbers function. """
# ------------------------------------------------------------------
# We supplied tests for this function.
# ------------------------------------------------------------------
print()
print('--------------------------------------------------')
print('Testing the sum_numbers function:')
print('--------------------------------------------------')
# Test 1: Should return the sum of the numbers.
expected = 1233.5
answer = sum_numbers('numbers1.txt')
print('\nTest 1:')
time.sleep(0.5) # To ensure that this prints before error messages.
if answer is None:
print('!!! The code ** FAILED ** this test.')
print('!!! It did not (yet) return a value.')
else:
print('Rounded to 5 decimal places')
print(' Expected is:', round(expected, 5))
print(' Actual is: ', round(answer, 5))
if round(expected, 5) == round(answer, 5):
print('The code PASSED this test')
else:
print('!!! The code ** FAILED ** this test.')
# Test 2: Should also return the sum of the numbers.
expected = 134.0
answer = sum_numbers('numbers2.txt')
print('\nTest 2:')
time.sleep(0.5) # To ensure that this prints before error messages.
if answer is None:
print('!!! The code ** FAILED ** this test.')
print('!!! It did not (yet) return a value.')
else:
print('Rounded to 5 decimal places')
print(' Expected is:', round(expected, 5))
print(' Actual is: ', round(answer, 5))
if round(expected, 5) == round(answer, 5):
print('The code PASSED this test')
else:
print('!!! The code ** FAILED ** this test.')
# Test 3: Should raise a RuntimeError('Bad file data') exception.
print('\nTest 3:')
print('Testing for raising a RuntimeError exception:')
time.sleep(0.5) # To ensure that this prints before error messages.
try:
answer = sum_numbers('numbers3.txt')
print('!!! The code ** FAILED ** this test.')
print('!!! It should have raised an Exception but did not do so.')
except RuntimeError as exception:
if str(exception) == 'Bad file data':
print('The code PASSED this test (raising a RuntimeError).')
else:
print('!!! The code ** FAILED ** this test.')
print('!!! The code raised a RuntimeError but the message')
print('!!! should have been:', 'Bad file data')
print('!!! and actually was:', str(exception))
except:
print('!!! The code ** FAILED ** this test.')
print('!!! The code correctly raised an exception')
print('!!! but NOT the required RuntimeError exception.')
# Test 4: Should raise a FileNotFound exception.
print('\nTest 4:')
print('Testing for experiencing a FileNotFoundError exception:')
time.sleep(0.5) # To ensure that this prints before error messages.
try:
answer = sum_numbers('numbers4.txt')
print('!!! The code ** FAILED ** this test.')
print('!!! It should have allowed this caller to handle')
print('!!! the FileNotFound exception but did not do so.')
except FileNotFoundError:
print('The code PASSED this test (raising a FileNotFoundError).')
except:
print('!!! The code ** FAILED ** this test.')
print('!!! Although it raised (or experienced) an Exception,')
print('!!! it should have allowed this caller to handle')
print('!!! the FileNotFound exception but did not do so.')
def sum_numbers(filename):
"""
Reads the file specified by the given filename and determines
(see details below):
-- The number of GOOD lines - each GOOD line is a string
that represents a number.
-- The number of BAD lines - any line that is NOT good is BAD.
-- The sum of all the numbers on the GOOD lines.
For example, if the file contains
1234
-34
one
22.5
Hello, how are you?
Don't count 5 this one. It is BAD.
11
then there are 4 GOOD lines and 3 BAD lines (and 7 total lines).
The sum of the numbers on the GOOD lines is 1233.5 in this example.
Note that lines that represent numbers may have white space
on either side of the number.
This function returns the sum of the numbers on the GOOD lines
UNLESS fewer than half of the lines are GOOD. In that case,
this function raises a RuntimeError('Bad file data') exception.
This function should NOT handle any file-reading Exceptions.
That is, it should let the caller(s) handle any such Exceptions.
Preconditions:
:type filename: str that represents a file
"""
# ------------------------------------------------------------------
# TODO: 3. Implement and test this function.
# The testing code is already written for you (above).
# ------------------------------------------------------------------
    count = 0
    total = 0
    fil = open(filename, 'r')
    data = fil.read().strip()
    fil.close()
    lines = data.split('\n')
    for k in range(len(lines)):
        try:
            total += float(lines[k])
        except ValueError:
            count += 1
    if count > 0.5 * len(lines):
        raise RuntimeError('Bad file data')
    else:
        return total
# ----------------------------------------------------------------------
# Calls main to start the ball rolling.
# ----------------------------------------------------------------------
main()
|
import plotly.express as px
import pandas as pd
line1 = pd.read_csv("line_chart.csv")
# print(line1.tail())
graph = px.line(line1,x="Year", y = "Per capita income", color="Country", title="Yearly Per Capita Income(Country)")
print("Bouncing you there...")
graph.show()
|
import os
import numpy as np
from math import floor
from datetime import datetime
from datetime import timedelta
import asyncio
from time import sleep
from shared.LogParser import LogParser
from shared.RedisManager import RedisManager
from shared.BaseConfig import BaseConfig
from shared.ServiceManager import ServiceManager
# ------------------------------------------------------------------
# check if ACS is available (assumed yes if the 'ACSROOT' env variable is defined)
# ------------------------------------------------------------------
has_acs = ('ACSROOT' in os.environ) # (os.uname()[1] == 'dawn.ifh.de')
# ------------------------------------------------------------------
def datetime_to_secs(datetime_now):
return (datetime_now - BaseConfig.datetime_epoch).total_seconds()
# ------------------------------------------------------------------
def secs_to_datetime(secs_now):
return BaseConfig.datetime_epoch + timedelta(seconds=float(secs_now))
# ------------------------------------------------------------------
def delta_seconds(date0, date1, is_microseconds=False):
if is_microseconds:
n_seconds = (date1 - date0).days * 86400 * \
1000000 + (date1 - date0).microseconds
else:
n_seconds = (date1 - date0).days * 86400 + (date1 - date0).seconds
return n_seconds
# ------------------------------------------------------------------
def date_to_string(date_in, time_string='', date_string=''):
if time_string == '':
time_string = BaseConfig.time_str_formats['time']
if date_string == '':
date_string = BaseConfig.time_str_formats['date']
output = ''
if date_string is not None:
output += str(date_in.date().strftime(date_string))
if time_string is not None:
if output != '':
output += ' '
output += str(date_in.time().strftime(time_string))
# output = (
# str(date_in.date().strftime(date_string))
# + ',' + str(date_in.time().strftime(time_string))
# )
return str(output)
# ------------------------------------------------------------------
def get_time(sec_scale):
"""time since epoch in milisecond
"""
if sec_scale == 'sec':
scale = 0
elif sec_scale == 'msec':
scale = 3
else:
        raise ValueError("sec_scale must be 'sec' or 'msec'")
secs = (datetime.utcnow() - datetime.utcfromtimestamp(0)).total_seconds()
return int(secs * pow(10, scale))
# ------------------------------------------------------------------
def dict_module_func(data, key, val, new_values):
if key == 'id' and val in new_values:
data['val'] = new_values[val]
return
# ------------------------------------------------------------------
def traverse_object(data, new_values, module_func=None):
if module_func is None:
module_func = dict_module_func
if isinstance(data, dict):
for k, v in data.items():
if isinstance(v, (dict, list, set, tuple)):
traverse_object(data=v, new_values=new_values, module_func=module_func)
else:
module_func(data, k, v, new_values)
elif isinstance(data, (list, set, tuple)):
for v in data:
traverse_object(data=v, new_values=new_values, module_func=module_func)
return
# ------------------------------------------------------------------
def flatten_dict(data_in, id='id', child_ids='children', sibling_ids='siblings'):
data_out = dict()
def flatten(data, depth):
if isinstance(data, dict) and (id in data.keys()):
depth += 1
if isinstance(data, dict):
for key, ele in data.items():
if isinstance(ele, (dict, list, set, tuple)):
flatten(data=ele, depth=depth)
elif key == id:
data_out[ele] = {
'depth': depth,
'height': None,
'parent': None,
child_ids: [],
sibling_ids: [],
'data': data
}
else:
continue
elif isinstance(data, (list, set, tuple)):
for ele in data:
flatten(data=ele, depth=depth)
return
flatten(data_in, -1)
max_depth = max([x['depth'] for x in data_out.values()])
for key, ele in data_out.items():
ele['height'] = max_depth - ele['depth']
if child_ids in ele['data']:
for child in ele['data'][child_ids]:
if isinstance(child, dict) and (id in child):
data_out[child[id]]['parent'] = key
ele[child_ids].append(child[id])
else:
# fixme - try/except instead ...
my_assert(
None, ' - expect a dict with a key [' + str(id) + '] ?!?! '
+ str(child), False
)
for key0, ele0 in data_out.items():
for key1, ele1 in data_out.items():
if (ele0['parent'] is not None) and \
(ele0['parent'] == ele1['parent']) and \
(key0 != key1):
ele0[sibling_ids].append(key1)
return data_out
# ------------------------------------------------------------------
def get_rnd_seed():
"""unique initialisation for rnd generator
"""
return int(get_time(sec_scale='msec'))
# ------------------------------------------------------------------
def get_rnd(n_digits=2, out_type=int, is_unique_seed=True):
n_digits = max(n_digits, 1)
rnd_min = pow(10, n_digits - 1)
rnd_max = pow(10, n_digits) - 1
if is_unique_seed:
output = BaseConfig.rnd_gen_unique.randint(rnd_min, rnd_max)
else:
output = BaseConfig.rnd_gen.randint(rnd_min, rnd_max)
output = out_type(output)
if out_type is float:
output = round(output * pow(10, -n_digits), n_digits)
# output = out_type(('%0' + str(n_digits) + 'f') % output)
return output
# ------------------------------------------------------------------
def pd_resampler(arr_in):
"""resampling function for reducing the size of datasets
"""
if len(arr_in) > 0:
return arr_in[0]
else:
return np.nan
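# Usage sketch (illustrative): intended as an aggregation function for pandas
# resampling, keeping the first sample of each bin and NaN for empty bins, e.g.
#   >>> import pandas as pd
#   >>> idx = pd.date_range('2020-01-01', periods=6, freq='1s')
#   >>> s = pd.Series(range(6), index=idx)
#   >>> s.resample('2s').apply(pd_resampler)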
# ------------------------------------------------------------------
def has_data_resampler(arr_in):
if len(arr_in) > 0:
return 1
else:
return np.nan
# ------------------------------------------------------------------
def format_units(units_in):
units = units_in
if units == '..' or units == '-':
units = ''
return units
# ------------------------------------------------------------------
def format_float_to_string(x):
return str('{0:.4e}'.format(x))
# ------------------------------------------------------------------
def is_coroutine(func):
is_crt = (asyncio.iscoroutine(func) or asyncio.iscoroutinefunction(func))
return is_crt
# ------------------------------------------------------------------
class time_of_night(ServiceManager):
# ------------------------------------------------------------------
def __init__(
self,
base_config,
service_name,
interrupt_sig,
end_time_sec=None,
timescale=None,
*args,
**kwargs
):
self.class_name = self.__class__.__name__
service_name = (service_name if service_name is not None else self.class_name)
super().__init__(service_name=service_name)
self.log = LogParser(base_config=base_config, title=__name__)
self.base_config = base_config
self.service_name = service_name
self.interrupt_sig = interrupt_sig
# 28800 -> 8 hour night
self.end_time_sec = 28800 if end_time_sec is None else end_time_sec
# 0.035 -> have 30 minutes last for one minute in real time
        self.timescale = 0.07 if timescale is None else timescale
# 0.0035 -> have 30 minutes last for 6 sec in real time
# if not has_acs:
# self.timescale /= 2
# self.timescale /= 20
self.redis = RedisManager(
name=self.class_name, base_config=base_config, log=self.log
)
self.n_night = -1
# sleep duration for thread loops
self.loop_sleep_sec = 1
# range in seconds of time-series data to be stored for eg monitoring points
self.epoch = datetime.utcfromtimestamp(0)
self.time_series_n_seconds = 60 * 30
self.second_scale = 1000
self.reset_night()
# make sure this is the only active instance
self.init_active_instance()
self.setup_threads()
return
# ------------------------------------------------------------------
def setup_threads(self):
self.add_thread(target=self.loop_main)
return
# ------------------------------------------------------------------
def get_total_time_seconds(self):
return self.end_time_sec
# ------------------------------------------------------------------
def get_n_night(self):
return self.n_night
# ------------------------------------------------------------------
def get_timescale(self):
return self.timescale
# ------------------------------------------------------------------
def get_current_time(self, n_digits=3):
        if n_digits is not None and n_digits >= 0:
return (
int(floor(self.time_now_sec))
if n_digits == 0 else round(self.time_now_sec, n_digits)
)
else:
return self.time_now_sec
# ------------------------------------------------------------------
def get_second_scale(self):
return self.second_scale
# ------------------------------------------------------------------
def get_reset_time(self):
return self.real_reset_time_sec
# ------------------------------------------------------------------
def get_real_time_sec(self):
"""the global function for the current system time
"""
return int((datetime.utcnow() - self.epoch).total_seconds() * self.second_scale)
# ------------------------------------------------------------------
def get_time_series_start_time_sec(self):
return self.get_real_time_sec() - self.time_series_n_seconds * self.second_scale
# ------------------------------------------------------------------
def get_start_time_sec(self):
return 0
# ------------------------------------------------------------------
def reset_night(self, log=None):
self.n_night += 1
self.real_reset_time_sec = self.get_real_time_sec()
time_now_sec = int(floor(self.get_start_time_sec()))
self.time_now_sec = time_now_sec
if log is not None:
self.log.info([
['r', '- reset_night(): '],
['y', 'time_now_sec:', self.time_now_sec, ', '],
['b', 'n_night:', self.n_night, ', '],
['g', 'real_reset_time_sec:', self.real_reset_time_sec],
])
pipe = self.redis.get_pipe()
pipe.set(name='time_of_night_' + 'scale', data=self.timescale)
pipe.set(name='time_of_night_' + 'start', data=time_now_sec)
pipe.set(name='time_of_night_' + 'end', data=self.end_time_sec)
pipe.set(name='time_of_night_' + 'now', data=time_now_sec)
pipe.execute()
return
# ------------------------------------------------------------------
def loop_main(self):
self.log.info([['g', ' - starting time_of_night.loop_main ...']])
while self.can_loop():
self.time_now_sec += self.loop_sleep_sec / self.timescale
if self.time_now_sec > self.end_time_sec:
self.reset_night()
self.redis.set(
name='time_of_night_' + 'now', data=int(floor(self.time_now_sec))
)
sleep(self.loop_sleep_sec)
self.log.info([['c', ' - ending time_of_night.loop_main ...']])
return
# ------------------------------------------------------------------
def get_time_of_night(parent):
pipe = parent.redis.get_pipe()
pipe.get('time_of_night_' + 'start')
pipe.get('time_of_night_' + 'end')
pipe.get('time_of_night_' + 'now')
time_of_night = pipe.execute()
if len(time_of_night) != 3:
parent.log.warning([[
'r', ' - ', parent.widget_type, ' - could not get time_of_night - '
], ['p', str(time_of_night)], ['r', ' - will use fake range ...']])
time_of_night = [0, 100, 0]
data = {'start': time_of_night[0], 'end': time_of_night[1], 'now': time_of_night[2]}
return data
|
import random
from faker import Faker
class FakeData:
"""Class for generating an fake data"""
def __init__(self):
self.faker = Faker()
# default data type values
_default_types = (
'string', 'integer', 'boolean',
'float', 'array_int', 'array_str',
'datetime'
)
def _generate_random_string(self):
"""Create an random string
:return: An random string
"""
values = [self.faker.name(), self.faker.text(), self.faker.address()]
return random.choice(values)
def _generate_values(self, data, count=8):
"""
        :param data: A callable that produces a single value
        :param count: Number of values to generate
        :return: A list of generated values
"""
return [data() for _ in range(count)]
def _set_value(self, field, key):
"""
        :param field: A dict with field information
        :param key: The key of the field to fill
        :return: The dict populated with fake data for that key
Example:
input: {'name': 'string'}
output: {'name': 'Lionel Messi'}
"""
if field[key] == 'string':
field[key] = self._generate_random_string()
if field[key] == 'integer':
field[key] = self.faker.random_number()
if field[key] == 'float':
field[key] = self.faker.pyfloat()
if field[key] == 'array_str':
field[key] = self._generate_values(self.faker.name)
if field[key] == 'array_int':
field[key] = self._generate_values(self.faker.random_number)
if field[key] == 'boolean':
field[key] = self.faker.pybool()
if field[key] == 'datetime':
field[key] = self.faker.date_time()
return field
def generate_value(self, fields):
"""
        :param fields: A list of field dicts
        :return: A list of dicts filled with fake data
        Example:
            input: [{'name': 'string'}, {'last_name': 'string'}]
            output: [{'name': 'Freddie Mercury'}, {'last_name': '30 Seconds To Mars'}]
"""
for field in fields:
for key in list(field.keys()):
if field[key] not in self._default_types:
raise TypeError(
f"The data type of '{key}' not supported, "
f"check your flaskbox.yml file"
)
self._set_value(field, key)
return fields
# instance of FakeData class
fake_data = FakeData()
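# Demo sketch (illustrative): the field names below are placeholders; running this
# module directly prints one set of generated values.
if __name__ == '__main__':
    sample_fields = [{'name': 'string'}, {'age': 'integer'}, {'active': 'boolean'}]
    print(fake_data.generate_value(sample_fields))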
|
import turtle
turtle.speed(100)
turtle.pensize(10)
angle = 20
for i in range(18):
turtle.left(angle)
for i in range(4):
turtle.forward(120)
turtle.left(90)
turtle.mainloop()
|
import argparse
import json
import random
import time
import numpy as np
import yaml
def fread(path):
"""
    Read whitespace-separated floats from a file
    :param path: file path
    :return: list of floats
"""
with open(path, "r") as f:
s = f.read()
v = [float(i) for i in s.split()]
return v
# Write data to a file
def fwrite(path, v):
"""
    Write a list of values to a file, one item per line
    :param path: file path
    :param v: list of values to write
:return:
"""
with open(path, "w") as f:
for item in v:
f.write("%s\n" % item)
def save(data, file_path):
"""
save data using json format
    :param data: an object serialisable to JSON
:param file_path: where to write
:return:
"""
with open(file_path, 'w') as file:
json.dump(data, file)
return True
def load(file_path):
"""
load data in the file_path
:param file_path: file_path
:return: the object
"""
with open(file_path) as json_file:
data = json.load(json_file)
return data
def get_para(name=''):
yaml_file = './configs/parameters.yaml'
with open(yaml_file, 'r') as file:
file_data = file.read()
    data = yaml.safe_load(file_data)
# attrs = name.split('.')
# for attr in attrs:
# data = data[attr]
return data
def cli():
# Default message.
parser = argparse.ArgumentParser(description='Bit level fault injection experiment', \
epilog='Configure your experiment from the command line.')
parser.add_argument('-m', '--model', required=True, type=str, \
help='Pick a model to run. Models listed in models/model_config.py')
parser.add_argument('-lw', '--load_weights', action='store_true', help='Load saved weights from cache.')
parser.add_argument('-ld_name', '--weight_name', default=None, type=str, \
help='Specifiy the weights to use.')
parser.add_argument('-qi', '--qi', default=2, type=int, help='Integer bits for quantization')
parser.add_argument('-qf', '--qf', default=6, type=int, help='Fractional bits for quantization')
parser.add_argument('-seed', '--seed', default=0xdeadbeef, type=int, help='Random seed for bit-level fault injector')
parser.add_argument('-frate', '--frate', default=0.0001, type=float, help='Fault Rate')
parser.add_argument('-c','--configuration', type=str, default=None, help='Specify a configuration file.')
parser.add_argument('-cache','--cache', type=str, default=None, help='Specify a cache dir.')
parser.add_argument('-results','--results', type=str, default=None, help='Specify results dir.')
args = parser.parse_args()
return args
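# Example invocation (illustrative; the script name and model name are placeholders,
# the flags are those defined above):
#   python fault_injection.py -m lenet --load_weights -qi 2 -qf 6 -frate 0.0001 -seed 42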
|
import socket
import cv2
import sys
cascPath = 'C:\\Users\\BIPUL\\Documents\\socket programming\\haarcascade_frontalface_alt.xml'
faceCascade = cv2.CascadeClassifier(cascPath)
video_capture = cv2.VideoCapture(0)
while True:
# Capture frame-by-frame
ret, frame = video_capture.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(30, 30),
flags=cv2.CASCADE_SCALE_IMAGE
)
# Draw a rectangle around the faces
for (x, y, w, h) in faces:
#cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
cv2.rectangle(frame, (x, y), (x+w, y+h),(255,255,255),0)
# Display the resulting frame
cv2.imshow('Video', frame)
    key = cv2.waitKey(1) & 0xFF
    if key == ord('q'):
        break
    if key == ord('c'):
        print('Captured')
        cv2.imwrite("face11.jpg", frame)
        break
# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
s=socket.socket()
host='192.168.43.132'
port=5005
s.connect((host,port))
f=open('face11.jpg','rb')
print('sending..')
l=f.read(1024)
while(l):
print('sending..')
s.send(l)
l=f.read(1024)
f.close()
print("done sending")
s.shutdown(socket.SHUT_WR)
print(s.recv(1024))
print(s.recv(1024))
s.close()
a=socket.socket()
host='192.168.43.71'
port=6005
a.bind((host,port))
f=open('confidential.txt','wb')
a.listen(5)
while True:
c, addr= a.accept()
print('Got connection from', addr)
print('receiving...')
l=c.recv(1024)
while(l):
print("receiving...")
f.write(l)
l=c.recv(1024)
f.close()
print("done receiving")
c.send(b'thanks for connecting')
c.close()
|
# coding=utf-8
from lxml import etree
import requests
import random
class SpiderTB_Xpath():
ua_list = [
"Mozilla/5.0 (Windows NT 6.1; ) Apple.... ",
"Mozilla/5.0 (X11; CrOS i686 2268.111.0)... ",
"Mozilla/5.0 (Macintosh; U; PPC Mac OS X.... ",
"Mozilla/5.0 (Macintosh; Intel Mac OS... "
]
headers = {"headers": random.choice(ua_list)}
url_base = "http://tieba.baidu.com/f?"
url_base2 = "http://tieba.baidu.com"
def visit(self, tb_name, page):
'''
        Purpose: return the HTML text of one list page
        tb_name: name of the Tieba forum
        page: page number to visit
'''
pn = (page-1)*50
param_dict = {"kw": tb_name,"pn":pn}
response = requests.get(self.url_base, params=param_dict, headers=self.headers)
#print(response.text)
html_text = response.text
return html_text
def textfilter(self,html_text):
'''
        Purpose: filter the page text and extract thread links
        html_text: HTML page text
        '''
        # convert the text into an XML tree
        selector = etree.HTML(html_text)
        # select the needed data (thread title links)
links = selector.xpath('//div[@class="threadlist_title pull_left j_th_tit "]/a[@class="j_th_tit "]/@href')
return links
def loadtext(self,links,url_base2):
'''
        Purpose: visit each thread and save its contents
        links: thread link suffixes
        url_base2: site URL prefix
'''
for link in links:
ba_url = url_base2 + link
self.writetext(ba_url)
def writetext(self,ba_url):
'''
        Purpose: append the thread contents to a local file
        ba_url: thread URL
'''
response = requests.get(ba_url,headers = self.headers)
ba_html = response.text
selector = etree.HTML(ba_html)
text = selector.xpath('//div[@class="d_post_content j_d_post_content clearfix"]/text()')
filename = "江西理工大学南昌校区吧.txt"
with open(filename,'a',encoding='utf-8') as f:
f.write("*"*50)
f.write("\n")
for a in text:
f.write(a)
f.write("\n")
if __name__ == "__main__":
tb_name = input("请输入你想逛的贴吧:")
start_page = int(input("起始访问页(数字):"))
end_page = int(input("结束访问页(数字):"))
spider = SpiderTB_Xpath()
for page in range(start_page,end_page+1):
html_text = spider.visit(tb_name, page)
links = spider.textfilter(html_text)
spider.loadtext(links, spider.url_base2)
print("第%d页打印完成!"%page)
|
import pika
import time
import pandas as pd
import json
credentials = pika.PlainCredentials('thinhle', 'meomeo')
connection = pika.BlockingConnection(
pika.ConnectionParameters(host='171.244.51.228', port=5672, virtual_host='/',
credentials=credentials))
channel = connection.channel()
channel.queue_declare(queue='demo_vnas', durable=True)
print(' [*] Waiting for messages. To exit press CTRL+C')
def callback(ch, method, properties, body):
# print(x)
print(body.decode())
# time.sleep(body.count(b'.'))
print(" [x] Done")
ch.basic_ack(delivery_tag=method.delivery_tag)
channel.basic_qos(prefetch_count=1)
channel.basic_consume(queue='demo_vnas', on_message_callback=callback)
channel.start_consuming()
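# A matching publisher sketch (illustrative; assumes the same broker, credentials
# and durable queue as the consumer above):
#   connection = pika.BlockingConnection(
#       pika.ConnectionParameters(host='171.244.51.228', port=5672, virtual_host='/',
#                                 credentials=pika.PlainCredentials('thinhle', 'meomeo')))
#   channel = connection.channel()
#   channel.queue_declare(queue='demo_vnas', durable=True)
#   channel.basic_publish(exchange='', routing_key='demo_vnas', body='hello',
#                         properties=pika.BasicProperties(delivery_mode=2))
#   connection.close()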
|
#!/usr/bin/env python
import time
from time import sleep
f = open("/usr/local/tcollector/collectors/0/05_B_100_vibX", "r")
print('input TSDB Start')
while True:
    line = f.readline()
    if not line: break
    print(str(line))
    time.sleep(0.001)
print('input Done')
f.close()
|
from django.db import models
# Create your models here.
class ShopModel(models.Model):
name = models.CharField(max_length=10)
address = models.CharField(max_length=20)
# item_name=models.CharField(max_length=20)
class Meta:
db_table = 'shop'
class ItemModel(models.Model):
name = models.CharField(max_length=10)
price = models.CharField(max_length=20)
shop_name = models.ForeignKey(ShopModel, on_delete=models.CASCADE, related_name='item')
class Meta:
db_table = 'item'
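# Usage sketch (illustrative; assumes migrations for these models have been applied,
# the field values are placeholders):
#   shop = ShopModel.objects.create(name='Corner', address='Main Street 1')
#   ItemModel.objects.create(name='Tea', price='3.50', shop_name=shop)
#   shop.item.all()  # reverse lookup through related_name='item'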
|
import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread('src_imgs/pic2.jpg',0)
def auto_canny(image, sigma=0.33):
# compute the median of the single channel pixel intensities
v = np.median(image)
# apply automatic Canny edge detection using the computed median
lower = int(max(0, (1.0 - sigma) * v))
upper = int(min(255, (1.0 + sigma) * v))
edged = cv2.Canny(image, lower, upper)
# return the edged image
return edged
# edges = cv2.Canny(img,100,250)
wide = cv2.Canny(img, 10, 200)
tight = cv2.Canny(img, 225, 250)
edges = auto_canny(img)
plt.subplot(221)
plt.imshow(img,cmap = 'gray')
plt.title('Original Image'), plt.xticks([]), plt.yticks([])
plt.subplot(222),plt.imshow(edges,cmap = 'gray')
plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
plt.subplot(223),plt.imshow(tight,cmap = 'gray')
plt.title('Tight '), plt.xticks([]), plt.yticks([])
plt.subplot(224),plt.imshow(wide,cmap = 'gray')
plt.title('wide '), plt.xticks([]), plt.yticks([])
plt.show()
|
#!/usr/bin/env python
"""
@author: Jean-Lou Dupont
"""
import httplib
def doHead(site, url):
conn=httplib.HTTPConnection(site)
conn.request("HEAD", url)
resp=conn.getresponse()
return resp
def doGet(site, url):
"""
Performs an HTTP GET request
"""
conn=httplib.HTTPConnection(site)
conn.request("GET", url)
resp=conn.getresponse()
return resp
def getEtag(site, url):
"""
Retrieves the ETag header of a URI
through an HTTP HEAD method
"""
conn=httplib.HTTPConnection(site)
conn.request("HEAD", url)
resp=conn.getresponse()
return resp.getheader("etag")
def safeGet(site, url):
"""
Performs an HTTP GET whilst trapping exceptions
@return ("ok", Data) | (error, Error)
"""
try:
resp=doGet(site,url)
ret=("ok", resp)
except Exception,e:
ret=("error", e)
finally:
return ret
if __name__=="__main__":
site="stackoverflow.com"
url="/feeds/user/171461"
resp=safeGet(site, url)
code, data=resp
if (code=="resp"):
print data.getheaders()
else:
print "Error: %s" % str(e)
|
from typing import List, Dict
class Database:
COLUMNS_HUMAN = [
'human_id', 'gender', 'age', 'preliminary_diagnosis',
'admission_to_the_hospital', 'arrival_date', 'approximate_growth', 'hair_type',
'room_number', 'full_name'
]
COLUMNS_ROOM = ['room_number', 'room_id', 'room_type', 'full_name',
'room_phone']
COLUMNS_ARRIVAL = ['arrival_date', 'room_number', 'full_name', 'age']
COLUMNS_FEMALE = ['arrival_date', 'full_name', 'age']
def __init__(self, connection, cursor):
self.connection = connection
self.cursor = cursor
def human_add(self, human_id: int, gender: str, age: int, preliminary_diagnosis: str,
admission_to_the_hospital: str, arrival_date: str, approximate_growth: int,
hair_type: str, room_number: int, full_name: str):
add_human_query = """
INSERT INTO table_human_data (human_id, gender, age, preliminary_diagnosis,admission_to_the_hospital ,
arrival_date,approximate_growth, hair_type ,room_number, full_name)
VALUES (%s,%s, %s, %s, %s, %s, %s, %s, %s, %s)
"""
val = [human_id, gender, age, preliminary_diagnosis, admission_to_the_hospital,
arrival_date, approximate_growth, hair_type, room_number, full_name]
self.cursor.execute(add_human_query, val)
self.connection.commit()
human_id = self.cursor.lastrowid
def human_data_upgrade(self, human_id: int, gender: str, age: int, preliminary_diagnosis: str,
admission_to_the_hospital: str, arrival_date: str, approximate_growth: int,
hair_type: str, room_number: int, full_name: str):
change_human_query = """
UPDATE table_human_data
SET human_id=%s, gender = %s, age = %s, preliminary_diagnosis = %s, admission_to_the_hospital = %s,
arrival_date = %s, approximate_growth = %s, hair_type = %s, room_number = %s,
full_name = %s
WHERE table_human_data.human_id = %s;
"""
val = [human_id, gender, age, preliminary_diagnosis, admission_to_the_hospital,
arrival_date, approximate_growth, hair_type, room_number, full_name, human_id]
self.cursor.execute(change_human_query, val)
self.connection.commit()
def get_all_humans(self) -> List[Dict]:
get_all_humans_query = """
SELECT *
FROM table_human_data
"""
self.cursor.execute(get_all_humans_query)
all_humans = self.cursor.fetchall()
all_humans = [dict(zip(self.COLUMNS_HUMAN, curr_human)) for curr_human in all_humans]
return all_humans
def human_delete(self, human_id: int):
delete_human_query = """
DELETE FROM table_human_data
WHERE human_id = %s;
"""
val = [human_id]
self.cursor.execute(delete_human_query, val)
self.connection.commit()
def room_add(self, room_number: int, room_id: str, room_type: str, full_name: str,
room_phone: int):
add_room_query = """
INSERT INTO table_room_data (room_number, room_id, room_type, full_name,
room_phone)
VALUES (%s,%s, %s, %s, %s)
"""
        val = [room_number, room_id, room_type, full_name,
               room_phone]
self.cursor.execute(add_room_query, val)
self.connection.commit()
def room_data_upgrade(self, room_number: int, room_id: str, room_type: str, full_name: str,
room_phone: int):
change_room_query = """
UPDATE table_room_data
SET room_number=%s, room_id=%s, room_type=%s, full_name=%s,
room_phone=%s
WHERE table_room_data.room_id = %s;
"""
val = [room_number, room_id, room_type, full_name,
room_phone, room_id]
self.cursor.execute(change_room_query, val)
self.connection.commit()
def room_delete(self, room_id: int):
delete_room_query = """
DELETE FROM table_room_data
WHERE room_id = %s;
"""
val = [room_id]
self.cursor.execute(delete_room_query, val)
self.connection.commit()
def get_all_rooms(self) -> List[Dict]:
get_all_rooms_query = """
SELECT *
FROM table_room_data
"""
self.cursor.execute(get_all_rooms_query)
all_rooms = self.cursor.fetchall()
all_rooms = [dict(zip(self.COLUMNS_ROOM, curr_room)) for curr_room in all_rooms]
return all_rooms
def phone_and_room_check(self, full_name: str):
phone_and_room_check_query = """
SELECT room_number,room_phone
FROM table_room_data WHERE table_room_data.full_name=%s
"""
val = [full_name]
        self.cursor.execute(phone_and_room_check_query, val)
        return self.cursor.fetchall()
def arrival_date_check(self, arrival_date: str) -> List[Dict]:
arrival_date_check_query = """
SELECT arrival_date, room_number, full_name, age
FROM table_human_data WHERE table_human_data.arrival_date=%s
"""
val = [arrival_date]
self.cursor.execute(arrival_date_check_query, val)
all_arrivals = self.cursor.fetchall()
all_arrivals = [dict(zip(self.COLUMNS_ARRIVAL, curr_arrival)) for curr_arrival in all_arrivals]
return all_arrivals
def female_age_check(self, age: int) -> List[Dict]:
female_age_check_query = """
SELECT arrival_date, full_name, age
FROM table_human_data WHERE table_human_data.gender='F' and age<=%s;
"""
val = [age]
self.cursor.execute(female_age_check_query, val)
all_females = self.cursor.fetchall()
all_females = [dict(zip(self.COLUMNS_FEMALE, curr_female)) for curr_female in all_females]
return all_females
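# Usage sketch (illustrative; assumes a MySQL-style driver such as mysql-connector
# and that the tables referenced above already exist; host/user/password/database
# below are placeholders):
#   import mysql.connector
#   connection = mysql.connector.connect(host='localhost', user='root',
#                                        password='secret', database='hospital')
#   db = Database(connection, connection.cursor())
#   db.room_add(101, 'R-101', 'single', 'Dr. House', 5551234)
#   print(db.get_all_rooms())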
|
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
def find_height(root: TreeNode):
if root is None:
return 0
return max(find_height(root.left), find_height(root.right)) + 1
root_node = TreeNode(10)
root_node.left = TreeNode(5)
root_node.right = TreeNode(15)
root_node.left.left = TreeNode(2)
print(find_height(root_node))
|
"""
Liquid time-constant spiking neural network (SNN)
"""
import os
import re
import shutil
import torch
from torch import nn
from torch.nn.parameter import Parameter
import torch.nn.functional as F
from torch.nn import init
from torch.autograd import Variable
import math
def create_exp_dir(path, scripts_to_save=None):
if not os.path.exists(path):
os.mkdir(path)
print('Experiment dir : {}'.format(path))
if scripts_to_save is not None:
os.mkdir(os.path.join(path, 'scripts'))
for script in scripts_to_save:
dst_file = os.path.join(path, 'scripts', os.path.basename(script))
shutil.copyfile(script, dst_file)
def model_save(fn, model, criterion, optimizer):
with open(fn, 'wb') as f:
torch.save([model, criterion, optimizer], f)
def model_load(fn):
with open(fn, 'rb') as f:
model, criterion, optimizer = torch.load(f)
return model, criterion, optimizer
def save_checkpoint(state, is_best, prefix, filename='_snnvgg_checkpoint.pth.tar'):
print('saving at ', prefix+filename)
torch.save(state, prefix+filename)
if is_best:
shutil.copyfile(prefix+filename, prefix+ '_snnlvgg_model_best.pth.tar')
def count_parameters(model):
return sum(p.numel() for p in model.network.parameters() if p.requires_grad)
###############################################################################################
############################### Define SNN layer #########################################
###############################################################################################
b_j0 = 0.1 # neural threshold baseline
R_m = 3 # membrane resistance
dt = 1
gamma = .5 # gradient scale
lens = 0.3
def gaussian(x, mu=0., sigma=.5):
return torch.exp(-((x - mu) ** 2) / (2 * sigma ** 2)) / torch.sqrt(2 * torch.tensor(math.pi)) / sigma
class ActFun_adp(torch.autograd.Function):
@staticmethod
def forward(ctx, input): # input = membrane potential- threshold
ctx.save_for_backward(input)
        return input.gt(0).float()  # emit a spike (1.0) wherever the membrane potential exceeds the threshold
@staticmethod
def backward(ctx, grad_output): # approximate the gradients
input, = ctx.saved_tensors
grad_input = grad_output.clone()
# temp = abs(input) < lens
scale = 6.0
hight = .15
# temp = torch.exp(-(input**2)/(2*lens**2))/torch.sqrt(2*torch.tensor(math.pi))/lens
temp = gaussian(input, mu=0., sigma=lens) * (1. + hight) \
- gaussian(input, mu=lens, sigma=scale * lens) * hight \
- gaussian(input, mu=-lens, sigma=scale * lens) * hight
# temp = gaussian(input, mu=0., sigma=lens)
return grad_input * temp.float() * gamma
# return grad_input
act_fun_adp = ActFun_adp.apply
def mem_update_adp(inputs, mem, spike, tau_m, dt=1):
alpha = tau_m
B = .5
d_mem = -mem + inputs
mem = mem + d_mem*alpha
inputs_ = mem - B
spike = act_fun_adp(inputs_) # act_fun : approximation firing function
mem = (1-spike)*mem
return mem, spike
def output_Neuron(inputs, mem, tau_m, dt=1):
"""
The read out neuron is leaky integrator without spike
"""
d_mem = -mem + inputs
mem = mem+d_mem*tau_m
return mem
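# Single-timestep sketch (illustrative) of the spiking update implemented above:
# mem <- mem + tau_m * (inputs - mem); a spike is emitted where mem crosses the
# threshold B = 0.5, after which the membrane is reset, e.g.
#   >>> x = torch.rand(4, 8); mem = torch.zeros(4, 8); spk = torch.zeros(4, 8)
#   >>> mem, spk = mem_update_adp(x, mem, spk, tau_m=torch.tensor(0.9))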
###############################################################################################
###############################################################################################
###############################################################################################
from LTC_layers import *
class SNN(nn.Module):
def __init__(self, input_size, hidden_size,output_size, n_timesteps, P=10):
super(SNN, self).__init__()
print('SNN-ltc CNN ', P)
self.net = ['k3c64s2', 'd-k3c64s1','k1c64s1','d-k3c128s2','k1c128s2']
k_p = "k(.*?)c"
c_p = "c(.*?)s"
self.P = P
self.step = n_timesteps // self.P
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = output_size
self.n_timesteps = n_timesteps
self.rnn_name = 'SNN-mobilenet-v1'
in_size = input_size
# print(in_size)
self.network = []
self.network_size = []
for i in range(len(self.net)):
# print('in size: ',in_size)
layer = self.net[i]
            k, c, s = int(re.search(k_p, layer).group(1)), int(re.search(c_p, layer).group(1)), int(layer.split('s')[-1])
if layer[0] == 'd':
a = SNN_DepthConv_cell(in_size,c,k,s,1,)
else:
a = SNN_Conv_cell(in_size,c,k,s,1,)
self.network.append(a)
in_size = a.compute_output_size()
print(in_size)
self.network_size.append(in_size)
self.dp_f = nn.Dropout2d(0.1)
f_size = in_size[0]*in_size[1]*in_size[2]
self.snn1 = SNN_rec_cell(f_size, hidden_size,is_rec=True)
self.network_size.append([hidden_size])
# self.snn2 = SNN_dense_cell(hidden_size, hidden_size)
self.layer3_x = nn.Linear(hidden_size, output_size)
nn.init.constant_(self.layer3_x.bias,0)
nn.init.constant_(self.layer3_x.weight,0)
self.network_size.append([output_size])
self.tau_m_o = nn.Parameter(torch.Tensor(output_size))
nn.init.constant_(self.tau_m_o, 5.)
self.act3 = nn.Sigmoid()
def forward(self, inputs, h):
self.fr = 0
# T = inputs.size()[0]
# outputs = []
hiddens = []
h = list(h)
b,c,w,r = inputs.shape
x_in = inputs.view(b,c,w,r)
for layer_i in range(len(self.network)):
layer_ = self.network[layer_i]
h[2*layer_i],h[1+2*layer_i] = layer_(x_in,h[2*layer_i],h[1+2*layer_i])
x_in = h[1+2*layer_i]
self.fr = self.fr+ x_in.detach().cpu().numpy().mean()
# spk_conv = self.dp_f(x_in)
# f_spike = torch.flatten(spk_conv,1)
f_spike = torch.flatten(x_in,1)
layer_i +=1
h[2*layer_i],h[1+2*layer_i]= self.snn1.forward(f_spike,h[2*layer_i],h[1+2*layer_i])
self.fr = self.fr+ h[1+2*layer_i].detach().cpu().numpy().mean()
dense3_x = self.layer3_x(h[1+2*layer_i])
tauM2 = self.act3(self.tau_m_o)
h[-2] = output_Neuron(dense3_x,mem=h[-2],tau_m = tauM2)
h[-1] =h[-2]
h = tuple(h)
f_output = F.log_softmax(h[-1], dim=1)
# outputs.append(f_output)
hiddens.append(h)
self.fr = self.fr/(len(self.network)+1.)
final_state = h
return f_output, final_state, hiddens
class SeqModel(nn.Module):
def __init__(self, ninp, nhid, nout, dropout=0.0, dropouti=0.0, dropouth=0.0, wdrop=0.0,
temporalwdrop=False, wnorm=True, n_timesteps=784, nfc=256, parts=10):
super(SeqModel, self).__init__()
self.nout = nout # Should be the number of classes
self.nhid = nhid
self.rnn_name = 'SNN vgg'
self.network = SNN(input_size=ninp, hidden_size=nhid, output_size=nout,n_timesteps=n_timesteps, P=parts)
self.layer_size = self.network.network_size
print(self.layer_size)
self.l2_loss = nn.MSELoss()
def forward(self, inputs, hidden):
# inputs = inputs.permute(2, 0, 1)
# print(inputs.shape) # L,B,d
b,l,c,w,h = inputs.shape
outputs = []
for i in range(l):
f_output, hidden, hiddens= self.network.forward(inputs[:,i,:,:,:], hidden)
outputs.append(f_output)
recon_loss = torch.zeros(1, device=inputs.device)
return outputs, hidden, recon_loss
def init_hidden(self, bsz):
weight = next(self.parameters()).data
states = []
for l in self.layer_size:
if len(l) == 3:
states.append(weight.new(bsz,l[0],l[1],l[2]).uniform_())
states.append(weight.new(bsz,l[0],l[1],l[2]).zero_())
elif len(l) == 1:
states.append(weight.new(bsz,l[0]).uniform_())
states.append(weight.new(bsz,l[0]).zero_())
states.append(weight.new(bsz,self.nout).zero_())
states.append(weight.new(bsz,self.nout).zero_())
# print(self.layer_size)
# print([s.shape for s in states])
return tuple(states)
|